diff --git a/docs/.vuepress/configs/navbar.ts b/docs/.vuepress/configs/navbar.ts index d89917ce..b457dc3e 100644 --- a/docs/.vuepress/configs/navbar.ts +++ b/docs/.vuepress/configs/navbar.ts @@ -27,7 +27,12 @@ export const navbarEn: NavbarOptions = [ { text: "Clients", children: [ - { text: "KurrentDB clients", link: "/clients/grpc/getting-started" }, + { text: ".NET", link: "/clients/grpc/dotnet/getting-started.html" }, + { text: "Python", link: "/clients/grpc/python/getting-started.html" }, + { text: "Node.js", link: "/clients/grpc/nodejs/getting-started.html" }, + { text: "Java", link: "/clients/grpc/java/getting-started.html" }, + { text: "Go", link: "/clients/grpc/go/getting-started.html" }, + { text: "Rust", link: "/clients/grpc/rust/getting-started.html" }, ], }, { text: "HTTP API", children: ver.linksFor("http-api", false) }, diff --git a/docs/.vuepress/configs/sidebar.ts b/docs/.vuepress/configs/sidebar.ts index 109df14d..3437d49d 100644 --- a/docs/.vuepress/configs/sidebar.ts +++ b/docs/.vuepress/configs/sidebar.ts @@ -36,7 +36,12 @@ export const sidebarEn: EsSidebarOptions = { }, ], - "/clients/grpc/": "structure", + "/clients/grpc/dotnet/": "structure", + "/clients/grpc/python/": "structure", + "/clients/grpc/nodejs/": "structure", + "/clients/grpc/java/": "structure", + "/clients/grpc/go/": "structure", + "/clients/grpc/rust/": "structure", "/cloud/": "structure", ...ver.getSidebars(), "/clients/tcp/dotnet/21.2/": "structure", diff --git a/docs/.vuepress/lib/samples.ts b/docs/.vuepress/lib/samples.ts index 14b78611..2cf04694 100644 --- a/docs/.vuepress/lib/samples.ts +++ b/docs/.vuepress/lib/samples.ts @@ -1,6 +1,7 @@ import {logger, path} from 'vuepress/utils'; import {type ResolvedImport} from "../markdown/xode/types"; import version from "./version"; +import * as fs from 'fs'; const base = "../../samples"; @@ -8,10 +9,12 @@ export function resolveMultiSamplesPath(src: string): ResolvedImport[] { const split = src.split(':'); const cat = split.length 
< 2 ? undefined : split[0]; const paths = split.length === 1 ? src : split[1]; - return paths.split(';').map(x => { - const r = resolveSamplesPath(x, cat); - return {label: r.label, importPath: r.path}; - }) + return paths.split(';') + .filter(x => x.trim() !== '') // Filter out empty strings + .map(x => { + const r = resolveSamplesPath(x, cat); + return {label: r.label, importPath: r.path}; + }) } export function resolveSamplesPath(src: string, srcCat: string | undefined) { @@ -19,9 +22,17 @@ export function resolveSamplesPath(src: string, srcCat: string | undefined) { return {label: "", path: s} }; - const ext = src.split('.').pop()!; + // Handle empty src + if (!src || src.trim() === '') { + console.warn(`Empty source path provided, srcCat: "${srcCat}"`); + return def(src); + } + + const srcParts = src.split('.'); + const ext = srcParts.length > 1 ? srcParts.pop()! : ''; const pseudo = src.split('/'); const includesCat = pseudo[0].startsWith('@'); + if (!includesCat && srcCat === undefined) return def(src); const cats: Record> = { @@ -78,18 +89,36 @@ export function resolveSamplesPath(src: string, srcCat: string | undefined) { } let lang = cat[ext] ?? cat["default"]; - if (lang === undefined && cat.path === undefined) { - logger.warn(`Unknown extension ${ext} in ${cat}`); + if (lang === undefined) { + // If no extension match and no default, try to find by partial match or return default + logger.warn(`Unknown extension "${ext}" in category "${catName}". Available extensions: ${Object.keys(cat).join(', ')}`); return def(src); } + // If we don't have an extension but we have a default, use it + if (ext === '' && cat["default"]) { + lang = cat["default"]; + } + const samplesVersion = isVersion ? pseudo[1] : lang.version; const langPath = samplesVersion !== undefined ? `${lang.path}/${samplesVersion}` : lang.path; const toReplace = isVersion ? `${pseudo[0]}/${pseudo[1]}` : `${pseudo[0]}`; const p = includesCat ? 
src.replace(toReplace, `${base}/${langPath}`) : `${base}/${langPath}/${src}`; + const resolvedPath = path.resolve(__dirname, p); + + // Check if the resolved path is a directory, and if so, warn and return the original src + try { + const stat = fs.statSync(resolvedPath); + if (stat.isDirectory()) { + logger.warn(`Resolved path is a directory, not a file: ${resolvedPath}`); + return def(src); + } + } catch (error) { + // File doesn't exist, which is handled elsewhere + } - return {label: lang.label, path: path.resolve(__dirname, p)}; + return {label: lang.label, path: resolvedPath}; } -export const projectionSamplesPath = "https://raw.githubusercontent.com/kurrent-io/KurrentDB/53f84e55ea56ccfb981aff0e432581d72c23fbf6/samples/http-api/data/"; +export const projectionSamplesPath = "https://raw.githubusercontent.com/kurrent-io/KurrentDB/53f84e55ea56ccfb981aff0e432581d72c23fbf6/samples/http-api/data/"; \ No newline at end of file diff --git a/docs/.vuepress/public/_redirects b/docs/.vuepress/public/_redirects index 12a123d9..698d56ae 100644 --- a/docs/.vuepress/public/_redirects +++ b/docs/.vuepress/public/_redirects @@ -25,7 +25,7 @@ # ###################### # redirect for internet search on "esdb .net client -/clients/dotnet/5.0/connecting.html /clients/grpc/getting-started.html#connecting-to-eventstoredb 301 +/clients/dotnet/5.0/connecting.html /clients/grpc/dotnet/getting-started.html#connecting-to-eventstoredb 301 # TCP Clients /clients/dotnet/21.2/migration-to-gRPC.html#appending-events /clients/tcp/dotnet/21.2/migration-to-gRPC.html#appending-events 301 @@ -53,6 +53,13 @@ /clients/http-api/generated/v5/docs/introduction/reading-streams.html /http-api/v5/#reading-streams-and-events 301 /clients/http-api/generated/v5/docs/introduction/optimistic-concurrency-and-idempotence.html /http-api/v5/#optimistic-concurrency-and-idempotence 301 +/clients/grpc/python /clients/grpc/python/getting-started.html 301 +/clients/grpc/rust /clients/grpc/rust/getting-started.html 301 
+/clients/grpc/nodejs /clients/grpc/nodejs/getting-started.html 301 +/clients/grpc/java /clients/grpc/java/getting-started.html 301 +/clients/grpc/dotnet /clients/grpc/dotnet/getting-started.html 301 +/clients/grpc/go /clients/grpc/go/getting-started.html 301 + /server/generated/v5/http-api/persistent-subscriptions.html /http-api/v5/persistent.html 301 /server/generated/v5/http-api/reading-subscribing-events.html /http-api/v5/#reading-an-event-from-a-stream 301 /server/v5/samples/http-api/event.json /http-api/v5/api.html 301 diff --git a/docs/clients/grpc/README.md b/docs/clients/grpc/README.md index 402258e4..f486fe59 100644 --- a/docs/clients/grpc/README.md +++ b/docs/clients/grpc/README.md @@ -1,10 +1,24 @@ --- -index: false +sitemap: + priority: 0 + changefreq: monthly breadcrumbExclude: true --- -# Clients +# KurrentDB Clients -Learn how to use the KurrentDB client libraries to interact with the database. +KurrentDB offers official client libraries for multiple programming languages, making it easy to build applications that work with event-native workloads. - \ No newline at end of file +Select your client library to view the documentation: + +.NET [.NET](/clients/grpc/dotnet/getting-started.md) + +Python [Python](/clients/grpc/python/getting-started.md) + +Node.js [Node.js](/clients/grpc/nodejs/getting-started.md) + +Java [Java](/clients/grpc/java/getting-started.md) + +Go [Go](/clients/grpc/go/getting-started.md) + +Rust [Rust](/clients/grpc/rust/getting-started.md) \ No newline at end of file diff --git a/docs/clients/grpc/appending-events.md b/docs/clients/grpc/appending-events.md index a2adb1bd..9bd7784c 100644 --- a/docs/clients/grpc/appending-events.md +++ b/docs/clients/grpc/appending-events.md @@ -1,83 +1,21 @@ --- -order: 2 +sitemap: + priority: 0 + changefreq: monthly --- # Appending events -When you start working with KurrentDB, it is empty. 
The first meaningful operation is to add one or more events to the database using one of the available client SDKs. +To redirect you to the right page, please select a client: -::: tip -Check the [Getting Started](getting-started.md) guide to learn how to configure and use the client SDK. -::: +.NET [.NET](/clients/grpc/dotnet/appending-events.md) -## Append your first event +Python [Python](/clients/grpc/python/appending-events.md) -The simplest way to append an event to KurrentDB is to create an `EventData` object and call `AppendToStream` method. +Node.js [Node.js](/clients/grpc/nodejs/appending-events.md) -@[code{append-to-stream}](@grpc:appending_events.py;appending-events.js;appending-events.ts;appending_events/AppendingEvents.java;appending-events/Program.cs;appendingEvents.go;appending_events.rs) +Java [Java](/clients/grpc/java/appending-events.md) -`AppendToStream` takes a collection of `EventData`, which allows you to save more than one event in a single batch. - -Outside the example above, other options exist for dealing with different scenarios. - -::: tip -If you are new to Event Sourcing, please study the [Handling concurrency](#handling-concurrency) section below. -::: - -## Working with EventData - -Events appended to KurrentDB must be wrapped in an `EventData` object. This allows you to specify the event's content, the type of event, and whether it's in JSON format. In its simplest form, you need three arguments: **eventId**, **type**, and **data**. - -### eventId - -This takes the format of a `Uuid` and is used to uniquely identify the event you are trying to append. If two events with the same `Uuid` are appended to the same stream in quick succession, KurrentDB will only append one of the events to the stream. 
- -For example, the following code will only append a single event: - -@[code{append-duplicate-event}](@grpc:appending_events.py;appending-events.js;appending-events.ts;appending_events/AppendingEvents.java;appending-events/Program.cs;appendingEvents.go;appending_events.rs) - -![Duplicate Event](./images/duplicate-event.png) - -### type - -Each event should be supplied with an event type. This unique string is used to identify the type of event you are saving. - -It is common to see the explicit event code type name used as the type as it makes serialising and de-serialising of the event easy. However, we recommend against this as it couples the storage to the type and will make it more difficult if you need to version the event at a later date. - -### data - -Representation of your event data. It is recommended that you store your events as JSON objects. This allows you to take advantage of all of KurrentDB's functionality, such as projections. That said, you can save events using whatever format suits your workflow. Eventually, the data will be stored as encoded bytes. - -### metadata - -Storing additional information alongside your event that is part of the event itself is standard practice. This can be correlation IDs, timestamps, access information, etc. KurrentDB allows you to store a separate byte array containing this information to keep it separate. - -### isJson - -Simple boolean field to tell KurrentDB if the event is stored as json, true by default. - -## Handling concurrency - -When appending events to a stream, you can supply a *stream state* or *stream revision*. Your client uses this to inform KurrentDB of the state or version you expect the stream to be in when appending an event. If the stream isn't in that state, an exception will be thrown. 
- -For example, if you try to append the same record twice, expecting both times that the stream doesn't exist, you will get an exception on the second: - -@[code{append-with-no-stream}](@grpc:appending_events.py;appending-events.js;appending-events.ts;appending_events/AppendingEvents.java;appending-events/Program.cs;appendingEvents.go;appending_events.rs) - -There are three available stream states: -- `Any` -- `NoStream` -- `StreamExists` - -This check can be used to implement optimistic concurrency. When retrieving a stream from KurrentDB, note the current version number. When you save it back, you can determine if somebody else has modified the record in the meantime. - -@[code{append-with-concurrency-check}](@grpc:appending_events.py;appending-events.js;appending-events.ts;appending_events/AppendingEvents.java;appending-events/Program.cs;appendingEvents.go;appending_events.rs) - - - -## User credentials - -You can provide user credentials to append the data as follows. This will override the default credentials set on the connection. - -@[code{overriding-user-credentials}](@grpc:appending_events.py;appending-events.js;appending-events.ts;appending_events/AppendingEvents.java;appending-events/Program.cs;appendingEvents.go;appending_events.rs) +Go [Go](/clients/grpc/go/appending-events.md) +Rust [Rust](/clients/grpc/rust/appending-events.md) \ No newline at end of file diff --git a/docs/clients/grpc/authentication.md b/docs/clients/grpc/authentication.md index eaf5bbb8..81e26cb2 100644 --- a/docs/clients/grpc/authentication.md +++ b/docs/clients/grpc/authentication.md @@ -1,67 +1,21 @@ --- -title: Authentication -order: 7 +sitemap: + priority: 0 + changefreq: monthly --- -## Client x.509 certificate +# Authentication -X.509 certificates are digital certificates that use the X.509 public key infrastructure (PKI) standard to verify the identity of clients and servers. 
They play a crucial role in establishing a secure connection by providing a way to authenticate identities and establish trust. +To redirect you to the right page, please select a client: -### Prerequisites +.NET [.NET](/clients/grpc/dotnet/authentication.md) -1. KurrentDB 25.0 or greater, or EventStoreDB 24.10. -2. A commercial license with the User Certificates entitlement. -3. A valid x.509 certificate, which can be created using version `1.3` or higher of the [gencert tool](https://github.com/kurrent-io/es-gencert-cli). -4. The server must run in secure mode. See [Security Options](@server/security/protocol-security.md) for more information. -5. [Enable User Certificates plugin on the server](@server/security/user-authentication.md#user-x509-certificates) +Python [Python](/clients/grpc/python/authentication.md) -#### Generate user certificates +Node.js [Node.js](/clients/grpc/nodejs/authentication.md) -The following command uses the [gencert tool](https://github.com/kurrent-io/es-gencert-cli) to generate a user certificate for the user `admin` that will expire in 10 days: - -::: tabs#os -@tab bash -```bash -./es-gencert-cli create-user -username admin -days 10 -ca-certificate ./es-ca/ca.crt -ca-key ./es-ca/ca.key -``` -@tab PowerShell -```powershell -.\es-gencert-cli.exe create-user -username admin -days 10 -ca-certificate ./es-ca/ca.crt -ca-key ./es-ca/ca.key -``` -::: - -### Connect to KurrentDB using an x.509 certificate - -To connect to KurrentDB using an x.509 certificate, you need to provide the -certificate and the private key to the client. If both username/password and -certificate authentication data are supplied, the client prioritizes user -credentials for authentication. The client will throw an error if the -certificate and the key are not both provided. - -::: tip -Please note that currently, password-protected private key files are not supported. 
-::: - -The client supports the following parameters: - -| Parameter | Description | -|----------------|--------------------------------------------------------------------------------| -| `userCertFile` | The file containing the X.509 user certificate in PEM format. | -| `userKeyFile` | The file containing the user certificate’s matching private key in PEM format. | - -To authenticate, include these two parameters in your connection string or constructor when initializing the client. - -Check the samples for the following clients: - -::: code-tabs -@tab TypeScript -@[code{client-with-user-certificates}](@grpc:user-certificates.ts) -@tab Java -@[code{client-with-user-certificates}](@grpc:authentication/UserCertificate.java) -@tab C## -@[code{client-with-user-certificates}](@grpc:user-certificates/Program.cs) -@tab Go -@[code{client-with-user-certificates}](@grpc:/userCertificates.go) -::: +Java [Java](/clients/grpc/java/authentication.md) +Go [Go](/clients/grpc/go/authentication.md) +Rust [Rust](/clients/grpc/rust/authentication.md) \ No newline at end of file diff --git a/docs/clients/grpc/delete-stream.md b/docs/clients/grpc/delete-stream.md index 270a9749..9aebfea8 100644 --- a/docs/clients/grpc/delete-stream.md +++ b/docs/clients/grpc/delete-stream.md @@ -1,98 +1,21 @@ --- -order: 9 +sitemap: + priority: 0 + changefreq: monthly --- -# Deleting events +# Deleting Events -In KurrentDB, you can delete events and streams either partially or completely. Settings like $maxAge and $maxCount help control how long events are kept or how many events are stored in a stream, but they won't delete the entire stream. -When you need to fully remove a stream, KurrentDB offers two options: Soft Delete and Hard Delete. 
+To redirect you to the right page, please select a client: -## Soft delete +.NET [.NET](/clients/grpc/dotnet/delete-stream.md) -Soft delete in KurrentDB allows you to mark a stream for deletion without completely removing it, so you can still add new events later. While you can do this through the UI, using code is often better for automating the process, -handling many streams at once, or including custom rules. Code is especially helpful for large-scale deletions or when you need to integrate soft deletes into other workflows. +Python [Python](/clients/grpc/python/delete-stream.md) -::: tabs#lang -@tab Python -```python -client.delete_stream(stream_name, current_version=6) -``` -@tab JavaScript -```javascript -await client.deleteStream(streamName); -``` -@tab TypeScript -```typescript -await client.deleteStream(streamName); -``` -@tab Java -```java -client.deleteStream(streamName, DeleteStreamOptions.get()).get(); -``` -@tab C## -```csharp -await client.DeleteAsync(streamName, StreamState.Any); -``` -@tab Go -```go -options := esdb.DeleteStreamOptions{ - ExpectedRevision: esdb.Any{}, -} -_, err = client.DeleteStream(context.Background(), streamName, options) -``` -@tab Rust -```rust -let options = DeleteStreamOptions::default(); -client - .delete_stream(stream_name, &options) - .await?; -``` -::: +Node.js [Node.js](/clients/grpc/nodejs/delete-stream.md) -::: note -Clicking the delete button in the UI performs a soft delete, -setting the TruncateBefore value to remove all events up to a certain point. -While this marks the events for deletion, actual removal occurs during the next scavenging process. -The stream can still be reopened by appending new events. -::: +Java [Java](/clients/grpc/java/delete-stream.md) -## Hard delete +Go [Go](/clients/grpc/go/delete-stream.md) -Hard delete in KurrentDB permanently removes a stream and its events. 
While you can use the HTTP API, code is often better for automating the process, managing multiple streams, and ensuring precise control. Code is especially useful when you need to integrate hard delete into larger workflows or apply specific conditions. Note that when a stream is hard deleted, you cannot reuse the stream name, it will raise an exception if you try to append to it again. - -::: tabs#lang -@tab Python -```python -client.tombstone_stream(stream_name, current_version=4) -``` -@tab JavaScript -```javascript -await client.tombstoneStream(streamName); -``` -@tab TypeScript -```typescript -await client.tombstoneStream(streamName); -``` -@tab Java -```java -client.tombstoneStream(streamName, DeleteStreamOptions.get()).get(); -``` -@tab C## -```csharp -await client.TombstoneAsync(streamName, StreamState.Any); -``` -@tab Go -```go -options := esdb.TombstoneStreamOptions{ - ExpectedRevision: esdb.Any{}, -} -_, err = client.TombstoneStream(context.Background(), streamName, options) -``` -@tab Rust -```rust -let options = TombstoneStreamOptions::default(); -client - .tombstone_stream(stream_name, &options) - .await?; -``` -::: \ No newline at end of file +Rust [Rust](/clients/grpc/rust/delete-stream.md) \ No newline at end of file diff --git a/docs/clients/grpc/dotnet/README.md b/docs/clients/grpc/dotnet/README.md new file mode 100644 index 00000000..16b17ac0 --- /dev/null +++ b/docs/clients/grpc/dotnet/README.md @@ -0,0 +1,9 @@ +--- +index: false +--- + +# .NET + +Learn how to use the KurrentDB C# client library to interact with the database. 
+ + \ No newline at end of file diff --git a/docs/clients/grpc/dotnet/appending-events.md b/docs/clients/grpc/dotnet/appending-events.md new file mode 100644 index 00000000..441d2d21 --- /dev/null +++ b/docs/clients/grpc/dotnet/appending-events.md @@ -0,0 +1,87 @@ +--- +order: 2 +head: + - - title + - {} + - Appending Events | .NET | Clients | Kurrent Docs +--- + +# Appending Events + +When you start working with KurrentDB, it is empty. The first meaningful operation is to add one or more events to the database using one of the available client SDKs. + +::: tip +Check the [Getting Started](getting-started.md) guide to learn how to configure and use the client SDK. +::: + +## Append your first event + +The simplest way to append an event to KurrentDB is to create an `EventData` object and call `AppendToStream` method. + +@[code{append-to-stream}](@grpc:appending-events/Program.cs) + +`AppendToStream` takes a collection of `EventData`, which allows you to save more than one event in a single batch. + +Outside the example above, other options exist for dealing with different scenarios. + +::: tip +If you are new to Event Sourcing, please study the [Handling concurrency](#handling-concurrency) section below. +::: + +## Working with EventData + +Events appended to KurrentDB must be wrapped in an `EventData` object. This allows you to specify the event's content, the type of event, and whether it's in JSON format. In its simplest form, you need three arguments: **eventId**, **type**, and **data**. + +### eventId + +This takes the format of a `Uuid` and is used to uniquely identify the event you are trying to append. If two events with the same `Uuid` are appended to the same stream in quick succession, KurrentDB will only append one of the events to the stream. 
+ +For example, the following code will only append a single event: + +@[code{append-duplicate-event}](@grpc:appending-events/Program.cs) + +![Duplicate Event](../images/duplicate-event.png) + +### type + +Each event should be supplied with an event type. This unique string is used to identify the type of event you are saving. + +It is common to see the explicit event code type name used as the type as it makes serialising and de-serialising of the event easy. However, we recommend against this as it couples the storage to the type and will make it more difficult if you need to version the event at a later date. + +### data + +Representation of your event data. It is recommended that you store your events as JSON objects. This allows you to take advantage of all of KurrentDB's functionality, such as projections. That said, you can save events using whatever format suits your workflow. Eventually, the data will be stored as encoded bytes. + +### metadata + +Storing additional information alongside your event that is part of the event itself is standard practice. This can be correlation IDs, timestamps, access information, etc. KurrentDB allows you to store a separate byte array containing this information to keep it separate. + +### isJson + +Simple boolean field to tell KurrentDB if the event is stored as json, true by default. + +## Handling concurrency + +When appending events to a stream, you can supply a *stream state* or *stream revision*. Your client uses this to inform KurrentDB of the state or version you expect the stream to be in when appending an event. If the stream isn't in that state, an exception will be thrown. 
+ +For example, if you try to append the same record twice, expecting both times that the stream doesn't exist, you will get an exception on the second: + +@[code{append-with-no-stream}](@grpc:appending-events/Program.cs) + +There are three available stream states: +- `Any` +- `NoStream` +- `StreamExists` + +This check can be used to implement optimistic concurrency. When retrieving a stream from KurrentDB, note the current version number. When you save it back, you can determine if somebody else has modified the record in the meantime. + +@[code{append-with-concurrency-check}](@grpc:appending-events/Program.cs) + + + +## User credentials + +You can provide user credentials to append the data as follows. This will override the default credentials set on the connection. + +@[code{overriding-user-credentials}](@grpc:appending-events/Program.cs) + diff --git a/docs/clients/grpc/dotnet/authentication.md b/docs/clients/grpc/dotnet/authentication.md new file mode 100644 index 00000000..9ab8ea44 --- /dev/null +++ b/docs/clients/grpc/dotnet/authentication.md @@ -0,0 +1,60 @@ +--- +title: Authentication +order: 7 +head: + - - title + - {} + - Authentication | .NET | Clients | Kurrent Docs +--- + +## Client x.509 certificate + +X.509 certificates are digital certificates that use the X.509 public key infrastructure (PKI) standard to verify the identity of clients and servers. They play a crucial role in establishing a secure connection by providing a way to authenticate identities and establish trust. + +### Prerequisites + +1. KurrentDB 25.0 or greater, or EventStoreDB 24.10. +2. A commercial license with the User Certificates entitlement. +3. A valid x.509 certificate, which can be created using version `1.3` or higher of the [gencert tool](https://github.com/kurrent-io/es-gencert-cli). +4. The server must run in secure mode. See [Security Options](@server/security/protocol-security.md) for more information. +5. 
[Enable User Certificates plugin on the server](@server/security/user-authentication.md#user-x509-certificates) + +#### Generate user certificates + +The following command uses the [gencert tool](https://github.com/kurrent-io/es-gencert-cli) to generate a user certificate for the user `admin` that will expire in 10 days: + +::: tabs#os +@tab bash +```bash +./es-gencert-cli create-user -username admin -days 10 -ca-certificate ./es-ca/ca.crt -ca-key ./es-ca/ca.key +``` +@tab PowerShell +```powershell +.\es-gencert-cli.exe create-user -username admin -days 10 -ca-certificate ./es-ca/ca.crt -ca-key ./es-ca/ca.key +``` +::: + +### Connect to KurrentDB using an x.509 certificate + +To connect to KurrentDB using an x.509 certificate, you need to provide the +certificate and the private key to the client. If both username/password and +certificate authentication data are supplied, the client prioritizes user +credentials for authentication. The client will throw an error if the +certificate and the key are not both provided. + +::: tip +Please note that currently, password-protected private key files are not supported. +::: + +The client supports the following parameters: + +| Parameter | Description | +|----------------|--------------------------------------------------------------------------------| +| `userCertFile` | The file containing the X.509 user certificate in PEM format. | +| `userKeyFile` | The file containing the user certificate’s matching private key in PEM format. | + +To authenticate, include these two parameters in your connection string or constructor when initializing the client. 
+ +Check the samples for the following clients: + +@[code{client-with-user-certificates}](@grpc:user-certificates/Program.cs) \ No newline at end of file diff --git a/docs/clients/grpc/dotnet/delete-stream.md b/docs/clients/grpc/dotnet/delete-stream.md new file mode 100644 index 00000000..79daeb2e --- /dev/null +++ b/docs/clients/grpc/dotnet/delete-stream.md @@ -0,0 +1,36 @@ +--- +order: 9 +head: + - - title + - {} + - Deleting Events | .NET | Clients | Kurrent Docs +--- + +# Deleting Events + +In KurrentDB, you can delete events and streams either partially or completely. Settings like $maxAge and $maxCount help control how long events are kept or how many events are stored in a stream, but they won't delete the entire stream. +When you need to fully remove a stream, KurrentDB offers two options: Soft Delete and Hard Delete. + +## Soft delete + +Soft delete in KurrentDB allows you to mark a stream for deletion without completely removing it, so you can still add new events later. While you can do this through the UI, using code is often better for automating the process, +handling many streams at once, or including custom rules. Code is especially helpful for large-scale deletions or when you need to integrate soft deletes into other workflows. + +```csharp +await client.DeleteAsync(streamName, StreamState.Any); +``` + +::: note +Clicking the delete button in the UI performs a soft delete, +setting the TruncateBefore value to remove all events up to a certain point. +While this marks the events for deletion, actual removal occurs during the next scavenging process. +The stream can still be reopened by appending new events. +::: + +## Hard delete + +Hard delete in KurrentDB permanently removes a stream and its events. While you can use the HTTP API, code is often better for automating the process, managing multiple streams, and ensuring precise control. 
Code is especially useful when you need to integrate hard delete into larger workflows or apply specific conditions. Note that when a stream is hard deleted, you cannot reuse the stream name, it will raise an exception if you try to append to it again. + +```csharp +await client.TombstoneAsync(streamName, StreamState.Any); +``` \ No newline at end of file diff --git a/docs/clients/grpc/dotnet/getting-started.md b/docs/clients/grpc/dotnet/getting-started.md new file mode 100644 index 00000000..04e37576 --- /dev/null +++ b/docs/clients/grpc/dotnet/getting-started.md @@ -0,0 +1,107 @@ +--- +order: 1 +head: + - - title + - {} + - Getting Started | .NET | Clients | Kurrent Docs +--- + +# Getting started + +Get started by connecting your application to KurrentDB. + +## Connecting to KurrentDB + +To connect your application to KurrentDB, instantiate and configure the client. + +::: tip Insecure clusters +All our GRPC clients are secure by default and must be configured to connect to an insecure server via [a connection string](#connection-string) or the client's configuration. +::: + +### Required packages + +Add the .NET `KurrentDB.Client` package to your project: + +```bash +dotnet add package KurrentDB.Client +``` + + +### Connection string + +Each SDK has its own way of configuring the client, but the connection string can always be used. +The KurrentDB connection string supports two schemas: `kurrentdb://` for connecting to a single-node server, and `kurrentdb+discover://` for connecting to a multi-node cluster. The difference between the two schemas is that when using `kurrentdb://`, the client will connect directly to the node; with `kurrentdb+discover://` schema the client will use the gossip protocol to retrieve the cluster information and choose the right node to connect to. +Since version 22.10, ESDB supports gossip on single-node deployments, so `kurrentdb+discover://` schema can be used for connecting to any topology. 
+ +The connection string has the following format: + +``` +kurrentdb+discover://admin:changeit@cluster.dns.name:2113 +``` + +There, `cluster.dns.name` is the name of a DNS `A` record that points to all the cluster nodes. Alternatively, you can list cluster nodes separated by comma instead of the cluster DNS name: + +``` +kurrentdb+discover://admin:changeit@node1.dns.name:2113,node2.dns.name:2113,node3.dns.name:2113 +``` + +There are a number of query parameters that can be used in the connection string to instruct the cluster how and where the connection should be established. All query parameters are optional. + +| Parameter | Accepted values | Default | Description | +|-----------------------|---------------------------------------------------|----------|------------------------------------------------------------------------------------------------------------------------------------------------| +| `tls` | `true`, `false` | `true` | Use secure connection, set to `false` when connecting to a non-secure server or cluster. | +| `connectionName` | Any string | None | Connection name | +| `maxDiscoverAttempts` | Number | `10` | Number of attempts to discover the cluster. | +| `discoveryInterval` | Number | `100` | Cluster discovery polling interval in milliseconds. | +| `gossipTimeout` | Number | `5` | Gossip timeout in seconds, when the gossip call times out, it will be retried. | +| `nodePreference` | `leader`, `follower`, `random`, `readOnlyReplica` | `leader` | Preferred node role. When creating a client for write operations, always use `leader`. | +| `tlsVerifyCert` | `true`, `false` | `true` | In secure mode, set to `true` when using an untrusted connection to the node if you don't have the CA file available. Don't use in production. | +| `tlsCaFile` | String, file path | None | Path to the CA file when connecting to a secure cluster with a certificate that's not signed by a trusted CA. 
| +| `defaultDeadline` | Number | None | Default timeout for client operations, in milliseconds. Most clients allow overriding the deadline per operation. | +| `keepAliveInterval` | Number | `10` | Interval between keep-alive ping calls, in seconds. | +| `keepAliveTimeout` | Number | `10` | Keep-alive ping call timeout, in seconds. | +| `userCertFile` | String, file path | None | User certificate file for X.509 authentication. | +| `userKeyFile` | String, file path | None | Key file for the user certificate used for X.509 authentication. | + +When connecting to an insecure instance, specify `tls=false` parameter. For example, for a node running locally use `kurrentdb://localhost:2113?tls=false`. Note that usernames and passwords aren't provided there because insecure deployments don't support authentication and authorisation. + +### Creating a client + +First, create a client and get it connected to the database. + +@[code{createClient}](@grpc:quick-start/Program.cs) + +The client instance can be used as a singleton across the whole application. It doesn't need to open or close the connection. + +### Creating an event + +You can write anything to KurrentDB as events. The client needs a byte array as the event payload. Normally, you'd use a serialized object, and it's up to you to choose the serialization method. + +::: tip Server-side projections +User-defined server-side projections require events to be serialized in JSON format. + +We use JSON for serialization in the documentation examples. +::: + +The code snippet below creates an event object instance, serializes it, and adds it as a payload to the `EventData` structure, which the client can then write to the database. + +@[code{createEvent}](@grpc:quick-start/Program.cs) + +### Appending events + +Each event in the database has its own unique identifier (UUID). The database uses it to ensure idempotent writes, but it only works if you specify the stream revision when appending events to the stream. 
+ +In the snippet below, we append the event to the stream `some-stream`. + +@[code{appendEvents}](@grpc:quick-start/Program.cs) + +Here we are appending events without checking if the stream exists or if the stream version matches the expected event version. See more advanced scenarios in [appending events documentation](./appending-events.md). + +### Reading events + +Finally, we can read events back from the `some-stream` stream. + +@[code{readStream}](@grpc:quick-start/Program.cs) + +When you read events from the stream, you get a collection of `ResolvedEvent` structures. The event payload is returned as a byte array and needs to be deserialized. See more advanced scenarios in [reading events documentation](./reading-events.md). + diff --git a/docs/clients/grpc/dotnet/observability.md b/docs/clients/grpc/dotnet/observability.md new file mode 100644 index 00000000..0bb225fe --- /dev/null +++ b/docs/clients/grpc/dotnet/observability.md @@ -0,0 +1,102 @@ +--- +order: 8 +head: + - - title + - {} + - Observability | .NET | Clients | Kurrent Docs +--- + +# Observability + +The KurrentDB gRPC clients are designed with observability in mind, offering +support for OpenTelemetry. This integration provides a set of distributed +traces, enabling developers to gain deeper insights into their system. + +::: warning +Currently, OpenTelemetry observability support is not available for all +clients. Moreover, instrumentation is only provided for append and +subscribe operations, which includes both 'Catchup' and 'Persistent' modes. +::: + +Click [here](https://github.com/kurrent-io/KurrentDB-Client-Dotnet/blob/master/samples/diagnostics/Program.cs) to view the full sample code for observability. + +## Required packages + +The KurrentDB C# client includes OpenTelemetry support by default. 
+
+## Instrumentation
+
+To emit trace data, you must first install and use the dedicated package, as instructed in the
+[Required Packages](./observability.md#required-packages) section, if provided. This package
+includes the necessary instrumentation that needs to be registered with the client.
+
+@[code{register-instrumentation}](@grpc:diagnostics/Program.cs)
+
+## Traces
+
+Traces provide a clear picture of how operations are carried out in a
+distributed system, making it easier to maintain and enhance the system over
+time. Traces from the clients can be exported to any compatible collector that
+supports the OpenTelemetry protocol (OTLP).
+
+In order for the client to emit traces, you need to enable
+instrumentation as described in
+[Instrumentation](./observability.md#instrumentation).
+
+For more guidance on setting up and utilizing tracing, refer to the
+[OpenTelemetry](https://opentelemetry.io/) documentation.
+
+An example of a trace is shown below:
+
+```bash
+Activity.TraceId: 8da04787239dbb85c1f9c6fba1b1f0d6
+Activity.SpanId: 4352ec4a66a20b95
+Activity.TraceFlags: Recorded
+Activity.ActivitySourceName: kurrentdb
+Activity.DisplayName: streams.append
+Activity.Kind: Client
+Activity.StartTime: 2024-05-29T06:50:41.2519016Z
+Activity.Duration: 00:00:00.1500707
+Activity.Tags:
+    db.kurrentdb.stream: d7caa2a5-1e19-4108-9541-58d5fba02d42
+    server.address: localhost
+    server.port: 2113
+    db.system: kurrentdb
+    db.operation: streams.append
+StatusCode: Ok
+Resource associated with Activity:
+    service.name: sample
+    service.instance.id: 7316ef20-c354-4e64-97da-c1b99c2c28b0
+    telemetry.sdk.name: opentelemetry
+    telemetry.sdk.language: dotnet
+    telemetry.sdk.version: 1.8.1
+```
+
+In this case, the trace is for an append operation on a stream. The trace
+includes the trace ID, span ID, trace flags, activity source name, display name,
+kind, start time, duration, tags, status code, and resource associated with the
+activity. 
+ +::: note +The structure of the trace may vary depending on the client and the operation +being performed but will generally include the same information. +::: + +## Exporting traces + +You can set up various exporters to send traces to different destinations. +Additionally, you have the option to export these traces to a collector of your +choice, such as [Jaeger](https://www.jaegertracing.io/) or [Seq](https://datalust.co/seq). + +For instance, if you choose to use Jaeger as your backend of choice, you can +view your traces in the Jaeger UI, which provides a powerful interface for +querying and visualizing your trace data. + +The code snippets below demonstrate how to set up one or more exporters for each +client: + +@[code{setup-exporter}](@grpc:diagnostics/Program.cs) + +For more details on configuring exporters for specific programming languages, +refer to the [OpenTelemetry](https://opentelemetry.io/docs/languages/) +documentation. diff --git a/docs/clients/grpc/dotnet/persistent-subscriptions.md b/docs/clients/grpc/dotnet/persistent-subscriptions.md new file mode 100644 index 00000000..6432c79f --- /dev/null +++ b/docs/clients/grpc/dotnet/persistent-subscriptions.md @@ -0,0 +1,164 @@ +--- +order: 5 +head: + - - title + - {} + - Persistent Subscriptions | .NET | Clients | Kurrent Docs +--- + +# Persistent Subscriptions + +Persistent subscriptions are similar to catch-up subscriptions, but there are two key differences: +- The subscription checkpoint is maintained by the server. It means that when your client reconnects to the persistent subscription, it will automatically resume from the last known position. +- It's possible to connect more than one event consumer to the same persistent subscription. In that case, the server will load-balance the consumers, depending on the defined strategy, and distribute the events to them. + +Because of those, persistent subscriptions are defined as subscription groups that are defined and maintained by the server. 
Consumers then connect to a particular subscription group, and the server starts sending events to the consumer.
+
+You can read more about persistent subscriptions in the [server documentation](@server/features/persistent-subscriptions.md).
+
+## Creating a subscription group
+
+The first step of dealing with a persistent subscription is to create a subscription group. You will receive an error if you attempt to create a subscription group multiple times. You must have admin permissions to create a persistent subscription group.
+
+### Subscribing to one stream
+
+The following sample shows how to create a subscription group for a persistent subscription where you want to receive events from a specific stream. It could be a normal stream, or a stream of links (like `$ce` category stream).
+
+@[code{create-persistent-subscription-to-stream}](@grpc:persistent-subscriptions/Program.cs)
+
+| Parameter     | Description                                         |
+|:--------------|:----------------------------------------------------|
+| `stream`      | The stream the persistent subscription is on.       |
+| `groupName`   | The name of the subscription group to create.       |
+| `settings`    | The settings to use when creating the subscription. |
+| `credentials` | The user credentials to use for this operation.     |
+
+### Subscribing to $all
+
+The ability to subscribe to `$all` was introduced in EventStoreDB **21.10**. Persistent subscriptions to `$all` also support [filtering](subscriptions.md#server-side-filtering).
+
+You can create a subscription group on $all much the same way you would create a subscription group on a stream:
+
+@[code{create-persistent-subscription-to-all}](@grpc:persistent-subscriptions/Program.cs)
+
+## Connecting a consumer
+
+Once you have created a subscription group, clients can connect to it. A subscription in your application should only have the connection in your code; you should assume that the subscription already exists.
+
+The most important parameter to pass when connecting is the buffer size. 
This represents how many outstanding messages the server should allow this client. If this number is too small, your subscription will spend much of its time idle as it waits for an acknowledgment to come back from the client. If it's too big, you waste resources and can start causing time out messages depending on the speed of your processing. + +### Connecting to one stream + +The code below shows how to connect to an existing subscription group for a specific stream: + +@[code{subscribe-to-persistent-subscription-to-stream}](@grpc:persistent-subscriptions/Program.cs) + +| Parameter | Description | +|:----------------------|:---------------------------------------------------------------------------------------------| +| `stream` | The stream the persistent subscription is on. | +| `groupName` | The name of the subscription group to subscribe to. | +| `eventAppeared` | The action to call when an event arrives over the subscription. | +| `subscriptionDropped` | The action to call if the subscription is dropped. | +| `credentials` | The user credentials to use for this operation. | +| `bufferSize` | The number of in-flight messages this client is allowed. **Default: 10** | +| `autoAck` | Whether to automatically acknowledge messages after eventAppeared returns. **Default: true** | + +::: warning +The `autoAck` parameter will be deprecated in the next client release. You'll need to explicitly [manage acknowledgements](#acknowledgements). +::: + +### Connecting to $all + +The code below shows how to connect to an existing subscription group for `$all`: + +@[code{subscribe-to-persistent-subscription-to-all}](@grpc:persistent-subscriptions/Program.cs) + +The `SubscribeToAllAsync` method is identical to the `SubscribeToStreamAsync` method, except that you don't need to specify a stream name. + +## Acknowledgements + +Clients must acknowledge (or not acknowledge) messages in the competing consumer model. 
+ +If processing is successful, you must send an Ack (acknowledge) to the server to let it know that the message has been handled. If processing fails for some reason, then you can Nack (not acknowledge) the message and tell the server how to handle the failure. + +@[code{subscribe-to-persistent-subscription-with-manual-acks}](@grpc:persistent-subscriptions/Program.cs) + +The _Nack event action_ describes what the server should do with the message: + +| Action | Description | +|:----------|:---------------------------------------------------------------------| +| `Unknown` | The client does not know what action to take. Let the server decide. | +| `Park` | Park the message and do not resend. Put it on poison queue. | +| `Retry` | Explicitly retry the message. | +| `Skip` | Skip this message do not resend and do not put in poison queue. | + +## Consumer strategies + +When creating a persistent subscription, you can choose between a number of consumer strategies. + +### RoundRobin (default) + +Distributes events to all clients evenly. If the client `bufferSize` is reached, the client won't receive more events until it acknowledges or not acknowledges events in its buffer. + +This strategy provides equal load balancing between all consumers in the group. + +### DispatchToSingle + +Distributes events to a single client until the `bufferSize` is reached. After that, the next client is selected in a round-robin style, and the process repeats. + +This option can be seen as a fall-back scenario for high availability, when a single consumer processes all the events until it reaches its maximum capacity. When that happens, another consumer takes the load to free up the main consumer resources. + +### Pinned + +For use with an indexing projection such as the system `$by_category` projection. + +KurrentDB inspects the event for its source stream id, hashing the id to one of 1024 buckets assigned to individual clients. 
When a client disconnects, its buckets are assigned to other clients. When a client connects, it is assigned some existing buckets. This naively attempts to maintain a balanced workload. + +The main aim of this strategy is to decrease the likelihood of concurrency and ordering issues while maintaining load balancing. This is **not a guarantee**, and you should handle the usual ordering and concurrency issues. + +## Updating a subscription group + +You can edit the settings of an existing subscription group while it is running, you don't need to delete and recreate it to change settings. When you update the subscription group, it resets itself internally, dropping the connections and having them reconnect. You must have admin permissions to update a persistent subscription group. + +@[code{update-persistent-subscription}](@grpc:persistent-subscriptions/Program.cs) + +| Parameter | Description | +|:--------------|:----------------------------------------------------| +| `stream` | The stream the persistent subscription is on. | +| `groupName` | The name of the subscription group to update. | +| `settings` | The settings to use when creating the subscription. | +| `credentials` | The user credentials to use for this operation. | + +## Persistent subscription settings + +Both the `Create` and `Update` methods take some settings for configuring the persistent subscription. + +The following table shows the configuration options you can set on a persistent subscription. + +| Option | Description | Default | +|:------------------------|:----------------------------------------------------------------------------------------------------------------------------------|:------------------------------------------| +| `ResolveLinkTos` | Whether the subscription should resolve link events to their linked events. | `false` | +| `StartFrom` | The exclusive position in the stream or transaction file the subscription should start from. 
| `null` (start from the end of the stream) | +| `ExtraStatistics` | Whether to track latency statistics on this subscription. | `false` | +| `MessageTimeout` | The amount of time after which to consider a message as timed out and retried. | `30` (seconds) | +| `MaxRetryCount` | The maximum number of retries (due to timeout) before a message is considered to be parked. | `10` | +| `LiveBufferSize` | The size of the buffer (in-memory) listening to live messages as they happen before paging occurs. | `500` | +| `ReadBatchSize` | The number of events read at a time when paging through history. | `20` | +| `HistoryBufferSize` | The number of events to cache when paging through history. | `500` | +| `CheckPointAfter` | The amount of time to try to checkpoint after. | `2` seconds | +| `MinCheckPointCount` | The minimum number of messages to process before a checkpoint may be written. | `10` | +| `MaxCheckPointCount` | The maximum number of messages not checkpointed before forcing a checkpoint. | `1000` | +| `MaxSubscriberCount` | The maximum number of subscribers allowed. | `0` (unbounded) | +| `NamedConsumerStrategy` | The strategy to use for distributing events to client consumers. See the [consumer strategies](#consumer-strategies) in this doc. | `RoundRobin` | + +## Deleting a subscription group + +Remove a subscription group with the delete operation. Like the creation of groups, you rarely do this in your runtime code and is undertaken by an administrator running a script. + +@[code{delete-persistent-subscription}](@grpc:persistent-subscriptions/Program.cs) + +| Parameter | Description | +|:--------------|:-----------------------------------------------| +| `stream` | The stream the persistent subscription is on. | +| `groupName` | The name of the subscription group to delete. 
| +| `credentials` | The user credentials to use for this operation | diff --git a/docs/clients/grpc/dotnet/projections.md b/docs/clients/grpc/dotnet/projections.md new file mode 100644 index 00000000..406261e2 --- /dev/null +++ b/docs/clients/grpc/dotnet/projections.md @@ -0,0 +1,173 @@ +--- +order: 6 +title: Projections +head: + - - title + - {} + - Projections | .NET | Clients | Kurrent Docs +--- + +# Projection management + +The various gRPC client APIs include dedicated clients that allow you to manage projections. + +For a detailed explanation of projections, see the [server documentation](@server/features/projections/README.md). + +You can find the full sample code from this documentation page in the respective [clients repositories](https://github.com/kurrent-io/?q=client). + +## Creating a client + +Projection management operations are exposed through a dedicated client. + +@[code{createClient}](@grpc:projection-management/Program.cs) + +## Create a projection + +Creates a projection that runs until the last event in the store, and then continues processing new events as they are appended to the store. The query parameter contains the JavaScript you want created as a projection. +Projections have explicit names, and you can enable or disable them via this name. + +@[code{CreateContinuous}](@grpc:projection-management/Program.cs) + +Trying to create projections with the same name will result in an error: + +@[code{CreateContinuous_Conflict}](@grpc:projection-management/Program.cs) + +## Restart the subsystem + +It is possible to restart the entire projection subsystem using the projections management client API. The user must be in the `$ops` or `$admin` group to perform this operation. + +@[code{RestartSubSystem}](@grpc:projection-management/Program.cs) + +## Enable a projection + +Enables an existing projection by name. +Once enabled, the projection will start to process events even after restarting the server or the projection subsystem. 
+You must have access to a projection to enable it, see the [ACL documentation](@server/security/user-authorization.md). + +@[code{Enable}](@grpc:projection-management/Program.cs) + +You can only enable an existing projection. When you try to enable a non-existing projection, you'll get an error: + +@[code{EnableNotFound}](@grpc:projection-management/Program.cs) + +## Disable a projection + +Disables a projection, this will save the projection checkpoint. +Once disabled, the projection will not process events even after restarting the server or the projection subsystem. +You must have access to a projection to disable it, see the [ACL documentation](@server/security/user-authorization.md). + +@[code{Disable}](@grpc:projection-management/Program.cs) + +You can only disable an existing projection. When you try to disable a non-existing projection, you'll get an error: + +@[code{DisableNotFound}](@grpc:projection-management/Program.cs) + +## Delete a projection + +This feature is not available for this client. + +## Abort a projection + +Aborts a projection, this will not save the projection's checkpoint. + +@[code{Abort}](@grpc:projection-management/Program.cs) + +You can only abort an existing projection. When you try to abort a non-existing projection, you'll get an error: + +@[code{Abort_NotFound}](@grpc:projection-management/Program.cs) + +## Reset a projection + +Resets a projection, which causes deleting the projection checkpoint. This will force the projection to start afresh and re-emit events. Streams that are written to from the projection will also be soft-deleted. + +@[code{Reset}](@grpc:projection-management/Program.cs) + +Resetting a projection that does not exist will result in an error. + +@[code{Reset_NotFound}](@grpc:projection-management/Program.cs) + +## Update a projection + +Updates a projection with a given name. The query parameter contains the new JavaScript. Updating system projections using this operation is not supported at the moment. 
+ +@[code{Update}](@grpc:projection-management/Program.cs) + +You can only update an existing projection. When you try to update a non-existing projection, you'll get an error: + +@[code{Update_NotFound}](@grpc:projection-management/Program.cs) + +## List all projections + +Returns a list of all projections, user defined & system projections. +See the [projection details](#projection-details) section for an explanation of the returned values. + +@[code{ListAll}](@grpc:projection-management/Program.cs) + +## List continuous projections + +Returns a list of all continuous projections. +See the [projection details](#projection-details) section for an explanation of the returned values. + +@[code{ListContinuous}](@grpc:projection-management/Program.cs) + +## Get status + +Gets the status of a named projection. +See the [projection details](#projection-details) section for an explanation of the returned values. + +@[code{GetStatus}](@grpc:projection-management/Program.cs) + +## Get state + +Retrieves the state of a projection. + +@[code{GetState}](@grpc:projection-management/Program.cs) + +## Get result + +Retrieves the result of the named projection and partition. 
+ +@[code{GetResult}](@grpc:projection-management/Program.cs) + +## Projection Details + +[List all](#list-all-projections), [list continuous](#list-continuous-projections) and [get status](#get-status) all return the details and statistics of projections + +| Field | Description | +|--------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `Name`, `EffectiveName` | The name of the projection | +| `Status` | A human readable string of the current statuses of the projection (see below) | +| `StateReason` | A human readable string explaining the reason of the current projection state | +| `CheckpointStatus` | A human readable string explaining the current operation performed on the checkpoint : `requested`, `writing` | +| `Mode` | `Continuous`, `OneTime` , `Transient` | +| `CoreProcessingTime` | The total time, in ms, the projection took to handle events since the last restart | +| `Progress` | The progress, in %, indicates how far this projection has processed event, in case of a restart this could be -1% or some number. 
It will be updated as soon as a new event is appended and processed | +| `WritesInProgress` | The number of write requests to emitted streams currently in progress, these writes can be batches of events | +| `ReadsInProgress` | The number of read requests currently in progress | +| `PartitionsCached` | The number of cached projection partitions | +| `Position` | The Position of the last processed event | +| `LastCheckpoint` | The Position of the last checkpoint of this projection | +| `EventsProcessedAfterRestart` | The number of events processed since the last restart of this projection | +| `BufferedEvents` | The number of events in the projection read buffer | +| `WritePendingEventsBeforeCheckpoint` | The number of events waiting to be appended to emitted streams before the pending checkpoint can be written | +| `WritePendingEventsAfterCheckpoint` | The number of events to be appended to emitted streams since the last checkpoint | +| `Version` | This is used internally, the version is increased when the projection is edited or reset | +| `Epoch` | This is used internally, the epoch is increased when the projection is reset | + +The `Status` string is a combination of the following values. 
+The first 3 are the most common ones, as the others are transient values while the projection is initialised or stopped
+
+| Value              | Description                                                                                                             |
+|--------------------|-------------------------------------------------------------------------------------------------------------------------|
+| Running            | The projection is running and processing events                                                                         |
+| Stopped            | The projection is stopped and is no longer processing new events                                                        |
+| Faulted            | An error occurred in the projection, `StateReason` will give the fault details, the projection is not processing events  |
+| Initial            | This is the initial state, before the projection is fully initialised                                                   |
+| Suspended          | The projection is suspended and will not process events, this happens while stopping the projection                     |
+| LoadStateRequested | The state of the projection is being retrieved, this happens while the projection is starting                           |
+| StateLoaded        | The state of the projection is loaded, this happens while the projection is starting                                    |
+| Subscribed         | The projection has successfully subscribed to its readers, this happens while the projection is starting                |
+| FaultedStopping    | This happens before the projection is stopped due to an error in the projection                                         |
+| Stopping           | The projection is being stopped                                                                                         |
+| CompletingPhase    | This happens while the projection is stopping                                                                           |
+| PhaseCompleted     | This happens while the projection is stopping                                                                           |
diff --git a/docs/clients/grpc/dotnet/reading-events.md b/docs/clients/grpc/dotnet/reading-events.md
new file mode 100644
index 00000000..2facf685
--- /dev/null
+++ b/docs/clients/grpc/dotnet/reading-events.md
@@ -0,0 +1,135 @@
+---
+order: 3
+head:
+  - - title
+    - {}
+    - Reading Events | .NET | Clients | Kurrent Docs
+---
+
+# Reading Events
+
+There are two options for reading events from KurrentDB. You can either:
+ 1. Read from an individual stream, or
+ 2. Read from the `$all` stream, which will return all events in the store. 
+ +Each event in KurrentDB belongs to an individual stream. When reading events, pick the name of the stream from which you want to read the events and choose whether to read the stream forwards or backwards. + +All events have a `StreamPosition` and a `Position`. `StreamPosition` is a *big int* (unsigned 64-bit integer) and represents the place of the event in the stream. `Position` is the event's logical position, and is represented by `CommitPosition` and a `PreparePosition`. Note that when reading events you will supply a different "position" depending on whether you are reading from an individual stream or the `$all` stream. + +:::tip +Check [connecting to KurrentDB instructions](getting-started.md#required-packages) to learn how to configure and use the client SDK. +::: + +## Reading from a stream + +You can read all the events or a sample of the events from individual streams, starting from any position in the stream, and can read either forward or backward. It is only possible to read events from a single stream at a time. You can read events from the global event log, which spans across streams. Learn more about this process in the [Read from `$all`](#reading-from-the-all-stream) section below. + +### Reading forwards + +The simplest way to read a stream forwards is to supply a stream name, read direction, and revision from which to start. The revision can either be a *stream position* `Start` or a *big int* (unsigned 64-bit integer): + +@[code{read-from-stream}](@grpc:reading-events/Program.cs) + +This will return an enumerable that can be iterated on: + +@[code{iterate-stream}](@grpc:reading-events/Program.cs) + +There are a number of additional arguments you can provide when reading a stream, listed below. + +#### maxCount + +Passing in the max count will limit the number of events returned. + +#### resolveLinkTos + +When using projections to create new events, you can set whether the generated events are pointers to existing events. 
Setting this value to `true` tells KurrentDB to return the event as well as the event linking to it. + +#### configureOperationOptions + +You can use the `configureOperationOptions` argument to provide a function that will customise settings for each operation. + +#### userCredentials + +The `userCredentials` argument is optional. It is used to override the default credentials specified when creating the client instance. + +@[code{overriding-user-credentials}](@grpc:reading-events/Program.cs) + +### Reading from a revision + +Instead of providing the `StreamPosition` you can also provide a specific stream revision as a *big int* (unsigned 64-bit integer). + +@[code{read-from-stream-position}](@grpc:reading-events/Program.cs) + +### Reading backwards + +In addition to reading a stream forwards, streams can be read backwards. To read all the events backwards, set the *stream position* to the end: + +@[code{reading-backwards}](@grpc:reading-events/Program.cs) + +:::tip +Read one event backwards to find the last position in the stream. +::: + +### Checking if the stream exists + +Reading a stream returns a `ReadStreamResult`, which contains a property `ReadState`. This property can have the value `StreamNotFound` or `Ok`. + +It is important to check the value of this field before attempting to iterate an empty stream, as it will throw an exception. + +For example: + +@[code{checking-for-stream-presence}](@grpc:reading-events/Program.cs) + +## Reading from the $all stream + +Reading from the `$all` stream is similar to reading from an individual stream, but please note there are differences. One significant difference is the need to provide admin user account credentials to read from the `$all` stream. Additionally, you need to provide a transaction log position instead of a stream revision when reading from the `$all` stream. 
+
+### Reading forwards
+
+The simplest way to read the `$all` stream forwards is to supply a read direction and the transaction log position from which you want to start. The transaction log position can either be a *stream position* `Start` or a *big int* (unsigned 64-bit integer):
+
+@[code{read-from-all-stream}](@grpc:reading-events/Program.cs)
+
+You can iterate asynchronously through the result:
+
+@[code{read-from-all-stream-iterate}](@grpc:reading-events/Program.cs)
+
+There are a number of additional arguments you can provide when reading the `$all` stream.
+
+#### maxCount
+
+Passing in the max count allows you to limit the number of events that are returned.
+
+#### resolveLinkTos
+
+When using projections to create new events you can set whether the generated events are pointers to existing events. Setting this value to true will tell KurrentDB to return the event as well as the event linking to it.
+
+@[code{read-from-all-stream-resolving-link-Tos}](@grpc:reading-events/Program.cs)
+
+#### configureOperationOptions
+
+This argument is a generic settings class for all operations that can be set on all operations executed against KurrentDB.
+
+#### userCredentials
+The credentials used to read the data can be used by the subscription as follows. This will override the default credentials set on the connection.
+
+@[code{read-all-overriding-user-credentials}](@grpc:reading-events/Program.cs)
+
+### Reading backwards
+
+In addition to reading the `$all` stream forwards, it can be read backwards. To read all the events backwards, set the *position* to the end:
+
+@[code{read-from-all-stream-backwards}](@grpc:reading-events/Program.cs)
+
+:::tip
+Read one event backwards to find the last position in the `$all` stream.
+:::
+
+### Handling system events
+
+KurrentDB will also return system events when reading from the `$all` stream. In most cases you can ignore these events. 
+ +All system events begin with `$` or `$$` and can be easily ignored by checking the `EventType` property. + +@[code{ignore-system-events}](@grpc:reading-events/Program.cs) + diff --git a/docs/clients/grpc/dotnet/release-notes.md b/docs/clients/grpc/dotnet/release-notes.md new file mode 100644 index 00000000..41494358 --- /dev/null +++ b/docs/clients/grpc/dotnet/release-notes.md @@ -0,0 +1,11 @@ +--- +order: 10 +head: + - - title + - {} + - Release Notes | .NET | Clients | Kurrent Docs +--- + +# Release Notes + +To stay up to date with the latest changes and improvements, including release history, changelogs, upgrade instructions, and migration guides, see the [KurrentDB .NET client release notes](https://github.com/kurrent-io/KurrentDB-Client-Dotnet/releases). \ No newline at end of file diff --git a/docs/clients/grpc/dotnet/subscriptions.md b/docs/clients/grpc/dotnet/subscriptions.md new file mode 100644 index 00000000..d3d67309 --- /dev/null +++ b/docs/clients/grpc/dotnet/subscriptions.md @@ -0,0 +1,234 @@ +--- +order: 4 +head: + - - title + - {} + - Catch-up Subscriptions | .NET | Clients | Kurrent Docs +--- + +# Catch-up Subscriptions + +Subscriptions allow you to subscribe to a stream and receive notifications about new events added to the stream. + +You provide an event handler and an optional starting point to the subscription. The handler is called for each event from the starting point onward. + +If events already exist, the handler will be called for each event one by one until it reaches the end of the stream. The server will then notify the handler whenever a new event appears. + +:::tip +Check the [Getting Started](getting-started.md) guide to learn how to configure and use the client SDK. +::: + +## Subscribing from the start + +If you need to process all the events in the store, including historical events, you'll need to subscribe from the beginning. 
You can either subscribe to receive events from a single stream or subscribe to `$all` if you need to process all events in the database. + +### Subscribing to a stream + +The simplest stream subscription looks like the following : + +@[code{subscribe-to-stream}](@grpc:subscribing-to-streams/Program.cs) + +The provided handler will be called for every event in the stream. + +When you subscribe to a stream with link events, for example the `$ce` category stream, you need to set `resolveLinkTos` to `true`. Read more about it [below](#resolving-link-to-s). + +### Subscribing to `$all` + +Subscribing to `$all` is similar to subscribing to a single stream. The handler will be called for every event appended after the starting position. + +@[code{subscribe-to-all}](@grpc:subscribing-to-streams/Program.cs) + +## Subscribing from a specific position + +The previous examples subscribed to the stream from the beginning. That subscription invoked the handler for every event in the stream before waiting for new events. + +Both stream and $all subscriptions accept a starting position if you want to read from a specific point onward. If events already exist at the position you subscribe to, they will be read on the server side and sent to the subscription. + +Once caught up, the server will push any new events received on the streams to the client. There is no difference between catching up and live on the client side. + +::: warning +The positions provided to the subscriptions are exclusive. You will only receive the next event after the subscribed position. +::: + +### Subscribing to a stream + +To subscribe to a stream from a specific position, you must provide a *stream position*. This can be `Start`, `End` or a *big int* (unsigned 64 bit integer) position. 
+ +The following subscribes to the stream `some-stream` at position `20`, this means that events `21` and onward will be handled: + +@[code{subscribe-to-stream-from-position}](@grpc:subscribing-to-streams/Program.cs) + +### Subscribing to $all + +Subscribing to the `$all` stream is similar to subscribing to a regular stream. The difference is how to specify the starting position. For the `$all` stream, provide a `Position` structure that consists of two big integers: the prepare and commit positions. Use `Start`, `End`, or create a `Position` from specific commit and prepare values. + +The corresponding `$all` subscription will subscribe from the event after the one at commit position `1056` and prepare position `1056`. + +Please note that this position will need to be a legitimate position in `$all`. + +@[code{subscribe-to-all-from-position}](@grpc:subscribing-to-streams/Program.cs) + +## Subscribing to a stream for live updates + +You can subscribe to a stream to get live updates by subscribing to the end of the stream: + +@[code{subscribe-to-stream-live}](@grpc:subscribing-to-streams/Program.cs) + +And the same works with `$all` : + +@[code{subscribe-to-all-live}](@grpc:subscribing-to-streams/Program.cs) + +This will not read through the history of the stream but will notify the handler when a new event appears in the respective stream. + +Keep in mind that when you subscribe to a stream from a specific position, as described [above](#subscribing-from-a-specific-position), you will also get live updates after your subscription catches up (processes all the historical events). + +## Resolving link-to events + +Link-to events point to events in other streams in KurrentDB. These are generally created by projections such as the `$by_event_type` projection which links events of the same event type into the same stream. This makes it easier to look up all events of a specific type. 
+ +::: tip +[Filtered subscriptions](subscriptions.md#server-side-filtering) make it easier and faster to subscribe to all events of a specific type or matching a prefix. +::: + +When reading a stream you can specify whether to resolve link-to's. By default, link-to events are not resolved. You can change this behaviour by setting the `resolveLinkTos` parameter to `true`: + +@[code{subscribe-to-stream-resolving-linktos}](@grpc:subscribing-to-streams/Program.cs) + +## Dropped subscriptions + +When a subscription stops or experiences an error, it will be dropped. The subscription provides a `subscriptionDropped` callback, which will get called when the subscription breaks. + +The `subscriptionDropped` callback allows you to inspect the reason why the subscription dropped, as well as any exceptions that occurred. + +The possible reasons for a subscription to drop are: + +| Reason | Why it might happen | +|:------------------|:---------------------------------------------------------------------------------------------------------------------| +| `Disposed` | The client canceled or disposed of the subscription. | +| `SubscriberError` | An error occurred while handling an event in the subscription handler. | +| `ServerError` | An error occurred on the server, and the server closed the subscription. Check the server logs for more information. | + +Bear in mind that a subscription can also drop because it is slow. The server tried to push all the live events to the subscription when it is in the live processing mode. If the subscription gets the reading buffer overflow and won't be able to acknowledge the buffer, it will break. + +### Handling subscription drops + +An application, which hosts the subscription, can go offline for some time for different reasons. It could be a crash, infrastructure failure, or a new version deployment. 
As you rarely would want to reprocess all the events again, you'd need to store the current position of the subscription somewhere, and then use it to restore the subscription from the point where it dropped off: + +@[code{subscribe-to-stream-subscription-dropped}](@grpc:subscribing-to-streams/Program.cs) + +When subscribed to `$all` you want to keep the event's position in the `$all` stream. As mentioned previously, the `$all` stream position consists of two big integers (prepare and commit positions), not one: + +@[code{subscribe-to-all-subscription-dropped}](@grpc:subscribing-to-streams/Program.cs) + +## User credentials + +The user creating a subscription must have read access to the stream it's subscribing to, and only admin users may subscribe to `$all` or create filtered subscriptions. + +The code below shows how you can provide user credentials for a subscription. When you specify subscription credentials explicitly, it will override the default credentials set for the client. If you don't specify any credentials, the client will use the credentials specified for the client, if you specified those. + +@[code{overriding-user-credentials}](@grpc:subscribing-to-streams/Program.cs) + +## Server-side filtering + +KurrentDB allows you to filter the events whilst subscribing to the `$all` stream to only receive the events you care about. + +You can filter by event type or stream name using a regular expression or a prefix. Server-side filtering is currently only available on the `$all` stream. + +::: tip +Server-side filtering was introduced as a simpler alternative to projections. You should consider filtering before creating a projection to include the events you care about. +::: + +A simple stream prefix filter looks like this: + +@[code{stream-prefix-filtered-subscription}](@grpc:subscribing-to-streams/Program.cs) + +The filtering API is described more in-depth in the [filtering section](subscriptions.md#server-side-filtering). 
+ +### Filtering out system events + +There are events in KurrentDB called system events. These are prefixed with a `$` and under most circumstances you won't care about these. They can be filtered out by passing in a `SubscriptionFilterOptions` when subscribing to the `$all` stream. + +@[code{exclude-system}](@grpc:server-side-filtering/Program.cs) + +::: tip +`$stats` events are no longer stored in KurrentDB by default so there won't be as many `$` events as before. +::: + +### Filtering by event type + +If you only want to subscribe to events of a given type, there are two options. You can either use a regular expression or a prefix. + +#### Filtering by prefix + +If you want to filter by prefix, pass in a `SubscriptionFilterOptions` to the subscription with an `EventTypeFilter.Prefix`. + +@[code{event-type-prefix}](@grpc:server-side-filtering/Program.cs) + +This will only subscribe to events with a type that begin with `customer-`. + +#### Filtering by regular expression + +It might be advantageous to provide a regular expression when you want to subscribe to multiple event types. + +@[code{event-type-regex}](@grpc:server-side-filtering/Program.cs) + +This will subscribe to any event that begins with `user` or `company`. + +### Filtering by stream name + +To subscribe to a stream by name, choose either a regular expression or a prefix. + +#### Filtering by prefix + +If you want to filter by prefix, pass in a `SubscriptionFilterOptions` to the subscription with an `StreamFilter.Prefix`. + +@[code{stream-prefix}](@grpc:server-side-filtering/Program.cs) + +This will only subscribe to all streams with a name that begins with `user-`. + +#### Filtering by regular expression + +To subscribe to multiple streams, use a regular expression. + +@[code{stream-regex}](@grpc:server-side-filtering/Program.cs) + +This will subscribe to any stream with a name that begins with `account` or `savings`. 
+ +## Checkpointing + +When a catch-up subscription is used to process an `$all` stream containing many events, the last thing you want is for your application to crash midway, forcing you to restart from the beginning. + +### What is a checkpoint? + +A checkpoint is the position of an event in the `$all` stream to which your application has processed. By saving this position to a persistent store (e.g., a database), it allows your catch-up subscription to: +- Recover from crashes by reading the checkpoint and resuming from that position +- Avoid reprocessing all events from the start + +To create a checkpoint, store the event's commit or prepare position. + +::: warning +If your database contains events created by the legacy TCP client using the [transaction feature](https://docs.kurrent.io/clients/tcp/dotnet/21.2/appending.html#transactions), you should store both the commit and prepare positions together as your checkpoint. +::: + +### Updating checkpoints at regular intervals +The client SDK provides a way to notify your application after processing a configurable number of events. This allows you to periodically save a checkpoint at regular intervals. + +@[code{checkpoint}](@grpc:server-side-filtering/Program.cs) + +By default, the checkpoint notification is sent after every 32 non-system events processed from $all. + +### Configuring the checkpoint interval +You can adjust the checkpoint interval to change how often the client is notified. + +@[code{checkpoint-with-interval}](@grpc:server-side-filtering/Program.cs) + +By configuring this parameter, you can balance between reducing checkpoint overhead and ensuring quick recovery in case of a failure. + +::: info +The checkpoint interval parameter configures the database to notify the client after `n` * 32 number of events where `n` is defined by the parameter. + +For example: +- If `n` = 1, a checkpoint notification is sent every 32 events. +- If `n` = 2, the notification is sent every 64 events. 
+- If `n` = 3, it is sent every 96 events, and so on. +::: diff --git a/docs/clients/grpc/getting-started.md b/docs/clients/grpc/getting-started.md index d9add623..52efced8 100644 --- a/docs/clients/grpc/getting-started.md +++ b/docs/clients/grpc/getting-started.md @@ -1,176 +1,21 @@ --- -order: 1 +sitemap: + priority: 0 + changefreq: monthly --- -# Getting started +# Getting Started -Get started by connecting your application to KurrentDB. +To redirect you to the right page, please select a client: -## Connecting to KurrentDB +.NET [.NET](/clients/grpc/dotnet/getting-started.md) -For your application to start communicating with KurrentDB, you need to instantiate the client and configure it accordingly. Below are instructions for supported SDKs. +Python [Python](/clients/grpc/python/getting-started.md) -::: tip Insecure clusters -All our GRPC clients are secure by default and must be configured to connect to an insecure server via [a connection string](#connection-string) or the client's configuration. -::: +Node.js [Node.js](/clients/grpc/nodejs/getting-started.md) -### Required packages +Java [Java](/clients/grpc/java/getting-started.md) -Install the client SDK package to your project. - -#### Python - -Install the `kurrentdbclient` package from PyPI or use Poetry: - -::: tabs -@tab pip -```bash -pip install kurrentdbclient -``` -@tab Poetry -```bash -poetry add kurrentdbclient -``` -::: - -#### NodeJS - -Install the `@kurrent/kurrentdb-client` package using NPM, Yarn or PNPM: - -::: tabs -@tab npm -```bash -npm install --save @kurrent/kurrentdb-client -``` -@tab yarn -```bash -yarn add @kurrent/kurrentdb-client -``` -@tab pnpm -```bash -pnpm add @kurrent/kurrentdb-client -``` -::: - -TypeScript Declarations are included in the package. - -#### Java - -Add the `kurrentdb-client` dependency to your Maven or Gradle project. 
- -::: tabs -@tab Maven -```xml - - io.kurrent - kurrentdb-client - 1.0.0 - -``` - -@tab Gradle -```groovy -implementation 'io.kurrent:kurrentdb-client:1.0.0' -``` - -For the most recent version of the KurrentDB client package, see [Maven Central](https://mvnrepository.com/artifact/io.kurrent/kurrentdb-client). -::: - -#### .NET - -Add the `KurrentDB.Client` package to your project: - -```bash -dotnet add package KurrentDB.Client -``` - -#### Go - -Install the `kurrentdb` package using Go modules: - -```bash -go get github.com/kurrent-io/KurrentDB-Client-Go/kurrentdb -``` - -#### Rust - -No additional configuration is needed having Rust installed. Go check [https://rustup.rs](https://rustup.rs). - -### Connection string - -Each SDK has its own way of configuring the client, but the connection string can always be used. -The KurrentDB connection string supports two schemas: `kurrentdb://` for connecting to a single-node server, and `kurrentdb+discover://` for connecting to a multi-node cluster. The difference between the two schemas is that when using `kurrentdb://`, the client will connect directly to the node; with `kurrentdb+discover://` schema the client will use the gossip protocol to retrieve the cluster information and choose the right node to connect to. -Since version 22.10, ESDB supports gossip on single-node deployments, so `kurrentdb+discover://` schema can be used for connecting to any topology. - -The connection string has the following format: - -``` -kurrentdb+discover://admin:changeit@cluster.dns.name:2113 -``` - -There, `cluster.dns.name` is the name of a DNS `A` record that points to all the cluster nodes. 
Alternatively, you can list cluster nodes separated by comma instead of the cluster DNS name: - -``` -kurrentdb+discover://admin:changeit@node1.dns.name:2113,node2.dns.name:2113,node3.dns.name:2113 -``` - -There are a number of query parameters that can be used in the connection string to instruct the cluster how and where the connection should be established. All query parameters are optional. - -| Parameter | Accepted values | Default | Description | -|-----------------------|---------------------------------------------------|----------|------------------------------------------------------------------------------------------------------------------------------------------------| -| `tls` | `true`, `false` | `true` | Use secure connection, set to `false` when connecting to a non-secure server or cluster. | -| `connectionName` | Any string | None | Connection name | -| `maxDiscoverAttempts` | Number | `10` | Number of attempts to discover the cluster. | -| `discoveryInterval` | Number | `100` | Cluster discovery polling interval in milliseconds. | -| `gossipTimeout` | Number | `5` | Gossip timeout in seconds, when the gossip call times out, it will be retried. | -| `nodePreference` | `leader`, `follower`, `random`, `readOnlyReplica` | `leader` | Preferred node role. When creating a client for write operations, always use `leader`. | -| `tlsVerifyCert` | `true`, `false` | `true` | In secure mode, set to `true` when using an untrusted connection to the node if you don't have the CA file available. Don't use in production. | -| `tlsCaFile` | String, file path | None | Path to the CA file when connecting to a secure cluster with a certificate that's not signed by a trusted CA. | -| `defaultDeadline` | Number | None | Default timeout for client operations, in milliseconds. Most clients allow overriding the deadline per operation. | -| `keepAliveInterval` | Number | `10` | Interval between keep-alive ping calls, in seconds. 
| -| `keepAliveTimeout` | Number | `10` | Keep-alive ping call timeout, in seconds. | -| `userCertFile` | String, file path | None | User certificate file for X.509 authentication. | -| `userKeyFile` | String, file path | None | Key file for the user certificate used for X.509 authentication. | - -When connecting to an insecure instance, specify `tls=false` parameter. For example, for a node running locally use `kurrentdb://localhost:2113?tls=false`. Note that usernames and passwords aren't provided there because insecure deployments don't support authentication and authorisation. - -### Creating a client - -First, create a client and get it connected to the database. - -@[code{createClient}](@grpc:quickstart.py;get-started.js;get-started.ts;quick_start/QuickStart.java;quick-start/Program.cs;quickstart.go;quickstart.rs) - -The client instance can be used as a singleton across the whole application. It doesn't need to open or close the connection. - -### Creating an event - -You can write anything to KurrentDB as events. The client needs a byte array as the event payload. Normally, you'd use a serialized object, and it's up to you to choose the serialization method. - -::: tip Server-side projections -User-defined server-side projections require events to be serialized in JSON format. - -We use JSON for serialization in the documentation examples. -::: - -The code snippet below creates an event object instance, serializes it, and adds it as a payload to the `EventData` structure, which the client can then write to the database. - -@[code{createEvent}](@grpc:quickstart.py;get-started.js;get-started.ts;quick_start/QuickStart.java;quick-start/Program.cs;quickstart.go;quickstart.rs) - -### Appending events - -Each event in the database has its own unique identifier (UUID). The database uses it to ensure idempotent writes, but it only works if you specify the stream revision when appending events to the stream. 
- -In the snippet below, we append the event to the stream `some-stream`. - -@[code{appendEvents}](@grpc:quickstart.py;get-started.js;get-started.ts;quick_start/QuickStart.java;quick-start/Program.cs;quickstart.go;quickstart.rs) - -Here we are appending events without checking if the stream exists or if the stream version matches the expected event version. See more advanced scenarios in [appending events documentation](./appending-events.md). - -### Reading events - -Finally, we can read events back from the `some-stream` stream. - -@[code{readStream}](@grpc:quickstart.py;get-started.js;get-started.ts;quick_start/QuickStart.java;quick-start/Program.cs;quickstart.go;quickstart.rs) - -When you read events from the stream, you get a collection of `ResolvedEvent` structures. The event payload is returned as a byte array and needs to be deserialized. See more advanced scenarios in [reading events documentation](./reading-events.md). +Go [Go](/clients/grpc/go/getting-started.md) +Rust [Rust](/clients/grpc/rust/getting-started.md) \ No newline at end of file diff --git a/docs/clients/grpc/go/README.md b/docs/clients/grpc/go/README.md new file mode 100644 index 00000000..6e8f4fde --- /dev/null +++ b/docs/clients/grpc/go/README.md @@ -0,0 +1,9 @@ +--- +index: false +--- + +# Go + +Learn how to use the KurrentDB Go client library to interact with the database. + + \ No newline at end of file diff --git a/docs/clients/grpc/go/appending-events.md b/docs/clients/grpc/go/appending-events.md new file mode 100644 index 00000000..40094dba --- /dev/null +++ b/docs/clients/grpc/go/appending-events.md @@ -0,0 +1,87 @@ +--- +order: 2 +head: + - - title + - {} + - Appending Events | Go | Clients | Kurrent Docs +--- + +# Appending Events + +When you start working with KurrentDB, it is empty. The first meaningful operation is to add one or more events to the database using one of the available client SDKs. 
+ +::: tip +Check the [Getting Started](getting-started.md) guide to learn how to configure and use the client SDK. +::: + +## Append your first event + +The simplest way to append an event to KurrentDB is to create an `EventData` object and call `AppendToStream` method. + +@[code{append-to-stream}](@grpc:appendingEvents.go) + +`AppendToStream` takes a collection of `EventData`, which allows you to save more than one event in a single batch. + +Outside the example above, other options exist for dealing with different scenarios. + +::: tip +If you are new to Event Sourcing, please study the [Handling concurrency](#handling-concurrency) section below. +::: + +## Working with EventData + +Events appended to KurrentDB must be wrapped in an `EventData` object. This allows you to specify the event's content, the type of event, and whether it's in JSON format. In its simplest form, you need three arguments: **eventId**, **type**, and **data**. + +### eventId + +This takes the format of a `Uuid` and is used to uniquely identify the event you are trying to append. If two events with the same `Uuid` are appended to the same stream in quick succession, KurrentDB will only append one of the events to the stream. + +For example, the following code will only append a single event: + +@[code{append-duplicate-event}](@grpc:appendingEvents.go) + +![Duplicate Event](../images/duplicate-event.png) + +### type + +Each event should be supplied with an event type. This unique string is used to identify the type of event you are saving. + +It is common to see the explicit event code type name used as the type as it makes serialising and de-serialising of the event easy. However, we recommend against this as it couples the storage to the type and will make it more difficult if you need to version the event at a later date. + +### data + +Representation of your event data. It is recommended that you store your events as JSON objects. 
This allows you to take advantage of all of KurrentDB's functionality, such as projections. That said, you can save events using whatever format suits your workflow. Eventually, the data will be stored as encoded bytes. + +### metadata + +Storing additional information alongside your event that is part of the event itself is standard practice. This can be correlation IDs, timestamps, access information, etc. KurrentDB allows you to store a separate byte array containing this information to keep it separate. + +### isJson + +Simple boolean field to tell KurrentDB if the event is stored as json, true by default. + +## Handling concurrency + +When appending events to a stream, you can supply a *stream state* or *stream revision*. Your client uses this to inform KurrentDB of the state or version you expect the stream to be in when appending an event. If the stream isn't in that state, an exception will be thrown. + +For example, if you try to append the same record twice, expecting both times that the stream doesn't exist, you will get an exception on the second: + +@[code{append-with-no-stream}](@grpc:appendingEvents.go) + +There are three available stream states: +- `Any` +- `NoStream` +- `StreamExists` + +This check can be used to implement optimistic concurrency. When retrieving a stream from KurrentDB, note the current version number. When you save it back, you can determine if somebody else has modified the record in the meantime. + +@[code{append-with-concurrency-check}](@grpc:appendingEvents.go) + + + +## User credentials + +You can provide user credentials to append the data as follows. This will override the default credentials set on the connection. 
+ +@[code{overriding-user-credentials}](@grpc:appendingEvents.go) + diff --git a/docs/clients/grpc/go/authentication.md b/docs/clients/grpc/go/authentication.md new file mode 100644 index 00000000..9a1eee09 --- /dev/null +++ b/docs/clients/grpc/go/authentication.md @@ -0,0 +1,60 @@ +--- +title: Authentication +order: 7 +head: + - - title + - {} + - Authentication | Go | Clients | Kurrent Docs +--- + +## Client x.509 certificate + +X.509 certificates are digital certificates that use the X.509 public key infrastructure (PKI) standard to verify the identity of clients and servers. They play a crucial role in establishing a secure connection by providing a way to authenticate identities and establish trust. + +### Prerequisites + +1. KurrentDB 25.0 or greater, or EventStoreDB 24.10. +2. A commercial license with the User Certificates entitlement. +3. A valid x.509 certificate, which can be created using version `1.3` or higher of the [gencert tool](https://github.com/kurrent-io/es-gencert-cli). +4. The server must run in secure mode. See [Security Options](@server/security/protocol-security.md) for more information. +5. [Enable User Certificates plugin on the server](@server/security/user-authentication.md#user-x509-certificates) + +#### Generate user certificates + +The following command uses the [gencert tool](https://github.com/kurrent-io/es-gencert-cli) to generate a user certificate for the user `admin` that will expire in 10 days: + +::: tabs#os +@tab bash +```bash +./es-gencert-cli create-user -username admin -days 10 -ca-certificate ./es-ca/ca.crt -ca-key ./es-ca/ca.key +``` +@tab PowerShell +```powershell +.\es-gencert-cli.exe create-user -username admin -days 10 -ca-certificate ./es-ca/ca.crt -ca-key ./es-ca/ca.key +``` +::: + +### Connect to KurrentDB using an x.509 certificate + +To connect to KurrentDB using an x.509 certificate, you need to provide the +certificate and the private key to the client. 
If both username/password and +certificate authentication data are supplied, the client prioritizes user +credentials for authentication. The client will throw an error if the +certificate and the key are not both provided. + +::: tip +Please note that currently, password-protected private key files are not supported. +::: + +The client supports the following parameters: + +| Parameter | Description | +|----------------|--------------------------------------------------------------------------------| +| `userCertFile` | The file containing the X.509 user certificate in PEM format. | +| `userKeyFile` | The file containing the user certificate’s matching private key in PEM format. | + +To authenticate, include these two parameters in your connection string or constructor when initializing the client. + +For example: + +@[code{client-with-user-certificates}](@grpc:/userCertificates.go) \ No newline at end of file diff --git a/docs/clients/grpc/go/delete-stream.md b/docs/clients/grpc/go/delete-stream.md new file mode 100644 index 00000000..b9000774 --- /dev/null +++ b/docs/clients/grpc/go/delete-stream.md @@ -0,0 +1,42 @@ +--- +order: 9 +head: + - - title + - {} + - Deleting Events | Go | Clients | Kurrent Docs +--- + +# Deleting Events + +In KurrentDB, you can delete events and streams either partially or completely. Settings like $maxAge and $maxCount help control how long events are kept or how many events are stored in a stream, but they won't delete the entire stream. +When you need to fully remove a stream, KurrentDB offers two options: Soft Delete and Hard Delete. + +## Soft delete + +Soft delete in KurrentDB allows you to mark a stream for deletion without completely removing it, so you can still add new events later. While you can do this through the UI, using code is often better for automating the process, +handling many streams at once, or including custom rules. 
Code is especially helpful for large-scale deletions or when you need to integrate soft deletes into other workflows. + +```go +options := esdb.DeleteStreamOptions{ + ExpectedRevision: esdb.Any{}, +} +_, err = client.DeleteStream(context.Background(), streamName, options) +``` + +::: note +Clicking the delete button in the UI performs a soft delete, +setting the TruncateBefore value to remove all events up to a certain point. +While this marks the events for deletion, actual removal occurs during the next scavenging process. +The stream can still be reopened by appending new events. +::: + +## Hard delete + +Hard delete in KurrentDB permanently removes a stream and its events. While you can use the HTTP API, code is often better for automating the process, managing multiple streams, and ensuring precise control. Code is especially useful when you need to integrate hard delete into larger workflows or apply specific conditions. Note that when a stream is hard deleted, you cannot reuse the stream name, it will raise an exception if you try to append to it again. + +```go +options := esdb.TombstoneStreamOptions{ + ExpectedRevision: esdb.Any{}, +} +_, err = client.TombstoneStream(context.Background(), streamName, options) +``` \ No newline at end of file diff --git a/docs/clients/grpc/go/getting-started.md b/docs/clients/grpc/go/getting-started.md new file mode 100644 index 00000000..4eb753c4 --- /dev/null +++ b/docs/clients/grpc/go/getting-started.md @@ -0,0 +1,106 @@ +--- +order: 1 +head: + - - title + - {} + - Getting Started | Go | Clients | Kurrent Docs +--- + +# Getting Started + +Get started by connecting your application to KurrentDB. + +## Connecting to KurrentDB + +To connect your application to KurrentDB, instantiate and configure the client. + +::: tip Insecure clusters +All our GRPC clients are secure by default and must be configured to connect to an insecure server via [a connection string](#connection-string) or the client's configuration. 
+::: + +### Required packages + +Install the `kurrentdb` package using Go modules: + +```bash +go get github.com/kurrent-io/KurrentDB-Client-Go/kurrentdb +``` + +### Connection string + +Each SDK has its own way of configuring the client, but the connection string can always be used. +The KurrentDB connection string supports two schemas: `kurrentdb://` for connecting to a single-node server, and `kurrentdb+discover://` for connecting to a multi-node cluster. The difference between the two schemas is that when using `kurrentdb://`, the client will connect directly to the node; with `kurrentdb+discover://` schema the client will use the gossip protocol to retrieve the cluster information and choose the right node to connect to. +Since version 22.10, ESDB supports gossip on single-node deployments, so `kurrentdb+discover://` schema can be used for connecting to any topology. + +The connection string has the following format: + +``` +kurrentdb+discover://admin:changeit@cluster.dns.name:2113 +``` + +There, `cluster.dns.name` is the name of a DNS `A` record that points to all the cluster nodes. Alternatively, you can list cluster nodes separated by comma instead of the cluster DNS name: + +``` +kurrentdb+discover://admin:changeit@node1.dns.name:2113,node2.dns.name:2113,node3.dns.name:2113 +``` + +There are a number of query parameters that can be used in the connection string to instruct the cluster how and where the connection should be established. All query parameters are optional. + +| Parameter | Accepted values | Default | Description | +|-----------------------|---------------------------------------------------|----------|------------------------------------------------------------------------------------------------------------------------------------------------| +| `tls` | `true`, `false` | `true` | Use secure connection, set to `false` when connecting to a non-secure server or cluster. 
| +| `connectionName` | Any string | None | Connection name | +| `maxDiscoverAttempts` | Number | `10` | Number of attempts to discover the cluster. | +| `discoveryInterval` | Number | `100` | Cluster discovery polling interval in milliseconds. | +| `gossipTimeout` | Number | `5` | Gossip timeout in seconds, when the gossip call times out, it will be retried. | +| `nodePreference` | `leader`, `follower`, `random`, `readOnlyReplica` | `leader` | Preferred node role. When creating a client for write operations, always use `leader`. | +| `tlsVerifyCert` | `true`, `false` | `true` | In secure mode, set to `true` when using an untrusted connection to the node if you don't have the CA file available. Don't use in production. | +| `tlsCaFile` | String, file path | None | Path to the CA file when connecting to a secure cluster with a certificate that's not signed by a trusted CA. | +| `defaultDeadline` | Number | None | Default timeout for client operations, in milliseconds. Most clients allow overriding the deadline per operation. | +| `keepAliveInterval` | Number | `10` | Interval between keep-alive ping calls, in seconds. | +| `keepAliveTimeout` | Number | `10` | Keep-alive ping call timeout, in seconds. | +| `userCertFile` | String, file path | None | User certificate file for X.509 authentication. | +| `userKeyFile` | String, file path | None | Key file for the user certificate used for X.509 authentication. | + +When connecting to an insecure instance, specify `tls=false` parameter. For example, for a node running locally use `kurrentdb://localhost:2113?tls=false`. Note that usernames and passwords aren't provided there because insecure deployments don't support authentication and authorisation. + +### Creating a client + +First, create a client and get it connected to the database. + +@[code{createClient}](@grpc:quickstart.go) + +The client instance can be used as a singleton across the whole application. It doesn't need to open or close the connection. 
+ +### Creating an event + +You can write anything to KurrentDB as events. The client needs a byte array as the event payload. Normally, you'd use a serialized object, and it's up to you to choose the serialization method. + +::: tip Server-side projections +User-defined server-side projections require events to be serialized in JSON format. + +We use JSON for serialization in the documentation examples. +::: + +The code snippet below creates an event object instance, serializes it, and adds it as a payload to the `EventData` structure, which the client can then write to the database. + +@[code{createEvent}](@grpc:quickstart.go) + +### Appending events + +Each event in the database has its own unique identifier (UUID). The database uses it to ensure idempotent writes, but it only works if you specify the stream revision when appending events to the stream. + +In the snippet below, we append the event to the stream `some-stream`. + +@[code{appendEvents}](@grpc:quickstart.go) + +Here we are appending events without checking if the stream exists or if the stream version matches the expected event version. See more advanced scenarios in [appending events documentation](./appending-events.md). + +### Reading events + +Finally, we can read events back from the `some-stream` stream. + +@[code{readStream}](@grpc:quickstart.go) + +When you read events from the stream, you get a collection of `ResolvedEvent` structures. The event payload is returned as a byte array and needs to be deserialized. See more advanced scenarios in [reading events documentation](./reading-events.md). 
+ diff --git a/docs/clients/grpc/go/persistent-subscriptions.md b/docs/clients/grpc/go/persistent-subscriptions.md new file mode 100644 index 00000000..80db47f2 --- /dev/null +++ b/docs/clients/grpc/go/persistent-subscriptions.md @@ -0,0 +1,164 @@ +--- +order: 5 +head: + - - title + - {} + - Persistent Subscription | Go | Clients | Kurrent Docs +--- + +# Persistent Subscriptions + +Persistent subscriptions are similar to catch-up subscriptions, but there are two key differences: +- The subscription checkpoint is maintained by the server. It means that when your client reconnects to the persistent subscription, it will automatically resume from the last known position. +- It's possible to connect more than one event consumer to the same persistent subscription. In that case, the server will load-balance the consumers, depending on the defined strategy, and distribute the events to them. + +Because of those, persistent subscriptions are defined as subscription groups that are defined and maintained by the server. Consumers then connect to a particular subscription group, and the server starts sending events to the consumer. + +You can read more about persistent subscriptions in the [server documentation](@server/features/persistent-subscriptions.md). + +## Creating a subscription group + +The first step of dealing with a persistent subscription is to create a subscription group. You will receive an error if you attempt to create a subscription group multiple times. You must have admin permissions to create a persistent subscription group. + +### Subscribing to one stream + +The following sample shows how to create a subscription group for a persistent subscription where you want to receive events from a specific stream. It could be a normal stream, or a stream of links (like `$ce` category stream). 
+ +@[code{create-persistent-subscription-to-stream}](@grpc:persistentSubscriptions.go) + +| Parameter | Description | +|:--------------|:----------------------------------------------------| +| `stream` | The stream the persistent subscription is on. | +| `groupName` | The name of the subscription group to create. | +| `settings` | The settings to use when creating the subscription. | +| `credentials` | The user credentials to use for this operation. | + +### Subscribing to $all + +The ability to subscribe to `$all` was introduced in EventStoreDB **21.10**. Persistent subscriptions to `$all` also support [filtering](subscriptions.md#server-side-filtering). + +You can create a subscription group on $all much the same way you would create a subscription group on a stream: + +@[code{create-persistent-subscription-to-all}](@grpc:persistentSubscriptions.go) + +## Connecting a consumer + +Once you have created a subscription group, clients can connect to it. A subscription in your application should only have the connection in your code, you should assume that the subscription already exists. + +The most important parameter to pass when connecting is the buffer size. This represents how many outstanding messages the server should allow this client. If this number is too small, your subscription will spend much of its time idle as it waits for an acknowledgment to come back from the client. If it's too big, you waste resources and can start causing time out messages depending on the speed of your processing. + +### Connecting to one stream + +The code below shows how to connect to an existing subscription group for a specific stream: + +@[code{subscribe-to-persistent-subscription-to-stream}](@grpc:persistentSubscriptions.go) + +| Parameter | Description | +|:----------------------|:---------------------------------------------------------------------------------------------| +| `stream` | The stream the persistent subscription is on. 
| +| `groupName` | The name of the subscription group to subscribe to. | +| `eventAppeared` | The action to call when an event arrives over the subscription. | +| `subscriptionDropped` | The action to call if the subscription is dropped. | +| `credentials` | The user credentials to use for this operation. | +| `bufferSize` | The number of in-flight messages this client is allowed. **Default: 10** | +| `autoAck` | Whether to automatically acknowledge messages after eventAppeared returns. **Default: true** | + +::: warning +The `autoAck` parameter will be deprecated in the next client release. You'll need to explicitly [manage acknowledgements](#acknowledgements). +::: + +### Connecting to $all + +The code below shows how to connect to an existing subscription group for `$all`: + +@[code{subscribe-to-persistent-subscription-to-all}](@grpc:persistentSubscriptions.go) + +The `SubscribeToAllAsync` method is identical to the `SubscribeToStreamAsync` method, except that you don't need to specify a stream name. + +## Acknowledgements + +Clients must acknowledge (or not acknowledge) messages in the competing consumer model. + +If processing is successful, you must send an Ack (acknowledge) to the server to let it know that the message has been handled. If processing fails for some reason, then you can Nack (not acknowledge) the message and tell the server how to handle the failure. + +@[code{subscribe-to-persistent-subscription-with-manual-acks}](@grpc:persistentSubscriptions.go) + +The _Nack event action_ describes what the server should do with the message: + +| Action | Description | +|:----------|:---------------------------------------------------------------------| +| `Unknown` | The client does not know what action to take. Let the server decide. | +| `Park` | Park the message and do not resend. Put it on the poison queue. | +| `Retry` | Explicitly retry the message. | +| `Skip` | Skip this message, do not resend it, and do not put it in the poison queue. 
| + +## Consumer strategies + +When creating a persistent subscription, you can choose between a number of consumer strategies. + +### RoundRobin (default) + +Distributes events to all clients evenly. If the client `bufferSize` is reached, the client won't receive more events until it acknowledges or not acknowledges events in its buffer. + +This strategy provides equal load balancing between all consumers in the group. + +### DispatchToSingle + +Distributes events to a single client until the `bufferSize` is reached. After that, the next client is selected in a round-robin style, and the process repeats. + +This option can be seen as a fall-back scenario for high availability, when a single consumer processes all the events until it reaches its maximum capacity. When that happens, another consumer takes the load to free up the main consumer resources. + +### Pinned + +For use with an indexing projection such as the system `$by_category` projection. + +KurrentDB inspects the event for its source stream id, hashing the id to one of 1024 buckets assigned to individual clients. When a client disconnects, its buckets are assigned to other clients. When a client connects, it is assigned some existing buckets. This naively attempts to maintain a balanced workload. + +The main aim of this strategy is to decrease the likelihood of concurrency and ordering issues while maintaining load balancing. This is **not a guarantee**, and you should handle the usual ordering and concurrency issues. + +## Updating a subscription group + +You can edit the settings of an existing subscription group while it is running, you don't need to delete and recreate it to change settings. When you update the subscription group, it resets itself internally, dropping the connections and having them reconnect. You must have admin permissions to update a persistent subscription group. 
+ +@[code{update-persistent-subscription}](@grpc:persistentSubscriptions.go) + +| Parameter | Description | +|:--------------|:----------------------------------------------------| +| `stream` | The stream the persistent subscription is on. | +| `groupName` | The name of the subscription group to update. | +| `settings` | The settings to use when creating the subscription. | +| `credentials` | The user credentials to use for this operation. | + +## Persistent subscription settings + +Both the `Create` and `Update` methods take some settings for configuring the persistent subscription. + +The following table shows the configuration options you can set on a persistent subscription. + +| Option | Description | Default | +|:------------------------|:----------------------------------------------------------------------------------------------------------------------------------|:------------------------------------------| +| `ResolveLinkTos` | Whether the subscription should resolve link events to their linked events. | `false` | +| `StartFrom` | The exclusive position in the stream or transaction file the subscription should start from. | `null` (start from the end of the stream) | +| `ExtraStatistics` | Whether to track latency statistics on this subscription. | `false` | +| `MessageTimeout` | The amount of time after which to consider a message as timed out and retried. | `30` (seconds) | +| `MaxRetryCount` | The maximum number of retries (due to timeout) before a message is considered to be parked. | `10` | +| `LiveBufferSize` | The size of the buffer (in-memory) listening to live messages as they happen before paging occurs. | `500` | +| `ReadBatchSize` | The number of events read at a time when paging through history. | `20` | +| `HistoryBufferSize` | The number of events to cache when paging through history. | `500` | +| `CheckPointAfter` | The amount of time to try to checkpoint after. 
| `2` seconds | +| `MinCheckPointCount` | The minimum number of messages to process before a checkpoint may be written. | `10` | +| `MaxCheckPointCount` | The maximum number of messages not checkpointed before forcing a checkpoint. | `1000` | +| `MaxSubscriberCount` | The maximum number of subscribers allowed. | `0` (unbounded) | +| `NamedConsumerStrategy` | The strategy to use for distributing events to client consumers. See the [consumer strategies](#consumer-strategies) in this doc. | `RoundRobin` | + +## Deleting a subscription group + +Remove a subscription group with the delete operation. Like the creation of groups, you rarely do this in your runtime code and is undertaken by an administrator running a script. + +@[code{delete-persistent-subscription}](@grpc:persistentSubscriptions.go) + +| Parameter | Description | +|:--------------|:-----------------------------------------------| +| `stream` | The stream the persistent subscription is on. | +| `groupName` | The name of the subscription group to delete. | +| `credentials` | The user credentials to use for this operation | diff --git a/docs/clients/grpc/go/projections.md b/docs/clients/grpc/go/projections.md new file mode 100644 index 00000000..09204a6a --- /dev/null +++ b/docs/clients/grpc/go/projections.md @@ -0,0 +1,179 @@ +--- +order: 6 +title: Projections +head: + - - title + - {} + - Projections | Go | Clients | Kurrent Docs +--- + +# Projection Management + +The various gRPC client APIs include dedicated clients that allow you to manage projections. + +For a detailed explanation of projections, see the [server documentation](@server/features/projections/README.md). + +You can find the full sample code from this documentation page in the respective [clients repositories](https://github.com/kurrent-io/?q=client). + +## Creating a client + +Projection management operations are exposed through a dedicated client. 
+ +@[code{createClient}](@grpc:projectionManagement.go) + +## Create a projection + +Creates a projection that runs until the last event in the store, and then continues processing new events as they are appended to the store. The query parameter contains the JavaScript you want created as a projection. +Projections have explicit names, and you can enable or disable them via this name. + +@[code{CreateContinuous}](@grpc:projectionManagement.go) + +Trying to create projections with the same name will result in an error: + +@[code{CreateContinuous_Conflict}](@grpc:projectionManagement.go) + +## Restart the subsystem + +It is possible to restart the entire projection subsystem using the projections management client API. The user must be in the `$ops` or `$admin` group to perform this operation. + +@[code{RestartSubSystem}](@grpc:projectionManagement.go) + +## Enable a projection + +Enables an existing projection by name. +Once enabled, the projection will start to process events even after restarting the server or the projection subsystem. +You must have access to a projection to enable it, see the [ACL documentation](@server/security/user-authorization.md). + +@[code{Enable}](@grpc:projectionManagement.go) + +You can only enable an existing projection. When you try to enable a non-existing projection, you'll get an error: + +@[code{EnableNotFound}](@grpc:projectionManagement.go) + +## Disable a projection + +Disables a projection, this will save the projection checkpoint. +Once disabled, the projection will not process events even after restarting the server or the projection subsystem. +You must have access to a projection to disable it, see the [ACL documentation](@server/security/user-authorization.md). + +@[code{Disable}](@grpc:projectionManagement.go) + +You can only disable an existing projection. 
When you try to disable a non-existing projection, you'll get an error: + +@[code{DisableNotFound}](@grpc:projectionManagement.go) + +## Delete a projection + +Deletes an existing projection. You must disable the projection before deleting it, running projections cannot be deleted. Deleting a projection includes deleting the checkpoint and the emitted streams. + +@[code{Delete}](@grpc:projectionManagement.go) + +You can only delete an existing projection. When you try to delete a non-existing projection, you'll get an error: + +@[code{DeleteNotFound}](@grpc:projectionManagement.go) + +## Abort a projection + +Aborts a projection, this will not save the projection's checkpoint. + +@[code{Abort}](@grpc:projectionManagement.go) + +You can only abort an existing projection. When you try to abort a non-existing projection, you'll get an error: + +@[code{Abort_NotFound}](@grpc:projectionManagement.go) + +## Reset a projection + +Resets a projection, which causes deleting the projection checkpoint. This will force the projection to start afresh and re-emit events. Streams that are written to from the projection will also be soft-deleted. + +@[code{Reset}](@grpc:projectionManagement.go) + +Resetting a projection that does not exist will result in an error. + +@[code{Reset_NotFound}](@grpc:projectionManagement.go) + +## Update a projection + +Updates a projection with a given name. The query parameter contains the new JavaScript. Updating system projections using this operation is not supported at the moment. + +@[code{Update}](@grpc:projectionManagement.go) + +You can only update an existing projection. When you try to update a non-existing projection, you'll get an error: + +@[code{Update_NotFound}](@grpc:projectionManagement.go) + +## List all projections + +Returns a list of all projections, user defined & system projections. +See the [projection details](#projection-details) section for an explanation of the returned values. 
+ +@[code{ListAll}](@grpc:projectionManagement.go) + +## List continuous projections + +Returns a list of all continuous projections. +See the [projection details](#projection-details) section for an explanation of the returned values. + +@[code{ListContinuous}](@grpc:projectionManagement.go) + +## Get status + +Gets the status of a named projection. +See the [projection details](#projection-details) section for an explanation of the returned values. + +@[code{GetStatus}](@grpc:projectionManagement.go) + +## Get state + +Retrieves the state of a projection. + +@[code{GetState}](@grpc:projectionManagement.go) + +## Get result + +Retrieves the result of the named projection and partition. + +@[code{GetResult}](@grpc:projectionManagement.go) + +## Projection Details + +[List all](#list-all-projections), [list continuous](#list-continuous-projections) and [get status](#get-status) all return the details and statistics of projections + +| Field | Description | +|--------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `Name`, `EffectiveName` | The name of the projection | +| `Status` | A human readable string of the current statuses of the projection (see below) | +| `StateReason` | A human readable string explaining the reason of the current projection state | +| `CheckpointStatus` | A human readable string explaining the current operation performed on the checkpoint : `requested`, `writing` | +| `Mode` | `Continuous`, `OneTime` , `Transient` | +| `CoreProcessingTime` | The total time, in ms, the projection took to handle events since the last restart | +| `Progress` | The progress, in %, indicates how far this projection has processed event, in case of a restart this could be -1% or some number. 
It will be updated as soon as a new event is appended and processed | +| `WritesInProgress` | The number of write requests to emitted streams currently in progress, these writes can be batches of events | +| `ReadsInProgress` | The number of read requests currently in progress | +| `PartitionsCached` | The number of cached projection partitions | +| `Position` | The Position of the last processed event | +| `LastCheckpoint` | The Position of the last checkpoint of this projection | +| `EventsProcessedAfterRestart` | The number of events processed since the last restart of this projection | +| `BufferedEvents` | The number of events in the projection read buffer | +| `WritePendingEventsBeforeCheckpoint` | The number of events waiting to be appended to emitted streams before the pending checkpoint can be written | +| `WritePendingEventsAfterCheckpoint` | The number of events to be appended to emitted streams since the last checkpoint | +| `Version` | This is used internally, the version is increased when the projection is edited or reset | +| `Epoch` | This is used internally, the epoch is increased when the projection is reset | + +The `Status` string is a combination of the following values. 
+The first 3 are the most common ones, as the others are transient values while the projection is initialised or stopped + +| Value | Description | +|--------------------|-------------------------------------------------------------------------------------------------------------------------| +| Running | The projection is running and processing events | +| Stopped | The projection is stopped and is no longer processing new events | +| Faulted | An error occurred in the projection, `StateReason` will give the fault details, the projection is not processing events | +| Initial | This is the initial state, before the projection is fully initialised | +| Suspended | The projection is suspended and will not process events, this happens while stopping the projection | +| LoadStateRequested | The state of the projection is being retrieved, this happens while the projection is starting | +| StateLoaded | The state of the projection is loaded, this happens while the projection is starting | +| Subscribed | The projection has successfully subscribed to its readers, this happens while the projection is starting | +| FaultedStopping | This happens before the projection is stopped due to an error in the projection | +| Stopping | The projection is being stopped | +| CompletingPhase | This happens while the projection is stopping | +| PhaseCompleted | This happens while the projection is stopping | diff --git a/docs/clients/grpc/go/reading-events.md b/docs/clients/grpc/go/reading-events.md new file mode 100644 index 00000000..5be0bdf1 --- /dev/null +++ b/docs/clients/grpc/go/reading-events.md @@ -0,0 +1,135 @@ +--- +order: 3 +head: + - - title + - {} + - Reading Events | Go | Clients | Kurrent Docs +--- + +# Reading Events + +There are two options for reading events from KurrentDB. You can either: + 1. Read from an individual stream, or + 2. Read from the `$all` stream, which will return all events in the store. + +Each event in KurrentDB belongs to an individual stream. 
When reading events, pick the name of the stream from which you want to read the events and choose whether to read the stream forwards or backwards. + +All events have a `StreamPosition` and a `Position`. `StreamPosition` is a *big int* (unsigned 64-bit integer) and represents the place of the event in the stream. `Position` is the event's logical position, and is represented by `CommitPosition` and a `PreparePosition`. Note that when reading events you will supply a different "position" depending on whether you are reading from an individual stream or the `$all` stream. + +:::tip +Check [connecting to KurrentDB instructions](getting-started.md#required-packages) to learn how to configure and use the client SDK. +::: + +## Reading from a stream + +You can read all the events or a sample of the events from individual streams, starting from any position in the stream, and can read either forward or backward. It is only possible to read events from a single stream at a time. You can read events from the global event log, which spans across streams. Learn more about this process in the [Read from `$all`](#reading-from-the-all-stream) section below. + +### Reading forwards + +The simplest way to read a stream forwards is to supply a stream name, read direction, and revision from which to start. The revision can either be a *stream position* `Start` or a *big int* (unsigned 64-bit integer): + +@[code{read-from-stream}](@grpc:readingEvents.go) + +This will return an enumerable that can be iterated on: + +@[code{iterate-stream}](@grpc:readingEvents.go) + +There are a number of additional arguments you can provide when reading a stream, listed below. + +#### maxCount + +Passing in the max count will limit the number of events returned. + +#### resolveLinkTos + +When using projections to create new events, you can set whether the generated events are pointers to existing events. 
Setting this value to `true` tells KurrentDB to return the event as well as the event linking to it. + +#### configureOperationOptions + +You can use the `configureOperationOptions` argument to provide a function that will customise settings for each operation. + +#### userCredentials + +The `userCredentials` argument is optional. It is used to override the default credentials specified when creating the client instance. + +@[code{overriding-user-credentials}](@grpc:readingEvents.go) + +### Reading from a revision + +Instead of providing the `StreamPosition` you can also provide a specific stream revision as a *big int* (unsigned 64-bit integer). + +@[code{read-from-stream-position}](@grpc:readingEvents.go) + +### Reading backwards + +In addition to reading a stream forwards, streams can be read backwards. To read all the events backwards, set the *stream position* to the end: + +@[code{reading-backwards}](@grpc:readingEvents.go) + +:::tip +Read one event backwards to find the last position in the stream. +::: + +### Checking if the stream exists + +Reading a stream returns a `ReadStreamResult`, which contains a property `ReadState`. This property can have the value `StreamNotFound` or `Ok`. + +It is important to check the value of this field before attempting to iterate an empty stream, as it will throw an exception. + +For example: + +@[code{checking-for-stream-presence}](@grpc:readingEvents.go) + +## Reading from the $all stream + +Reading from the `$all` stream is similar to reading from an individual stream, but please note there are differences. One significant difference is the need to provide admin user account credentials to read from the `$all` stream. Additionally, you need to provide a transaction log position instead of a stream revision when reading from the `$all` stream. + +### Reading forwards + +The simplest way to read the `$all` stream forwards is to supply a read direction and the transaction log position from which you want to start. 
The transaction log position can either be a *stream position* `Start` or a *big int* (unsigned 64-bit integer): + +@[code{read-from-all-stream}](@grpc:readingEvents.go) + +You can iterate asynchronously through the result: + +@[code{read-from-all-stream-iterate}](@grpc:readingEvents.go) + +There are a number of additional arguments you can provide when reading the `$all` stream. + +#### maxCount + +Passing in the max count allows you to limit the number of events that are returned. + +#### resolveLinkTos + +When using projections to create new events you can set whether the generated events are pointers to existing events. Setting this value to true will tell KurrentDB to return the event as well as the event linking to it. + +@[code{read-from-all-stream-resolving-link-Tos}](@grpc:readingEvents.go) + +#### configureOperationOptions + +This argument is a generic settings class for all operations that can be set on all operations executed against KurrentDB. + +#### userCredentials +The credentials used to read the data can be used by the subscription as follows. This will override the default credentials set on the connection. + +@[code{read-all-overriding-user-credentials}](@grpc:readingEvents.go) + +### Reading backwards + +In addition to reading the `$all` stream forwards, it can be read backwards. To read all the events backwards, set the *position* to the end: + +@[code{read-from-all-stream-backwards}](@grpc:readingEvents.go) + +:::tip +Read one event backwards to find the last position in the `$all` stream. +::: + +### Handling system events + +KurrentDB will also return system events when reading from the `$all` stream. In most cases you can ignore these events. + +All system events begin with `$` or `$$` and can be easily ignored by checking the `EventType` property. 
+ +@[code{ignore-system-events}](@grpc:readingEvents.go) + diff --git a/docs/clients/grpc/go/release-notes.md b/docs/clients/grpc/go/release-notes.md new file mode 100644 index 00000000..f94f4fd4 --- /dev/null +++ b/docs/clients/grpc/go/release-notes.md @@ -0,0 +1,11 @@ +--- +order: 10 +head: + - - title + - {} + - Release Notes | Go | Clients | Kurrent Docs +--- + +# Release Notes + +To stay up to date with the latest changes and improvements, including release history, changelogs, upgrade instructions, and migration guides, see the [KurrentDB Go client release notes](https://github.com/kurrent-io/KurrentDB-Client-Go/releases). \ No newline at end of file diff --git a/docs/clients/grpc/go/subscriptions.md b/docs/clients/grpc/go/subscriptions.md new file mode 100644 index 00000000..f01ff90a --- /dev/null +++ b/docs/clients/grpc/go/subscriptions.md @@ -0,0 +1,234 @@ +--- +order: 4 +head: + - - title + - {} + - Catch-up Subscription | Go | Clients | Kurrent Docs +--- + +# Catch-up Subscriptions + +Subscriptions allow you to subscribe to a stream and receive notifications about new events added to the stream. + +You provide an event handler and an optional starting point to the subscription. The handler is called for each event from the starting point onward. + +If events already exist, the handler will be called for each event one by one until it reaches the end of the stream. The server will then notify the handler whenever a new event appears. + +:::tip +Check the [Getting Started](getting-started.md) guide to learn how to configure and use the client SDK. +::: + +## Subscribing from the start + +If you need to process all the events in the store, including historical events, you'll need to subscribe from the beginning. You can either subscribe to receive events from a single stream or subscribe to `$all` if you need to process all events in the database. 
+ +### Subscribing to a stream + +The simplest stream subscription looks like the following: + +@[code{subscribe-to-stream}](@grpc:subscribingToStream.go) + +The provided handler will be called for every event in the stream. + +When you subscribe to a stream with link events, for example the `$ce` category stream, you need to set `resolveLinkTos` to `true`. Read more about it [below](#resolving-link-to-events). + +### Subscribing to `$all` + +Subscribing to `$all` is similar to subscribing to a single stream. The handler will be called for every event appended after the starting position. + +@[code{subscribe-to-all}](@grpc:subscribingToStream.go) + +## Subscribing from a specific position + +The previous examples subscribed to the stream from the beginning. That subscription invoked the handler for every event in the stream before waiting for new events. + +Both stream and $all subscriptions accept a starting position if you want to read from a specific point onward. If events already exist at the position you subscribe to, they will be read on the server side and sent to the subscription. + +Once caught up, the server will push any new events received on the streams to the client. There is no difference between catching up and live on the client side. + +::: warning +The positions provided to the subscriptions are exclusive. You will only receive the next event after the subscribed position. +::: + +### Subscribing to a stream + +To subscribe to a stream from a specific position, you must provide a *stream position*. This can be `Start`, `End` or a *big int* (unsigned 64 bit integer) position. + +The following subscribes to the stream `some-stream` at position `20`, this means that events `21` and onward will be handled: + +@[code{subscribe-to-stream-from-position}](@grpc:subscribingToStream.go) + +### Subscribing to $all + +Subscribing to the `$all` stream is similar to subscribing to a regular stream. The difference is how to specify the starting position.
For the `$all` stream, provide a `Position` structure that consists of two big integers: the prepare and commit positions. Use `Start`, `End`, or create a `Position` from specific commit and prepare values. + +The corresponding `$all` subscription will subscribe from the event after the one at commit position `1056` and prepare position `1056`. + +Please note that this position will need to be a legitimate position in `$all`. + +@[code{subscribe-to-all-from-position}](@grpc:subscribingToStream.go) + +## Subscribing to a stream for live updates + +You can subscribe to a stream to get live updates by subscribing to the end of the stream: + +@[code{subscribe-to-stream-live}](@grpc:subscribingToStream.go) + +And the same works with `$all` : + +@[code{subscribe-to-all-live}](@grpc:subscribingToStream.go) + +This will not read through the history of the stream but will notify the handler when a new event appears in the respective stream. + +Keep in mind that when you subscribe to a stream from a specific position, as described [above](#subscribing-from-a-specific-position), you will also get live updates after your subscription catches up (processes all the historical events). + +## Resolving link-to events + +Link-to events point to events in other streams in KurrentDB. These are generally created by projections such as the `$by_event_type` projection which links events of the same event type into the same stream. This makes it easier to look up all events of a specific type. + +::: tip +[Filtered subscriptions](subscriptions.md#server-side-filtering) make it easier and faster to subscribe to all events of a specific type or matching a prefix. +::: + +When reading a stream you can specify whether to resolve link-to's. By default, link-to events are not resolved. 
You can change this behaviour by setting the `resolveLinkTos` parameter to `true`: + +@[code{subscribe-to-stream-resolving-linktos}](@grpc:subscribingToStream.go) + +## Dropped subscriptions + +When a subscription stops or experiences an error, it will be dropped. The subscription provides a `subscriptionDropped` callback, which will get called when the subscription breaks. + +The `subscriptionDropped` callback allows you to inspect the reason why the subscription dropped, as well as any exceptions that occurred. + +The possible reasons for a subscription to drop are: + +| Reason | Why it might happen | +|:------------------|:---------------------------------------------------------------------------------------------------------------------| +| `Disposed` | The client canceled or disposed of the subscription. | +| `SubscriberError` | An error occurred while handling an event in the subscription handler. | +| `ServerError` | An error occurred on the server, and the server closed the subscription. Check the server logs for more information. | + +Bear in mind that a subscription can also drop because it is slow. The server tried to push all the live events to the subscription when it is in the live processing mode. If the subscription gets the reading buffer overflow and won't be able to acknowledge the buffer, it will break. + +### Handling subscription drops + +An application, which hosts the subscription, can go offline for some time for different reasons. It could be a crash, infrastructure failure, or a new version deployment. As you rarely would want to reprocess all the events again, you'd need to store the current position of the subscription somewhere, and then use it to restore the subscription from the point where it dropped off: + +@[code{subscribe-to-stream-subscription-dropped}](@grpc:subscribingToStream.go) + +When subscribed to `$all` you want to keep the event's position in the `$all` stream. 
As mentioned previously, the `$all` stream position consists of two big integers (prepare and commit positions), not one: + +@[code{subscribe-to-all-subscription-dropped}](@grpc:subscribingToStream.go) + +## User credentials + +The user creating a subscription must have read access to the stream it's subscribing to, and only admin users may subscribe to `$all` or create filtered subscriptions. + +The code below shows how you can provide user credentials for a subscription. When you specify subscription credentials explicitly, it will override the default credentials set for the client. If you don't specify any credentials, the client will use the credentials specified for the client, if you specified those. + +@[code{overriding-user-credentials}](@grpc:subscribingToStream.go) + +## Server-side filtering + +KurrentDB allows you to filter the events whilst subscribing to the `$all` stream to only receive the events you care about. + +You can filter by event type or stream name using a regular expression or a prefix. Server-side filtering is currently only available on the `$all` stream. + +::: tip +Server-side filtering was introduced as a simpler alternative to projections. You should consider filtering before creating a projection to include the events you care about. +::: + +A simple stream prefix filter looks like this: + +@[code{stream-prefix-filtered-subscription}](@grpc:subscribingToStream.go) + +The filtering API is described more in-depth in the [filtering section](subscriptions.md#server-side-filtering). + +### Filtering out system events + +There are events in KurrentDB called system events. These are prefixed with a `$` and under most circumstances you won't care about these. They can be filtered out by passing in a `SubscriptionFilterOptions` when subscribing to the `$all` stream. 
+ +@[code{exclude-system}](@grpc:serverSideFiltering.go) + +::: tip +`$stats` events are no longer stored in KurrentDB by default so there won't be as many `$` events as before. +::: + +### Filtering by event type + +If you only want to subscribe to events of a given type, there are two options. You can either use a regular expression or a prefix. + +#### Filtering by prefix + +If you want to filter by prefix, pass in a `SubscriptionFilterOptions` to the subscription with an `EventTypeFilter.Prefix`. + +@[code{event-type-prefix}](@grpc:serverSideFiltering.go) + +This will only subscribe to events with a type that begin with `customer-`. + +#### Filtering by regular expression + +It might be advantageous to provide a regular expression when you want to subscribe to multiple event types. + +@[code{event-type-regex}](@grpc:serverSideFiltering.go) + +This will subscribe to any event that begins with `user` or `company`. + +### Filtering by stream name + +To subscribe to a stream by name, choose either a regular expression or a prefix. + +#### Filtering by prefix + +If you want to filter by prefix, pass in a `SubscriptionFilterOptions` to the subscription with an `StreamFilter.Prefix`. + +@[code{stream-prefix}](@grpc:serverSideFiltering.go) + +This will only subscribe to all streams with a name that begins with `user-`. + +#### Filtering by regular expression + +To subscribe to multiple streams, use a regular expression. + +@[code{stream-regex}](@grpc:serverSideFiltering.go) + +This will subscribe to any stream with a name that begins with `account` or `savings`. + +## Checkpointing + +When a catch-up subscription is used to process an `$all` stream containing many events, the last thing you want is for your application to crash midway, forcing you to restart from the beginning. + +### What is a checkpoint? + +A checkpoint is the position of an event in the `$all` stream to which your application has processed. 
By saving this position to a persistent store (e.g., a database), it allows your catch-up subscription to: +- Recover from crashes by reading the checkpoint and resuming from that position +- Avoid reprocessing all events from the start + +To create a checkpoint, store the event's commit or prepare position. + +::: warning +If your database contains events created by the legacy TCP client using the [transaction feature](https://docs.kurrent.io/clients/tcp/dotnet/21.2/appending.html#transactions), you should store both the commit and prepare positions together as your checkpoint. +::: + +### Updating checkpoints at regular intervals +The client SDK provides a way to notify your application after processing a configurable number of events. This allows you to periodically save a checkpoint at regular intervals. + +@[code{checkpoint}](@grpc:serverSideFiltering.go) + +By default, the checkpoint notification is sent after every 32 non-system events processed from $all. + +### Configuring the checkpoint interval +You can adjust the checkpoint interval to change how often the client is notified. + +@[code{checkpoint-with-interval}](@grpc:serverSideFiltering.go) + +By configuring this parameter, you can balance between reducing checkpoint overhead and ensuring quick recovery in case of a failure. + +::: info +The checkpoint interval parameter configures the database to notify the client after `n` * 32 number of events where `n` is defined by the parameter. + +For example: +- If `n` = 1, a checkpoint notification is sent every 32 events. +- If `n` = 2, the notification is sent every 64 events. +- If `n` = 3, it is sent every 96 events, and so on. +::: diff --git a/docs/clients/grpc/java/README.md b/docs/clients/grpc/java/README.md new file mode 100644 index 00000000..d84b98be --- /dev/null +++ b/docs/clients/grpc/java/README.md @@ -0,0 +1,9 @@ +--- +index: false +--- + +# Java + +Learn how to use the KurrentDB Java client library to interact with the database. 
+ + \ No newline at end of file diff --git a/docs/clients/grpc/java/appending-events.md b/docs/clients/grpc/java/appending-events.md new file mode 100644 index 00000000..21c15acc --- /dev/null +++ b/docs/clients/grpc/java/appending-events.md @@ -0,0 +1,87 @@ +--- +order: 2 +head: + - - title + - {} + - Appending Events | Java | Clients | Kurrent Docs +--- + +# Appending Events + +When you start working with KurrentDB, it is empty. The first meaningful operation is to add one or more events to the database using one of the available client SDKs. + +::: tip +Check the [Getting Started](getting-started.md) guide to learn how to configure and use the client SDK. +::: + +## Append your first event + +The simplest way to append an event to KurrentDB is to create an `EventData` object and call `AppendToStream` method. + +@[code{append-to-stream}](@grpc:appending_events/AppendingEvents.java;) + +`AppendToStream` takes a collection of `EventData`, which allows you to save more than one event in a single batch. + +Outside the example above, other options exist for dealing with different scenarios. + +::: tip +If you are new to Event Sourcing, please study the [Handling concurrency](#handling-concurrency) section below. +::: + +## Working with EventData + +Events appended to KurrentDB must be wrapped in an `EventData` object. This allows you to specify the event's content, the type of event, and whether it's in JSON format. In its simplest form, you need three arguments: **eventId**, **type**, and **data**. + +### eventId + +This takes the format of a `Uuid` and is used to uniquely identify the event you are trying to append. If two events with the same `Uuid` are appended to the same stream in quick succession, KurrentDB will only append one of the events to the stream. 
+ +For example, the following code will only append a single event: + +@[code{append-duplicate-event}](@grpc:appending_events/AppendingEvents.java;) + +![Duplicate Event](../images/duplicate-event.png) + +### type + +Each event should be supplied with an event type. This unique string is used to identify the type of event you are saving. + +It is common to see the explicit event code type name used as the type as it makes serialising and de-serialising of the event easy. However, we recommend against this as it couples the storage to the type and will make it more difficult if you need to version the event at a later date. + +### data + +Representation of your event data. It is recommended that you store your events as JSON objects. This allows you to take advantage of all of KurrentDB's functionality, such as projections. That said, you can save events using whatever format suits your workflow. Eventually, the data will be stored as encoded bytes. + +### metadata + +Storing additional information alongside your event that is part of the event itself is standard practice. This can be correlation IDs, timestamps, access information, etc. KurrentDB allows you to store a separate byte array containing this information to keep it separate. + +### isJson + +Simple boolean field to tell KurrentDB if the event is stored as json, true by default. + +## Handling concurrency + +When appending events to a stream, you can supply a *stream state* or *stream revision*. Your client uses this to inform KurrentDB of the state or version you expect the stream to be in when appending an event. If the stream isn't in that state, an exception will be thrown. 
+ +For example, if you try to append the same record twice, expecting both times that the stream doesn't exist, you will get an exception on the second: + +@[code{append-with-no-stream}](@grpc:appending_events/AppendingEvents.java;) + +There are three available stream states: +- `Any` +- `NoStream` +- `StreamExists` + +This check can be used to implement optimistic concurrency. When retrieving a stream from KurrentDB, note the current version number. When you save it back, you can determine if somebody else has modified the record in the meantime. + +@[code{append-with-concurrency-check}](@grpc:appending_events/AppendingEvents.java;) + + + +## User credentials + +You can provide user credentials to append the data as follows. This will override the default credentials set on the connection. + +@[code{overriding-user-credentials}](@grpc:appending_events/AppendingEvents.java;) + diff --git a/docs/clients/grpc/java/authentication.md b/docs/clients/grpc/java/authentication.md new file mode 100644 index 00000000..f0116e08 --- /dev/null +++ b/docs/clients/grpc/java/authentication.md @@ -0,0 +1,60 @@ +--- +title: Authentication +order: 7 +head: + - - title + - {} + - Authentication | Java | Clients | Kurrent Docs +--- + +## Client x.509 certificate + +X.509 certificates are digital certificates that use the X.509 public key infrastructure (PKI) standard to verify the identity of clients and servers. They play a crucial role in establishing a secure connection by providing a way to authenticate identities and establish trust. + +### Prerequisites + +1. KurrentDB 25.0 or greater, or EventStoreDB 24.10. +2. A commercial license with the User Certificates entitlement. +3. A valid x.509 certificate, which can be created using version `1.3` or higher of the [gencert tool](https://github.com/kurrent-io/es-gencert-cli). +4. The server must run in secure mode. See [Security Options](@server/security/protocol-security.md) for more information. +5. 
[Enable User Certificates plugin on the server](@server/security/user-authentication.md#user-x509-certificates) + +#### Generate user certificates + +The following command uses the [gencert tool](https://github.com/kurrent-io/es-gencert-cli) to generate a user certificate for the user `admin` that will expire in 10 days: + +::: tabs#os +@tab bash +```bash +./es-gencert-cli create-user -username admin -days 10 -ca-certificate ./es-ca/ca.crt -ca-key ./es-ca/ca.key +``` +@tab PowerShell +```powershell +.\es-gencert-cli.exe create-user -username admin -days 10 -ca-certificate ./es-ca/ca.crt -ca-key ./es-ca/ca.key +``` +::: + +### Connect to KurrentDB using an x.509 certificate + +To connect to KurrentDB using an x.509 certificate, you need to provide the +certificate and the private key to the client. If both username/password and +certificate authentication data are supplied, the client prioritizes user +credentials for authentication. The client will throw an error if the +certificate and the key are not both provided. + +::: tip +Please note that currently, password-protected private key files are not supported. +::: + +The client supports the following parameters: + +| Parameter | Description | +|----------------|--------------------------------------------------------------------------------| +| `userCertFile` | The file containing the X.509 user certificate in PEM format. | +| `userKeyFile` | The file containing the user certificate’s matching private key in PEM format. | + +To authenticate, include these two parameters in your connection string or constructor when initializing the client. 
+ +For example: + +@[code{client-with-user-certificates}](@grpc:authentication/UserCertificate.java) \ No newline at end of file diff --git a/docs/clients/grpc/java/delete-stream.md b/docs/clients/grpc/java/delete-stream.md new file mode 100644 index 00000000..26c2360e --- /dev/null +++ b/docs/clients/grpc/java/delete-stream.md @@ -0,0 +1,36 @@ +--- +order: 9 +head: + - - title + - {} + - Deleting Events | Java | Clients | Kurrent Docs +--- + +# Deleting Events + +In KurrentDB, you can delete events and streams either partially or completely. Settings like $maxAge and $maxCount help control how long events are kept or how many events are stored in a stream, but they won't delete the entire stream. +When you need to fully remove a stream, KurrentDB offers two options: Soft Delete and Hard Delete. + +## Soft delete + +Soft delete in KurrentDB allows you to mark a stream for deletion without completely removing it, so you can still add new events later. While you can do this through the UI, using code is often better for automating the process, +handling many streams at once, or including custom rules. Code is especially helpful for large-scale deletions or when you need to integrate soft deletes into other workflows. + +```java +client.deleteStream(streamName, DeleteStreamOptions.get()).get(); +``` + +::: note +Clicking the delete button in the UI performs a soft delete, +setting the TruncateBefore value to remove all events up to a certain point. +While this marks the events for deletion, actual removal occurs during the next scavenging process. +The stream can still be reopened by appending new events. +::: + +## Hard delete + +Hard delete in KurrentDB permanently removes a stream and its events. While you can use the HTTP API, code is often better for automating the process, managing multiple streams, and ensuring precise control. Code is especially useful when you need to integrate hard delete into larger workflows or apply specific conditions. 
Note that when a stream is hard deleted, you cannot reuse the stream name, it will raise an exception if you try to append to it again. + +```java +client.tombstoneStream(streamName, DeleteStreamOptions.get()).get(); +``` \ No newline at end of file diff --git a/docs/clients/grpc/java/getting-started.md b/docs/clients/grpc/java/getting-started.md new file mode 100644 index 00000000..2ec4ecb0 --- /dev/null +++ b/docs/clients/grpc/java/getting-started.md @@ -0,0 +1,121 @@ +--- +order: 1 +head: + - - title + - {} + - Getting Started | Java | Clients | Kurrent Docs +--- + +# Getting Started + +Get started by connecting your application to KurrentDB. + +## Connecting to KurrentDB + +To connect your application to KurrentDB, instantiate and configure the client. + +::: tip Insecure clusters +All our GRPC clients are secure by default and must be configured to connect to an insecure server via [a connection string](#connection-string) or the client's configuration. +::: + +### Required packages + +Add the `kurrentdb-client` dependency to your Maven or Gradle project. + +::: tabs +@tab Maven +```xml + + io.kurrent + kurrentdb-client + 1.0.0 + +``` + +@tab Gradle +```groovy +implementation 'io.kurrent:kurrentdb-client:1.0.0' +``` + +For the most recent version of the KurrentDB client package, see [Maven Central](https://mvnrepository.com/artifact/io.kurrent/kurrentdb-client). +::: + + +### Connection string + +Each SDK has its own way of configuring the client, but the connection string can always be used. +The KurrentDB connection string supports two schemas: `kurrentdb://` for connecting to a single-node server, and `kurrentdb+discover://` for connecting to a multi-node cluster. The difference between the two schemas is that when using `kurrentdb://`, the client will connect directly to the node; with `kurrentdb+discover://` schema the client will use the gossip protocol to retrieve the cluster information and choose the right node to connect to. 
+Since version 22.10, ESDB supports gossip on single-node deployments, so `kurrentdb+discover://` schema can be used for connecting to any topology. + +The connection string has the following format: + +``` +kurrentdb+discover://admin:changeit@cluster.dns.name:2113 +``` + +There, `cluster.dns.name` is the name of a DNS `A` record that points to all the cluster nodes. Alternatively, you can list cluster nodes separated by comma instead of the cluster DNS name: + +``` +kurrentdb+discover://admin:changeit@node1.dns.name:2113,node2.dns.name:2113,node3.dns.name:2113 +``` + +There are a number of query parameters that can be used in the connection string to instruct the cluster how and where the connection should be established. All query parameters are optional. + +| Parameter | Accepted values | Default | Description | +|-----------------------|---------------------------------------------------|----------|------------------------------------------------------------------------------------------------------------------------------------------------| +| `tls` | `true`, `false` | `true` | Use secure connection, set to `false` when connecting to a non-secure server or cluster. | +| `connectionName` | Any string | None | Connection name | +| `maxDiscoverAttempts` | Number | `10` | Number of attempts to discover the cluster. | +| `discoveryInterval` | Number | `100` | Cluster discovery polling interval in milliseconds. | +| `gossipTimeout` | Number | `5` | Gossip timeout in seconds, when the gossip call times out, it will be retried. | +| `nodePreference` | `leader`, `follower`, `random`, `readOnlyReplica` | `leader` | Preferred node role. When creating a client for write operations, always use `leader`. | +| `tlsVerifyCert` | `true`, `false` | `true` | In secure mode, set to `true` when using an untrusted connection to the node if you don't have the CA file available. Don't use in production. 
| +| `tlsCaFile` | String, file path | None | Path to the CA file when connecting to a secure cluster with a certificate that's not signed by a trusted CA. | +| `defaultDeadline` | Number | None | Default timeout for client operations, in milliseconds. Most clients allow overriding the deadline per operation. | +| `keepAliveInterval` | Number | `10` | Interval between keep-alive ping calls, in seconds. | +| `keepAliveTimeout` | Number | `10` | Keep-alive ping call timeout, in seconds. | +| `userCertFile` | String, file path | None | User certificate file for X.509 authentication. | +| `userKeyFile` | String, file path | None | Key file for the user certificate used for X.509 authentication. | + +When connecting to an insecure instance, specify `tls=false` parameter. For example, for a node running locally use `kurrentdb://localhost:2113?tls=false`. Note that usernames and passwords aren't provided there because insecure deployments don't support authentication and authorisation. + +### Creating a client + +First, create a client and get it connected to the database. + +@[code{createClient}](@grpc:quick_start/QuickStart.java) + +The client instance can be used as a singleton across the whole application. It doesn't need to open or close the connection. + +### Creating an event + +You can write anything to KurrentDB as events. The client needs a byte array as the event payload. Normally, you'd use a serialized object, and it's up to you to choose the serialization method. + +::: tip Server-side projections +User-defined server-side projections require events to be serialized in JSON format. + +We use JSON for serialization in the documentation examples. +::: + +The code snippet below creates an event object instance, serializes it, and adds it as a payload to the `EventData` structure, which the client can then write to the database. 
+ +@[code{createEvent}](@grpc:quick_start/QuickStart.java) + +### Appending events + +Each event in the database has its own unique identifier (UUID). The database uses it to ensure idempotent writes, but it only works if you specify the stream revision when appending events to the stream. + +In the snippet below, we append the event to the stream `some-stream`. + +@[code{appendEvents}](@grpc:quick_start/QuickStart.java) + +Here we are appending events without checking if the stream exists or if the stream version matches the expected event version. See more advanced scenarios in [appending events documentation](./appending-events.md). + +### Reading events + +Finally, we can read events back from the `some-stream` stream. + +@[code{readStream}](@grpc:quick_start/QuickStart.java) + +When you read events from the stream, you get a collection of `ResolvedEvent` structures. The event payload is returned as a byte array and needs to be deserialized. See more advanced scenarios in [reading events documentation](./reading-events.md). + diff --git a/docs/clients/grpc/java/observability.md b/docs/clients/grpc/java/observability.md new file mode 100644 index 00000000..336775c7 --- /dev/null +++ b/docs/clients/grpc/java/observability.md @@ -0,0 +1,103 @@ +--- +order: 8 +head: + - - title + - {} + - Observability | Java | Clients | Kurrent Docs +--- + +# Observability + +The KurrentDB gRPC clients are designed with observability in mind, offering +support for OpenTelemetry. This integration provides a set of distributed +traces, enabling developers to gain deeper insights into their system. + +::: warning +Currently, OpenTelemetry observability support is not available for all +clients. Moreover, instrumentation is only provided for append and +subscribe operations, which includes both 'Catchup' and 'Persistent' modes. 
+::: + +Click [here](https://github.com/kurrent-io/KurrentDB-Client-Java/blob/trunk/src/test/java/io/kurrent/dbclient/samples/opentelemetry/Instrumentation.java) to view the full sample code for observability. + +## Required packages + +OpenTelemetry support is included in the KurrentDB Java client by default. + + +## Instrumentation + +To emit trace data, you must first install and use the dedicated package, as instructed in the +[Required Packages](./observability.md#required-packages) section, if provided. This package +includes the necessary instrumentation that needs to be registered with the client. + +@[code{register-instrumentation}](@grpc:opentelemetry/Instrumentation.java) + +## Traces + +Traces provide a clear picture of how operations are carried out in a +distributed system, making it easier to maintain and enhance the system over +time. Traces from the clients can be exported to any compatible collector that +supports the OpenTelemetry protocol (OTLP). + +In order for the client to emit traces, you need to enable +instrumentation as described in +[Instrumentation](./observability.md#instrumentation). + +For more guidance on setting up and utilizing tracing, refer to the +[OpenTelemetry](https://opentelemetry.io/) documentation.
+ +An example of a trace is shown below: + +```bash +Activity.TraceId: 8da04787239dbb85c1f9c6fba1b1f0d6 +Activity.SpanId: 4352ec4a66a20b95 +Activity.TraceFlags: Recorded +Activity.ActivitySourceName: kurrentdb +Activity.DisplayName: streams.append +Activity.Kind: Client +Activity.StartTime: 2024-05-29T06:50:41.2519016Z +Activity.Duration: 00:00:00.1500707 +Activity.Tags: + db.kurrentdb.stream: d7caa2a5-1e19-4108-9541-58d5fba02d42 + server.address: localhost + server.port: 2113 + db.system: kurrentdb + db.operation: streams.append +StatusCode: Ok +Resource associated with Activity: + service.name: sample + service.instance.id: 7316ef20-c354-4e64-97da-c1b99c2c28b0 + telemetry.sdk.name: opentelemetry + telemetry.sdk.language: dotnet + telemetry.sdk.version: 1.8.1 +``` + +In this case, the trace is for an append operation on a stream. The trace +includes the trace ID, span ID, trace flags, activity source name, display name, +kind, start time, duration, tags, status code, and resource associated with the +activity. + +::: note +The structure of the trace may vary depending on the client and the operation +being performed but will generally include the same information. +::: + +## Exporting traces + +You can set up various exporters to send traces to different destinations. +Additionally, you have the option to export these traces to a collector of your +choice, such as [Jaeger](https://www.jaegertracing.io/) or [Seq](https://datalust.co/seq). + +For instance, if you choose to use Jaeger as your backend of choice, you can +view your traces in the Jaeger UI, which provides a powerful interface for +querying and visualizing your trace data. + +The code snippets below demonstrate how to set up one or more exporters for each +client: + +@[code{setup-exporter}](@grpc:opentelemetry/Instrumentation.java) + +For more details on configuring exporters for specific programming languages, +refer to the [OpenTelemetry](https://opentelemetry.io/docs/languages/) +documentation. 
diff --git a/docs/clients/grpc/java/persistent-subscriptions.md b/docs/clients/grpc/java/persistent-subscriptions.md new file mode 100644 index 00000000..85ff7bb4 --- /dev/null +++ b/docs/clients/grpc/java/persistent-subscriptions.md @@ -0,0 +1,164 @@ +--- +order: 5 +head: + - - title + - {} + - Persistent Subscriptions | Java | Clients | Kurrent Docs +--- + +# Persistent Subscriptions + +Persistent subscriptions are similar to catch-up subscriptions, but there are two key differences: +- The subscription checkpoint is maintained by the server. It means that when your client reconnects to the persistent subscription, it will automatically resume from the last known position. +- It's possible to connect more than one event consumer to the same persistent subscription. In that case, the server will load-balance the consumers, depending on the defined strategy, and distribute the events to them. + +Because of those, persistent subscriptions are defined as subscription groups that are defined and maintained by the server. Consumers then connect to a particular subscription group, and the server starts sending events to the consumers. + +You can read more about persistent subscriptions in the [server documentation](@server/features/persistent-subscriptions.md). + +## Creating a subscription group + +The first step of dealing with a persistent subscription is to create a subscription group. You will receive an error if you attempt to create a subscription group multiple times. You must have admin permissions to create a persistent subscription group. + +### Subscribing to one stream + +The following sample shows how to create a subscription group for a persistent subscription where you want to receive events from a specific stream. It could be a normal stream, or a stream of links (like `$ce` category stream).
+ +@[code{create-persistent-subscription-to-stream}](@grpc:persistent_subscriptions/PersistentSubscriptions.java;) + +| Parameter | Description | +|:--------------|:----------------------------------------------------| +| `stream` | The stream the persistent subscription is on. | +| `groupName` | The name of the subscription group to create. | +| `settings` | The settings to use when creating the subscription. | +| `credentials` | The user credentials to use for this operation. | + +### Subscribing to $all + +The ability to subscribe to `$all` was introduced in EventStoreDB **21.10**. Persistent subscriptions to `$all` also support [filtering](subscriptions.md#server-side-filtering). + +You can create a subscription group on $all much the same way you would create a subscription group on a stream: + +@[code{create-persistent-subscription-to-all}](@grpc:persistent_subscriptions/PersistentSubscriptions.java;) + +## Connecting a consumer + +Once you have created a subscription group, clients can connect to it. A subscription in your application should only have the connection in your code, you should assume that the subscription already exists. + +The most important parameter to pass when connecting is the buffer size. This represents how many outstanding messages the server should allow this client. If this number is too small, your subscription will spend much of its time idle as it waits for an acknowledgment to come back from the client. If it's too big, you waste resources and can start causing time out messages depending on the speed of your processing. 
+ +### Connecting to one stream + +The code below shows how to connect to an existing subscription group for a specific stream: + +@[code{subscribe-to-persistent-subscription-to-stream}](@grpc:persistent_subscriptions/PersistentSubscriptions.java;) + +| Parameter | Description | +|:----------------------|:---------------------------------------------------------------------------------------------| +| `stream` | The stream the persistent subscription is on. | +| `groupName` | The name of the subscription group to subscribe to. | +| `eventAppeared` | The action to call when an event arrives over the subscription. | +| `subscriptionDropped` | The action to call if the subscription is dropped. | +| `credentials` | The user credentials to use for this operation. | +| `bufferSize` | The number of in-flight messages this client is allowed. **Default: 10** | +| `autoAck` | Whether to automatically acknowledge messages after eventAppeared returns. **Default: true** | + +::: warning +The `autoAck` parameter will be deprecated in the next client release. You'll need to explicitly [manage acknowledgements](#acknowledgements). +::: + +### Connecting to $all + +The code below shows how to connect to an existing subscription group for `$all`: + +@[code{subscribe-to-persistent-subscription-to-all}](@grpc:persistent_subscriptions/PersistentSubscriptions.java;) + +The `SubscribeToAllAsync` method is identical to the `SubscribeToStreamAsync` method, except that you don't need to specify a stream name. + +## Acknowledgements + +Clients must acknowledge (or not acknowledge) messages in the competing consumer model. + +If processing is successful, you must send an Ack (acknowledge) to the server to let it know that the message has been handled. If processing fails for some reason, then you can Nack (not acknowledge) the message and tell the server how to handle the failure. 
+ +@[code{subscribe-to-persistent-subscription-with-manual-acks}](@grpc:persistent_subscriptions/PersistentSubscriptions.java;) + +The _Nack event action_ describes what the server should do with the message: + +| Action | Description | +|:----------|:---------------------------------------------------------------------| +| `Unknown` | The client does not know what action to take. Let the server decide. | +| `Park` | Park the message and do not resend. Put it on poison queue. | +| `Retry` | Explicitly retry the message. | +| `Skip` | Skip this message do not resend and do not put in poison queue. | + +## Consumer strategies + +When creating a persistent subscription, you can choose between a number of consumer strategies. + +### RoundRobin (default) + +Distributes events to all clients evenly. If the client `bufferSize` is reached, the client won't receive more events until it acknowledges or not acknowledges events in its buffer. + +This strategy provides equal load balancing between all consumers in the group. + +### DispatchToSingle + +Distributes events to a single client until the `bufferSize` is reached. After that, the next client is selected in a round-robin style, and the process repeats. + +This option can be seen as a fall-back scenario for high availability, when a single consumer processes all the events until it reaches its maximum capacity. When that happens, another consumer takes the load to free up the main consumer resources. + +### Pinned + +For use with an indexing projection such as the system `$by_category` projection. + +KurrentDB inspects the event for its source stream id, hashing the id to one of 1024 buckets assigned to individual clients. When a client disconnects, its buckets are assigned to other clients. When a client connects, it is assigned some existing buckets. This naively attempts to maintain a balanced workload. 
+ +The main aim of this strategy is to decrease the likelihood of concurrency and ordering issues while maintaining load balancing. This is **not a guarantee**, and you should handle the usual ordering and concurrency issues. + +## Updating a subscription group + +You can edit the settings of an existing subscription group while it is running, you don't need to delete and recreate it to change settings. When you update the subscription group, it resets itself internally, dropping the connections and having them reconnect. You must have admin permissions to update a persistent subscription group. + +@[code{update-persistent-subscription}](@grpc:persistent_subscriptions/PersistentSubscriptions.java;) + +| Parameter | Description | +|:--------------|:----------------------------------------------------| +| `stream` | The stream the persistent subscription is on. | +| `groupName` | The name of the subscription group to update. | +| `settings` | The settings to use when creating the subscription. | +| `credentials` | The user credentials to use for this operation. | + +## Persistent subscription settings + +Both the `Create` and `Update` methods take some settings for configuring the persistent subscription. + +The following table shows the configuration options you can set on a persistent subscription. + +| Option | Description | Default | +|:------------------------|:----------------------------------------------------------------------------------------------------------------------------------|:------------------------------------------| +| `ResolveLinkTos` | Whether the subscription should resolve link events to their linked events. | `false` | +| `StartFrom` | The exclusive position in the stream or transaction file the subscription should start from. | `null` (start from the end of the stream) | +| `ExtraStatistics` | Whether to track latency statistics on this subscription. 
| `false` | +| `MessageTimeout` | The amount of time after which to consider a message as timed out and retried. | `30` (seconds) | +| `MaxRetryCount` | The maximum number of retries (due to timeout) before a message is considered to be parked. | `10` | +| `LiveBufferSize` | The size of the buffer (in-memory) listening to live messages as they happen before paging occurs. | `500` | +| `ReadBatchSize` | The number of events read at a time when paging through history. | `20` | +| `HistoryBufferSize` | The number of events to cache when paging through history. | `500` | +| `CheckPointAfter` | The amount of time to try to checkpoint after. | `2` seconds | +| `MinCheckPointCount` | The minimum number of messages to process before a checkpoint may be written. | `10` | +| `MaxCheckPointCount` | The maximum number of messages not checkpointed before forcing a checkpoint. | `1000` | +| `MaxSubscriberCount` | The maximum number of subscribers allowed. | `0` (unbounded) | +| `NamedConsumerStrategy` | The strategy to use for distributing events to client consumers. See the [consumer strategies](#consumer-strategies) in this doc. | `RoundRobin` | + +## Deleting a subscription group + +Remove a subscription group with the delete operation. Like the creation of groups, you rarely do this in your runtime code and is undertaken by an administrator running a script. + +@[code{delete-persistent-subscription}](@grpc:persistent_subscriptions/PersistentSubscriptions.java;) + +| Parameter | Description | +|:--------------|:-----------------------------------------------| +| `stream` | The stream the persistent subscription is on. | +| `groupName` | The name of the subscription group to delete. 
| +| `credentials` | The user credentials to use for this operation | diff --git a/docs/clients/grpc/java/projections.md b/docs/clients/grpc/java/projections.md new file mode 100644 index 00000000..b818b34c --- /dev/null +++ b/docs/clients/grpc/java/projections.md @@ -0,0 +1,179 @@ +--- +order: 6 +title: Projections +head: + - - title + - {} + - Projections | Java | Clients | Kurrent Docs +--- + +# Projection management + +The various gRPC client APIs include dedicated clients that allow you to manage projections. + +For a detailed explanation of projections, see the [server documentation](@server/features/projections/README.md). + +You can find the full sample code from this documentation page in the respective [clients repositories](https://github.com/kurrent-io/?q=client). + +## Creating a client + +Projection management operations are exposed through a dedicated client. + +@[code{createClient}](@grpc:projection_management/ProjectionManagement.java) + +## Create a projection + +Creates a projection that runs until the last event in the store, and then continues processing new events as they are appended to the store. The query parameter contains the JavaScript you want created as a projection. +Projections have explicit names, and you can enable or disable them via this name. + +@[code{CreateContinuous}](@grpc:projection_management/ProjectionManagement.java) + +Trying to create projections with the same name will result in an error: + +@[code{CreateContinuous_Conflict}](@grpc:projection_management/ProjectionManagement.java) + +## Restart the subsystem + +It is possible to restart the entire projection subsystem using the projections management client API. The user must be in the `$ops` or `$admin` group to perform this operation. + +@[code{RestartSubSystem}](@grpc:projection_management/ProjectionManagement.java) + +## Enable a projection + +Enables an existing projection by name. 
+Once enabled, the projection will start to process events even after restarting the server or the projection subsystem. +You must have access to a projection to enable it, see the [ACL documentation](@server/security/user-authorization.md). + +@[code{Enable}](@grpc:projection_management/ProjectionManagement.java) + +You can only enable an existing projection. When you try to enable a non-existing projection, you'll get an error: + +@[code{EnableNotFound}](@grpc:projection_management/ProjectionManagement.java) + +## Disable a projection + +Disables a projection, this will save the projection checkpoint. +Once disabled, the projection will not process events even after restarting the server or the projection subsystem. +You must have access to a projection to disable it, see the [ACL documentation](@server/security/user-authorization.md). + +@[code{Disable}](@grpc:projection_management/ProjectionManagement.java) + +You can only disable an existing projection. When you try to disable a non-existing projection, you'll get an error: + +@[code{DisableNotFound}](@grpc:projection_management/ProjectionManagement.java) + +## Delete a projection + +Deletes an existing projection. You must disable the projection before deleting it, running projections cannot be deleted. Deleting a projection includes deleting the checkpoint and the emitted streams. + +@[code{Delete}](@grpc:projection_management/ProjectionManagement.java) + +You can only delete an existing projection. When you try to delete a non-existing projection, you'll get an error: + +@[code{DeleteNotFound}](@grpc:projection_management/ProjectionManagement.java) + +## Abort a projection + +Aborts a projection, this will not save the projection's checkpoint. + +@[code{Abort}](@grpc:projection_management/ProjectionManagement.java) + +You can only abort an existing projection. 
When you try to abort a non-existing projection, you'll get an error: + +@[code{Abort_NotFound}](@grpc:projection_management/ProjectionManagement.java) + +## Reset a projection + +Resets a projection, which causes deleting the projection checkpoint. This will force the projection to start afresh and re-emit events. Streams that are written to from the projection will also be soft-deleted. + +@[code{Reset}](@grpc:projection_management/ProjectionManagement.java) + +Resetting a projection that does not exist will result in an error. + +@[code{Reset_NotFound}](@grpc:projection_management/ProjectionManagement.java) + +## Update a projection + +Updates a projection with a given name. The query parameter contains the new JavaScript. Updating system projections using this operation is not supported at the moment. + +@[code{Update}](@grpc:projection_management/ProjectionManagement.java) + +You can only update an existing projection. When you try to update a non-existing projection, you'll get an error: + +@[code{Update_NotFound}](@grpc:projection_management/ProjectionManagement.java) + +## List all projections + +Returns a list of all projections, user defined & system projections. +See the [projection details](#projection-details) section for an explanation of the returned values. + +@[code{ListAll}](@grpc:projection_management/ProjectionManagement.java) + +## List continuous projections + +Returns a list of all continuous projections. +See the [projection details](#projection-details) section for an explanation of the returned values. + +@[code{ListContinuous}](@grpc:projection_management/ProjectionManagement.java) + +## Get status + +Gets the status of a named projection. +See the [projection details](#projection-details) section for an explanation of the returned values. + +@[code{GetStatus}](@grpc:projection_management/ProjectionManagement.java) + +## Get state + +Retrieves the state of a projection. 
+ +@[code{GetState}](@grpc:projection_management/ProjectionManagement.java) + +## Get result + +Retrieves the result of the named projection and partition. + +@[code{GetResult}](@grpc:projection_management/ProjectionManagement.java) + +## Projection Details + +[List all](#list-all-projections), [list continuous](#list-continuous-projections) and [get status](#get-status) all return the details and statistics of projections + +| Field | Description | +|--------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `Name`, `EffectiveName` | The name of the projection | +| `Status` | A human readable string of the current statuses of the projection (see below) | +| `StateReason` | A human readable string explaining the reason of the current projection state | +| `CheckpointStatus` | A human readable string explaining the current operation performed on the checkpoint : `requested`, `writing` | +| `Mode` | `Continuous`, `OneTime` , `Transient` | +| `CoreProcessingTime` | The total time, in ms, the projection took to handle events since the last restart | +| `Progress` | The progress, in %, indicates how far this projection has processed event, in case of a restart this could be -1% or some number. 
It will be updated as soon as a new event is appended and processed | +| `WritesInProgress` | The number of write requests to emitted streams currently in progress, these writes can be batches of events | +| `ReadsInProgress` | The number of read requests currently in progress | +| `PartitionsCached` | The number of cached projection partitions | +| `Position` | The Position of the last processed event | +| `LastCheckpoint` | The Position of the last checkpoint of this projection | +| `EventsProcessedAfterRestart` | The number of events processed since the last restart of this projection | +| `BufferedEvents` | The number of events in the projection read buffer | +| `WritePendingEventsBeforeCheckpoint` | The number of events waiting to be appended to emitted streams before the pending checkpoint can be written | +| `WritePendingEventsAfterCheckpoint` | The number of events to be appended to emitted streams since the last checkpoint | +| `Version` | This is used internally, the version is increased when the projection is edited or reset | +| `Epoch` | This is used internally, the epoch is increased when the projection is reset | + +The `Status` string is a combination of the following values. 
The first three are the most common ones; the others are transient values that occur while the projection is being initialised or stopped
+ +Each event in KurrentDB belongs to an individual stream. When reading events, pick the name of the stream from which you want to read the events and choose whether to read the stream forwards or backwards. + +All events have a `StreamPosition` and a `Position`. `StreamPosition` is a *big int* (unsigned 64-bit integer) and represents the place of the event in the stream. `Position` is the event's logical position, and is represented by `CommitPosition` and a `PreparePosition`. Note that when reading events you will supply a different "position" depending on whether you are reading from an individual stream or the `$all` stream. + +:::tip +Check [connecting to KurrentDB instructions](getting-started.md#required-packages) to learn how to configure and use the client SDK. +::: + +## Reading from a stream + +You can read all the events or a sample of the events from individual streams, starting from any position in the stream, and can read either forward or backward. It is only possible to read events from a single stream at a time. You can read events from the global event log, which spans across streams. Learn more about this process in the [Read from `$all`](#reading-from-the-all-stream) section below. + +### Reading forwards + +The simplest way to read a stream forwards is to supply a stream name, read direction, and revision from which to start. The revision can either be a *stream position* `Start` or a *big int* (unsigned 64-bit integer): + +@[code{read-from-stream}](@grpc:reading_events/ReadingEvents.java) + +This will return an enumerable that can be iterated on: + +@[code{iterate-stream}](@grpc:reading_events/ReadingEvents.java) + +There are a number of additional arguments you can provide when reading a stream, listed below. + +#### maxCount + +Passing in the max count will limit the number of events returned. + +#### resolveLinkTos + +When using projections to create new events, you can set whether the generated events are pointers to existing events. 
Setting this value to `true` tells KurrentDB to return the event as well as the event linking to it. + +#### configureOperationOptions + +You can use the `configureOperationOptions` argument to provide a function that will customise settings for each operation. + +#### userCredentials + +The `userCredentials` argument is optional. It is used to override the default credentials specified when creating the client instance. + +@[code{overriding-user-credentials}](@grpc:reading_events/ReadingEvents.java) + +### Reading from a revision + +Instead of providing the `StreamPosition` you can also provide a specific stream revision as a *big int* (unsigned 64-bit integer). + +@[code{read-from-stream-position}](@grpc:reading_events/ReadingEvents.java) + +### Reading backwards + +In addition to reading a stream forwards, streams can be read backwards. To read all the events backwards, set the *stream position* to the end: + +@[code{reading-backwards}](@grpc:reading_events/ReadingEvents.java) + +:::tip +Read one event backwards to find the last position in the stream. +::: + +### Checking if the stream exists + +Reading a stream returns a `ReadStreamResult`, which contains a property `ReadState`. This property can have the value `StreamNotFound` or `Ok`. + +It is important to check the value of this field before attempting to iterate an empty stream, as it will throw an exception. + +For example: + +@[code{checking-for-stream-presence}](@grpc:reading_events/ReadingEvents.java) + +## Reading from the $all stream + +Reading from the `$all` stream is similar to reading from an individual stream, but please note there are differences. One significant difference is the need to provide admin user account credentials to read from the `$all` stream. Additionally, you need to provide a transaction log position instead of a stream revision when reading from the `$all` stream. 
+ +### Reading forwards + +The simplest way to read the `$all` stream forwards is to supply a read direction and the transaction log position from which you want to start. The transaction log postion can either be a *stream position* `Start` or a *big int* (unsigned 64-bit integer): + +@[code{read-from-all-stream}](@grpc:reading_events/ReadingEvents.java) + +You can iterate asynchronously through the result: + +@[code{read-from-all-stream-iterate}](@grpc:reading_events/ReadingEvents.java) + +There are a number of additional arguments you can provide when reading the `$all` stream. + +#### maxCount + +Passing in the max count allows you to limit the number of events that returned. + +#### resolveLinkTos + +When using projections to create new events you can set whether the generated events are pointers to existing events. Setting this value to true will tell KurrentDB to return the event as well as the event linking to it. + +@[code{read-from-all-stream-resolving-link-Tos}](@grpc:reading_events/ReadingEvents.java) + +#### configureOperationOptions + +This argument is generic setting class for all operations that can be set on all operations executed against KurrentDB. + +#### userCredentials +The credentials used to read the data can be used by the subscription as follows. This will override the default credentials set on the connection. + +@[code{read-all-overriding-user-credentials}](@grpc:reading_events/ReadingEvents.java) + +### Reading backwards + +In addition to reading the `$all` stream forwards, it can be read backwards. To read all the events backwards, set the *position* to the end: + +@[code{read-from-all-stream-backwards}](@grpc:reading_events/ReadingEvents.java) + +:::tip +Read one event backwards to find the last position in the `$all` stream. +::: + +### Handling system events + +KurrentDB will also return system events when reading from the `$all` stream. In most cases you can ignore these events. 
+ +All system events begin with `$` or `$$` and can be easily ignored by checking the `EventType` property. + +@[code{ignore-system-events}](@grpc:reading_events/ReadingEvents.java) + diff --git a/docs/clients/grpc/java/release-notes.md b/docs/clients/grpc/java/release-notes.md new file mode 100644 index 00000000..4ac84cf4 --- /dev/null +++ b/docs/clients/grpc/java/release-notes.md @@ -0,0 +1,11 @@ +--- +order: 10 +head: + - - title + - {} + - Release Notes | Java | Clients | Kurrent Docs +--- + +# Release Notes + +To stay up to date with the latest changes and improvements, including release history, changelogs, upgrade instructions, and migration guides, see the [KurrentDB Java client release notes](https://github.com/kurrent-io/KurrentDB-Client-Java/releases). \ No newline at end of file diff --git a/docs/clients/grpc/java/subscriptions.md b/docs/clients/grpc/java/subscriptions.md new file mode 100644 index 00000000..b7202292 --- /dev/null +++ b/docs/clients/grpc/java/subscriptions.md @@ -0,0 +1,234 @@ +--- +order: 4 +head: + - - title + - {} + - Catch-up Subscriptions | Java | Clients | Kurrent Docs +--- + +# Catch-up Subscriptions + +Subscriptions allow you to subscribe to a stream and receive notifications about new events added to the stream. + +You provide an event handler and an optional starting point to the subscription. The handler is called for each event from the starting point onward. + +If events already exist, the handler will be called for each event one by one until it reaches the end of the stream. The server will then notify the handler whenever a new event appears. + +:::tip +Check the [Getting Started](getting-started.md) guide to learn how to configure and use the client SDK. +::: + +## Subscribing from the start + +If you need to process all the events in the store, including historical events, you'll need to subscribe from the beginning. 
You can either subscribe to receive events from a single stream or subscribe to `$all` if you need to process all events in the database. + +### Subscribing to a stream + +The simplest stream subscription looks like the following : + +@[code{subscribe-to-stream}](@grpc:subscribing_to_stream/SubscribingToStream.java) + +The provided handler will be called for every event in the stream. + +When you subscribe to a stream with link events, for example the `$ce` category stream, you need to set `resolveLinkTos` to `true`. Read more about it [below](#resolving-link-to-s). + +### Subscribing to `$all` + +Subscribing to `$all` is similar to subscribing to a single stream. The handler will be called for every event appended after the starting position. + +@[code{subscribe-to-all}](@grpc:subscribing_to_stream/SubscribingToStream.java) + +## Subscribing from a specific position + +The previous examples subscribed to the stream from the beginning. That subscription invoked the handler for every event in the stream before waiting for new events. + +Both stream and $all subscriptions accept a starting position if you want to read from a specific point onward. If events already exist at the position you subscribe to, they will be read on the server side and sent to the subscription. + +Once caught up, the server will push any new events received on the streams to the client. There is no difference between catching up and live on the client side. + +::: warning +The positions provided to the subscriptions are exclusive. You will only receive the next event after the subscribed position. +::: + +### Subscribing to a stream + +To subscribe to a stream from a specific position, you must provide a *stream position*. This can be `Start`, `End` or a *big int* (unsigned 64 bit integer) position. 
+ +The following subscribes to the stream `some-stream` at position `20`, this means that events `21` and onward will be handled: + +@[code{subscribe-to-stream-from-position}](@grpc:subscribing_to_stream/SubscribingToStream.java) + +### Subscribing to $all + +Subscribing to the `$all` stream is similar to subscribing to a regular stream. The difference is how to specify the starting position. For the `$all` stream, provide a `Position` structure that consists of two big integers: the prepare and commit positions. Use `Start`, `End`, or create a `Position` from specific commit and prepare values. + +The corresponding `$all` subscription will subscribe from the event after the one at commit position `1056` and prepare position `1056`. + +Please note that this position will need to be a legitimate position in `$all`. + +@[code{subscribe-to-all-from-position}](@grpc:subscribing_to_stream/SubscribingToStream.java) + +## Subscribing to a stream for live updates + +You can subscribe to a stream to get live updates by subscribing to the end of the stream: + +@[code{subscribe-to-stream-live}](@grpc:subscribing_to_stream/SubscribingToStream.java) + +And the same works with `$all` : + +@[code{subscribe-to-all-live}](@grpc:subscribing_to_stream/SubscribingToStream.java) + +This will not read through the history of the stream but will notify the handler when a new event appears in the respective stream. + +Keep in mind that when you subscribe to a stream from a specific position, as described [above](#subscribing-from-a-specific-position), you will also get live updates after your subscription catches up (processes all the historical events). + +## Resolving link-to events + +Link-to events point to events in other streams in KurrentDB. These are generally created by projections such as the `$by_event_type` projection which links events of the same event type into the same stream. This makes it easier to look up all events of a specific type. 
+ +::: tip +[Filtered subscriptions](subscriptions.md#server-side-filtering) make it easier and faster to subscribe to all events of a specific type or matching a prefix. +::: + +When reading a stream you can specify whether to resolve link-to's. By default, link-to events are not resolved. You can change this behaviour by setting the `resolveLinkTos` parameter to `true`: + +@[code{subscribe-to-stream-resolving-linktos}](@grpc:subscribing_to_stream/SubscribingToStream.java) + +## Dropped subscriptions + +When a subscription stops or experiences an error, it will be dropped. The subscription provides a `subscriptionDropped` callback, which will get called when the subscription breaks. + +The `subscriptionDropped` callback allows you to inspect the reason why the subscription dropped, as well as any exceptions that occurred. + +The possible reasons for a subscription to drop are: + +| Reason | Why it might happen | +|:------------------|:---------------------------------------------------------------------------------------------------------------------| +| `Disposed` | The client canceled or disposed of the subscription. | +| `SubscriberError` | An error occurred while handling an event in the subscription handler. | +| `ServerError` | An error occurred on the server, and the server closed the subscription. Check the server logs for more information. | + +Bear in mind that a subscription can also drop because it is slow. The server tried to push all the live events to the subscription when it is in the live processing mode. If the subscription gets the reading buffer overflow and won't be able to acknowledge the buffer, it will break. + +### Handling subscription drops + +An application, which hosts the subscription, can go offline for some time for different reasons. It could be a crash, infrastructure failure, or a new version deployment. 
As you rarely would want to reprocess all the events again, you'd need to store the current position of the subscription somewhere, and then use it to restore the subscription from the point where it dropped off: + +@[code{subscribe-to-stream-subscription-dropped}](@grpc:subscribing_to_stream/SubscribingToStream.java) + +When subscribed to `$all` you want to keep the event's position in the `$all` stream. As mentioned previously, the `$all` stream position consists of two big integers (prepare and commit positions), not one: + +@[code{subscribe-to-all-subscription-dropped}](@grpc:subscribing_to_stream/SubscribingToStream.java) + +## User credentials + +The user creating a subscription must have read access to the stream it's subscribing to, and only admin users may subscribe to `$all` or create filtered subscriptions. + +The code below shows how you can provide user credentials for a subscription. When you specify subscription credentials explicitly, it will override the default credentials set for the client. If you don't specify any credentials, the client will use the credentials specified for the client, if you specified those. + +@[code{overriding-user-credentials}](@grpc:subscribing_to_stream/SubscribingToStream.java) + +## Server-side filtering + +KurrentDB allows you to filter the events whilst subscribing to the `$all` stream to only receive the events you care about. + +You can filter by event type or stream name using a regular expression or a prefix. Server-side filtering is currently only available on the `$all` stream. + +::: tip +Server-side filtering was introduced as a simpler alternative to projections. You should consider filtering before creating a projection to include the events you care about. 
+::: + +A simple stream prefix filter looks like this: + +@[code{stream-prefix-filtered-subscription}](@grpc:subscribing_to_stream/SubscribingToStream.java) + +The filtering API is described more in-depth in the [filtering section](subscriptions.md#server-side-filtering). + +### Filtering out system events + +There are events in KurrentDB called system events. These are prefixed with a `$` and under most circumstances you won't care about these. They can be filtered out by passing in a `SubscriptionFilterOptions` when subscribing to the `$all` stream. + +@[code{exclude-system}](@grpc:server_side_filtering/ServerSideFiltering.java) + +::: tip +`$stats` events are no longer stored in KurrentDB by default so there won't be as many `$` events as before. +::: + +### Filtering by event type + +If you only want to subscribe to events of a given type, there are two options. You can either use a regular expression or a prefix. + +#### Filtering by prefix + +If you want to filter by prefix, pass in a `SubscriptionFilterOptions` to the subscription with an `EventTypeFilter.Prefix`. + +@[code{event-type-prefix}](@grpc:server_side_filtering/ServerSideFiltering.java) + +This will only subscribe to events with a type that begin with `customer-`. + +#### Filtering by regular expression + +It might be advantageous to provide a regular expression when you want to subscribe to multiple event types. + +@[code{event-type-regex}](@grpc:server_side_filtering/ServerSideFiltering.java) + +This will subscribe to any event that begins with `user` or `company`. + +### Filtering by stream name + +To subscribe to a stream by name, choose either a regular expression or a prefix. + +#### Filtering by prefix + +If you want to filter by prefix, pass in a `SubscriptionFilterOptions` to the subscription with an `StreamFilter.Prefix`. + +@[code{stream-prefix}](@grpc:server_side_filtering/ServerSideFiltering.java) + +This will only subscribe to all streams with a name that begins with `user-`. 
+ +#### Filtering by regular expression + +To subscribe to multiple streams, use a regular expression. + +@[code{stream-regex}](@grpc:server_side_filtering/ServerSideFiltering.java) + +This will subscribe to any stream with a name that begins with `account` or `savings`. + +## Checkpointing + +When a catch-up subscription is used to process an `$all` stream containing many events, the last thing you want is for your application to crash midway, forcing you to restart from the beginning. + +### What is a checkpoint? + +A checkpoint is the position of an event in the `$all` stream to which your application has processed. By saving this position to a persistent store (e.g., a database), it allows your catch-up subscription to: +- Recover from crashes by reading the checkpoint and resuming from that position +- Avoid reprocessing all events from the start + +To create a checkpoint, store the event's commit or prepare position. + +::: warning +If your database contains events created by the legacy TCP client using the [transaction feature](https://docs.kurrent.io/clients/tcp/dotnet/21.2/appending.html#transactions), you should store both the commit and prepare positions together as your checkpoint. +::: + +### Updating checkpoints at regular intervals +The client SDK provides a way to notify your application after processing a configurable number of events. This allows you to periodically save a checkpoint at regular intervals. + +@[code{checkpoint}](@grpc:server_side_filtering/ServerSideFiltering.java) + +By default, the checkpoint notification is sent after every 32 non-system events processed from $all. + +### Configuring the checkpoint interval +You can adjust the checkpoint interval to change how often the client is notified. + +@[code{checkpoint-with-interval}](@grpc:server_side_filtering/ServerSideFiltering.java) + +By configuring this parameter, you can balance between reducing checkpoint overhead and ensuring quick recovery in case of a failure. 
+ +::: info +The checkpoint interval parameter configures the database to notify the client after `n` * 32 number of events where `n` is defined by the parameter. + +For example: +- If `n` = 1, a checkpoint notification is sent every 32 events. +- If `n` = 2, the notification is sent every 64 events. +- If `n` = 3, it is sent every 96 events, and so on. +::: diff --git a/docs/clients/grpc/nodejs/README.md b/docs/clients/grpc/nodejs/README.md new file mode 100644 index 00000000..e2b0d3fd --- /dev/null +++ b/docs/clients/grpc/nodejs/README.md @@ -0,0 +1,9 @@ +--- +index: false +--- + +# Node.js + +Learn how to use the KurrentDB NodeJS client library to interact with the database. + + \ No newline at end of file diff --git a/docs/clients/grpc/nodejs/appending-events.md b/docs/clients/grpc/nodejs/appending-events.md new file mode 100644 index 00000000..14057b53 --- /dev/null +++ b/docs/clients/grpc/nodejs/appending-events.md @@ -0,0 +1,87 @@ +--- +order: 2 +head: + - - title + - {} + - Appending Events | Node.js | Clients | Kurrent Docs +--- + +# Appending Events + +When you start working with KurrentDB, it is empty. The first meaningful operation is to add one or more events to the database using one of the available client SDKs. + +::: tip +Check the [Getting Started](getting-started.md) guide to learn how to configure and use the client SDK. +::: + +## Append your first event + +The simplest way to append an event to KurrentDB is to create an `EventData` object and call `AppendToStream` method. + +@[code{append-to-stream}](@grpc:appending-events.js;appending-events.ts) + +`AppendToStream` takes a collection of `EventData`, which allows you to save more than one event in a single batch. + +Outside the example above, other options exist for dealing with different scenarios. + +::: tip +If you are new to Event Sourcing, please study the [Handling concurrency](#handling-concurrency) section below. 
+::: + +## Working with EventData + +Events appended to KurrentDB must be wrapped in an `EventData` object. This allows you to specify the event's content, the type of event, and whether it's in JSON format. In its simplest form, you need three arguments: **eventId**, **type**, and **data**. + +### eventId + +This takes the format of a `Uuid` and is used to uniquely identify the event you are trying to append. If two events with the same `Uuid` are appended to the same stream in quick succession, KurrentDB will only append one of the events to the stream. + +For example, the following code will only append a single event: + +@[code{append-duplicate-event}](@grpc:appending-events.js;appending-events.ts) + +![Duplicate Event](../images/duplicate-event.png) + +### type + +Each event should be supplied with an event type. This unique string is used to identify the type of event you are saving. + +It is common to see the explicit event code type name used as the type as it makes serialising and de-serialising of the event easy. However, we recommend against this as it couples the storage to the type and will make it more difficult if you need to version the event at a later date. + +### data + +Representation of your event data. It is recommended that you store your events as JSON objects. This allows you to take advantage of all of KurrentDB's functionality, such as projections. That said, you can save events using whatever format suits your workflow. Eventually, the data will be stored as encoded bytes. + +### metadata + +Storing additional information alongside your event that is part of the event itself is standard practice. This can be correlation IDs, timestamps, access information, etc. KurrentDB allows you to store a separate byte array containing this information to keep it separate. + +### isJson + +Simple boolean field to tell KurrentDB if the event is stored as json, true by default. 
+ +## Handling concurrency + +When appending events to a stream, you can supply a *stream state* or *stream revision*. Your client uses this to inform KurrentDB of the state or version you expect the stream to be in when appending an event. If the stream isn't in that state, an exception will be thrown. + +For example, if you try to append the same record twice, expecting both times that the stream doesn't exist, you will get an exception on the second: + +@[code{append-with-no-stream}](@grpc:appending-events.js;appending-events.ts) + +There are three available stream states: +- `Any` +- `NoStream` +- `StreamExists` + +This check can be used to implement optimistic concurrency. When retrieving a stream from KurrentDB, note the current version number. When you save it back, you can determine if somebody else has modified the record in the meantime. + +@[code{append-with-concurrency-check}](@grpc:appending-events.js;appending-events.ts) + + + +## User credentials + +You can provide user credentials to append the data as follows. This will override the default credentials set on the connection. + +@[code{overriding-user-credentials}](@grpc:appending-events.js;appending-events.ts) + diff --git a/docs/clients/grpc/nodejs/authentication.md b/docs/clients/grpc/nodejs/authentication.md new file mode 100644 index 00000000..7c520968 --- /dev/null +++ b/docs/clients/grpc/nodejs/authentication.md @@ -0,0 +1,60 @@ +--- +title: Authentication +order: 7 +head: + - - title + - {} + - Authentication | Node.js | Clients | Kurrent Docs +--- + +## Client x.509 certificate + +X.509 certificates are digital certificates that use the X.509 public key infrastructure (PKI) standard to verify the identity of clients and servers. They play a crucial role in establishing a secure connection by providing a way to authenticate identities and establish trust. + +### Prerequisites + +1. KurrentDB 25.0 or greater, or EventStoreDB 24.10. +2. 
A commercial license with the User Certificates entitlement. +3. A valid x.509 certificate, which can be created using version `1.3` or higher of the [gencert tool](https://github.com/kurrent-io/es-gencert-cli). +4. The server must run in secure mode. See [Security Options](@server/security/protocol-security.md) for more information. +5. [Enable User Certificates plugin on the server](@server/security/user-authentication.md#user-x509-certificates) + +#### Generate user certificates + +The following command uses the [gencert tool](https://github.com/kurrent-io/es-gencert-cli) to generate a user certificate for the user `admin` that will expire in 10 days: + +::: tabs#os +@tab bash +```bash +./es-gencert-cli create-user -username admin -days 10 -ca-certificate ./es-ca/ca.crt -ca-key ./es-ca/ca.key +``` +@tab PowerShell +```powershell +.\es-gencert-cli.exe create-user -username admin -days 10 -ca-certificate ./es-ca/ca.crt -ca-key ./es-ca/ca.key +``` +::: + +### Connect to KurrentDB using an x.509 certificate + +To connect to KurrentDB using an x.509 certificate, you need to provide the +certificate and the private key to the client. If both username/password and +certificate authentication data are supplied, the client prioritizes user +credentials for authentication. The client will throw an error if the +certificate and the key are not both provided. + +::: tip +Please note that currently, password-protected private key files are not supported. +::: + +The client supports the following parameters: + +| Parameter | Description | +|----------------|--------------------------------------------------------------------------------| +| `userCertFile` | The file containing the X.509 user certificate in PEM format. | +| `userKeyFile` | The file containing the user certificate’s matching private key in PEM format. | + +To authenticate, include these two parameters in your connection string or constructor when initializing the client. 
+ +Check the samples for the following clients: + +@[code{client-with-user-certificates}](@grpc:user-certificates.ts) \ No newline at end of file diff --git a/docs/clients/grpc/nodejs/delete-stream.md b/docs/clients/grpc/nodejs/delete-stream.md new file mode 100644 index 00000000..b4f2fe2f --- /dev/null +++ b/docs/clients/grpc/nodejs/delete-stream.md @@ -0,0 +1,51 @@ +--- +order: 9 +head: + - - title + - {} + - Deleting Events | Node.js | Clients | Kurrent Docs +--- + +# Deleting Events + +In KurrentDB, you can delete events and streams either partially or completely. Settings like $maxAge and $maxCount help control how long events are kept or how many events are stored in a stream, but they won't delete the entire stream. +When you need to fully remove a stream, KurrentDB offers two options: Soft Delete and Hard Delete. + +## Soft delete + +Soft delete in KurrentDB allows you to mark a stream for deletion without completely removing it, so you can still add new events later. While you can do this through the UI, using code is often better for automating the process, +handling many streams at once, or including custom rules. Code is especially helpful for large-scale deletions or when you need to integrate soft deletes into other workflows. + +::: tabs#lang + +@tab JavaScript +```javascript +await client.deleteStream(streamName); +``` +@tab TypeScript +```typescript +await client.deleteStream(streamName); +``` +::: + +::: note +Clicking the delete button in the UI performs a soft delete, +setting the TruncateBefore value to remove all events up to a certain point. +While this marks the events for deletion, actual removal occurs during the next scavenging process. +The stream can still be reopened by appending new events. +::: + +## Hard delete + +Hard delete in KurrentDB permanently removes a stream and its events. While you can use the HTTP API, code is often better for automating the process, managing multiple streams, and ensuring precise control. 
Code is especially useful when you need to integrate hard delete into larger workflows or apply specific conditions. Note that when a stream is hard deleted, you cannot reuse the stream name; it will raise an exception if you try to append to it again. + +::: tabs#lang +@tab JavaScript +```javascript +await client.tombstoneStream(streamName); +``` +@tab TypeScript +```typescript +await client.tombstoneStream(streamName); +``` +::: \ No newline at end of file diff --git a/docs/clients/grpc/nodejs/getting-started.md b/docs/clients/grpc/nodejs/getting-started.md new file mode 100644 index 00000000..1bcbb5bd --- /dev/null +++ b/docs/clients/grpc/nodejs/getting-started.md @@ -0,0 +1,119 @@ +--- +order: 1 +head: + - - title + - {} + - Getting Started | Node.js | Clients | Kurrent Docs +--- + +# Getting Started + +Get started by connecting your application to KurrentDB. + +## Connecting to KurrentDB + +To connect your application to KurrentDB, instantiate and configure the client. + +::: tip Insecure clusters +All our GRPC clients are secure by default and must be configured to connect to an insecure server via [a connection string](#connection-string) or the client's configuration. +::: + +### Required packages + +Install the `@kurrent/kurrentdb-client` package using NPM, Yarn or PNPM: + +::: tabs +@tab npm +```bash +npm install --save @kurrent/kurrentdb-client +``` +@tab yarn +```bash +yarn add @kurrent/kurrentdb-client +``` +@tab pnpm +```bash +pnpm add @kurrent/kurrentdb-client +``` +::: + +TypeScript declarations are included in the package. + +### Connection string + +Each SDK has its own way of configuring the client, but the connection string can always be used. +The KurrentDB connection string supports two schemas: `kurrentdb://` for connecting to a single-node server, and `kurrentdb+discover://` for connecting to a multi-node cluster. 
The difference between the two schemas is that when using `kurrentdb://`, the client will connect directly to the node; with `kurrentdb+discover://` schema the client will use the gossip protocol to retrieve the cluster information and choose the right node to connect to. +Since version 22.10, ESDB supports gossip on single-node deployments, so `kurrentdb+discover://` schema can be used for connecting to any topology. + +The connection string has the following format: + +``` +kurrentdb+discover://admin:changeit@cluster.dns.name:2113 +``` + +There, `cluster.dns.name` is the name of a DNS `A` record that points to all the cluster nodes. Alternatively, you can list cluster nodes separated by comma instead of the cluster DNS name: + +``` +kurrentdb+discover://admin:changeit@node1.dns.name:2113,node2.dns.name:2113,node3.dns.name:2113 +``` + +There are a number of query parameters that can be used in the connection string to instruct the cluster how and where the connection should be established. All query parameters are optional. + +| Parameter | Accepted values | Default | Description | +|-----------------------|---------------------------------------------------|----------|------------------------------------------------------------------------------------------------------------------------------------------------| +| `tls` | `true`, `false` | `true` | Use secure connection, set to `false` when connecting to a non-secure server or cluster. | +| `connectionName` | Any string | None | Connection name | +| `maxDiscoverAttempts` | Number | `10` | Number of attempts to discover the cluster. | +| `discoveryInterval` | Number | `100` | Cluster discovery polling interval in milliseconds. | +| `gossipTimeout` | Number | `5` | Gossip timeout in seconds, when the gossip call times out, it will be retried. | +| `nodePreference` | `leader`, `follower`, `random`, `readOnlyReplica` | `leader` | Preferred node role. When creating a client for write operations, always use `leader`. 
| +| `tlsVerifyCert` | `true`, `false` | `true` | In secure mode, set to `true` when using an untrusted connection to the node if you don't have the CA file available. Don't use in production. | +| `tlsCaFile` | String, file path | None | Path to the CA file when connecting to a secure cluster with a certificate that's not signed by a trusted CA. | +| `defaultDeadline` | Number | None | Default timeout for client operations, in milliseconds. Most clients allow overriding the deadline per operation. | +| `keepAliveInterval` | Number | `10` | Interval between keep-alive ping calls, in seconds. | +| `keepAliveTimeout` | Number | `10` | Keep-alive ping call timeout, in seconds. | +| `userCertFile` | String, file path | None | User certificate file for X.509 authentication. | +| `userKeyFile` | String, file path | None | Key file for the user certificate used for X.509 authentication. | + +When connecting to an insecure instance, specify `tls=false` parameter. For example, for a node running locally use `kurrentdb://localhost:2113?tls=false`. Note that usernames and passwords aren't provided there because insecure deployments don't support authentication and authorisation. + +### Creating a client + +First, create a client and get it connected to the database. + +@[code{createClient}](@grpc:get-started.js;get-started.ts) + +The client instance can be used as a singleton across the whole application. It doesn't need to open or close the connection. + +### Creating an event + +You can write anything to KurrentDB as events. The client needs a byte array as the event payload. Normally, you'd use a serialized object, and it's up to you to choose the serialization method. + +::: tip Server-side projections +User-defined server-side projections require events to be serialized in JSON format. + +We use JSON for serialization in the documentation examples. 
+::: + +The code snippet below creates an event object instance, serializes it, and adds it as a payload to the `EventData` structure, which the client can then write to the database. + +@[code{createEvent}](@grpc:get-started.js;get-started.ts) + +### Appending events + +Each event in the database has its own unique identifier (UUID). The database uses it to ensure idempotent writes, but it only works if you specify the stream revision when appending events to the stream. + +In the snippet below, we append the event to the stream `some-stream`. + +@[code{appendEvents}](@grpc:get-started.js;get-started.ts) + +Here we are appending events without checking if the stream exists or if the stream version matches the expected event version. See more advanced scenarios in [appending events documentation](./appending-events.md). + +### Reading events + +Finally, we can read events back from the `some-stream` stream. + +@[code{readStream}](@grpc:get-started.js;get-started.ts) + +When you read events from the stream, you get a collection of `ResolvedEvent` structures. The event payload is returned as a byte array and needs to be deserialized. See more advanced scenarios in [reading events documentation](./reading-events.md). + diff --git a/docs/clients/grpc/nodejs/observability.md b/docs/clients/grpc/nodejs/observability.md new file mode 100644 index 00000000..2ee0bc5c --- /dev/null +++ b/docs/clients/grpc/nodejs/observability.md @@ -0,0 +1,123 @@ +--- +order: 8 +head: + - - title + - {} + - Observability | Node.js | Clients | Kurrent Docs +--- + +# Observability + +The KurrentDB gRPC clients are designed with observability in mind, offering +support for OpenTelemetry. This integration provides a set of distributed +traces, enabling developers to gain deeper insights into their system. + +::: warning +Currently, OpenTelemetry observability support is not available for all +clients. 
Moreover, instrumentation is only provided for append and +subscribe operations, which includes both 'Catchup' and 'Persistent' modes. +::: + +You can click on the links below to view the full code: + +Click [here](https://github.com/kurrent-io/KurrentDB-Client-NodeJS/blob/master/packages/test/src/samples/opentelemetry.ts) to view the full sample code for observability. + +## Required packages + +OpenTelemetry support is included to the KurrentDB Java client by default. For other clients, you need to install the dedicated package to enable OpenTelemetry support. + +### NodeJS + +Install the `@kurrent/opentelemetry` package using your package manager of choice. TypeScript type declarations are included in the package. + +::: tabs +@tab npm +```bash +npm install --save @kurrent/opentelemetry +``` +@tab yarn +```bash +yarn add @kurrent/opentelemetry +``` +@tab pnpm +```bash +pnpm add @kurrent/opentelemetry +``` +::: + +## Instrumentation + +To emit trace data, you must first install and use the dedicated package, as instructed in the +[Required Packages](./observability.md#required-packages) section, if provided. This package +includes the necessary instrumentation that needs to be registered with the client. + +@[code{register-instrumentation}](@grpc:opentelemetry.js;opentelemetry.ts) + +## Traces + +Traces provide a clear picture of how operations are carried out in a +distributed system, making it easier to maintain and enhance the system over +time. Traces from the clients can be exported to any compatible collector that +supports the OpenTelemetry protocol (OTLP). + +In order for the client to emit traces, you need to need to enable +instrumentation as described in +[Instrumentation](./observability.md#instrumentation). + +For more guidance on setting up and utilizing tracing, refer to the +[OpenTelemetry](https://opentelemetry.io/) documentation. 
+ +An example of a trace is shown below: + +```bash +Activity.TraceId: 8da04787239dbb85c1f9c6fba1b1f0d6 +Activity.SpanId: 4352ec4a66a20b95 +Activity.TraceFlags: Recorded +Activity.ActivitySourceName: kurrentdb +Activity.DisplayName: streams.append +Activity.Kind: Client +Activity.StartTime: 2024-05-29T06:50:41.2519016Z +Activity.Duration: 00:00:00.1500707 +Activity.Tags: + db.kurrentdb.stream: d7caa2a5-1e19-4108-9541-58d5fba02d42 + server.address: localhost + server.port: 2113 + db.system: kurrentdb + db.operation: streams.append +StatusCode: Ok +Resource associated with Activity: + service.name: sample + service.instance.id: 7316ef20-c354-4e64-97da-c1b99c2c28b0 + telemetry.sdk.name: opentelemetry + telemetry.sdk.language: dotnet + telemetry.sdk.version: 1.8.1 +``` + +In this case, the trace is for an append operation on a stream. The trace +includes the trace ID, span ID, trace flags, activity source name, display name, +kind, start time, duration, tags, status code, and resource associated with the +activity. + +::: note +The structure of the trace may vary depending on the client and the operation +being performed but will generally include the same information. +::: + +## Exporting traces + +You can set up various exporters to send traces to different destinations. +Additionally, you have the option to export these traces to a collector of your +choice, such as [Jaeger](https://www.jaegertracing.io/) or [Seq](https://datalust.co/seq). + +For instance, if you choose to use Jaeger as your backend of choice, you can +view your traces in the Jaeger UI, which provides a powerful interface for +querying and visualizing your trace data. + +The code snippets below demonstrate how to set up one or more exporters for each +client: + +@[code{setup-exporter}](@grpc:opentelemetry.js;opentelemetry.ts) + +For more details on configuring exporters for specific programming languages, +refer to the [OpenTelemetry](https://opentelemetry.io/docs/languages/) +documentation. 
diff --git a/docs/clients/grpc/nodejs/persistent-subscriptions.md b/docs/clients/grpc/nodejs/persistent-subscriptions.md new file mode 100644 index 00000000..e53e7863 --- /dev/null +++ b/docs/clients/grpc/nodejs/persistent-subscriptions.md @@ -0,0 +1,164 @@ +--- +order: 5 +head: + - - title + - {} + - Persistent Subscriptions | Node.js | Clients | Kurrent Docs +--- + +# Persistent Subscriptions + +Persistent subscriptions are similar to catch-up subscriptions, but there are two key differences: +- The subscription checkpoint is maintained by the server. It means that when your client reconnects to the persistent subscription, it will automatically resume from the last known position. +- It's possible to connect more than one event consumer to the same persistent subscription. In that case, the server will load-balance the consumers, depending on the defined strategy, and distribute the events to them. + +Because of this, persistent subscriptions are defined as subscription groups that are created and maintained by the server. Consumers then connect to a particular subscription group, and the server starts sending events to the consumer. + +You can read more about persistent subscriptions in the [server documentation](@server/features/persistent-subscriptions.md). + +## Creating a subscription group + +The first step of dealing with a persistent subscription is to create a subscription group. You will receive an error if you attempt to create a subscription group multiple times. You must have admin permissions to create a persistent subscription group. + +### Subscribing to one stream + +The following sample shows how to create a subscription group for a persistent subscription where you want to receive events from a specific stream. It could be a normal stream, or a stream of links (like `$ce` category stream). 
+ +@[code{create-persistent-subscription-to-stream}](@grpc:persistent-subscriptions.js;persistent-subscriptions.ts) + +| Parameter | Description | +|:--------------|:----------------------------------------------------| +| `stream` | The stream the persistent subscription is on. | +| `groupName` | The name of the subscription group to create. | +| `settings` | The settings to use when creating the subscription. | +| `credentials` | The user credentials to use for this operation. | + +### Subscribing to $all + +The ability to subscribe to `$all` was introduced in EventStoreDB **21.10**. Persistent subscriptions to `$all` also support [filtering](subscriptions.md#server-side-filtering). + +You can create a subscription group on `$all` much the same way you would create a subscription group on a stream: + +@[code{create-persistent-subscription-to-all}](@grpc:persistent-subscriptions.js;persistent-subscriptions.ts) + +## Connecting a consumer + +Once you have created a subscription group, clients can connect to it. A subscription in your application should only contain the connection code; you should assume that the subscription group already exists. + +The most important parameter to pass when connecting is the buffer size. This represents how many outstanding messages the server should allow this client. If this number is too small, your subscription will spend much of its time idle as it waits for an acknowledgment to come back from the client. If it's too big, you waste resources and can start causing timeout messages depending on the speed of your processing. 
+ +### Connecting to one stream + +The code below shows how to connect to an existing subscription group for a specific stream: + +@[code{subscribe-to-persistent-subscription-to-stream}](@grpc:persistent-subscriptions.js;persistent-subscriptions.ts) + +| Parameter | Description | +|:----------------------|:---------------------------------------------------------------------------------------------| +| `stream` | The stream the persistent subscription is on. | +| `groupName` | The name of the subscription group to subscribe to. | +| `eventAppeared` | The action to call when an event arrives over the subscription. | +| `subscriptionDropped` | The action to call if the subscription is dropped. | +| `credentials` | The user credentials to use for this operation. | +| `bufferSize` | The number of in-flight messages this client is allowed. **Default: 10** | +| `autoAck` | Whether to automatically acknowledge messages after eventAppeared returns. **Default: true** | + +::: warning +The `autoAck` parameter will be deprecated in the next client release. You'll need to explicitly [manage acknowledgements](#acknowledgements). +::: + +### Connecting to $all + +The code below shows how to connect to an existing subscription group for `$all`: + +@[code{subscribe-to-persistent-subscription-to-all}](@grpc:persistent-subscriptions.js;persistent-subscriptions.ts) + +The `SubscribeToAllAsync` method is identical to the `SubscribeToStreamAsync` method, except that you don't need to specify a stream name. + +## Acknowledgements + +Clients must acknowledge (or not acknowledge) messages in the competing consumer model. + +If processing is successful, you must send an Ack (acknowledge) to the server to let it know that the message has been handled. If processing fails for some reason, then you can Nack (not acknowledge) the message and tell the server how to handle the failure. 
+ +@[code{subscribe-to-persistent-subscription-with-manual-acks}](@grpc:persistent-subscriptions.js;persistent-subscriptions.ts) + +The _Nack event action_ describes what the server should do with the message: + +| Action | Description | +|:----------|:---------------------------------------------------------------------| +| `Unknown` | The client does not know what action to take. Let the server decide. | +| `Park` | Park the message and do not resend. Put it on poison queue. | +| `Retry` | Explicitly retry the message. | +| `Skip` | Skip this message do not resend and do not put in poison queue. | + +## Consumer strategies + +When creating a persistent subscription, you can choose between a number of consumer strategies. + +### RoundRobin (default) + +Distributes events to all clients evenly. If the client `bufferSize` is reached, the client won't receive more events until it acknowledges or not acknowledges events in its buffer. + +This strategy provides equal load balancing between all consumers in the group. + +### DispatchToSingle + +Distributes events to a single client until the `bufferSize` is reached. After that, the next client is selected in a round-robin style, and the process repeats. + +This option can be seen as a fall-back scenario for high availability, when a single consumer processes all the events until it reaches its maximum capacity. When that happens, another consumer takes the load to free up the main consumer resources. + +### Pinned + +For use with an indexing projection such as the system `$by_category` projection. + +KurrentDB inspects the event for its source stream id, hashing the id to one of 1024 buckets assigned to individual clients. When a client disconnects, its buckets are assigned to other clients. When a client connects, it is assigned some existing buckets. This naively attempts to maintain a balanced workload. 
+ +The main aim of this strategy is to decrease the likelihood of concurrency and ordering issues while maintaining load balancing. This is **not a guarantee**, and you should handle the usual ordering and concurrency issues. + +## Updating a subscription group + +You can edit the settings of an existing subscription group while it is running; you don't need to delete and recreate it to change settings. When you update the subscription group, it resets itself internally, dropping the connections and having them reconnect. You must have admin permissions to update a persistent subscription group. + +@[code{update-persistent-subscription}](@grpc:persistent-subscriptions.js;persistent-subscriptions.ts) + +| Parameter | Description | +|:--------------|:----------------------------------------------------| +| `stream` | The stream the persistent subscription is on. | +| `groupName` | The name of the subscription group to update. | +| `settings` | The settings to use when updating the subscription. | +| `credentials` | The user credentials to use for this operation. | + +## Persistent subscription settings + +Both the `Create` and `Update` methods take some settings for configuring the persistent subscription. + +The following table shows the configuration options you can set on a persistent subscription. + +| Option | Description | Default | +|:------------------------|:----------------------------------------------------------------------------------------------------------------------------------|:------------------------------------------| +| `ResolveLinkTos` | Whether the subscription should resolve link events to their linked events. | `false` | +| `StartFrom` | The exclusive position in the stream or transaction file the subscription should start from. | `null` (start from the end of the stream) | +| `ExtraStatistics` | Whether to track latency statistics on this subscription.
| `false` | +| `MessageTimeout` | The amount of time after which to consider a message as timed out and retried. | `30` (seconds) | +| `MaxRetryCount` | The maximum number of retries (due to timeout) before a message is considered to be parked. | `10` | +| `LiveBufferSize` | The size of the buffer (in-memory) listening to live messages as they happen before paging occurs. | `500` | +| `ReadBatchSize` | The number of events read at a time when paging through history. | `20` | +| `HistoryBufferSize` | The number of events to cache when paging through history. | `500` | +| `CheckPointAfter` | The amount of time to try to checkpoint after. | `2` seconds | +| `MinCheckPointCount` | The minimum number of messages to process before a checkpoint may be written. | `10` | +| `MaxCheckPointCount` | The maximum number of messages not checkpointed before forcing a checkpoint. | `1000` | +| `MaxSubscriberCount` | The maximum number of subscribers allowed. | `0` (unbounded) | +| `NamedConsumerStrategy` | The strategy to use for distributing events to client consumers. See the [consumer strategies](#consumer-strategies) in this doc. | `RoundRobin` | + +## Deleting a subscription group + +Remove a subscription group with the delete operation. Like the creation of groups, you rarely do this in your runtime code; it is usually undertaken by an administrator running a script. + +@[code{delete-persistent-subscription}](@grpc:persistent-subscriptions.js;persistent-subscriptions.ts) + +| Parameter | Description | +|:--------------|:-----------------------------------------------| +| `stream` | The stream the persistent subscription is on. | +| `groupName` | The name of the subscription group to delete.
| +| `credentials` | The user credentials to use for this operation | diff --git a/docs/clients/grpc/nodejs/projections.md b/docs/clients/grpc/nodejs/projections.md new file mode 100644 index 00000000..42e0e0b4 --- /dev/null +++ b/docs/clients/grpc/nodejs/projections.md @@ -0,0 +1,179 @@ +--- +order: 6 +title: Projections +head: + - - title + - {} + - Projections | Node.js | Clients | Kurrent Docs +--- + +# Projection Management + +The various gRPC client APIs include dedicated clients that allow you to manage projections. + +For a detailed explanation of projections, see the [server documentation](@server/features/projections/README.md). + +You can find the full sample code from this documentation page in the respective [clients repositories](https://github.com/kurrent-io/?q=client). + +## Creating a client + +Projection management operations are exposed through a dedicated client. + +@[code{createClient}](@grpc:projection-management.js;projection-management.ts) + +## Create a projection + +Creates a projection that runs until the last event in the store, and then continues processing new events as they are appended to the store. The query parameter contains the JavaScript you want created as a projection. +Projections have explicit names, and you can enable or disable them via this name. + +@[code{CreateContinuous}](@grpc:projection-management.js;projection-management.ts) + +Trying to create projections with the same name will result in an error: + +@[code{CreateContinuous_Conflict}](@grpc:projection-management.js;projection-management.ts) + +## Restart the subsystem + +It is possible to restart the entire projection subsystem using the projections management client API. The user must be in the `$ops` or `$admin` group to perform this operation. + +@[code{RestartSubSystem}](@grpc:projection-management.js;projection-management.ts) + +## Enable a projection + +Enables an existing projection by name. 
+Once enabled, the projection will start to process events even after restarting the server or the projection subsystem. +You must have access to a projection to enable it, see the [ACL documentation](@server/security/user-authorization.md). + +@[code{Enable}](@grpc:projection-management.js;projection-management.ts) + +You can only enable an existing projection. When you try to enable a non-existing projection, you'll get an error: + +@[code{EnableNotFound}](@grpc:projection-management.js;projection-management.ts) + +## Disable a projection + +Disables a projection, this will save the projection checkpoint. +Once disabled, the projection will not process events even after restarting the server or the projection subsystem. +You must have access to a projection to disable it, see the [ACL documentation](@server/security/user-authorization.md). + +@[code{Disable}](@grpc:projection-management.js;projection-management.ts) + +You can only disable an existing projection. When you try to disable a non-existing projection, you'll get an error: + +@[code{DisableNotFound}](@grpc:projection-management.js;projection-management.ts) + +## Delete a projection + +Deletes an existing projection. You must disable the projection before deleting it, running projections cannot be deleted. Deleting a projection includes deleting the checkpoint and the emitted streams. + +@[code{Delete}](@grpc:projection-management.js;projection-management.ts) + +You can only delete an existing projection. When you try to delete a non-existing projection, you'll get an error: + +@[code{DeleteNotFound}](@grpc:projection-management.js;projection-management.ts) + +## Abort a projection + +Aborts a projection, this will not save the projection's checkpoint. + +@[code{Abort}](@grpc:projection-management.js;projection-management.ts) + +You can only abort an existing projection. 
When you try to abort a non-existing projection, you'll get an error: + +@[code{Abort_NotFound}](@grpc:projection-management.js;projection-management.ts) + +## Reset a projection + +Resets a projection, which causes deleting the projection checkpoint. This will force the projection to start afresh and re-emit events. Streams that are written to from the projection will also be soft-deleted. + +@[code{Reset}](@grpc:projection-management.js;projection-management.ts) + +Resetting a projection that does not exist will result in an error. + +@[code{Reset_NotFound}](@grpc:projection-management.js;projection-management.ts) + +## Update a projection + +Updates a projection with a given name. The query parameter contains the new JavaScript. Updating system projections using this operation is not supported at the moment. + +@[code{Update}](@grpc:projection-management.js;projection-management.ts) + +You can only update an existing projection. When you try to update a non-existing projection, you'll get an error: + +@[code{Update_NotFound}](@grpc:projection-management.js;projection-management.ts) + +## List all projections + +Returns a list of all projections, user defined & system projections. +See the [projection details](#projection-details) section for an explanation of the returned values. + +@[code{ListAll}](@grpc:projection-management.js;projection-management.ts) + +## List continuous projections + +Returns a list of all continuous projections. +See the [projection details](#projection-details) section for an explanation of the returned values. + +@[code{ListContinuous}](@grpc:projection-management.js;projection-management.ts) + +## Get status + +Gets the status of a named projection. +See the [projection details](#projection-details) section for an explanation of the returned values. + +@[code{GetStatus}](@grpc:projection-management.js;projection-management.ts) + +## Get state + +Retrieves the state of a projection. 
+ +@[code{GetState}](@grpc:projection-management.js;projection-management.ts) + +## Get result + +Retrieves the result of the named projection and partition. + +@[code{GetResult}](@grpc:projection-management.js;projection-management.ts) + +## Projection Details + +[List all](#list-all-projections), [list continuous](#list-continuous-projections) and [get status](#get-status) all return the details and statistics of projections + +| Field | Description | +|--------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `Name`, `EffectiveName` | The name of the projection | +| `Status` | A human readable string of the current statuses of the projection (see below) | +| `StateReason` | A human readable string explaining the reason of the current projection state | +| `CheckpointStatus` | A human readable string explaining the current operation performed on the checkpoint : `requested`, `writing` | +| `Mode` | `Continuous`, `OneTime` , `Transient` | +| `CoreProcessingTime` | The total time, in ms, the projection took to handle events since the last restart | +| `Progress` | The progress, in %, indicates how far this projection has processed event, in case of a restart this could be -1% or some number. 
It will be updated as soon as a new event is appended and processed | +| `WritesInProgress` | The number of write requests to emitted streams currently in progress, these writes can be batches of events | +| `ReadsInProgress` | The number of read requests currently in progress | +| `PartitionsCached` | The number of cached projection partitions | +| `Position` | The Position of the last processed event | +| `LastCheckpoint` | The Position of the last checkpoint of this projection | +| `EventsProcessedAfterRestart` | The number of events processed since the last restart of this projection | +| `BufferedEvents` | The number of events in the projection read buffer | +| `WritePendingEventsBeforeCheckpoint` | The number of events waiting to be appended to emitted streams before the pending checkpoint can be written | +| `WritePendingEventsAfterCheckpoint` | The number of events to be appended to emitted streams since the last checkpoint | +| `Version` | This is used internally, the version is increased when the projection is edited or reset | +| `Epoch` | This is used internally, the epoch is increased when the projection is reset | + +The `Status` string is a combination of the following values. 
+The first 3 are the most common one, as the other one are transient values while the projection is initialised or stopped + +| Value | Description | +|--------------------|-------------------------------------------------------------------------------------------------------------------------| +| Running | The projection is running and processing events | +| Stopped | The projection is stopped and is no longer processing new events | +| Faulted | An error occurred in the projection, `StateReason` will give the fault details, the projection is not processing events | +| Initial | This is the initial state, before the projection is fully initialised | +| Suspended | The projection is suspended and will not process events, this happens while stopping the projection | +| LoadStateRequested | The state of the projection is being retrieved, this happens while the projection is starting | +| StateLoaded | The state of the projection is loaded, this happens while the projection is starting | +| Subscribed | The projection has successfully subscribed to its readers, this happens while the projection is starting | +| FaultedStopping | This happens before the projection is stopped due to an error in the projection | +| Stopping | The projection is being stopped | +| CompletingPhase | This happens while the projection is stopping | +| PhaseCompleted | This happens while the projection is stopping | diff --git a/docs/clients/grpc/nodejs/reading-events.md b/docs/clients/grpc/nodejs/reading-events.md new file mode 100644 index 00000000..715ff270 --- /dev/null +++ b/docs/clients/grpc/nodejs/reading-events.md @@ -0,0 +1,135 @@ +--- +order: 3 +head: + - - title + - {} + - Reading Events | Node.js | Clients | Kurrent Docs +--- + +# Reading Events + +There are two options for reading events from KurrentDB. You can either: + 1. Read from an individual stream, or + 2. Read from the `$all` stream, which will return all events in the store. 
+ +Each event in KurrentDB belongs to an individual stream. When reading events, pick the name of the stream from which you want to read the events and choose whether to read the stream forwards or backwards. + +All events have a `StreamPosition` and a `Position`. `StreamPosition` is a *big int* (unsigned 64-bit integer) and represents the place of the event in the stream. `Position` is the event's logical position, and is represented by `CommitPosition` and a `PreparePosition`. Note that when reading events you will supply a different "position" depending on whether you are reading from an individual stream or the `$all` stream. + +:::tip +Check [connecting to KurrentDB instructions](getting-started.md#required-packages) to learn how to configure and use the client SDK. +::: + +## Reading from a stream + +You can read all the events or a sample of the events from individual streams, starting from any position in the stream, and can read either forward or backward. It is only possible to read events from a single stream at a time. You can read events from the global event log, which spans across streams. Learn more about this process in the [Read from `$all`](#reading-from-the-all-stream) section below. + +### Reading forwards + +The simplest way to read a stream forwards is to supply a stream name, read direction, and revision from which to start. The revision can either be a *stream position* `Start` or a *big int* (unsigned 64-bit integer): + +@[code{read-from-stream}](@grpc:reading-events.js;reading-events.ts) + +This will return an enumerable that can be iterated on: + +@[code{iterate-stream}](@grpc:reading-events.js;reading-events.ts) + +There are a number of additional arguments you can provide when reading a stream, listed below. + +#### maxCount + +Passing in the max count will limit the number of events returned. + +#### resolveLinkTos + +When using projections to create new events, you can set whether the generated events are pointers to existing events. 
Setting this value to `true` tells KurrentDB to return the event as well as the event linking to it. + +#### configureOperationOptions + +You can use the `configureOperationOptions` argument to provide a function that will customise settings for each operation. + +#### userCredentials + +The `userCredentials` argument is optional. It is used to override the default credentials specified when creating the client instance. + +@[code{overriding-user-credentials}](@grpc:reading-events.js;reading-events.ts) + +### Reading from a revision + +Instead of providing the `StreamPosition` you can also provide a specific stream revision as a *big int* (unsigned 64-bit integer). + +@[code{read-from-stream-position}](@grpc:reading-events.js;reading-events.ts) + +### Reading backwards + +In addition to reading a stream forwards, streams can be read backwards. To read all the events backwards, set the *stream position* to the end: + +@[code{reading-backwards}](@grpc:reading-events.js;reading-events.ts) + +:::tip +Read one event backwards to find the last position in the stream. +::: + +### Checking if the stream exists + +Reading a stream returns a `ReadStreamResult`, which contains a property `ReadState`. This property can have the value `StreamNotFound` or `Ok`. + +It is important to check the value of this field before attempting to iterate an empty stream, as it will throw an exception. + +For example: + +@[code{checking-for-stream-presence}](@grpc:reading-events.js;reading-events.ts) + +## Reading from the $all stream + +Reading from the `$all` stream is similar to reading from an individual stream, but please note there are differences. One significant difference is the need to provide admin user account credentials to read from the `$all` stream. Additionally, you need to provide a transaction log position instead of a stream revision when reading from the `$all` stream. 
+ +### Reading forwards + +The simplest way to read the `$all` stream forwards is to supply a read direction and the transaction log position from which you want to start. The transaction log position can either be a *stream position* `Start` or a *big int* (unsigned 64-bit integer): + +@[code{read-from-all-stream}](@grpc:reading-events.js;reading-events.ts) + +You can iterate asynchronously through the result: + +@[code{read-from-all-stream-iterate}](@grpc:reading-events.js;reading-events.ts) + +There are a number of additional arguments you can provide when reading the `$all` stream. + +#### maxCount + +Passing in the max count allows you to limit the number of events that are returned. + +#### resolveLinkTos + +When using projections to create new events you can set whether the generated events are pointers to existing events. Setting this value to true will tell KurrentDB to return the event as well as the event linking to it. + +@[code{read-from-all-stream-resolving-link-Tos}](@grpc:reading-events.js;reading-events.ts) + +#### configureOperationOptions + +This argument is a generic setting class for all operations that can be set on all operations executed against KurrentDB. + +#### userCredentials +The credentials used to read the data can be used by the subscription as follows. This will override the default credentials set on the connection. + +@[code{read-all-overriding-user-credentials}](@grpc:reading-events.js;reading-events.ts) + +### Reading backwards + +In addition to reading the `$all` stream forwards, it can be read backwards. To read all the events backwards, set the *position* to the end: + +@[code{read-from-all-stream-backwards}](@grpc:reading-events.js;reading-events.ts) + +:::tip +Read one event backwards to find the last position in the `$all` stream. +::: + +### Handling system events + +KurrentDB will also return system events when reading from the `$all` stream. In most cases you can ignore these events.
+ +All system events begin with `$` or `$$` and can be easily ignored by checking the `EventType` property. + +@[code{ignore-system-events}](@grpc:reading-events.js;reading-events.ts) + diff --git a/docs/clients/grpc/nodejs/release-notes.md b/docs/clients/grpc/nodejs/release-notes.md new file mode 100644 index 00000000..2fa8a745 --- /dev/null +++ b/docs/clients/grpc/nodejs/release-notes.md @@ -0,0 +1,12 @@ +--- +order: 10 +head: + - - title + - {} + - Release Notes | Node.js | Clients | Kurrent Docs +--- + +# Release Notes + +To stay up to date with the latest changes and improvements, including release history, changelogs, upgrade instructions, and migration guides, see the [KurrentDB Node.js client release notes](https://github.com/kurrent-io/KurrentDB-Client-NodeJS/releases). + diff --git a/docs/clients/grpc/nodejs/subscriptions.md b/docs/clients/grpc/nodejs/subscriptions.md new file mode 100644 index 00000000..75cb361d --- /dev/null +++ b/docs/clients/grpc/nodejs/subscriptions.md @@ -0,0 +1,234 @@ +--- +order: 4 +head: + - - title + - {} + - Catch-up Subscriptions | Node.js | Clients | Kurrent Docs +--- + +# Catch-up Subscriptions + +Subscriptions allow you to subscribe to a stream and receive notifications about new events added to the stream. + +You provide an event handler and an optional starting point to the subscription. The handler is called for each event from the starting point onward. + +If events already exist, the handler will be called for each event one by one until it reaches the end of the stream. The server will then notify the handler whenever a new event appears. + +:::tip +Check the [Getting Started](getting-started.md) guide to learn how to configure and use the client SDK. +::: + +## Subscribing from the start + +If you need to process all the events in the store, including historical events, you'll need to subscribe from the beginning. 
You can either subscribe to receive events from a single stream or subscribe to `$all` if you need to process all events in the database. + +### Subscribing to a stream + +The simplest stream subscription looks like the following: + +@[code{subscribe-to-stream}](@grpc:subscribing-to-streams.js;subscribing-to-streams.ts) + +The provided handler will be called for every event in the stream. + +When you subscribe to a stream with link events, for example the `$ce` category stream, you need to set `resolveLinkTos` to `true`. Read more about it [below](#resolving-link-to-events). + +### Subscribing to `$all` + +Subscribing to `$all` is similar to subscribing to a single stream. The handler will be called for every event appended after the starting position. + +@[code{subscribe-to-all}](@grpc:subscribing-to-streams.js;subscribing-to-streams.ts) + +## Subscribing from a specific position + +The previous examples subscribed to the stream from the beginning. That subscription invoked the handler for every event in the stream before waiting for new events. + +Both stream and $all subscriptions accept a starting position if you want to read from a specific point onward. If events already exist at the position you subscribe to, they will be read on the server side and sent to the subscription. + +Once caught up, the server will push any new events received on the streams to the client. There is no difference between catching up and live on the client side. + +::: warning +The positions provided to the subscriptions are exclusive. You will only receive the next event after the subscribed position. +::: + +### Subscribing to a stream + +To subscribe to a stream from a specific position, you must provide a *stream position*. This can be `Start`, `End` or a *big int* (unsigned 64-bit integer) position.
+ +The following subscribes to the stream `some-stream` at position `20`, this means that events `21` and onward will be handled: + +@[code{subscribe-to-stream-from-position}](@grpc:subscribing-to-streams.js;subscribing-to-streams.ts) + +### Subscribing to $all + +Subscribing to the `$all` stream is similar to subscribing to a regular stream. The difference is how to specify the starting position. For the `$all` stream, provide a `Position` structure that consists of two big integers: the prepare and commit positions. Use `Start`, `End`, or create a `Position` from specific commit and prepare values. + +The corresponding `$all` subscription will subscribe from the event after the one at commit position `1056` and prepare position `1056`. + +Please note that this position will need to be a legitimate position in `$all`. + +@[code{subscribe-to-all-from-position}](@grpc:subscribing-to-streams.js;subscribing-to-streams.ts) + +## Subscribing to a stream for live updates + +You can subscribe to a stream to get live updates by subscribing to the end of the stream: + +@[code{subscribe-to-stream-live}](@grpc:subscribing-to-streams.js;subscribing-to-streams.ts) + +And the same works with `$all` : + +@[code{subscribe-to-all-live}](@grpc:subscribing-to-streams.js;subscribing-to-streams.ts) + +This will not read through the history of the stream but will notify the handler when a new event appears in the respective stream. + +Keep in mind that when you subscribe to a stream from a specific position, as described [above](#subscribing-from-a-specific-position), you will also get live updates after your subscription catches up (processes all the historical events). + +## Resolving link-to events + +Link-to events point to events in other streams in KurrentDB. These are generally created by projections such as the `$by_event_type` projection which links events of the same event type into the same stream. This makes it easier to look up all events of a specific type. 
+ +::: tip +[Filtered subscriptions](subscriptions.md#server-side-filtering) make it easier and faster to subscribe to all events of a specific type or matching a prefix. +::: + +When reading a stream you can specify whether to resolve link-to events. By default, link-to events are not resolved. You can change this behaviour by setting the `resolveLinkTos` parameter to `true`: + +@[code{subscribe-to-stream-resolving-linktos}](@grpc:subscribing-to-streams.js;subscribing-to-streams.ts) + +## Dropped subscriptions + +When a subscription stops or experiences an error, it will be dropped. The subscription provides a `subscriptionDropped` callback, which will get called when the subscription breaks. + +The `subscriptionDropped` callback allows you to inspect the reason why the subscription dropped, as well as any exceptions that occurred. + +The possible reasons for a subscription to drop are: + +| Reason | Why it might happen | +|:------------------|:---------------------------------------------------------------------------------------------------------------------| +| `Disposed` | The client canceled or disposed of the subscription. | +| `SubscriberError` | An error occurred while handling an event in the subscription handler. | +| `ServerError` | An error occurred on the server, and the server closed the subscription. Check the server logs for more information. | + +Bear in mind that a subscription can also drop because it is slow. The server tried to push all the live events to the subscription when it is in the live processing mode. If the subscription gets the reading buffer overflow and won't be able to acknowledge the buffer, it will break. + +### Handling subscription drops + +An application, which hosts the subscription, can go offline for some time for different reasons. It could be a crash, infrastructure failure, or a new version deployment. 
As you rarely would want to reprocess all the events again, you'd need to store the current position of the subscription somewhere, and then use it to restore the subscription from the point where it dropped off: + +@[code{subscribe-to-stream-subscription-dropped}](@grpc:subscribing-to-streams.js;subscribing-to-streams.ts) + +When subscribed to `$all` you want to keep the event's position in the `$all` stream. As mentioned previously, the `$all` stream position consists of two big integers (prepare and commit positions), not one: + +@[code{subscribe-to-all-subscription-dropped}](@grpc:subscribing-to-streams.js;subscribing-to-streams.ts) + +## User credentials + +The user creating a subscription must have read access to the stream it's subscribing to, and only admin users may subscribe to `$all` or create filtered subscriptions. + +The code below shows how you can provide user credentials for a subscription. When you specify subscription credentials explicitly, it will override the default credentials set for the client. If you don't specify any credentials, the client will use the credentials specified for the client, if you specified those. + +@[code{overriding-user-credentials}](@grpc:subscribing-to-streams.js;subscribing-to-streams.ts) + +## Server-side filtering + +KurrentDB allows you to filter the events whilst subscribing to the `$all` stream to only receive the events you care about. + +You can filter by event type or stream name using a regular expression or a prefix. Server-side filtering is currently only available on the `$all` stream. + +::: tip +Server-side filtering was introduced as a simpler alternative to projections. You should consider filtering before creating a projection to include the events you care about. 
+::: + +A simple stream prefix filter looks like this: + +@[code{stream-prefix-filtered-subscription}](@grpc:subscribing-to-streams.js;subscribing-to-streams.ts) + +The filtering API is described more in-depth in the [filtering section](subscriptions.md#server-side-filtering). + +### Filtering out system events + +There are events in KurrentDB called system events. These are prefixed with a `$` and under most circumstances you won't care about these. They can be filtered out by passing in a `SubscriptionFilterOptions` when subscribing to the `$all` stream. + +@[code{exclude-system}](@grpc:server-side-filtering.js;server-side-filtering.ts) + +::: tip +`$stats` events are no longer stored in KurrentDB by default so there won't be as many `$` events as before. +::: + +### Filtering by event type + +If you only want to subscribe to events of a given type, there are two options. You can either use a regular expression or a prefix. + +#### Filtering by prefix + +If you want to filter by prefix, pass in a `SubscriptionFilterOptions` to the subscription with an `EventTypeFilter.Prefix`. + +@[code{event-type-prefix}](@grpc:server-side-filtering.js;server-side-filtering.ts) + +This will only subscribe to events with a type that begin with `customer-`. + +#### Filtering by regular expression + +It might be advantageous to provide a regular expression when you want to subscribe to multiple event types. + +@[code{event-type-regex}](@grpc:server-side-filtering.js;server-side-filtering.ts) + +This will subscribe to any event that begins with `user` or `company`. + +### Filtering by stream name + +To subscribe to a stream by name, choose either a regular expression or a prefix. + +#### Filtering by prefix + +If you want to filter by prefix, pass in a `SubscriptionFilterOptions` to the subscription with an `StreamFilter.Prefix`. 
A checkpoint is the position of an event in the `$all` stream up to which your application has processed events. By saving this position to a persistent store (e.g., a database), it allows your catch-up subscription to:
The checkpoint interval parameter configures the database to notify the client after every `n` * 32 events, where `n` is the value of the parameter.
-::: +.NET [.NET](/clients/grpc/dotnet/observability.md) -You can click on the links below to view the full code for each client: +Node.js [Node.js](/clients/grpc/nodejs/observability.md) -- [NodeJS](https://github.com/kurrent-io/KurrentDB-Client-NodeJS/blob/master/packages/test/src/samples/opentelemetry.ts) -- [Java](https://github.com/kurrent-io/KurrentDBDB-Client-Java/blob/trunk/src/test/java/io/kurrent/dbclient/samples/opentelemetry/Instrumentation.java) -- [C#](https://github.com/kurrent-io/KurrentDB-Client-Dotnet/blob/master/samples/diagnostics/Program.cs) - -## Required packages - -OpenTelemetry support is included to the KurrentDB Java client by default. For other clients, you need to install the dedicated package to enable OpenTelemetry support. - -### NodeJS - -Install the `@kurrent/opentelemetry` package using your package manager of choice. TypeScript type declarations are included in the package. - -::: tabs -@tab npm -```bash -npm install --save @kurrent/opentelemetry -``` -@tab yarn -```bash -yarn add @kurrent/opentelemetry -``` -@tab pnpm -```bash -pnpm add @kurrent/opentelemetry -``` -::: - -## Instrumentation - -To emit trace data, you must first install and use the dedicated package, as instructed in the -[Required Packages](./observability.md#required-packages) section, if provided. This package -includes the necessary instrumentation that needs to be registered with the client. - -@[code{register-instrumentation}](@grpc:opentelemetry.js;opentelemetry.ts;opentelemetry/Instrumentation.java;diagnostics/Program.cs) - -## Traces - -Traces provide a clear picture of how operations are carried out in a -distributed system, making it easier to maintain and enhance the system over -time. Traces from the clients can be exported to any compatible collector that -supports the OpenTelemetry protocol (OTLP). 
- -In order for the client to emit traces, you need to need to enable -instrumentation as described in -[Instrumentation](./observability.md#instrumentation). - -For more guidance on setting up and utilizing tracing, refer to the -[OpenTelemetry](https://opentelemetry.io/) documentation. - -An example of a trace is shown below: - -```bash -Activity.TraceId: 8da04787239dbb85c1f9c6fba1b1f0d6 -Activity.SpanId: 4352ec4a66a20b95 -Activity.TraceFlags: Recorded -Activity.ActivitySourceName: kurrentdb -Activity.DisplayName: streams.append -Activity.Kind: Client -Activity.StartTime: 2024-05-29T06:50:41.2519016Z -Activity.Duration: 00:00:00.1500707 -Activity.Tags: - db.kurrentdb.stream: d7caa2a5-1e19-4108-9541-58d5fba02d42 - server.address: localhost - server.port: 2113 - db.system: kurrentdb - db.operation: streams.append -StatusCode: Ok -Resource associated with Activity: - service.name: sample - service.instance.id: 7316ef20-c354-4e64-97da-c1b99c2c28b0 - telemetry.sdk.name: opentelemetry - telemetry.sdk.language: dotnet - telemetry.sdk.version: 1.8.1 -``` - -In this case, the trace is for an append operation on a stream. The trace -includes the trace ID, span ID, trace flags, activity source name, display name, -kind, start time, duration, tags, status code, and resource associated with the -activity. - -::: note -The structure of the trace may vary depending on the client and the operation -being performed but will generally include the same information. -::: - -## Exporting traces - -You can set up various exporters to send traces to different destinations. -Additionally, you have the option to export these traces to a collector of your -choice, such as [Jaeger](https://www.jaegertracing.io/) or [Seq](https://datalust.co/seq). - -For instance, if you choose to use Jaeger as your backend of choice, you can -view your traces in the Jaeger UI, which provides a powerful interface for -querying and visualizing your trace data. 
- -The code snippets below demonstrate how to set up one or more exporters for each -client: - -@[code{setup-exporter}](@grpc:opentelemetry.js;opentelemetry.ts;opentelemetry/Instrumentation.java;diagnostics/Program.cs) - -For more details on configuring exporters for specific programming languages, -refer to the [OpenTelemetry](https://opentelemetry.io/docs/languages/) -documentation. +Java [Java](/clients/grpc/java/observability.md) \ No newline at end of file diff --git a/docs/clients/grpc/persistent-subscriptions.md b/docs/clients/grpc/persistent-subscriptions.md index 20743c11..dbdb4eaa 100644 --- a/docs/clients/grpc/persistent-subscriptions.md +++ b/docs/clients/grpc/persistent-subscriptions.md @@ -1,160 +1,21 @@ --- -order: 5 +sitemap: + priority: 0 + changefreq: monthly --- -# Persistent subscriptions +# Persistent Subscriptions -Persistent subscriptions are similar to catch-up subscriptions, but there are two key differences: -- The subscription checkpoint is maintained by the server. It means that when your client reconnects to the persistent subscription, it will automatically resume from the last known position. -- It's possible to connect more than one event consumer to the same persistent subscription. In that case, the server will load-balance the consumers, depending on the defined strategy, and distribute the events to them. +To redirect you to the right page, please select a client: -Because of those, persistent subscriptions are defined as subscription groups that are defined and maintained by the server. Consumer then connect to a particular subscription group, and the server starts sending event to the consumer. +.NET [.NET](/clients/grpc/dotnet/persistent-subscriptions.md) -You can read more about persistent subscriptions in the [server documentation](@server/features/persistent-subscriptions.md). 
+Python [Python](/clients/grpc/python/persistent-subscriptions.md) -## Creating a subscription group +Node.js [Node.js](/clients/grpc/nodejs/persistent-subscriptions.md) -The first step of dealing with a persistent subscription is to create a subscription group. You will receive an error if you attempt to create a subscription group multiple times. You must have admin permissions to create a persistent subscription group. +Java [Java](/clients/grpc/java/persistent-subscriptions.md) -### Subscribing to one stream +Go [Go](/clients/grpc/go/persistent-subscriptions.md) -The following sample shows how to create a subscription group for a persistent subscription where you want to receive events from a specific stream. It could be a normal stream, or a stream of links (like `$ce` category stream). - -@[code{create-persistent-subscription-to-stream}](@grpc:persistent_subscriptions.py;persistent-subscriptions.js;persistent-subscriptions.ts;persistent_subscriptions/PersistentSubscriptions.java;persistent-subscriptions/Program.cs;persistentSubscriptions.go;persistent_subscriptions.rs) - -| Parameter | Description | -|:--------------|:----------------------------------------------------| -| `stream` | The stream the persistent subscription is on. | -| `groupName` | The name of the subscription group to create. | -| `settings` | The settings to use when creating the subscription. | -| `credentials` | The user credentials to use for this operation. | - -### Subscribing to $all - -The ability to subscribe to `$all` was introduced in EventStoreDB **21.10**. Persistent subscriptions to `$all` also support [filtering](subscriptions.md#server-side-filtering). 
- -You can create a subscription group on $all much the same way you would create a subscription group on a stream: - -@[code{create-persistent-subscription-to-all}](@grpc:persistent_subscriptions.py;persistent-subscriptions.js;persistent-subscriptions.ts;persistent_subscriptions/PersistentSubscriptions.java;persistent-subscriptions/Program.cs;persistentSubscriptions.go;persistent_subscriptions.rs) - -## Connecting a consumer - -Once you have created a subscription group, clients can connect to it. A subscription in your application should only have the connection in your code, you should assume that the subscription already exists. - -The most important parameter to pass when connecting is the buffer size. This represents how many outstanding messages the server should allow this client. If this number is too small, your subscription will spend much of its time idle as it waits for an acknowledgment to come back from the client. If it's too big, you waste resources and can start causing time out messages depending on the speed of your processing. - -### Connecting to one stream - -The code below shows how to connect to an existing subscription group for a specific stream: - -@[code{subscribe-to-persistent-subscription-to-stream}](@grpc:persistent_subscriptions.py;persistent-subscriptions.js;persistent-subscriptions.ts;persistent_subscriptions/PersistentSubscriptions.java;persistent-subscriptions/Program.cs;persistentSubscriptions.go;persistent_subscriptions.rs) - -| Parameter | Description | -|:----------------------|:---------------------------------------------------------------------------------------------| -| `stream` | The stream the persistent subscription is on. | -| `groupName` | The name of the subscription group to subscribe to. | -| `eventAppeared` | The action to call when an event arrives over the subscription. | -| `subscriptionDropped` | The action to call if the subscription is dropped. 
| -| `credentials` | The user credentials to use for this operation. | -| `bufferSize` | The number of in-flight messages this client is allowed. **Default: 10** | -| `autoAck` | Whether to automatically acknowledge messages after eventAppeared returns. **Default: true** | - -::: warning -The `autoAck` parameter will be deprecated in the next client release. You'll need to explicitly [manage acknowledgements](#acknowledgements). -::: - -### Connecting to $all - -The code below shows how to connect to an existing subscription group for `$all`: - -@[code{subscribe-to-persistent-subscription-to-all}](@grpc:persistent_subscriptions.py;persistent-subscriptions.js;persistent-subscriptions.ts;persistent_subscriptions/PersistentSubscriptions.java;persistent-subscriptions/Program.cs;persistentSubscriptions.go;persistent_subscriptions.rs) - -The `SubscribeToAllAsync` method is identical to the `SubscribeToStreamAsync` method, except that you don't need to specify a stream name. - -## Acknowledgements - -Clients must acknowledge (or not acknowledge) messages in the competing consumer model. - -If processing is successful, you must send an Ack (acknowledge) to the server to let it know that the message has been handled. If processing fails for some reason, then you can Nack (not acknowledge) the message and tell the server how to handle the failure. - -@[code{subscribe-to-persistent-subscription-with-manual-acks}](@grpc:persistent_subscriptions.py;persistent-subscriptions.js;persistent-subscriptions.ts;persistent_subscriptions/PersistentSubscriptions.java;persistent-subscriptions/Program.cs;persistentSubscriptions.go;persistent_subscriptions.rs) - -The _Nack event action_ describes what the server should do with the message: - -| Action | Description | -|:----------|:---------------------------------------------------------------------| -| `Unknown` | The client does not know what action to take. Let the server decide. | -| `Park` | Park the message and do not resend. 
Put it on poison queue. | -| `Retry` | Explicitly retry the message. | -| `Skip` | Skip this message do not resend and do not put in poison queue. | - -## Consumer strategies - -When creating a persistent subscription, you can choose between a number of consumer strategies. - -### RoundRobin (default) - -Distributes events to all clients evenly. If the client `bufferSize` is reached, the client won't receive more events until it acknowledges or not acknowledges events in its buffer. - -This strategy provides equal load balancing between all consumers in the group. - -### DispatchToSingle - -Distributes events to a single client until the `bufferSize` is reached. After that, the next client is selected in a round-robin style, and the process repeats. - -This option can be seen as a fall-back scenario for high availability, when a single consumer processes all the events until it reaches its maximum capacity. When that happens, another consumer takes the load to free up the main consumer resources. - -### Pinned - -For use with an indexing projection such as the system `$by_category` projection. - -KurrentDB inspects the event for its source stream id, hashing the id to one of 1024 buckets assigned to individual clients. When a client disconnects, its buckets are assigned to other clients. When a client connects, it is assigned some existing buckets. This naively attempts to maintain a balanced workload. - -The main aim of this strategy is to decrease the likelihood of concurrency and ordering issues while maintaining load balancing. This is **not a guarantee**, and you should handle the usual ordering and concurrency issues. - -## Updating a subscription group - -You can edit the settings of an existing subscription group while it is running, you don't need to delete and recreate it to change settings. When you update the subscription group, it resets itself internally, dropping the connections and having them reconnect. 
You must have admin permissions to update a persistent subscription group. - -@[code{update-persistent-subscription}](@grpc:persistent_subscriptions.py;persistent-subscriptions.js;persistent-subscriptions.ts;persistent_subscriptions/PersistentSubscriptions.java;persistent-subscriptions/Program.cs;persistentSubscriptions.go;persistent_subscriptions.rs) - -| Parameter | Description | -|:--------------|:----------------------------------------------------| -| `stream` | The stream the persistent subscription is on. | -| `groupName` | The name of the subscription group to update. | -| `settings` | The settings to use when creating the subscription. | -| `credentials` | The user credentials to use for this operation. | - -## Persistent subscription settings - -Both the `Create` and `Update` methods take some settings for configuring the persistent subscription. - -The following table shows the configuration options you can set on a persistent subscription. - -| Option | Description | Default | -|:------------------------|:----------------------------------------------------------------------------------------------------------------------------------|:------------------------------------------| -| `ResolveLinkTos` | Whether the subscription should resolve link events to their linked events. | `false` | -| `StartFrom` | The exclusive position in the stream or transaction file the subscription should start from. | `null` (start from the end of the stream) | -| `ExtraStatistics` | Whether to track latency statistics on this subscription. | `false` | -| `MessageTimeout` | The amount of time after which to consider a message as timed out and retried. | `30` (seconds) | -| `MaxRetryCount` | The maximum number of retries (due to timeout) before a message is considered to be parked. | `10` | -| `LiveBufferSize` | The size of the buffer (in-memory) listening to live messages as they happen before paging occurs. 
| `500` | -| `ReadBatchSize` | The number of events read at a time when paging through history. | `20` | -| `HistoryBufferSize` | The number of events to cache when paging through history. | `500` | -| `CheckPointAfter` | The amount of time to try to checkpoint after. | `2` seconds | -| `MinCheckPointCount` | The minimum number of messages to process before a checkpoint may be written. | `10` | -| `MaxCheckPointCount` | The maximum number of messages not checkpointed before forcing a checkpoint. | `1000` | -| `MaxSubscriberCount` | The maximum number of subscribers allowed. | `0` (unbounded) | -| `NamedConsumerStrategy` | The strategy to use for distributing events to client consumers. See the [consumer strategies](#consumer-strategies) in this doc. | `RoundRobin` | - -## Deleting a subscription group - -Remove a subscription group with the delete operation. Like the creation of groups, you rarely do this in your runtime code and is undertaken by an administrator running a script. - -@[code{delete-persistent-subscription}](@grpc:persistent_subscriptions.py;persistent-subscriptions.js;persistent-subscriptions.ts;persistent_subscriptions/PersistentSubscriptions.java;persistent-subscriptions/Program.cs;persistentSubscriptions.go;persistent_subscriptions.rs) - -| Parameter | Description | -|:--------------|:-----------------------------------------------| -| `stream` | The stream the persistent subscription is on. | -| `groupName` | The name of the subscription group to delete. 
| -| `credentials` | The user credentials to use for this operation | +Rust [Rust](/clients/grpc/rust/persistent-subscriptions.md) \ No newline at end of file diff --git a/docs/clients/grpc/projections.md b/docs/clients/grpc/projections.md index a88c2554..9303ba2d 100644 --- a/docs/clients/grpc/projections.md +++ b/docs/clients/grpc/projections.md @@ -1,245 +1,19 @@ ---- -order: 6 -title: Projections +--- +sitemap: + priority: 0 + changefreq: monthly --- -# Projection management - -The various gRPC client APIs include dedicated clients that allow you to manage projections. - -For a detailed explanation of projections, see the [server documentation](@server/features/projections/README.md). - -You can find the full sample code from this documentation page in the respective [clients repositories](https://github.com/kurrent-io/?q=client). - -## Required packages - -Install the client SDK package to your project. - -### NodeJS - -Add `@kurrent/kurrentdb-client` to your project using Yarn, NPM, or pnpm. - -TypeScript type declarations are included in the package. - -::: tabs -@tab npm -```bash -npm install --save @kurrent/kurrentdb-client -``` -@tab yarn -```bash -yarn add @kurrent/kurrentdb-client -``` -@tab pnpm -```bash -pnpm add @kurrent/kurrentdb-client -``` -::: - -### Java - -Add the `kurrentdb-client` dependency to your project using Maven or Gradle. - -::: tabs -@tab Maven -```xml - - io.kurrent - kurrentdb-client - 1.0.0 - -``` - -@tab Gradle -``` -implementation 'io.kurrent:kurrentdb-client:1.0.0' -``` - -For the most recent version of the KurrentDB client package, see [Maven Central](https://mvnrepository.com/artifact/io.kurrent/kurrentdb-client). 
-::: - -### Python - -Install the `kurrentdbclient` package from PyPI or use Poetry: - -::: tabs -@tab pip -```bash -pip install kurrentdbclient -``` -@tab Poetry -```bash -poetry add kurrentdbclient -``` -::: - -### Go - -Install the `kurrentdb` package using Go modules: - -```bash -go get github.com/kurrent-io/KurrentDB-Client-Go/kurrentdb -``` - -## Creating a client - -Projection management operations are exposed through a dedicated client. - -@[code{createClient}](@grpc:projection-management.js;projection-management.ts;quickstart.py;projection_management/ProjectionManagement.java;projection-management/Program.cs;quickstart.py;projectionManagement.go) - -## Create a projection - -Creates a projection that runs until the last event in the store, and then continues processing new events as they are appended to the store. The query parameter contains the JavaScript you want created as a projection. -Projections have explicit names, and you can enable or disable them via this name. - -@[code{CreateContinuous}](@grpc:projection-management.js;projection-management.ts;projection_management/ProjectionManagement.java;projection-management/Program.cs;projections.py;projectionManagement.go) - -Trying to create projections with the same name will result in an error: - -@[code{CreateContinuous_Conflict}](@grpc:projection-management.js;projection-management.ts;projection_management/ProjectionManagement.java;projection-management/Program.cs;projections.py;projectionManagement.go) - -## Restart the subsystem - -It is possible to restart the entire projection subsystem using the projections management client API. The user must be in the `$ops` or `$admin` group to perform this operation. - -@[code{RestartSubSystem}](@grpc:projection-management.js;projection-management.ts;projection_management/ProjectionManagement.java;projection-management/Program.cs;projections.py;projectionManagement.go) - -## Enable a projection - -Enables an existing projection by name. 
-Once enabled, the projection will start to process events even after restarting the server or the projection subsystem. -You must have access to a projection to enable it, see the [ACL documentation](@server/security/user-authorization.md). - -@[code{Enable}](@grpc:projection-management.js;projection-management.ts;projection_management/ProjectionManagement.java;projection-management/Program.cs;projections.py;projectionManagement.go) - -You can only enable an existing projection. When you try to enable a non-existing projection, you'll get an error: - -@[code{EnableNotFound}](@grpc:projection-management.js;projection-management.ts;projection_management/ProjectionManagement.java;projection-management/Program.cs;projections.py;projectionManagement.go) - -## Disable a projection - -Disables a projection, this will save the projection checkpoint. -Once disabled, the projection will not process events even after restarting the server or the projection subsystem. -You must have access to a projection to disable it, see the [ACL documentation](@server/security/user-authorization.md). - -@[code{Disable}](@grpc:projection-management.js;projection-management.ts;projection_management/ProjectionManagement.java;projection-management/Program.cs;projections.py;projectionManagement.go) - -You can only disable an existing projection. When you try to disable a non-existing projection, you'll get an error: - -@[code{DisableNotFound}](@grpc:projection-management.js;projection-management.ts;projection_management/ProjectionManagement.java;projection-management/Program.cs;projections.py;projectionManagement.go) - -## Delete a projection - -Deletes an existing projection. You must disable the projection before deleting it, running projections cannot be deleted. Deleting a projection includes deleting the checkpoint and the emitted streams. 
- -@[code{Delete}](@grpc:projection-management.js;projection-management.ts;projection_management/ProjectionManagement.java;projection-management/Program.cs;projections.py;projectionManagement.go) - -You can only delete an existing projection. When you try to delete a non-existing projection, you'll get an error: - -@[code{DeleteNotFound}](@grpc:projection-management.js;projection-management.ts;projection_management/ProjectionManagement.java;projections.py;projectionManagement.go) - -## Abort a projection - -Aborts a projection, this will not save the projection's checkpoint. - -@[code{Abort}](@grpc:projection-management.js;projection-management.ts;projection_management/ProjectionManagement.java;projection-management/Program.cs;projections.py;projectionManagement.go) - -You can only abort an existing projection. When you try to abort a non-existing projection, you'll get an error: - -@[code{Abort_NotFound}](@grpc:projection-management.js;projection-management.ts;projection_management/ProjectionManagement.java;projection-management/Program.cs;projections.py;projectionManagement.go) - -## Reset a projection - -Resets a projection, which causes deleting the projection checkpoint. This will force the projection to start afresh and re-emit events. Streams that are written to from the projection will also be soft-deleted. - -@[code{Reset}](@grpc:projection-management.js;projection-management.ts;projection_management/ProjectionManagement.java;projection-management/Program.cs;projections.py;projectionManagement.go) - -Resetting a projection that does not exist will result in an error. - -@[code{Reset_NotFound}](@grpc:projection-management.js;projection-management.ts;projection_management/ProjectionManagement.java;projection-management/Program.cs;projections.py;projectionManagement.go) - -## Update a projection - -Updates a projection with a given name. The query parameter contains the new JavaScript. 
Updating system projections using this operation is not supported at the moment. - -@[code{Update}](@grpc:projection-management.js;projection-management.ts;projection_management/ProjectionManagement.java;projection-management/Program.cs;projections.py;projectionManagement.go) - -You can only update an existing projection. When you try to update a non-existing projection, you'll get an error: - -@[code{Update_NotFound}](@grpc:projection-management.js;projection-management.ts;projection_management/ProjectionManagement.java;projection-management/Program.cs;projections.py;projectionManagement.go) - -## List all projections - -Returns a list of all projections, user defined & system projections. -See the [projection details](#projection-details) section for an explanation of the returned values. - -@[code{ListAll}](@grpc:projection-management.js;projection-management.ts;projection_management/ProjectionManagement.java;projection-management/Program.cs;projections.py;projectionManagement.go) - -## List continuous projections - -Returns a list of all continuous projections. -See the [projection details](#projection-details) section for an explanation of the returned values. - -@[code{ListContinuous}](@grpc:projection-management.js;projection-management.ts;projection_management/ProjectionManagement.java;projection-management/Program.cs;projections.py;projectionManagement.go) - -## Get status - -Gets the status of a named projection. -See the [projection details](#projection-details) section for an explanation of the returned values. - -@[code{GetStatus}](@grpc:projection-management.js;projection-management.ts;projection_management/ProjectionManagement.java;projection-management/Program.cs;projections.py;projectionManagement.go) - -## Get state - -Retrieves the state of a projection. 
- -@[code{GetState}](@grpc:projection-management.js;projection-management.ts;projection_management/ProjectionManagement.java;projection-management/Program.cs;projections.py;projectionManagement.go) - -## Get result - -Retrieves the result of the named projection and partition. +# Projections -@[code{GetResult}](@grpc:projection-management.js;projection-management.ts;projection_management/ProjectionManagement.java;projection-management/Program.cs;projections.py;projectionManagement.go) +To redirect you to the right page, please select a client: -## Projection Details +.NET [.NET](/clients/grpc/dotnet/projections.md) -[List all](#list-all-projections), [list continuous](#list-continuous-projections) and [get status](#get-status) all return the details and statistics of projections +Python [Python](/clients/grpc/python/projections.md) -| Field | Description | -|--------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `Name`, `EffectiveName` | The name of the projection | -| `Status` | A human readable string of the current statuses of the projection (see below) | -| `StateReason` | A human readable string explaining the reason of the current projection state | -| `CheckpointStatus` | A human readable string explaining the current operation performed on the checkpoint : `requested`, `writing` | -| `Mode` | `Continuous`, `OneTime` , `Transient` | -| `CoreProcessingTime` | The total time, in ms, the projection took to handle events since the last restart | -| `Progress` | The progress, in %, indicates how far this projection has processed event, in case of a restart this could be -1% or some number. 
It will be updated as soon as a new event is appended and processed | -| `WritesInProgress` | The number of write requests to emitted streams currently in progress, these writes can be batches of events | -| `ReadsInProgress` | The number of read requests currently in progress | -| `PartitionsCached` | The number of cached projection partitions | -| `Position` | The Position of the last processed event | -| `LastCheckpoint` | The Position of the last checkpoint of this projection | -| `EventsProcessedAfterRestart` | The number of events processed since the last restart of this projection | -| `BufferedEvents` | The number of events in the projection read buffer | -| `WritePendingEventsBeforeCheckpoint` | The number of events waiting to be appended to emitted streams before the pending checkpoint can be written | -| `WritePendingEventsAfterCheckpoint` | The number of events to be appended to emitted streams since the last checkpoint | -| `Version` | This is used internally, the version is increased when the projection is edited or reset | -| `Epoch` | This is used internally, the epoch is increased when the projection is reset | +Node.js [Node.js](/clients/grpc/nodejs/projections.md) -The `Status` string is a combination of the following values. 
-The first 3 are the most common one, as the other one are transient values while the projection is initialised or stopped +Java [Java](/clients/grpc/java/projections.md) -| Value | Description | -|--------------------|-------------------------------------------------------------------------------------------------------------------------| -| Running | The projection is running and processing events | -| Stopped | The projection is stopped and is no longer processing new events | -| Faulted | An error occurred in the projection, `StateReason` will give the fault details, the projection is not processing events | -| Initial | This is the initial state, before the projection is fully initialised | -| Suspended | The projection is suspended and will not process events, this happens while stopping the projection | -| LoadStateRequested | The state of the projection is being retrieved, this happens while the projection is starting | -| StateLoaded | The state of the projection is loaded, this happens while the projection is starting | -| Subscribed | The projection has successfully subscribed to its readers, this happens while the projection is starting | -| FaultedStopping | This happens before the projection is stopped due to an error in the projection | -| Stopping | The projection is being stopped | -| CompletingPhase | This happens while the projection is stopping | -| PhaseCompleted | This happens while the projection is stopping | +Go [Go](/clients/grpc/go/projections.md) \ No newline at end of file diff --git a/docs/clients/grpc/python/README.md b/docs/clients/grpc/python/README.md new file mode 100644 index 00000000..7994d9f5 --- /dev/null +++ b/docs/clients/grpc/python/README.md @@ -0,0 +1,9 @@ +--- +index: false +--- + +# Python + +Learn how to use the KurrentDB Python client library to interact with the database. 
+ + \ No newline at end of file diff --git a/docs/clients/grpc/python/appending-events.md b/docs/clients/grpc/python/appending-events.md new file mode 100644 index 00000000..b490eb61 --- /dev/null +++ b/docs/clients/grpc/python/appending-events.md @@ -0,0 +1,87 @@ +--- +order: 2 +head: + - - title + - {} + - Appending Events | Python | Clients | Kurrent Docs +--- + +# Appending Events + +When you start working with KurrentDB, it is empty. The first meaningful operation is to add one or more events to the database using one of the available client SDKs. + +::: tip +Check the [Getting Started](getting-started.md) guide to learn how to configure and use the client SDK. +::: + +## Append your first event + +The simplest way to append an event to KurrentDB is to create an `EventData` object and call `AppendToStream` method. + +@[code{append-to-stream}](@grpc:appending_events.py) + +`AppendToStream` takes a collection of `EventData`, which allows you to save more than one event in a single batch. + +Outside the example above, other options exist for dealing with different scenarios. + +::: tip +If you are new to Event Sourcing, please study the [Handling concurrency](#handling-concurrency) section below. +::: + +## Working with EventData + +Events appended to KurrentDB must be wrapped in an `EventData` object. This allows you to specify the event's content, the type of event, and whether it's in JSON format. In its simplest form, you need three arguments: **eventId**, **type**, and **data**. + +### eventId + +This takes the format of a `Uuid` and is used to uniquely identify the event you are trying to append. If two events with the same `Uuid` are appended to the same stream in quick succession, KurrentDB will only append one of the events to the stream. 
+ +For example, the following code will only append a single event: + +@[code{append-duplicate-event}](@grpc:appending_events.py) + +![Duplicate Event](../images/duplicate-event.png) + +### type + +Each event should be supplied with an event type. This unique string is used to identify the type of event you are saving. + +It is common to see the explicit event code type name used as the type as it makes serialising and de-serialising of the event easy. However, we recommend against this as it couples the storage to the type and will make it more difficult if you need to version the event at a later date. + +### data + +Representation of your event data. It is recommended that you store your events as JSON objects. This allows you to take advantage of all of KurrentDB's functionality, such as projections. That said, you can save events using whatever format suits your workflow. Eventually, the data will be stored as encoded bytes. + +### metadata + +Storing additional information alongside your event that is part of the event itself is standard practice. This can be correlation IDs, timestamps, access information, etc. KurrentDB allows you to store a separate byte array containing this information to keep it separate. + +### isJson + +Simple boolean field to tell KurrentDB if the event is stored as json, true by default. + +## Handling concurrency + +When appending events to a stream, you can supply a *stream state* or *stream revision*. Your client uses this to inform KurrentDB of the state or version you expect the stream to be in when appending an event. If the stream isn't in that state, an exception will be thrown. 
+ +For example, if you try to append the same record twice, expecting both times that the stream doesn't exist, you will get an exception on the second: + +@[code{append-with-no-stream}](@grpc:appending_events.py) + +There are three available stream states: +- `Any` +- `NoStream` +- `StreamExists` + +This check can be used to implement optimistic concurrency. When retrieving a stream from KurrentDB, note the current version number. When you save it back, you can determine if somebody else has modified the record in the meantime. + +@[code{append-with-concurrency-check}](@grpc:appending_events.py) + + + +## User credentials + +You can provide user credentials to append the data as follows. This will override the default credentials set on the connection. + +@[code{overriding-user-credentials}](@grpc:appending_events.py) + diff --git a/docs/clients/grpc/python/authentication.md b/docs/clients/grpc/python/authentication.md new file mode 100644 index 00000000..b77e0294 --- /dev/null +++ b/docs/clients/grpc/python/authentication.md @@ -0,0 +1,60 @@ +--- +title: Authentication +order: 7 +head: + - - title + - {} + - Authentication | Python | Clients | Kurrent Docs +--- + +## Client x.509 certificate + +X.509 certificates are digital certificates that use the X.509 public key infrastructure (PKI) standard to verify the identity of clients and servers. They play a crucial role in establishing a secure connection by providing a way to authenticate identities and establish trust. + +### Prerequisites + +1. KurrentDB 25.0 or greater, or EventStoreDB 24.10. +2. A commercial license with the User Certificates entitlement. +3. A valid x.509 certificate, which can be created using version `1.3` or higher of the [gencert tool](https://github.com/kurrent-io/es-gencert-cli). +4. The server must run in secure mode. See [Security Options](@server/security/protocol-security.md) for more information. +5. 
[Enable User Certificates plugin on the server](@server/security/user-authentication.md#user-x509-certificates) + +#### Generate user certificates + +The following command uses the [gencert tool](https://github.com/kurrent-io/es-gencert-cli) to generate a user certificate for the user `admin` that will expire in 10 days: + +::: tabs#os +@tab bash +```bash +./es-gencert-cli create-user -username admin -days 10 -ca-certificate ./es-ca/ca.crt -ca-key ./es-ca/ca.key +``` +@tab PowerShell +```powershell +.\es-gencert-cli.exe create-user -username admin -days 10 -ca-certificate ./es-ca/ca.crt -ca-key ./es-ca/ca.key +``` +::: + +### Connect to KurrentDB using an x.509 certificate + +To connect to KurrentDB using an x.509 certificate, you need to provide the +certificate and the private key to the client. If both username/password and +certificate authentication data are supplied, the client prioritizes user +credentials for authentication. The client will throw an error if the +certificate and the key are not both provided. + +::: tip +Please note that currently, password-protected private key files are not supported. +::: + +The client supports the following parameters: + +| Parameter | Description | +|----------------|--------------------------------------------------------------------------------| +| `userCertFile` | The file containing the X.509 user certificate in PEM format. | +| `userKeyFile` | The file containing the user certificate’s matching private key in PEM format. | + +To authenticate, include these two parameters in your connection string or constructor when initializing the client. 
+ +Check the samples for the following clients: + +@[code{client-with-user-certificates}](@grpc:/user_certificates.py) \ No newline at end of file diff --git a/docs/clients/grpc/python/delete-stream.md b/docs/clients/grpc/python/delete-stream.md new file mode 100644 index 00000000..f254e7e3 --- /dev/null +++ b/docs/clients/grpc/python/delete-stream.md @@ -0,0 +1,39 @@ +--- +order: 9 +head: + - - title + - {} + - Deleting Events | Python | Clients | Kurrent Docs +--- + +# Deleting Events + +In KurrentDB, you can delete events and streams either partially or completely. Settings like $maxAge and $maxCount help control how long events are kept or how many events are stored in a stream, but they won't delete the entire stream. +When you need to fully remove a stream, KurrentDB offers two options: Soft Delete and Hard Delete. + +## Soft delete + +Soft delete in KurrentDB allows you to mark a stream for deletion without completely removing it, so you can still add new events later. While you can do this through the UI, using code is often better for automating the process, +handling many streams at once, or including custom rules. Code is especially helpful for large-scale deletions or when you need to integrate soft deletes into other workflows. + + +```python +client.delete_stream(stream_name, current_version=6) +``` + + +::: note +Clicking the delete button in the UI performs a soft delete, +setting the TruncateBefore value to remove all events up to a certain point. +While this marks the events for deletion, actual removal occurs during the next scavenging process. +The stream can still be reopened by appending new events. +::: + +## Hard delete + +Hard delete in KurrentDB permanently removes a stream and its events. While you can use the HTTP API, code is often better for automating the process, managing multiple streams, and ensuring precise control. Code is especially useful when you need to integrate hard delete into larger workflows or apply specific conditions. 
Note that when a stream is hard deleted, you cannot reuse the stream name, it will raise an exception if you try to append to it again. + + +```python +client.tombstone_stream(stream_name, current_version=4) +``` diff --git a/docs/clients/grpc/python/getting-started.md b/docs/clients/grpc/python/getting-started.md new file mode 100644 index 00000000..417f2981 --- /dev/null +++ b/docs/clients/grpc/python/getting-started.md @@ -0,0 +1,114 @@ +--- +order: 1 +head: + - - title + - {} + - Getting Started | Python | Clients | Kurrent Docs +--- + +# Getting Started + +Get started by connecting your application to KurrentDB. + +## Connecting to KurrentDB + +To connect your application to KurrentDB, instantiate and configure the client. + +::: tip Insecure clusters +All our GRPC clients are secure by default and must be configured to connect to an insecure server via [a connection string](#connection-string) or the client's configuration. +::: + +### Required packages + +Install the `kurrentdbclient` package from PyPI or use Poetry: + +::: tabs +@tab pip +```bash +pip install kurrentdbclient +``` +@tab Poetry +```bash +poetry add kurrentdbclient +``` +::: + + +### Connection string + +Each SDK has its own way of configuring the client, but the connection string can always be used. +The KurrentDB connection string supports two schemas: `kurrentdb://` for connecting to a single-node server, and `kurrentdb+discover://` for connecting to a multi-node cluster. The difference between the two schemas is that when using `kurrentdb://`, the client will connect directly to the node; with `kurrentdb+discover://` schema the client will use the gossip protocol to retrieve the cluster information and choose the right node to connect to. +Since version 22.10, ESDB supports gossip on single-node deployments, so `kurrentdb+discover://` schema can be used for connecting to any topology. 
+ +The connection string has the following format: + +``` +kurrentdb+discover://admin:changeit@cluster.dns.name:2113 +``` + +There, `cluster.dns.name` is the name of a DNS `A` record that points to all the cluster nodes. Alternatively, you can list cluster nodes separated by comma instead of the cluster DNS name: + +``` +kurrentdb+discover://admin:changeit@node1.dns.name:2113,node2.dns.name:2113,node3.dns.name:2113 +``` + +There are a number of query parameters that can be used in the connection string to instruct the cluster how and where the connection should be established. All query parameters are optional. + +| Parameter | Accepted values | Default | Description | +|-----------------------|---------------------------------------------------|----------|------------------------------------------------------------------------------------------------------------------------------------------------| +| `tls` | `true`, `false` | `true` | Use secure connection, set to `false` when connecting to a non-secure server or cluster. | +| `connectionName` | Any string | None | Connection name | +| `maxDiscoverAttempts` | Number | `10` | Number of attempts to discover the cluster. | +| `discoveryInterval` | Number | `100` | Cluster discovery polling interval in milliseconds. | +| `gossipTimeout` | Number | `5` | Gossip timeout in seconds, when the gossip call times out, it will be retried. | +| `nodePreference` | `leader`, `follower`, `random`, `readOnlyReplica` | `leader` | Preferred node role. When creating a client for write operations, always use `leader`. | +| `tlsVerifyCert` | `true`, `false` | `true` | In secure mode, set to `true` when using an untrusted connection to the node if you don't have the CA file available. Don't use in production. | +| `tlsCaFile` | String, file path | None | Path to the CA file when connecting to a secure cluster with a certificate that's not signed by a trusted CA. 
| +| `defaultDeadline` | Number | None | Default timeout for client operations, in milliseconds. Most clients allow overriding the deadline per operation. | +| `keepAliveInterval` | Number | `10` | Interval between keep-alive ping calls, in seconds. | +| `keepAliveTimeout` | Number | `10` | Keep-alive ping call timeout, in seconds. | +| `userCertFile` | String, file path | None | User certificate file for X.509 authentication. | +| `userKeyFile` | String, file path | None | Key file for the user certificate used for X.509 authentication. | + +When connecting to an insecure instance, specify `tls=false` parameter. For example, for a node running locally use `kurrentdb://localhost:2113?tls=false`. Note that usernames and passwords aren't provided there because insecure deployments don't support authentication and authorisation. + +### Creating a client + +First, create a client and get it connected to the database. + +@[code{createClient}](@grpc:quickstart.py) + +The client instance can be used as a singleton across the whole application. It doesn't need to open or close the connection. + +### Creating an event + +You can write anything to KurrentDB as events. The client needs a byte array as the event payload. Normally, you'd use a serialized object, and it's up to you to choose the serialization method. + +::: tip Server-side projections +User-defined server-side projections require events to be serialized in JSON format. + +We use JSON for serialization in the documentation examples. +::: + +The code snippet below creates an event object instance, serializes it, and adds it as a payload to the `EventData` structure, which the client can then write to the database. + +@[code{createEvent}](@grpc:quickstart.py) + +### Appending events + +Each event in the database has its own unique identifier (UUID). The database uses it to ensure idempotent writes, but it only works if you specify the stream revision when appending events to the stream. 
In the snippet below, we append the event to the stream `some-stream`. + +@[code{appendEvents}](@grpc:quickstart.py) + +Here we are appending events without checking if the stream exists or if the stream version matches the expected event version. See more advanced scenarios in [appending events documentation](./appending-events.md). + +### Reading events + +Finally, we can read events back from the `some-stream` stream. + +@[code{readStream}](@grpc:quickstart.py) + +When you read events from the stream, you get a collection of `ResolvedEvent` structures. The event payload is returned as a byte array and needs to be deserialized. See more advanced scenarios in [reading events documentation](./reading-events.md). + diff --git a/docs/clients/grpc/python/persistent-subscriptions.md b/docs/clients/grpc/python/persistent-subscriptions.md new file mode 100644 index 00000000..9f383af5 --- /dev/null +++ b/docs/clients/grpc/python/persistent-subscriptions.md @@ -0,0 +1,164 @@ +--- +order: 5 +head: + - - title + - {} + - Persistent Subscriptions | Python | Clients | Kurrent Docs +--- + +# Persistent Subscriptions + +Persistent subscriptions are similar to catch-up subscriptions, but there are two key differences: +- The subscription checkpoint is maintained by the server. It means that when your client reconnects to the persistent subscription, it will automatically resume from the last known position. +- It's possible to connect more than one event consumer to the same persistent subscription. In that case, the server will load-balance the consumers, depending on the defined strategy, and distribute the events to them. + +Because of those, persistent subscriptions are defined as subscription groups that are defined and maintained by the server. Consumers then connect to a particular subscription group, and the server starts sending events to the consumers. 
+ +You can read more about persistent subscriptions in the [server documentation](@server/features/persistent-subscriptions.md). + +## Creating a subscription group + +The first step of dealing with a persistent subscription is to create a subscription group. You will receive an error if you attempt to create a subscription group multiple times. You must have admin permissions to create a persistent subscription group. + +### Subscribing to one stream + +The following sample shows how to create a subscription group for a persistent subscription where you want to receive events from a specific stream. It could be a normal stream, or a stream of links (like `$ce` category stream). + +@[code{create-persistent-subscription-to-stream}](@grpc:persistent_subscriptions.py) + +| Parameter | Description | +|:--------------|:----------------------------------------------------| +| `stream` | The stream the persistent subscription is on. | +| `groupName` | The name of the subscription group to create. | +| `settings` | The settings to use when creating the subscription. | +| `credentials` | The user credentials to use for this operation. | + +### Subscribing to $all + +The ability to subscribe to `$all` was introduced in EventStoreDB **21.10**. Persistent subscriptions to `$all` also support [filtering](subscriptions.md#server-side-filtering). + +You can create a subscription group on $all much the same way you would create a subscription group on a stream: + +@[code{create-persistent-subscription-to-all}](@grpc:persistent_subscriptions.py) + +## Connecting a consumer + +Once you have created a subscription group, clients can connect to it. A subscription in your application should only have the connection in your code, you should assume that the subscription already exists. + +The most important parameter to pass when connecting is the buffer size. This represents how many outstanding messages the server should allow this client. 
If this number is too small, your subscription will spend much of its time idle as it waits for an acknowledgment to come back from the client. If it's too big, you waste resources and can start causing time out messages depending on the speed of your processing. + +### Connecting to one stream + +The code below shows how to connect to an existing subscription group for a specific stream: + +@[code{subscribe-to-persistent-subscription-to-stream}](@grpc:persistent_subscriptions.py) + +| Parameter | Description | +|:----------------------|:---------------------------------------------------------------------------------------------| +| `stream` | The stream the persistent subscription is on. | +| `groupName` | The name of the subscription group to subscribe to. | +| `eventAppeared` | The action to call when an event arrives over the subscription. | +| `subscriptionDropped` | The action to call if the subscription is dropped. | +| `credentials` | The user credentials to use for this operation. | +| `bufferSize` | The number of in-flight messages this client is allowed. **Default: 10** | +| `autoAck` | Whether to automatically acknowledge messages after eventAppeared returns. **Default: true** | + +::: warning +The `autoAck` parameter will be deprecated in the next client release. You'll need to explicitly [manage acknowledgements](#acknowledgements). +::: + +### Connecting to $all + +The code below shows how to connect to an existing subscription group for `$all`: + +@[code{subscribe-to-persistent-subscription-to-all}](@grpc:persistent_subscriptions.py) + +The `SubscribeToAllAsync` method is identical to the `SubscribeToStreamAsync` method, except that you don't need to specify a stream name. + +## Acknowledgements + +Clients must acknowledge (or not acknowledge) messages in the competing consumer model. + +If processing is successful, you must send an Ack (acknowledge) to the server to let it know that the message has been handled. 
If processing fails for some reason, then you can Nack (not acknowledge) the message and tell the server how to handle the failure. + +@[code{subscribe-to-persistent-subscription-with-manual-acks}](@grpc:persistent_subscriptions.py) + +The _Nack event action_ describes what the server should do with the message: + +| Action | Description | +|:----------|:---------------------------------------------------------------------| +| `Unknown` | The client does not know what action to take. Let the server decide. | +| `Park` | Park the message and do not resend. Put it in the poison queue. | +| `Retry` | Explicitly retry the message. | +| `Skip` | Skip this message, do not resend it, and do not put it in the poison queue. | + +## Consumer strategies + +When creating a persistent subscription, you can choose between a number of consumer strategies. + +### RoundRobin (default) + +Distributes events to all clients evenly. If the client `bufferSize` is reached, the client won't receive more events until it acknowledges or not acknowledges events in its buffer. + +This strategy provides equal load balancing between all consumers in the group. + +### DispatchToSingle + +Distributes events to a single client until the `bufferSize` is reached. After that, the next client is selected in a round-robin style, and the process repeats. + +This option can be seen as a fall-back scenario for high availability, when a single consumer processes all the events until it reaches its maximum capacity. When that happens, another consumer takes the load to free up the main consumer resources. + +### Pinned + +For use with an indexing projection such as the system `$by_category` projection. + +KurrentDB inspects the event for its source stream id, hashing the id to one of 1024 buckets assigned to individual clients. When a client disconnects, its buckets are assigned to other clients. When a client connects, it is assigned some existing buckets. This naively attempts to maintain a balanced workload. 
+ +The main aim of this strategy is to decrease the likelihood of concurrency and ordering issues while maintaining load balancing. This is **not a guarantee**, and you should handle the usual ordering and concurrency issues. + +## Updating a subscription group + +You can edit the settings of an existing subscription group while it is running, you don't need to delete and recreate it to change settings. When you update the subscription group, it resets itself internally, dropping the connections and having them reconnect. You must have admin permissions to update a persistent subscription group. + +@[code{update-persistent-subscription}](@grpc:persistent_subscriptions.py) + +| Parameter | Description | +|:--------------|:----------------------------------------------------| +| `stream` | The stream the persistent subscription is on. | +| `groupName` | The name of the subscription group to update. | +| `settings` | The settings to use when creating the subscription. | +| `credentials` | The user credentials to use for this operation. | + +## Persistent subscription settings + +Both the `Create` and `Update` methods take some settings for configuring the persistent subscription. + +The following table shows the configuration options you can set on a persistent subscription. + +| Option | Description | Default | +|:------------------------|:----------------------------------------------------------------------------------------------------------------------------------|:------------------------------------------| +| `ResolveLinkTos` | Whether the subscription should resolve link events to their linked events. | `false` | +| `StartFrom` | The exclusive position in the stream or transaction file the subscription should start from. | `null` (start from the end of the stream) | +| `ExtraStatistics` | Whether to track latency statistics on this subscription. | `false` | +| `MessageTimeout` | The amount of time after which to consider a message as timed out and retried. 
| `30` (seconds) | +| `MaxRetryCount` | The maximum number of retries (due to timeout) before a message is considered to be parked. | `10` | +| `LiveBufferSize` | The size of the buffer (in-memory) listening to live messages as they happen before paging occurs. | `500` | +| `ReadBatchSize` | The number of events read at a time when paging through history. | `20` | +| `HistoryBufferSize` | The number of events to cache when paging through history. | `500` | +| `CheckPointAfter` | The amount of time to try to checkpoint after. | `2` seconds | +| `MinCheckPointCount` | The minimum number of messages to process before a checkpoint may be written. | `10` | +| `MaxCheckPointCount` | The maximum number of messages not checkpointed before forcing a checkpoint. | `1000` | +| `MaxSubscriberCount` | The maximum number of subscribers allowed. | `0` (unbounded) | +| `NamedConsumerStrategy` | The strategy to use for distributing events to client consumers. See the [consumer strategies](#consumer-strategies) in this doc. | `RoundRobin` | + +## Deleting a subscription group + +Remove a subscription group with the delete operation. Like the creation of groups, you rarely do this in your runtime code and is undertaken by an administrator running a script. + +@[code{delete-persistent-subscription}](@grpc:persistent_subscriptions.py) + +| Parameter | Description | +|:--------------|:-----------------------------------------------| +| `stream` | The stream the persistent subscription is on. | +| `groupName` | The name of the subscription group to delete. 
| +| `credentials` | The user credentials to use for this operation | diff --git a/docs/clients/grpc/python/projections.md b/docs/clients/grpc/python/projections.md new file mode 100644 index 00000000..eea06c81 --- /dev/null +++ b/docs/clients/grpc/python/projections.md @@ -0,0 +1,179 @@ +--- +order: 6 +title: Projections +head: + - - title + - {} + - Projections | Python | Clients | Kurrent Docs +--- + +# Projection Management + +The various gRPC client APIs include dedicated clients that allow you to manage projections. + +For a detailed explanation of projections, see the [server documentation](@server/features/projections/README.md). + +You can find the full sample code from this documentation page in the respective [clients repositories](https://github.com/kurrent-io/?q=client). + +## Creating a client + +Projection management operations are exposed through a dedicated client. + +@[code{createClient}](@grpc:quickstart.py) + +## Create a projection + +Creates a projection that runs until the last event in the store, and then continues processing new events as they are appended to the store. The query parameter contains the JavaScript you want created as a projection. +Projections have explicit names, and you can enable or disable them via this name. + +@[code{CreateContinuous}](@grpc:projections.py) + +Trying to create projections with the same name will result in an error: + +@[code{CreateContinuous_Conflict}](@grpc:projections.py) + +## Restart the subsystem + +It is possible to restart the entire projection subsystem using the projections management client API. The user must be in the `$ops` or `$admin` group to perform this operation. + +@[code{RestartSubSystem}](@grpc:projections.py) + +## Enable a projection + +Enables an existing projection by name. +Once enabled, the projection will start to process events even after restarting the server or the projection subsystem. 
+You must have access to a projection to enable it, see the [ACL documentation](@server/security/user-authorization.md). + +@[code{Enable}](@grpc:projections.py) + +You can only enable an existing projection. When you try to enable a non-existing projection, you'll get an error: + +@[code{EnableNotFound}](@grpc:projections.py) + +## Disable a projection + +Disables a projection, this will save the projection checkpoint. +Once disabled, the projection will not process events even after restarting the server or the projection subsystem. +You must have access to a projection to disable it, see the [ACL documentation](@server/security/user-authorization.md). + +@[code{Disable}](@grpc:projections.py) + +You can only disable an existing projection. When you try to disable a non-existing projection, you'll get an error: + +@[code{DisableNotFound}](@grpc:projections.py) + +## Delete a projection + +Deletes an existing projection. You must disable the projection before deleting it, running projections cannot be deleted. Deleting a projection includes deleting the checkpoint and the emitted streams. + +@[code{Delete}](@grpc:projections.py) + +You can only delete an existing projection. When you try to delete a non-existing projection, you'll get an error: + +@[code{DeleteNotFound}](@grpc:projections.py) + +## Abort a projection + +Aborts a projection, this will not save the projection's checkpoint. + +@[code{Abort}](@grpc:projections.py) + +You can only abort an existing projection. When you try to abort a non-existing projection, you'll get an error: + +@[code{Abort_NotFound}](@grpc:projections.py) + +## Reset a projection + +Resets a projection, which causes deleting the projection checkpoint. This will force the projection to start afresh and re-emit events. Streams that are written to from the projection will also be soft-deleted. + +@[code{Reset}](@grpc:projections.py) + +Resetting a projection that does not exist will result in an error. 
+ +@[code{Reset_NotFound}](@grpc:projections.py) + +## Update a projection + +Updates a projection with a given name. The query parameter contains the new JavaScript. Updating system projections using this operation is not supported at the moment. + +@[code{Update}](@grpc:projections.py) + +You can only update an existing projection. When you try to update a non-existing projection, you'll get an error: + +@[code{Update_NotFound}](@grpc:projections.py) + +## List all projections + +Returns a list of all projections, user defined & system projections. +See the [projection details](#projection-details) section for an explanation of the returned values. + +@[code{ListAll}](@grpc:projections.py) + +## List continuous projections + +Returns a list of all continuous projections. +See the [projection details](#projection-details) section for an explanation of the returned values. + +@[code{ListContinuous}](@grpc:projections.py) + +## Get status + +Gets the status of a named projection. +See the [projection details](#projection-details) section for an explanation of the returned values. + +@[code{GetStatus}](@grpc:projections.py) + +## Get state + +Retrieves the state of a projection. + +@[code{GetState}](@grpc:projections.py) + +## Get result + +Retrieves the result of the named projection and partition. 
+ +@[code{GetResult}](@grpc:projections.py) + +## Projection Details + +[List all](#list-all-projections), [list continuous](#list-continuous-projections) and [get status](#get-status) all return the details and statistics of projections + +| Field | Description | +|--------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `Name`, `EffectiveName` | The name of the projection | +| `Status` | A human readable string of the current statuses of the projection (see below) | +| `StateReason` | A human readable string explaining the reason of the current projection state | +| `CheckpointStatus` | A human readable string explaining the current operation performed on the checkpoint : `requested`, `writing` | +| `Mode` | `Continuous`, `OneTime` , `Transient` | +| `CoreProcessingTime` | The total time, in ms, the projection took to handle events since the last restart | +| `Progress` | The progress, in %, indicates how far this projection has processed event, in case of a restart this could be -1% or some number. 
It will be updated as soon as a new event is appended and processed | +| `WritesInProgress` | The number of write requests to emitted streams currently in progress, these writes can be batches of events | +| `ReadsInProgress` | The number of read requests currently in progress | +| `PartitionsCached` | The number of cached projection partitions | +| `Position` | The Position of the last processed event | +| `LastCheckpoint` | The Position of the last checkpoint of this projection | +| `EventsProcessedAfterRestart` | The number of events processed since the last restart of this projection | +| `BufferedEvents` | The number of events in the projection read buffer | +| `WritePendingEventsBeforeCheckpoint` | The number of events waiting to be appended to emitted streams before the pending checkpoint can be written | +| `WritePendingEventsAfterCheckpoint` | The number of events to be appended to emitted streams since the last checkpoint | +| `Version` | This is used internally, the version is increased when the projection is edited or reset | +| `Epoch` | This is used internally, the epoch is increased when the projection is reset | + +The `Status` string is a combination of the following values. 
+The first 3 are the most common ones, as the others are transient values while the projection is initialised or stopped + +| Value | Description | +|--------------------|-------------------------------------------------------------------------------------------------------------------------| +| Running | The projection is running and processing events | +| Stopped | The projection is stopped and is no longer processing new events | +| Faulted | An error occurred in the projection, `StateReason` will give the fault details, the projection is not processing events | +| Initial | This is the initial state, before the projection is fully initialised | +| Suspended | The projection is suspended and will not process events, this happens while stopping the projection | +| LoadStateRequested | The state of the projection is being retrieved, this happens while the projection is starting | +| StateLoaded | The state of the projection is loaded, this happens while the projection is starting | +| Subscribed | The projection has successfully subscribed to its readers, this happens while the projection is starting | +| FaultedStopping | This happens before the projection is stopped due to an error in the projection | +| Stopping | The projection is being stopped | +| CompletingPhase | This happens while the projection is stopping | +| PhaseCompleted | This happens while the projection is stopping | diff --git a/docs/clients/grpc/python/reading-events.md b/docs/clients/grpc/python/reading-events.md new file mode 100644 index 00000000..cfb56974 --- /dev/null +++ b/docs/clients/grpc/python/reading-events.md @@ -0,0 +1,135 @@ +--- +order: 3 +head: + - - title + - {} + - Reading Events | Python | Clients | Kurrent Docs +--- + +# Reading Events + +There are two options for reading events from KurrentDB. You can either: + 1. Read from an individual stream, or + 2. Read from the `$all` stream, which will return all events in the store.
+ +Each event in KurrentDB belongs to an individual stream. When reading events, pick the name of the stream from which you want to read the events and choose whether to read the stream forwards or backwards. + +All events have a `StreamPosition` and a `Position`. `StreamPosition` is a *big int* (unsigned 64-bit integer) and represents the place of the event in the stream. `Position` is the event's logical position, and is represented by `CommitPosition` and a `PreparePosition`. Note that when reading events you will supply a different "position" depending on whether you are reading from an individual stream or the `$all` stream. + +:::tip +Check [connecting to KurrentDB instructions](getting-started.md#required-packages) to learn how to configure and use the client SDK. +::: + +## Reading from a stream + +You can read all the events or a sample of the events from individual streams, starting from any position in the stream, and can read either forward or backward. It is only possible to read events from a single stream at a time. You can read events from the global event log, which spans across streams. Learn more about this process in the [Read from `$all`](#reading-from-the-all-stream) section below. + +### Reading forwards + +The simplest way to read a stream forwards is to supply a stream name, read direction, and revision from which to start. The revision can either be a *stream position* `Start` or a *big int* (unsigned 64-bit integer): + +@[code{read-from-stream}](@grpc:reading_events.py) + +This will return an enumerable that can be iterated on: + +@[code{iterate-stream}](@grpc:reading_events.py) + +There are a number of additional arguments you can provide when reading a stream, listed below. + +#### maxCount + +Passing in the max count will limit the number of events returned. + +#### resolveLinkTos + +When using projections to create new events, you can set whether the generated events are pointers to existing events. 
Setting this value to `true` tells KurrentDB to return the event as well as the event linking to it. + +#### configureOperationOptions + +You can use the `configureOperationOptions` argument to provide a function that will customise settings for each operation. + +#### userCredentials + +The `userCredentials` argument is optional. It is used to override the default credentials specified when creating the client instance. + +@[code{overriding-user-credentials}](@grpc:reading_events.py) + +### Reading from a revision + +Instead of providing the `StreamPosition` you can also provide a specific stream revision as a *big int* (unsigned 64-bit integer). + +@[code{read-from-stream-position}](@grpc:reading_events.py) + +### Reading backwards + +In addition to reading a stream forwards, streams can be read backwards. To read all the events backwards, set the *stream position* to the end: + +@[code{reading-backwards}](@grpc:reading_events.py) + +:::tip +Read one event backwards to find the last position in the stream. +::: + +### Checking if the stream exists + +Reading a stream returns a `ReadStreamResult`, which contains a property `ReadState`. This property can have the value `StreamNotFound` or `Ok`. + +It is important to check the value of this field before attempting to iterate an empty stream, as it will throw an exception. + +For example: + +@[code{checking-for-stream-presence}](@grpc:reading_events.py) + +## Reading from the $all stream + +Reading from the `$all` stream is similar to reading from an individual stream, but please note there are differences. One significant difference is the need to provide admin user account credentials to read from the `$all` stream. Additionally, you need to provide a transaction log position instead of a stream revision when reading from the `$all` stream. + +### Reading forwards + +The simplest way to read the `$all` stream forwards is to supply a read direction and the transaction log position from which you want to start. 
The transaction log position can either be a *stream position* `Start` or a *big int* (unsigned 64-bit integer): + +@[code{read-from-all-stream}](@grpc:reading_events.py) + +You can iterate asynchronously through the result: + +@[code{read-from-all-stream-iterate}](@grpc:reading_events.py) + +There are a number of additional arguments you can provide when reading the `$all` stream. + +#### maxCount + +Passing in the max count allows you to limit the number of events that are returned. + +#### resolveLinkTos + +When using projections to create new events you can set whether the generated events are pointers to existing events. Setting this value to true will tell KurrentDB to return the event as well as the event linking to it. + +@[code{read-from-all-stream-resolving-link-Tos}](@grpc:reading_events.py) + +#### configureOperationOptions + +This argument is a generic settings class for all operations that can be set on all operations executed against KurrentDB. + +#### userCredentials +The credentials used to read the data can be used by the subscription as follows. This will override the default credentials set on the connection. + +@[code{read-all-overriding-user-credentials}](@grpc:reading_events.py) + +### Reading backwards + +In addition to reading the `$all` stream forwards, it can be read backwards. To read all the events backwards, set the *position* to the end: + +@[code{read-from-all-stream-backwards}](@grpc:reading_events.py) + +:::tip +Read one event backwards to find the last position in the `$all` stream. +::: + +### Handling system events + +KurrentDB will also return system events when reading from the `$all` stream. In most cases you can ignore these events. + +All system events begin with `$` or `$$` and can be easily ignored by checking the `EventType` property.
+ +@[code{ignore-system-events}](@grpc:reading_events.py) + diff --git a/docs/clients/grpc/python/release-notes.md b/docs/clients/grpc/python/release-notes.md new file mode 100644 index 00000000..45ea0759 --- /dev/null +++ b/docs/clients/grpc/python/release-notes.md @@ -0,0 +1,11 @@ +--- +order: 10 +head: + - - title + - {} + - Release Notes | Python | Clients | Kurrent Docs +--- + +# Release Notes + +To stay up to date with the latest changes and improvements, including release history, changelogs, upgrade instructions, and migration guides, see the [KurrentDB Python client release notes](https://github.com/pyeventsourcing/kurrentdbclient/releases). diff --git a/docs/clients/grpc/python/subscriptions.md b/docs/clients/grpc/python/subscriptions.md new file mode 100644 index 00000000..b0080214 --- /dev/null +++ b/docs/clients/grpc/python/subscriptions.md @@ -0,0 +1,234 @@ +--- +order: 4 +head: + - - title + - {} + - Catch-up Subscriptions | Python | Clients | Kurrent Docs +--- + +# Catch-up Subscriptions + +Subscriptions allow you to subscribe to a stream and receive notifications about new events added to the stream. + +You provide an event handler and an optional starting point to the subscription. The handler is called for each event from the starting point onward. + +If events already exist, the handler will be called for each event one by one until it reaches the end of the stream. The server will then notify the handler whenever a new event appears. + +:::tip +Check the [Getting Started](getting-started.md) guide to learn how to configure and use the client SDK. +::: + +## Subscribing from the start + +If you need to process all the events in the store, including historical events, you'll need to subscribe from the beginning. You can either subscribe to receive events from a single stream or subscribe to `$all` if you need to process all events in the database. 
+ +### Subscribing to a stream + +The simplest stream subscription looks like the following : + +@[code{subscribe-to-stream}](@grpc:subscribing_to_stream.py) + +The provided handler will be called for every event in the stream. + +When you subscribe to a stream with link events, for example the `$ce` category stream, you need to set `resolveLinkTos` to `true`. Read more about it [below](#resolving-link-to-s). + +### Subscribing to `$all` + +Subscribing to `$all` is similar to subscribing to a single stream. The handler will be called for every event appended after the starting position. + +@[code{subscribe-to-all}](@grpc:subscribing_to_stream.py) + +## Subscribing from a specific position + +The previous examples subscribed to the stream from the beginning. That subscription invoked the handler for every event in the stream before waiting for new events. + +Both stream and $all subscriptions accept a starting position if you want to read from a specific point onward. If events already exist at the position you subscribe to, they will be read on the server side and sent to the subscription. + +Once caught up, the server will push any new events received on the streams to the client. There is no difference between catching up and live on the client side. + +::: warning +The positions provided to the subscriptions are exclusive. You will only receive the next event after the subscribed position. +::: + +### Subscribing to a stream + +To subscribe to a stream from a specific position, you must provide a *stream position*. This can be `Start`, `End` or a *big int* (unsigned 64 bit integer) position. + +The following subscribes to the stream `some-stream` at position `20`, this means that events `21` and onward will be handled: + +@[code{subscribe-to-stream-from-position}](@grpc:subscribing_to_stream.py) + +### Subscribing to $all + +Subscribing to the `$all` stream is similar to subscribing to a regular stream. The difference is how to specify the starting position. 
For the `$all` stream, provide a `Position` structure that consists of two big integers: the prepare and commit positions. Use `Start`, `End`, or create a `Position` from specific commit and prepare values. + +The corresponding `$all` subscription will subscribe from the event after the one at commit position `1056` and prepare position `1056`. + +Please note that this position will need to be a legitimate position in `$all`. + +@[code{subscribe-to-all-from-position}](@grpc:subscribing_to_stream.py) + +## Subscribing to a stream for live updates + +You can subscribe to a stream to get live updates by subscribing to the end of the stream: + +@[code{subscribe-to-stream-live}](@grpc:subscribing_to_stream.py) + +And the same works with `$all` : + +@[code{subscribe-to-all-live}](@grpc:subscribing_to_stream.py) + +This will not read through the history of the stream but will notify the handler when a new event appears in the respective stream. + +Keep in mind that when you subscribe to a stream from a specific position, as described [above](#subscribing-from-a-specific-position), you will also get live updates after your subscription catches up (processes all the historical events). + +## Resolving link-to events + +Link-to events point to events in other streams in KurrentDB. These are generally created by projections such as the `$by_event_type` projection which links events of the same event type into the same stream. This makes it easier to look up all events of a specific type. + +::: tip +[Filtered subscriptions](subscriptions.md#server-side-filtering) make it easier and faster to subscribe to all events of a specific type or matching a prefix. +::: + +When reading a stream you can specify whether to resolve link-to's. By default, link-to events are not resolved. 
You can change this behaviour by setting the `resolveLinkTos` parameter to `true`: + +@[code{subscribe-to-stream-resolving-linktos}](@grpc:subscribing_to_stream.py) + +## Dropped subscriptions + +When a subscription stops or experiences an error, it will be dropped. The subscription provides a `subscriptionDropped` callback, which will get called when the subscription breaks. + +The `subscriptionDropped` callback allows you to inspect the reason why the subscription dropped, as well as any exceptions that occurred. + +The possible reasons for a subscription to drop are: + +| Reason | Why it might happen | +|:------------------|:---------------------------------------------------------------------------------------------------------------------| +| `Disposed` | The client canceled or disposed of the subscription. | +| `SubscriberError` | An error occurred while handling an event in the subscription handler. | +| `ServerError` | An error occurred on the server, and the server closed the subscription. Check the server logs for more information. | + +Bear in mind that a subscription can also drop because it is slow. The server tried to push all the live events to the subscription when it is in the live processing mode. If the subscription gets the reading buffer overflow and won't be able to acknowledge the buffer, it will break. + +### Handling subscription drops + +An application, which hosts the subscription, can go offline for some time for different reasons. It could be a crash, infrastructure failure, or a new version deployment. As you rarely would want to reprocess all the events again, you'd need to store the current position of the subscription somewhere, and then use it to restore the subscription from the point where it dropped off: + +@[code{subscribe-to-stream-subscription-dropped}](@grpc:subscribing_to_stream.py) + +When subscribed to `$all` you want to keep the event's position in the `$all` stream. 
As mentioned previously, the `$all` stream position consists of two big integers (prepare and commit positions), not one: + +@[code{subscribe-to-all-subscription-dropped}](@grpc:subscribing_to_stream.py) + +## User credentials + +The user creating a subscription must have read access to the stream it's subscribing to, and only admin users may subscribe to `$all` or create filtered subscriptions. + +The code below shows how you can provide user credentials for a subscription. When you specify subscription credentials explicitly, it will override the default credentials set for the client. If you don't specify any credentials, the client will use the credentials specified for the client, if you specified those. + +@[code{overriding-user-credentials}](@grpc:subscribing_to_stream.py) + +## Server-side filtering + +KurrentDB allows you to filter the events whilst subscribing to the `$all` stream to only receive the events you care about. + +You can filter by event type or stream name using a regular expression or a prefix. Server-side filtering is currently only available on the `$all` stream. + +::: tip +Server-side filtering was introduced as a simpler alternative to projections. You should consider filtering before creating a projection to include the events you care about. +::: + +A simple stream prefix filter looks like this: + +@[code{stream-prefix-filtered-subscription}](@grpc:subscribing_to_stream.py) + +The filtering API is described more in-depth in the [filtering section](subscriptions.md#server-side-filtering). + +### Filtering out system events + +There are events in KurrentDB called system events. These are prefixed with a `$` and under most circumstances you won't care about these. They can be filtered out by passing in a `SubscriptionFilterOptions` when subscribing to the `$all` stream. 
+ +@[code{exclude-system}](@grpc:server_side_filtering.py) + +::: tip +`$stats` events are no longer stored in KurrentDB by default so there won't be as many `$` events as before. +::: + +### Filtering by event type + +If you only want to subscribe to events of a given type, there are two options. You can either use a regular expression or a prefix. + +#### Filtering by prefix + +If you want to filter by prefix, pass in a `SubscriptionFilterOptions` to the subscription with an `EventTypeFilter.Prefix`. + +@[code{event-type-prefix}](@grpc:server_side_filtering.py) + +This will only subscribe to events with a type that begins with `customer-`. + +#### Filtering by regular expression + +It might be advantageous to provide a regular expression when you want to subscribe to multiple event types. + +@[code{event-type-regex}](@grpc:server_side_filtering.py) + +This will subscribe to any event that begins with `user` or `company`. + +### Filtering by stream name + +To subscribe to a stream by name, choose either a regular expression or a prefix. + +#### Filtering by prefix + +If you want to filter by prefix, pass in a `SubscriptionFilterOptions` to the subscription with a `StreamFilter.Prefix`. + +@[code{stream-prefix}](@grpc:server_side_filtering.py) + +This will only subscribe to all streams with a name that begins with `user-`. + +#### Filtering by regular expression + +To subscribe to multiple streams, use a regular expression. + +@[code{stream-regex}](@grpc:server_side_filtering.py) + +This will subscribe to any stream with a name that begins with `account` or `savings`. + +## Checkpointing + +When a catch-up subscription is used to process an `$all` stream containing many events, the last thing you want is for your application to crash midway, forcing you to restart from the beginning. + +### What is a checkpoint? + +A checkpoint is the position of an event in the `$all` stream up to which your application has processed.
By saving this position to a persistent store (e.g., a database), it allows your catch-up subscription to: +- Recover from crashes by reading the checkpoint and resuming from that position +- Avoid reprocessing all events from the start + +To create a checkpoint, store the event's commit or prepare position. + +::: warning +If your database contains events created by the legacy TCP client using the [transaction feature](https://docs.kurrent.io/clients/tcp/dotnet/21.2/appending.html#transactions), you should store both the commit and prepare positions together as your checkpoint. +::: + +### Updating checkpoints at regular intervals +The client SDK provides a way to notify your application after processing a configurable number of events. This allows you to periodically save a checkpoint at regular intervals. + +@[code{checkpoint}](@grpc:server_side_filtering.py) + +By default, the checkpoint notification is sent after every 32 non-system events processed from $all. + +### Configuring the checkpoint interval +You can adjust the checkpoint interval to change how often the client is notified. + +@[code{checkpoint-with-interval}](@grpc:server_side_filtering.py) + +By configuring this parameter, you can balance between reducing checkpoint overhead and ensuring quick recovery in case of a failure. + +::: info +The checkpoint interval parameter configures the database to notify the client after `n` * 32 number of events where `n` is defined by the parameter. + +For example: +- If `n` = 1, a checkpoint notification is sent every 32 events. +- If `n` = 2, the notification is sent every 64 events. +- If `n` = 3, it is sent every 96 events, and so on. 
+::: diff --git a/docs/clients/grpc/reading-events.md b/docs/clients/grpc/reading-events.md index 4fcf8da1..8683069f 100644 --- a/docs/clients/grpc/reading-events.md +++ b/docs/clients/grpc/reading-events.md @@ -1,131 +1,21 @@ --- -order: 3 +sitemap: + priority: 0 + changefreq: monthly --- -# Reading events +# Reading Events -There are two options for reading events from KurrentDB. You can either: - 1. Read from an individual stream, or - 2. Read from the `$all` stream, which will return all events in the store. +To redirect you to the right page, please select a client: -Each event in KurrentDB belongs to an individual stream. When reading events, pick the name of the stream from which you want to read the events and choose whether to read the stream forwards or backwards. +.NET [.NET](/clients/grpc/dotnet/reading-events.md) -All events have a `StreamPosition` and a `Position`. `StreamPosition` is a *big int* (unsigned 64-bit integer) and represents the place of the event in the stream. `Position` is the event's logical position, and is represented by `CommitPosition` and a `PreparePosition`. Note that when reading events you will supply a different "position" depending on whether you are reading from an individual stream or the `$all` stream. +Python [Python](/clients/grpc/python/reading-events.md) -:::tip -Check [connecting to KurrentDB instructions](getting-started.md#required-packages) to learn how to configure and use the client SDK. -::: +Node.js [Node.js](/clients/grpc/nodejs/reading-events.md) -## Reading from a stream +Java [Java](/clients/grpc/java/reading-events.md) -You can read all the events or a sample of the events from individual streams, starting from any position in the stream, and can read either forward or backward. It is only possible to read events from a single stream at a time. You can read events from the global event log, which spans across streams. 
Learn more about this process in the [Read from `$all`](#reading-from-the-all-stream) section below. - -### Reading forwards - -The simplest way to read a stream forwards is to supply a stream name, read direction, and revision from which to start. The revision can either be a *stream position* `Start` or a *big int* (unsigned 64-bit integer): - -@[code{read-from-stream}](@grpc:reading_events.py;reading-events.js;reading-events.ts;reading_events/ReadingEvents.java;reading-events/Program.cs;readingEvents.go;reading_events.rs) - -This will return an enumerable that can be iterated on: - -@[code{iterate-stream}](@grpc:reading_events.py;reading-events.js;reading-events.ts;reading_events/ReadingEvents.java;reading-events/Program.cs;readingEvents.go;reading_events.rs) - -There are a number of additional arguments you can provide when reading a stream, listed below. - -#### maxCount - -Passing in the max count will limit the number of events returned. - -#### resolveLinkTos - -When using projections to create new events, you can set whether the generated events are pointers to existing events. Setting this value to `true` tells KurrentDB to return the event as well as the event linking to it. - -#### configureOperationOptions - -You can use the `configureOperationOptions` argument to provide a function that will customise settings for each operation. - -#### userCredentials - -The `userCredentials` argument is optional. It is used to override the default credentials specified when creating the client instance. - -@[code{overriding-user-credentials}](@grpc:reading_events.py;reading-events.js;reading-events.ts;reading_events/ReadingEvents.java;reading-events/Program.cs;readingEvents.go;reading_events.rs) - -### Reading from a revision - -Instead of providing the `StreamPosition` you can also provide a specific stream revision as a *big int* (unsigned 64-bit integer). 
- -@[code{read-from-stream-position}](@grpc:reading_events.py;reading-events.js;reading-events.ts;reading_events/ReadingEvents.java;reading-events/Program.cs;readingEvents.go;reading_events.rs) - -### Reading backwards - -In addition to reading a stream forwards, streams can be read backwards. To read all the events backwards, set the *stream position* to the end: - -@[code{reading-backwards}](@grpc:reading_events.py;reading-events.js;reading-events.ts;reading_events/ReadingEvents.java;reading-events/Program.cs;readingEvents.go;reading_events.rs) - -:::tip -Read one event backwards to find the last position in the stream. -::: - -### Checking if the stream exists - -Reading a stream returns a `ReadStreamResult`, which contains a property `ReadState`. This property can have the value `StreamNotFound` or `Ok`. - -It is important to check the value of this field before attempting to iterate an empty stream, as it will throw an exception. - -For example: - -@[code{checking-for-stream-presence}](@grpc:reading_events.py;reading-events.js;reading-events.ts;reading_events/ReadingEvents.java;reading-events/Program.cs;readingEvents.go;reading_events.rs) - -## Reading from the $all stream - -Reading from the `$all` stream is similar to reading from an individual stream, but please note there are differences. One significant difference is the need to provide admin user account credentials to read from the `$all` stream. Additionally, you need to provide a transaction log position instead of a stream revision when reading from the `$all` stream. - -### Reading forwards - -The simplest way to read the `$all` stream forwards is to supply a read direction and the transaction log position from which you want to start. 
The transaction log postion can either be a *stream position* `Start` or a *big int* (unsigned 64-bit integer): - -@[code{read-from-all-stream}](@grpc:reading_events.py;reading-events.js;reading-events.ts;reading_events/ReadingEvents.java;reading-events/Program.cs;readingEvents.go;reading_events.rs) - -You can iterate asynchronously through the result: - -@[code{read-from-all-stream-iterate}](@grpc:reading_events.py;reading-events.js;reading-events.ts;reading_events/ReadingEvents.java;reading-events/Program.cs;readingEvents.go;reading_events.rs) - -There are a number of additional arguments you can provide when reading the `$all` stream. - -#### maxCount - -Passing in the max count allows you to limit the number of events that returned. - -#### resolveLinkTos - -When using projections to create new events you can set whether the generated events are pointers to existing events. Setting this value to true will tell KurrentDB to return the event as well as the event linking to it. - -@[code{read-from-all-stream-resolving-link-Tos}](@grpc:reading_events.py;reading-events.js;reading-events.ts;reading_events/ReadingEvents.java;reading-events/Program.cs;readingEvents.go;reading_events.rs) - -#### configureOperationOptions - -This argument is generic setting class for all operations that can be set on all operations executed against KurrentDB. - -#### userCredentials -The credentials used to read the data can be used by the subscription as follows. This will override the default credentials set on the connection. - -@[code{read-all-overriding-user-credentials}](@grpc:reading_events.py;reading-events.js;reading-events.ts;reading_events/ReadingEvents.java;reading-events/Program.cs;readingEvents.go;reading_events.rs) - -### Reading backwards - -In addition to reading the `$all` stream forwards, it can be read backwards. 
To read all the events backwards, set the *position* to the end: - -@[code{read-from-all-stream-backwards}](@grpc:reading_events.py;reading-events.js;reading-events.ts;reading_events/ReadingEvents.java;reading-events/Program.cs;readingEvents.go;reading_events.rs) - -:::tip -Read one event backwards to find the last position in the `$all` stream. -::: - -### Handling system events - -KurrentDB will also return system events when reading from the `$all` stream. In most cases you can ignore these events. - -All system events begin with `$` or `$$` and can be easily ignored by checking the `EventType` property. - -@[code{ignore-system-events}](@grpc:reading_events.py;reading-events.js;reading-events.ts;reading_events/ReadingEvents.java;reading-events/Program.cs;readingEvents.go;reading_events.rs) +Go [Go](/clients/grpc/go/reading-events.md) +Rust [Rust](/clients/grpc/rust/reading-events.md) \ No newline at end of file diff --git a/docs/clients/grpc/release-notes.md b/docs/clients/grpc/release-notes.md index 9cf1921a..7108d64d 100644 --- a/docs/clients/grpc/release-notes.md +++ b/docs/clients/grpc/release-notes.md @@ -1,16 +1,21 @@ --- -order: 10 +sitemap: + priority: 0 + changefreq: monthly --- # Release Notes -Click a client below to stay up to date with its latest changes and improvements, view its detailed release history and changelogs, and access the release notes for breaking changes, upgrade instructions, and migration guides. 
+To redirect you to the right page, please select a client: -| Client | Release Notes | -|---------------|------------------------------------------------------------------------------| -| Node.js | [View Releases](https://github.com/kurrent-io/KurrentDB-Client-NodeJS/releases) | -| .NET | [View Releases](https://github.com/kurrent-io/KurrentDB-Client-Dotnet/releases) | -| Java | [View Releases](https://github.com/kurrent-io/KurrentDB-Client-Java/releases) | -| Go | [View Releases](https://github.com/kurrent-io/KurrentDB-Client-Go/releases) | -| Rust | [View Releases](https://github.com/kurrent-io/KurrentDB-Client-Rust/releases) | -| Python | [View Releases](https://github.com/pyeventsourcing/kurrentdbclient/releases) | +.NET [.NET](/clients/grpc/dotnet/release-notes.md) + +Python [Python](/clients/grpc/python/release-notes.md) + +Node.js [Node.js](/clients/grpc/nodejs/release-notes.md) + +Java [Java](/clients/grpc/java/release-notes.md) + +Go [Go](/clients/grpc/go/release-notes.md) + +Rust [Rust](/clients/grpc/rust/release-notes.md) \ No newline at end of file diff --git a/docs/clients/grpc/rust/README.md b/docs/clients/grpc/rust/README.md new file mode 100644 index 00000000..a654c32d --- /dev/null +++ b/docs/clients/grpc/rust/README.md @@ -0,0 +1,9 @@ +--- +index: false +--- + +# Rust + +Learn how to use the KurrentDB Rust client library to interact with the database. + + \ No newline at end of file diff --git a/docs/clients/grpc/rust/appending-events.md b/docs/clients/grpc/rust/appending-events.md new file mode 100644 index 00000000..31c7599c --- /dev/null +++ b/docs/clients/grpc/rust/appending-events.md @@ -0,0 +1,87 @@ +--- +order: 2 +head: + - - title + - {} + - Appending Events | Rust | Clients | Kurrent Docs +--- + +# Appending Events + +When you start working with KurrentDB, it is empty. The first meaningful operation is to add one or more events to the database using one of the available client SDKs. 
+ +::: tip +Check the [Getting Started](getting-started.md) guide to learn how to configure and use the client SDK. +::: + +## Append your first event + +The simplest way to append an event to KurrentDB is to create an `EventData` object and call `AppendToStream` method. + +@[code{append-to-stream}](@grpc:appending_events.rs) + +`AppendToStream` takes a collection of `EventData`, which allows you to save more than one event in a single batch. + +Outside the example above, other options exist for dealing with different scenarios. + +::: tip +If you are new to Event Sourcing, please study the [Handling concurrency](#handling-concurrency) section below. +::: + +## Working with EventData + +Events appended to KurrentDB must be wrapped in an `EventData` object. This allows you to specify the event's content, the type of event, and whether it's in JSON format. In its simplest form, you need three arguments: **eventId**, **type**, and **data**. + +### eventId + +This takes the format of a `Uuid` and is used to uniquely identify the event you are trying to append. If two events with the same `Uuid` are appended to the same stream in quick succession, KurrentDB will only append one of the events to the stream. + +For example, the following code will only append a single event: + +@[code{append-duplicate-event}](@grpc:appending_events.rs) + +![Duplicate Event](../images/duplicate-event.png) + +### type + +Each event should be supplied with an event type. This unique string is used to identify the type of event you are saving. + +It is common to see the explicit event code type name used as the type as it makes serialising and de-serialising of the event easy. However, we recommend against this as it couples the storage to the type and will make it more difficult if you need to version the event at a later date. + +### data + +Representation of your event data. It is recommended that you store your events as JSON objects. 
This allows you to take advantage of all of KurrentDB's functionality, such as projections. That said, you can save events using whatever format suits your workflow. Eventually, the data will be stored as encoded bytes. + +### metadata + +Storing additional information alongside your event that is part of the event itself is standard practice. This can be correlation IDs, timestamps, access information, etc. KurrentDB allows you to store a separate byte array containing this information to keep it separate. + +### isJson + +Simple boolean field to tell KurrentDB if the event is stored as json, true by default. + +## Handling concurrency + +When appending events to a stream, you can supply a *stream state* or *stream revision*. Your client uses this to inform KurrentDB of the state or version you expect the stream to be in when appending an event. If the stream isn't in that state, an exception will be thrown. + +For example, if you try to append the same record twice, expecting both times that the stream doesn't exist, you will get an exception on the second: + +@[code{append-with-no-stream}](@grpc:appending_events.rs) + +There are three available stream states: +- `Any` +- `NoStream` +- `StreamExists` + +This check can be used to implement optimistic concurrency. When retrieving a stream from KurrentDB, note the current version number. When you save it back, you can determine if somebody else has modified the record in the meantime. + +@[code{append-with-concurrency-check}](@grpc:appending_events.rs) + + + +## User credentials + +You can provide user credentials to append the data as follows. This will override the default credentials set on the connection. 
+ +@[code{overriding-user-credentials}](@grpc:appending_events.rs) + diff --git a/docs/clients/grpc/rust/authentication.md b/docs/clients/grpc/rust/authentication.md new file mode 100644 index 00000000..a4abfa37 --- /dev/null +++ b/docs/clients/grpc/rust/authentication.md @@ -0,0 +1,61 @@ +--- +title: Authentication +order: 7 +head: + - - title + - {} + - Authentication | Rust | Clients | Kurrent Docs +--- + +## Client x.509 certificate + +X.509 certificates are digital certificates that use the X.509 public key infrastructure (PKI) standard to verify the identity of clients and servers. They play a crucial role in establishing a secure connection by providing a way to authenticate identities and establish trust. + +### Prerequisites + +1. KurrentDB 25.0 or greater, or EventStoreDB 24.10. +2. A commercial license with the User Certificates entitlement. +3. A valid x.509 certificate, which can be created using version `1.3` or higher of the [gencert tool](https://github.com/kurrent-io/es-gencert-cli). +4. The server must run in secure mode. See [Security Options](@server/security/protocol-security.md) for more information. +5. [Enable User Certificates plugin on the server](@server/security/user-authentication.md#user-x509-certificates) + +#### Generate user certificates + +The following command uses the [gencert tool](https://github.com/kurrent-io/es-gencert-cli) to generate a user certificate for the user `admin` that will expire in 10 days: + +::: tabs#os +@tab bash +```bash +./es-gencert-cli create-user -username admin -days 10 -ca-certificate ./es-ca/ca.crt -ca-key ./es-ca/ca.key +``` +@tab PowerShell +```powershell +.\es-gencert-cli.exe create-user -username admin -days 10 -ca-certificate ./es-ca/ca.crt -ca-key ./es-ca/ca.key +``` +::: + +### Connect to KurrentDB using an x.509 certificate + +To connect to KurrentDB using an x.509 certificate, you need to provide the +certificate and the private key to the client. 
If both username/password and +certificate authentication data are supplied, the client prioritizes user +credentials for authentication. The client will throw an error if the +certificate and the key are not both provided. + +::: tip +Please note that currently, password-protected private key files are not supported. +::: + +The client supports the following parameters: + +| Parameter | Description | +|----------------|--------------------------------------------------------------------------------| +| `userCertFile` | The file containing the X.509 user certificate in PEM format. | +| `userKeyFile` | The file containing the user certificate’s matching private key in PEM format. | + + +To authenticate, include these two parameters in your connection string or constructor when initializing the client. + +Check the samples for the following clients: + +@[code{client-with-user-certificates}](@grpc:/user_certificates.rs) \ No newline at end of file diff --git a/docs/clients/grpc/rust/delete-stream.md b/docs/clients/grpc/rust/delete-stream.md new file mode 100644 index 00000000..dee345a3 --- /dev/null +++ b/docs/clients/grpc/rust/delete-stream.md @@ -0,0 +1,44 @@ +--- +order: 9 +head: + - - title + - {} + - Deleting Events | Rust | Clients | Kurrent Docs +--- + +# Deleting Events + +In KurrentDB, you can delete events and streams either partially or completely. Settings like $maxAge and $maxCount help control how long events are kept or how many events are stored in a stream, but they won't delete the entire stream. +When you need to fully remove a stream, KurrentDB offers two options: Soft Delete and Hard Delete. + +## Soft delete + +Soft delete in KurrentDB allows you to mark a stream for deletion without completely removing it, so you can still add new events later. While you can do this through the UI, using code is often better for automating the process, +handling many streams at once, or including custom rules. 
Code is especially helpful for large-scale deletions or when you need to integrate soft deletes into other workflows. + + +```rust +let options = DeleteStreamOptions::default(); +client + .delete_stream(stream_name, &options) + .await?; +``` + +::: note +Clicking the delete button in the UI performs a soft delete, +setting the TruncateBefore value to remove all events up to a certain point. +While this marks the events for deletion, actual removal occurs during the next scavenging process. +The stream can still be reopened by appending new events. +::: + +## Hard delete + +Hard delete in KurrentDB permanently removes a stream and its events. While you can use the HTTP API, code is often better for automating the process, managing multiple streams, and ensuring precise control. Code is especially useful when you need to integrate hard delete into larger workflows or apply specific conditions. Note that when a stream is hard deleted, you cannot reuse the stream name; it will raise an exception if you try to append to it again. + + +```rust +let options = TombstoneStreamOptions::default(); +client + .tombstone_stream(stream_name, &options) + .await?; +``` diff --git a/docs/clients/grpc/rust/getting-started.md b/docs/clients/grpc/rust/getting-started.md new file mode 100644 index 00000000..45c90b29 --- /dev/null +++ b/docs/clients/grpc/rust/getting-started.md @@ -0,0 +1,102 @@ +--- +order: 1 +head: + - - title + - {} + - Getting Started | Rust | Clients | Kurrent Docs +--- + +# Getting Started + +Get started by connecting your application to KurrentDB. + +## Connecting to KurrentDB + +To connect your application to KurrentDB, instantiate and configure the client. + +::: tip Insecure clusters +All our GRPC clients are secure by default and must be configured to connect to an insecure server via [a connection string](#connection-string) or the client's configuration. +::: + +### Required packages + +No additional configuration is needed beyond having Rust installed. 
For more information, see [https://rustup.rs](https://rustup.rs). + +### Connection string + +Each SDK has its own way of configuring the client, but the connection string can always be used. +The KurrentDB connection string supports two schemas: `kurrentdb://` for connecting to a single-node server, and `kurrentdb+discover://` for connecting to a multi-node cluster. The difference between the two schemas is that when using `kurrentdb://`, the client will connect directly to the node; with `kurrentdb+discover://` schema the client will use the gossip protocol to retrieve the cluster information and choose the right node to connect to. +Since version 22.10, ESDB supports gossip on single-node deployments, so `kurrentdb+discover://` schema can be used for connecting to any topology. + +The connection string has the following format: + +``` +kurrentdb+discover://admin:changeit@cluster.dns.name:2113 +``` + +There, `cluster.dns.name` is the name of a DNS `A` record that points to all the cluster nodes. Alternatively, you can list cluster nodes separated by comma instead of the cluster DNS name: + +``` +kurrentdb+discover://admin:changeit@node1.dns.name:2113,node2.dns.name:2113,node3.dns.name:2113 +``` + +There are a number of query parameters that can be used in the connection string to instruct the cluster how and where the connection should be established. All query parameters are optional. + +| Parameter | Accepted values | Default | Description | +|-----------------------|---------------------------------------------------|----------|------------------------------------------------------------------------------------------------------------------------------------------------| +| `tls` | `true`, `false` | `true` | Use secure connection, set to `false` when connecting to a non-secure server or cluster. | +| `connectionName` | Any string | None | Connection name | +| `maxDiscoverAttempts` | Number | `10` | Number of attempts to discover the cluster. 
| +| `discoveryInterval` | Number | `100` | Cluster discovery polling interval in milliseconds. | +| `gossipTimeout` | Number | `5` | Gossip timeout in seconds, when the gossip call times out, it will be retried. | +| `nodePreference` | `leader`, `follower`, `random`, `readOnlyReplica` | `leader` | Preferred node role. When creating a client for write operations, always use `leader`. | +| `tlsVerifyCert` | `true`, `false` | `true` | In secure mode, set to `true` when using an untrusted connection to the node if you don't have the CA file available. Don't use in production. | +| `tlsCaFile` | String, file path | None | Path to the CA file when connecting to a secure cluster with a certificate that's not signed by a trusted CA. | +| `defaultDeadline` | Number | None | Default timeout for client operations, in milliseconds. Most clients allow overriding the deadline per operation. | +| `keepAliveInterval` | Number | `10` | Interval between keep-alive ping calls, in seconds. | +| `keepAliveTimeout` | Number | `10` | Keep-alive ping call timeout, in seconds. | +| `userCertFile` | String, file path | None | User certificate file for X.509 authentication. | +| `userKeyFile` | String, file path | None | Key file for the user certificate used for X.509 authentication. | + +When connecting to an insecure instance, specify `tls=false` parameter. For example, for a node running locally use `kurrentdb://localhost:2113?tls=false`. Note that usernames and passwords aren't provided there because insecure deployments don't support authentication and authorisation. + +### Creating a client + +First, create a client and get it connected to the database. + +@[code{createClient}](@grpc:quickstart.rs) + +The client instance can be used as a singleton across the whole application. It doesn't need to open or close the connection. + +### Creating an event + +You can write anything to KurrentDB as events. The client needs a byte array as the event payload. 
Normally, you'd use a serialized object, and it's up to you to choose the serialization method. + +::: tip Server-side projections +User-defined server-side projections require events to be serialized in JSON format. + +We use JSON for serialization in the documentation examples. +::: + +The code snippet below creates an event object instance, serializes it, and adds it as a payload to the `EventData` structure, which the client can then write to the database. + +@[code{createEvent}](@grpc:quickstart.rs) + +### Appending events + +Each event in the database has its own unique identifier (UUID). The database uses it to ensure idempotent writes, but it only works if you specify the stream revision when appending events to the stream. + +In the snippet below, we append the event to the stream `some-stream`. + +@[code{appendEvents}](@grpc:quickstart.rs) + +Here we are appending events without checking if the stream exists or if the stream version matches the expected event version. See more advanced scenarios in [appending events documentation](./appending-events.md). + +### Reading events + +Finally, we can read events back from the `some-stream` stream. + +@[code{readStream}](@grpc:quickstart.rs) + +When you read events from the stream, you get a collection of `ResolvedEvent` structures. The event payload is returned as a byte array and needs to be deserialized. See more advanced scenarios in [reading events documentation](./reading-events.md). 
+ diff --git a/docs/clients/grpc/rust/persistent-subscriptions.md b/docs/clients/grpc/rust/persistent-subscriptions.md new file mode 100644 index 00000000..c3a7c443 --- /dev/null +++ b/docs/clients/grpc/rust/persistent-subscriptions.md @@ -0,0 +1,164 @@ +--- +order: 5 +head: + - - title + - {} + - Persistent Subscriptions | Rust | Clients | Kurrent Docs +--- + +# Persistent Subscriptions + +Persistent subscriptions are similar to catch-up subscriptions, but there are two key differences: +- The subscription checkpoint is maintained by the server. It means that when your client reconnects to the persistent subscription, it will automatically resume from the last known position. +- It's possible to connect more than one event consumer to the same persistent subscription. In that case, the server will load-balance the consumers, depending on the defined strategy, and distribute the events to them. + +Because of those, persistent subscriptions are defined as subscription groups that are defined and maintained by the server. Consumers then connect to a particular subscription group, and the server starts sending events to the consumer. + +You can read more about persistent subscriptions in the [server documentation](@server/features/persistent-subscriptions.md). + +## Creating a subscription group + +The first step of dealing with a persistent subscription is to create a subscription group. You will receive an error if you attempt to create a subscription group multiple times. You must have admin permissions to create a persistent subscription group. + +### Subscribing to one stream + +The following sample shows how to create a subscription group for a persistent subscription where you want to receive events from a specific stream. It could be a normal stream, or a stream of links (like `$ce` category stream). 
+ +@[code{create-persistent-subscription-to-stream}](@grpc:persistent_subscriptions.rs) + +| Parameter | Description | +|:--------------|:----------------------------------------------------| +| `stream` | The stream the persistent subscription is on. | +| `groupName` | The name of the subscription group to create. | +| `settings` | The settings to use when creating the subscription. | +| `credentials` | The user credentials to use for this operation. | + +### Subscribing to $all + +The ability to subscribe to `$all` was introduced in EventStoreDB **21.10**. Persistent subscriptions to `$all` also support [filtering](subscriptions.md#server-side-filtering). + +You can create a subscription group on $all much the same way you would create a subscription group on a stream: + +@[code{create-persistent-subscription-to-all}](@grpc:persistent_subscriptions.rs) + +## Connecting a consumer + +Once you have created a subscription group, clients can connect to it. A subscription in your application should only have the connection in your code, you should assume that the subscription already exists. + +The most important parameter to pass when connecting is the buffer size. This represents how many outstanding messages the server should allow this client. If this number is too small, your subscription will spend much of its time idle as it waits for an acknowledgment to come back from the client. If it's too big, you waste resources and can start causing time out messages depending on the speed of your processing. + +### Connecting to one stream + +The code below shows how to connect to an existing subscription group for a specific stream: + +@[code{subscribe-to-persistent-subscription-to-stream}](@grpc:persistent_subscriptions.rs) + +| Parameter | Description | +|:----------------------|:---------------------------------------------------------------------------------------------| +| `stream` | The stream the persistent subscription is on. 
| +| `groupName` | The name of the subscription group to subscribe to. | +| `eventAppeared` | The action to call when an event arrives over the subscription. | +| `subscriptionDropped` | The action to call if the subscription is dropped. | +| `credentials` | The user credentials to use for this operation. | +| `bufferSize` | The number of in-flight messages this client is allowed. **Default: 10** | +| `autoAck` | Whether to automatically acknowledge messages after eventAppeared returns. **Default: true** | + +::: warning +The `autoAck` parameter will be deprecated in the next client release. You'll need to explicitly [manage acknowledgements](#acknowledgements). +::: + +### Connecting to $all + +The code below shows how to connect to an existing subscription group for `$all`: + +@[code{subscribe-to-persistent-subscription-to-all}](@grpc:persistent_subscriptions.rs) + +The `SubscribeToAllAsync` method is identical to the `SubscribeToStreamAsync` method, except that you don't need to specify a stream name. + +## Acknowledgements + +Clients must acknowledge (or not acknowledge) messages in the competing consumer model. + +If processing is successful, you must send an Ack (acknowledge) to the server to let it know that the message has been handled. If processing fails for some reason, then you can Nack (not acknowledge) the message and tell the server how to handle the failure. + +@[code{subscribe-to-persistent-subscription-with-manual-acks}](@grpc:persistent_subscriptions.rs) + +The _Nack event action_ describes what the server should do with the message: + +| Action | Description | +|:----------|:---------------------------------------------------------------------| +| `Unknown` | The client does not know what action to take. Let the server decide. | +| `Park` | Park the message and do not resend. Put it on poison queue. | +| `Retry` | Explicitly retry the message. | +| `Skip` | Skip this message do not resend and do not put in poison queue. 
| + +## Consumer strategies + +When creating a persistent subscription, you can choose between a number of consumer strategies. + +### RoundRobin (default) + +Distributes events to all clients evenly. If the client `bufferSize` is reached, the client won't receive more events until it acknowledges or not acknowledges events in its buffer. + +This strategy provides equal load balancing between all consumers in the group. + +### DispatchToSingle + +Distributes events to a single client until the `bufferSize` is reached. After that, the next client is selected in a round-robin style, and the process repeats. + +This option can be seen as a fall-back scenario for high availability, when a single consumer processes all the events until it reaches its maximum capacity. When that happens, another consumer takes the load to free up the main consumer resources. + +### Pinned + +For use with an indexing projection such as the system `$by_category` projection. + +KurrentDB inspects the event for its source stream id, hashing the id to one of 1024 buckets assigned to individual clients. When a client disconnects, its buckets are assigned to other clients. When a client connects, it is assigned some existing buckets. This naively attempts to maintain a balanced workload. + +The main aim of this strategy is to decrease the likelihood of concurrency and ordering issues while maintaining load balancing. This is **not a guarantee**, and you should handle the usual ordering and concurrency issues. + +## Updating a subscription group + +You can edit the settings of an existing subscription group while it is running, you don't need to delete and recreate it to change settings. When you update the subscription group, it resets itself internally, dropping the connections and having them reconnect. You must have admin permissions to update a persistent subscription group. 
+ +@[code{update-persistent-subscription}](@grpc:persistent_subscriptions.rs) + +| Parameter | Description | +|:--------------|:----------------------------------------------------| +| `stream` | The stream the persistent subscription is on. | +| `groupName` | The name of the subscription group to update. | +| `settings` | The settings to use when creating the subscription. | +| `credentials` | The user credentials to use for this operation. | + +## Persistent subscription settings + +Both the `Create` and `Update` methods take some settings for configuring the persistent subscription. + +The following table shows the configuration options you can set on a persistent subscription. + +| Option | Description | Default | +|:------------------------|:----------------------------------------------------------------------------------------------------------------------------------|:------------------------------------------| +| `ResolveLinkTos` | Whether the subscription should resolve link events to their linked events. | `false` | +| `StartFrom` | The exclusive position in the stream or transaction file the subscription should start from. | `null` (start from the end of the stream) | +| `ExtraStatistics` | Whether to track latency statistics on this subscription. | `false` | +| `MessageTimeout` | The amount of time after which to consider a message as timed out and retried. | `30` (seconds) | +| `MaxRetryCount` | The maximum number of retries (due to timeout) before a message is considered to be parked. | `10` | +| `LiveBufferSize` | The size of the buffer (in-memory) listening to live messages as they happen before paging occurs. | `500` | +| `ReadBatchSize` | The number of events read at a time when paging through history. | `20` | +| `HistoryBufferSize` | The number of events to cache when paging through history. | `500` | +| `CheckPointAfter` | The amount of time to try to checkpoint after. 
| `2` seconds | +| `MinCheckPointCount` | The minimum number of messages to process before a checkpoint may be written. | `10` | +| `MaxCheckPointCount` | The maximum number of messages not checkpointed before forcing a checkpoint. | `1000` | +| `MaxSubscriberCount` | The maximum number of subscribers allowed. | `0` (unbounded) | +| `NamedConsumerStrategy` | The strategy to use for distributing events to client consumers. See the [consumer strategies](#consumer-strategies) in this doc. | `RoundRobin` | + +## Deleting a subscription group + +Remove a subscription group with the delete operation. Like the creation of groups, you rarely do this in your runtime code and is undertaken by an administrator running a script. + +@[code{delete-persistent-subscription}](@grpc:persistent_subscriptions.rs) + +| Parameter | Description | +|:--------------|:-----------------------------------------------| +| `stream` | The stream the persistent subscription is on. | +| `groupName` | The name of the subscription group to delete. | +| `credentials` | The user credentials to use for this operation | diff --git a/docs/clients/grpc/rust/reading-events.md b/docs/clients/grpc/rust/reading-events.md new file mode 100644 index 00000000..43e06851 --- /dev/null +++ b/docs/clients/grpc/rust/reading-events.md @@ -0,0 +1,135 @@ +--- +order: 3 +head: + - - title + - {} + - Reading Events | Rust | Clients | Kurrent Docs +--- + +# Reading Events + +There are two options for reading events from KurrentDB. You can either: + 1. Read from an individual stream, or + 2. Read from the `$all` stream, which will return all events in the store. + +Each event in KurrentDB belongs to an individual stream. When reading events, pick the name of the stream from which you want to read the events and choose whether to read the stream forwards or backwards. + +All events have a `StreamPosition` and a `Position`. 
`StreamPosition` is a *big int* (unsigned 64-bit integer) and represents the place of the event in the stream. `Position` is the event's logical position, and is represented by `CommitPosition` and a `PreparePosition`. Note that when reading events you will supply a different "position" depending on whether you are reading from an individual stream or the `$all` stream. + +:::tip +Check [connecting to KurrentDB instructions](getting-started.md#required-packages) to learn how to configure and use the client SDK. +::: + +## Reading from a stream + +You can read all the events or a sample of the events from individual streams, starting from any position in the stream, and can read either forward or backward. It is only possible to read events from a single stream at a time. You can read events from the global event log, which spans across streams. Learn more about this process in the [Read from `$all`](#reading-from-the-all-stream) section below. + +### Reading forwards + +The simplest way to read a stream forwards is to supply a stream name, read direction, and revision from which to start. The revision can either be a *stream position* `Start` or a *big int* (unsigned 64-bit integer): + +@[code{read-from-stream}](@grpc:reading_events.rs) + +This will return an enumerable that can be iterated on: + +@[code{iterate-stream}](@grpc:reading_events.rs) + +There are a number of additional arguments you can provide when reading a stream, listed below. + +#### maxCount + +Passing in the max count will limit the number of events returned. + +#### resolveLinkTos + +When using projections to create new events, you can set whether the generated events are pointers to existing events. Setting this value to `true` tells KurrentDB to return the event as well as the event linking to it. + +#### configureOperationOptions + +You can use the `configureOperationOptions` argument to provide a function that will customise settings for each operation. 
+ +#### userCredentials + +The `userCredentials` argument is optional. It is used to override the default credentials specified when creating the client instance. + +@[code{overriding-user-credentials}](@grpc:reading_events.rs) + +### Reading from a revision + +Instead of providing the `StreamPosition` you can also provide a specific stream revision as a *big int* (unsigned 64-bit integer). + +@[code{read-from-stream-position}](@grpc:reading_events.rs) + +### Reading backwards + +In addition to reading a stream forwards, streams can be read backwards. To read all the events backwards, set the *stream position* to the end: + +@[code{reading-backwards}](@grpc:reading_events.rs) + +:::tip +Read one event backwards to find the last position in the stream. +::: + +### Checking if the stream exists + +Reading a stream returns a `ReadStreamResult`, which contains a property `ReadState`. This property can have the value `StreamNotFound` or `Ok`. + +It is important to check the value of this field before attempting to iterate an empty stream, as it will throw an exception. + +For example: + +@[code{checking-for-stream-presence}](@grpc:reading_events.rs) + +## Reading from the $all stream + +Reading from the `$all` stream is similar to reading from an individual stream, but please note there are differences. One significant difference is the need to provide admin user account credentials to read from the `$all` stream. Additionally, you need to provide a transaction log position instead of a stream revision when reading from the `$all` stream. + +### Reading forwards + +The simplest way to read the `$all` stream forwards is to supply a read direction and the transaction log position from which you want to start. 
The transaction log position can either be a *stream position* `Start` or a *big int* (unsigned 64-bit integer): + +@[code{read-from-all-stream}](@grpc:reading_events.rs) + +You can iterate asynchronously through the result: + +@[code{read-from-all-stream-iterate}](@grpc:reading_events.rs) + +There are a number of additional arguments you can provide when reading the `$all` stream. + +#### maxCount + +Passing in the max count allows you to limit the number of events that are returned. + +#### resolveLinkTos + +When using projections to create new events you can set whether the generated events are pointers to existing events. Setting this value to `true` will tell KurrentDB to return the event as well as the event linking to it. + +@[code{read-from-all-stream-resolving-link-Tos}](@grpc:reading_events.rs) + +#### configureOperationOptions + +This argument is a generic setting class for all operations that can be set on all operations executed against KurrentDB. + +#### userCredentials +The credentials used to read the data can be used by the subscription as follows. This will override the default credentials set on the connection. + +@[code{read-all-overriding-user-credentials}](@grpc:reading_events.rs) + +### Reading backwards + +In addition to reading the `$all` stream forwards, it can be read backwards. To read all the events backwards, set the *position* to the end: + +@[code{read-from-all-stream-backwards}](@grpc:reading_events.rs) + +:::tip +Read one event backwards to find the last position in the `$all` stream. +::: + +### Handling system events + +KurrentDB will also return system events when reading from the `$all` stream. In most cases you can ignore these events. + +All system events begin with `$` or `$$` and can be easily ignored by checking the `EventType` property. 
+ +@[code{ignore-system-events}](@grpc:reading_events.rs) + diff --git a/docs/clients/grpc/rust/release-notes.md b/docs/clients/grpc/rust/release-notes.md new file mode 100644 index 00000000..30af1d3a --- /dev/null +++ b/docs/clients/grpc/rust/release-notes.md @@ -0,0 +1,11 @@ +--- +order: 10 +head: + - - title + - {} + - Release Notes | Rust | Clients | Kurrent Docs +--- + +# Release Notes + +To stay up to date with the latest changes and improvements, including release history, changelogs, upgrade instructions, and migration guides, see the [KurrentDB Rust client release notes](https://github.com/kurrent-io/KurrentDB-Client-Rust/releases). diff --git a/docs/clients/grpc/rust/subscriptions.md b/docs/clients/grpc/rust/subscriptions.md new file mode 100644 index 00000000..b0bec855 --- /dev/null +++ b/docs/clients/grpc/rust/subscriptions.md @@ -0,0 +1,234 @@ +--- +order: 4 +head: + - - title + - {} + - Catch-up Subscriptions | Rust | Clients | Kurrent Docs +--- + +# Catch-up Subscriptions + +Subscriptions allow you to subscribe to a stream and receive notifications about new events added to the stream. + +You provide an event handler and an optional starting point to the subscription. The handler is called for each event from the starting point onward. + +If events already exist, the handler will be called for each event one by one until it reaches the end of the stream. The server will then notify the handler whenever a new event appears. + +:::tip +Check the [Getting Started](getting-started.md) guide to learn how to configure and use the client SDK. +::: + +## Subscribing from the start + +If you need to process all the events in the store, including historical events, you'll need to subscribe from the beginning. You can either subscribe to receive events from a single stream or subscribe to `$all` if you need to process all events in the database. 
+ +### Subscribing to a stream + +The simplest stream subscription looks like the following: + +@[code{subscribe-to-stream}](@grpc:subscribing_to_stream.rs) + +The provided handler will be called for every event in the stream. + +When you subscribe to a stream with link events, for example the `$ce` category stream, you need to set `resolveLinkTos` to `true`. Read more about it [below](#resolving-link-to-events). + +### Subscribing to `$all` + +Subscribing to `$all` is similar to subscribing to a single stream. The handler will be called for every event appended after the starting position. + +@[code{subscribe-to-all}](@grpc:subscribing_to_stream.rs) + +## Subscribing from a specific position + +The previous examples subscribed to the stream from the beginning. That subscription invoked the handler for every event in the stream before waiting for new events. + +Both stream and $all subscriptions accept a starting position if you want to read from a specific point onward. If events already exist at the position you subscribe to, they will be read on the server side and sent to the subscription. + +Once caught up, the server will push any new events received on the streams to the client. There is no difference between catching up and live on the client side. + +::: warning +The positions provided to the subscriptions are exclusive. You will only receive the next event after the subscribed position. +::: + +### Subscribing to a stream + +To subscribe to a stream from a specific position, you must provide a *stream position*. This can be `Start`, `End` or a *big int* (unsigned 64 bit integer) position. + +The following subscribes to the stream `some-stream` at position `20`; this means that events `21` and onward will be handled: + +@[code{subscribe-to-stream-from-position}](@grpc:subscribing_to_stream.rs) + +### Subscribing to $all + +Subscribing to the `$all` stream is similar to subscribing to a regular stream. The difference is how to specify the starting position. 
For the `$all` stream, provide a `Position` structure that consists of two big integers: the prepare and commit positions. Use `Start`, `End`, or create a `Position` from specific commit and prepare values. + +The corresponding `$all` subscription will subscribe from the event after the one at commit position `1056` and prepare position `1056`. + +Please note that this position will need to be a legitimate position in `$all`. + +@[code{subscribe-to-all-from-position}](@grpc:subscribing_to_stream.rs) + +## Subscribing to a stream for live updates + +You can subscribe to a stream to get live updates by subscribing to the end of the stream: + +@[code{subscribe-to-stream-live}](@grpc:subscribing_to_stream.rs) + +And the same works with `$all`: + +@[code{subscribe-to-all-live}](@grpc:subscribing_to_stream.rs) + +This will not read through the history of the stream but will notify the handler when a new event appears in the respective stream. + +Keep in mind that when you subscribe to a stream from a specific position, as described [above](#subscribing-from-a-specific-position), you will also get live updates after your subscription catches up (processes all the historical events). + +## Resolving link-to events + +Link-to events point to events in other streams in KurrentDB. These are generally created by projections such as the `$by_event_type` projection which links events of the same event type into the same stream. This makes it easier to look up all events of a specific type. + +::: tip +[Filtered subscriptions](subscriptions.md#server-side-filtering) make it easier and faster to subscribe to all events of a specific type or matching a prefix. +::: + +When reading a stream you can specify whether to resolve link-to events. By default, link-to events are not resolved. 
You can change this behaviour by setting the `resolveLinkTos` parameter to `true`: + +@[code{subscribe-to-stream-resolving-linktos}](@grpc:subscribing_to_stream.rs) + +## Dropped subscriptions + +When a subscription stops or experiences an error, it will be dropped. The subscription provides a `subscriptionDropped` callback, which will get called when the subscription breaks. + +The `subscriptionDropped` callback allows you to inspect the reason why the subscription dropped, as well as any exceptions that occurred. + +The possible reasons for a subscription to drop are: + +| Reason | Why it might happen | +|:------------------|:---------------------------------------------------------------------------------------------------------------------| +| `Disposed` | The client canceled or disposed of the subscription. | +| `SubscriberError` | An error occurred while handling an event in the subscription handler. | +| `ServerError` | An error occurred on the server, and the server closed the subscription. Check the server logs for more information. | + +Bear in mind that a subscription can also drop because it is slow. The server tried to push all the live events to the subscription when it is in the live processing mode. If the subscription gets the reading buffer overflow and won't be able to acknowledge the buffer, it will break. + +### Handling subscription drops + +An application, which hosts the subscription, can go offline for some time for different reasons. It could be a crash, infrastructure failure, or a new version deployment. As you rarely would want to reprocess all the events again, you'd need to store the current position of the subscription somewhere, and then use it to restore the subscription from the point where it dropped off: + +@[code{subscribe-to-stream-subscription-dropped}](@grpc:subscribing_to_stream.rs) + +When subscribed to `$all` you want to keep the event's position in the `$all` stream. 
As mentioned previously, the `$all` stream position consists of two big integers (prepare and commit positions), not one: + +@[code{subscribe-to-all-subscription-dropped}](@grpc:subscribing_to_stream.rs) + +## User credentials + +The user creating a subscription must have read access to the stream it's subscribing to, and only admin users may subscribe to `$all` or create filtered subscriptions. + +The code below shows how you can provide user credentials for a subscription. When you specify subscription credentials explicitly, it will override the default credentials set for the client. If you don't specify any credentials, the client will use the credentials specified for the client, if you specified those. + +@[code{overriding-user-credentials}](@grpc:subscribing_to_stream.rs) + +## Server-side filtering + +KurrentDB allows you to filter the events whilst subscribing to the `$all` stream to only receive the events you care about. + +You can filter by event type or stream name using a regular expression or a prefix. Server-side filtering is currently only available on the `$all` stream. + +::: tip +Server-side filtering was introduced as a simpler alternative to projections. You should consider filtering before creating a projection to include the events you care about. +::: + +A simple stream prefix filter looks like this: + +@[code{stream-prefix-filtered-subscription}](@grpc:subscribing_to_stream.rs) + +The filtering API is described more in-depth in the [filtering section](subscriptions.md#server-side-filtering). + +### Filtering out system events + +There are events in KurrentDB called system events. These are prefixed with a `$` and under most circumstances you won't care about these. They can be filtered out by passing in a `SubscriptionFilterOptions` when subscribing to the `$all` stream. 
+ +@[code{exclude-system}](@grpc:server_side_filtering.rs) + +::: tip +`$stats` events are no longer stored in KurrentDB by default so there won't be as many `$` events as before. +::: + +### Filtering by event type + +If you only want to subscribe to events of a given type, there are two options. You can either use a regular expression or a prefix. + +#### Filtering by prefix + +If you want to filter by prefix, pass in a `SubscriptionFilterOptions` to the subscription with an `EventTypeFilter.Prefix`. + +@[code{event-type-prefix}](@grpc:server_side_filtering.rs) + +This will only subscribe to events with a type that begins with `customer-`. + +#### Filtering by regular expression + +It might be advantageous to provide a regular expression when you want to subscribe to multiple event types. + +@[code{event-type-regex}](@grpc:server_side_filtering.rs) + +This will subscribe to any event that begins with `user` or `company`. + +### Filtering by stream name + +To subscribe to a stream by name, choose either a regular expression or a prefix. + +#### Filtering by prefix + +If you want to filter by prefix, pass in a `SubscriptionFilterOptions` to the subscription with a `StreamFilter.Prefix`. + +@[code{stream-prefix}](@grpc:server_side_filtering.rs) + +This will subscribe only to streams with a name that begins with `user-`. + +#### Filtering by regular expression + +To subscribe to multiple streams, use a regular expression. + +@[code{stream-regex}](@grpc:server_side_filtering.rs) + +This will subscribe to any stream with a name that begins with `account` or `savings`. + +## Checkpointing + +When a catch-up subscription is used to process an `$all` stream containing many events, the last thing you want is for your application to crash midway, forcing you to restart from the beginning. + +### What is a checkpoint? + +A checkpoint is the position of an event in the `$all` stream up to which your application has processed. 
By saving this position to a persistent store (e.g., a database), it allows your catch-up subscription to: +- Recover from crashes by reading the checkpoint and resuming from that position +- Avoid reprocessing all events from the start + +To create a checkpoint, store the event's commit or prepare position. + +::: warning +If your database contains events created by the legacy TCP client using the [transaction feature](https://docs.kurrent.io/clients/tcp/dotnet/21.2/appending.html#transactions), you should store both the commit and prepare positions together as your checkpoint. +::: + +### Updating checkpoints at regular intervals +The client SDK provides a way to notify your application after processing a configurable number of events. This allows you to periodically save a checkpoint at regular intervals. + +@[code{checkpoint}](@grpc:server_side_filtering.rs) + +By default, the checkpoint notification is sent after every 32 non-system events processed from $all. + +### Configuring the checkpoint interval +You can adjust the checkpoint interval to change how often the client is notified. + +@[code{checkpoint-with-interval}](@grpc:server_side_filtering.rs) + +By configuring this parameter, you can balance between reducing checkpoint overhead and ensuring quick recovery in case of a failure. + +::: info +The checkpoint interval parameter configures the database to notify the client after `n` * 32 number of events where `n` is defined by the parameter. + +For example: +- If `n` = 1, a checkpoint notification is sent every 32 events. +- If `n` = 2, the notification is sent every 64 events. +- If `n` = 3, it is sent every 96 events, and so on. 
+::: diff --git a/docs/clients/grpc/subscriptions.md b/docs/clients/grpc/subscriptions.md index 112e6fdc..070ff884 100644 --- a/docs/clients/grpc/subscriptions.md +++ b/docs/clients/grpc/subscriptions.md @@ -1,230 +1,16 @@ ---- -order: 4 ---- -# Catch-up subscriptions +# Catch-up Subscriptions -Subscriptions allow you to subscribe to a stream and receive notifications about new events added to the stream. +To continue, please select your client: -You provide an event handler and an optional starting point to the subscription. The handler is called for each event from the starting point onward. +[.NET](/clients/grpc/dotnet/subscriptions.md) -If events already exist, the handler will be called for each event one by one until it reaches the end of the stream. The server will then notify the handler whenever a new event appears. +[Python](/clients/grpc/python/subscriptions.md) -:::tip -Check the [Getting Started](getting-started.md) guide to learn how to configure and use the client SDK. -::: +[Node.js](/clients/grpc/nodejs/subscriptions.md) -## Subscribing from the start +[Java](/clients/grpc/java/subscriptions.md) -If you need to process all the events in the store, including historical events, you'll need to subscribe from the beginning. You can either subscribe to receive events from a single stream or subscribe to `$all` if you need to process all events in the database. +[Go](/clients/grpc/go/subscriptions.md) -### Subscribing to a stream - -The simplest stream subscription looks like the following : - -@[code{subscribe-to-stream}](@grpc:subscribing_to_stream.py;subscribing-to-streams.js;subscribing-to-streams.ts;subscribing_to_stream/SubscribingToStream.java;subscribing-to-streams/Program.cs;subscribingToStream.go;subscribing_to_stream.rs) - -The provided handler will be called for every event in the stream. - -When you subscribe to a stream with link events, for example the `$ce` category stream, you need to set `resolveLinkTos` to `true`. 
Read more about it [below](#resolving-link-to-s). - -### Subscribing to `$all` - -Subscribing to `$all` is similar to subscribing to a single stream. The handler will be called for every event appended after the starting position. - -@[code{subscribe-to-all}](@grpc:subscribing_to_stream.py;subscribing-to-streams.js;subscribing-to-streams.ts;subscribing_to_stream/SubscribingToStream.java;subscribing-to-streams/Program.cs;subscribingToStream.go;subscribing_to_stream.rs) - -## Subscribing from a specific position - -The previous examples subscribed to the stream from the beginning. That subscription invoked the handler for every event in the stream before waiting for new events. - -Both stream and $all subscriptions accept a starting position if you want to read from a specific point onward. If events already exist at the position you subscribe to, they will be read on the server side and sent to the subscription. - -Once caught up, the server will push any new events received on the streams to the client. There is no difference between catching up and live on the client side. - -::: warning -The positions provided to the subscriptions are exclusive. You will only receive the next event after the subscribed position. -::: - -### Subscribing to a stream - -To subscribe to a stream from a specific position, you must provide a *stream position*. This can be `Start`, `End` or a *big int* (unsigned 64 bit integer) position. - -The following subscribes to the stream `some-stream` at position `20`, this means that events `21` and onward will be handled: - -@[code{subscribe-to-stream-from-position}](@grpc:subscribing_to_stream.py;subscribing-to-streams.js;subscribing-to-streams.ts;subscribing_to_stream/SubscribingToStream.java;subscribing-to-streams/Program.cs;subscribingToStream.go;subscribing_to_stream.rs) - -### Subscribing to $all - -Subscribing to the `$all` stream is similar to subscribing to a regular stream. The difference is how to specify the starting position. 
For the `$all` stream, provide a `Position` structure that consists of two big integers: the prepare and commit positions. Use `Start`, `End`, or create a `Position` from specific commit and prepare values. - -The corresponding `$all` subscription will subscribe from the event after the one at commit position `1056` and prepare position `1056`. - -Please note that this position will need to be a legitimate position in `$all`. - -@[code{subscribe-to-all-from-position}](@grpc:subscribing_to_stream.py;subscribing-to-streams.js;subscribing-to-streams.ts;subscribing_to_stream/SubscribingToStream.java;subscribing-to-streams/Program.cs;subscribingToStream.go;subscribing_to_stream.rs) - -## Subscribing to a stream for live updates - -You can subscribe to a stream to get live updates by subscribing to the end of the stream: - -@[code{subscribe-to-stream-live}](@grpc:subscribing_to_stream.py;subscribing-to-streams.js;subscribing-to-streams.ts;subscribing_to_stream/SubscribingToStream.java;subscribing-to-streams/Program.cs;subscribingToStream.go;subscribing_to_stream.rs) - -And the same works with `$all` : - -@[code{subscribe-to-all-live}](@grpc:subscribing_to_stream.py;subscribing-to-streams.js;subscribing-to-streams.ts;subscribing_to_stream/SubscribingToStream.java;subscribing-to-streams/Program.cs;subscribingToStream.go;subscribing_to_stream.rs) - -This will not read through the history of the stream but will notify the handler when a new event appears in the respective stream. - -Keep in mind that when you subscribe to a stream from a specific position, as described [above](#subscribing-from-a-specific-position), you will also get live updates after your subscription catches up (processes all the historical events). - -## Resolving link-to's - -Link-to events point to events in other streams in KurrentDB. These are generally created by projections such as the `$by_event_type` projection which links events of the same event type into the same stream. 
This makes it easier to look up all events of a specific type. - -::: tip -[Filtered subscriptions](subscriptions.md#server-side-filtering) make it easier and faster to subscribe to all events of a specific type or matching a prefix. -::: - -When reading a stream you can specify whether to resolve link-to's. By default, link-to events are not resolved. You can change this behaviour by setting the `resolveLinkTos` parameter to `true`: - -@[code{subscribe-to-stream-resolving-linktos}](@grpc:subscribing_to_stream.py;subscribing-to-streams.js;subscribing-to-streams.ts;subscribing_to_stream/SubscribingToStream.java;subscribing-to-streams/Program.cs;subscribingToStream.go;subscribing_to_stream.rs) - -## Dropped subscriptions - -When a subscription stops or experiences an error, it will be dropped. The subscription provides a `subscriptionDropped` callback, which will get called when the subscription breaks. - -The `subscriptionDropped` callback allows you to inspect the reason why the subscription dropped, as well as any exceptions that occurred. - -The possible reasons for a subscription to drop are: - -| Reason | Why it might happen | -|:------------------|:---------------------------------------------------------------------------------------------------------------------| -| `Disposed` | The client canceled or disposed of the subscription. | -| `SubscriberError` | An error occurred while handling an event in the subscription handler. | -| `ServerError` | An error occurred on the server, and the server closed the subscription. Check the server logs for more information. | - -Bear in mind that a subscription can also drop because it is slow. The server tried to push all the live events to the subscription when it is in the live processing mode. If the subscription gets the reading buffer overflow and won't be able to acknowledge the buffer, it will break. 
- -### Handling subscription drops - -An application, which hosts the subscription, can go offline for some time for different reasons. It could be a crash, infrastructure failure, or a new version deployment. As you rarely would want to reprocess all the events again, you'd need to store the current position of the subscription somewhere, and then use it to restore the subscription from the point where it dropped off: - -@[code{subscribe-to-stream-subscription-dropped}](@grpc:subscribing_to_stream.py;subscribing-to-streams.js;subscribing-to-streams.ts;subscribing_to_stream/SubscribingToStream.java;subscribing-to-streams/Program.cs;subscribingToStream.go;subscribing_to_stream.rs) - -When subscribed to `$all` you want to keep the event's position in the `$all` stream. As mentioned previously, the `$all` stream position consists of two big integers (prepare and commit positions), not one: - -@[code{subscribe-to-all-subscription-dropped}](@grpc:subscribing_to_stream.py;subscribing-to-streams.js;subscribing-to-streams.ts;subscribing_to_stream/SubscribingToStream.java;subscribing-to-streams/Program.cs;subscribingToStream.go;subscribing_to_stream.rs) - -## User credentials - -The user creating a subscription must have read access to the stream it's subscribing to, and only admin users may subscribe to `$all` or create filtered subscriptions. - -The code below shows how you can provide user credentials for a subscription. When you specify subscription credentials explicitly, it will override the default credentials set for the client. If you don't specify any credentials, the client will use the credentials specified for the client, if you specified those. 
- -@[code{overriding-user-credentials}](@grpc:subscribing_to_stream.py;subscribing-to-streams.js;subscribing-to-streams.ts;subscribing_to_stream/SubscribingToStream.java;subscribing-to-streams/Program.cs;subscribingToStream.go;subscribing_to_stream.rs) - -## Server-side filtering - -KurrentDB allows you to filter the events whilst subscribing to the `$all` stream to only receive the events you care about. - -You can filter by event type or stream name using a regular expression or a prefix. Server-side filtering is currently only available on the `$all` stream. - -::: tip -Server-side filtering was introduced as a simpler alternative to projections. You should consider filtering before creating a projection to include the events you care about. -::: - -A simple stream prefix filter looks like this: - -@[code{stream-prefix-filtered-subscription}](@grpc:subscribing_to_stream.py;subscribing-to-streams.js;subscribing-to-streams.ts;subscribing_to_stream/SubscribingToStream.java;subscribing-to-streams/Program.cs;subscribingToStream.go;subscribing_to_stream.rs) - -The filtering API is described more in-depth in the [filtering section](subscriptions.md#server-side-filtering). - -### Filtering out system events - -There are events in KurrentDB called system events. These are prefixed with a `$` and under most circumstances you won't care about these. They can be filtered out by passing in a `SubscriptionFilterOptions` when subscribing to the `$all` stream. - -@[code{exclude-system}](@grpc:server_side_filtering.py;server-side-filtering.js;server-side-filtering.ts;server_side_filtering/ServerSideFiltering.java;server-side-filtering/Program.cs;serverSideFiltering.go;server_side_filtering.rs) - -::: tip -`$stats` events are no longer stored in KurrentDB by default so there won't be as many `$` events as before. -::: - -### Filtering by event type - -If you only want to subscribe to events of a given type, there are two options. 
You can either use a regular expression or a prefix. - -#### Filtering by prefix - -If you want to filter by prefix, pass in a `SubscriptionFilterOptions` to the subscription with an `EventTypeFilter.Prefix`. - -@[code{event-type-prefix}](@grpc:server_side_filtering.py;server-side-filtering.js;server-side-filtering.ts;server_side_filtering/ServerSideFiltering.java;server-side-filtering/Program.cs;serverSideFiltering.go;server_side_filtering.rs) - -This will only subscribe to events with a type that begin with `customer-`. - -#### Filtering by regular expression - -It might be advantageous to provide a regular expression when you want to subscribe to multiple event types. - -@[code{event-type-regex}](@grpc:server_side_filtering.py;server-side-filtering.js;server-side-filtering.ts;server_side_filtering/ServerSideFiltering.java;server-side-filtering/Program.cs;serverSideFiltering.go;server_side_filtering.rs) - -This will subscribe to any event that begins with `user` or `company`. - -### Filtering by stream name - -To subscribe to a stream by name, choose either a regular expression or a prefix. - -#### Filtering by prefix - -If you want to filter by prefix, pass in a `SubscriptionFilterOptions` to the subscription with an `StreamFilter.Prefix`. - -@[code{stream-prefix}](@grpc:server_side_filtering.py;server-side-filtering.js;server-side-filtering.ts;server_side_filtering/ServerSideFiltering.java;server-side-filtering/Program.cs;serverSideFiltering.go;server_side_filtering.rs) - -This will only subscribe to all streams with a name that begins with `user-`. - -#### Filtering by regular expression - -To subscribe to multiple streams, use a regular expression. 
- -@[code{stream-regex}](@grpc:server_side_filtering.py;server-side-filtering.js;server-side-filtering.ts;server_side_filtering/ServerSideFiltering.java;server-side-filtering/Program.cs;serverSideFiltering.go;server_side_filtering.rs) - -This will subscribe to any stream with a name that begins with `account` or `savings`. - -## Checkpointing - -When a catch-up subscription is used to process an `$all` stream containing many events, the last thing you want is for your application to crash midway, forcing you to restart from the beginning. - -### What is a checkpoint? - -A checkpoint is the position of an event in the `$all` stream to which your application has processed. By saving this position to a persistent store (e.g., a database), it allows your catch-up subscription to: -- Recover from crashes by reading the checkpoint and resuming from that position -- Avoid reprocessing all events from the start - -To create a checkpoint, store the event's commit or prepare position. - -::: warning -If your database contains events created by the legacy TCP client using the [transaction feature](https://docs.kurrent.io/clients/tcp/dotnet/21.2/appending.html#transactions), you should store both the commit and prepare positions together as your checkpoint. -::: - -### Updating checkpoints at regular intervals -The client SDK provides a way to notify your application after processing a configurable number of events. This allows you to periodically save a checkpoint at regular intervals. - -@[code{checkpoint}](@grpc:server_side_filtering.py;server-side-filtering.js;server-side-filtering.ts;server_side_filtering/ServerSideFiltering.java;server-side-filtering/Program.cs;serverSideFiltering.go;server_side_filtering.rs) - -By default, the checkpoint notification is sent after every 32 non-system events processed from $all. - -### Configuring the checkpoint interval -You can adjust the checkpoint interval to change how often the client is notified. 
- -@[code{checkpoint-with-interval}](@grpc:server_side_filtering.py;server-side-filtering.js;server-side-filtering.ts;server_side_filtering/ServerSideFiltering.java;server-side-filtering/Program.cs;serverSideFiltering.go;server_side_filtering.rs) - -By configuring this parameter, you can balance between reducing checkpoint overhead and ensuring quick recovery in case of a failure. - -::: info -The checkpoint interval parameter configures the database to notify the client after `n` * 32 number of events where `n` is defined by the parameter. - -For example: -- If `n` = 1, a checkpoint notification is sent every 32 events. -- If `n` = 2, the notification is sent every 64 events. -- If `n` = 3, it is sent every 96 events, and so on. -::: +[Rust](/clients/grpc/rust/subscriptions.md) \ No newline at end of file