diff --git a/dev-site.yml b/dev-site.yml index 69da4a9..22adedc 100644 --- a/dev-site.yml +++ b/dev-site.yml @@ -4,7 +4,7 @@ runtime: site: title: Solution Patterns for Cloud Native Architectures (Dev Mode) url: http://localhost:3000/ - start_page: solution-pattern-template::index.adoc + start_page: solution-pattern-event-mesh-for-microservices::index.adoc content: sources: @@ -12,14 +12,12 @@ content: branches: HEAD start_path: documentation asciidoc: - attributes: - title: Red Hat Solution Patterns (Dev Mode) extensions: - ./lib/remote-include-processor.js - ./lib/tab-block.js ui: bundle: - url: https://github.com/redhat-solution-patterns/course-ui/releases/download/v0.1.16/ui-bundle.zip + url: https://github.com/redhat-solution-patterns/course-ui/releases/download/v0.1.17/ui-bundle.zip snapshot: true supplemental_files: ./supplemental-ui output: diff --git a/documentation/antora.yml b/documentation/antora.yml index f922ae3..8566f37 100644 --- a/documentation/antora.yml +++ b/documentation/antora.yml @@ -1,5 +1,5 @@ -name: solution-pattern-template -title: Template Tutorial +name: solution-pattern-event-mesh-for-microservices +title: Event Mesh For Applications version: master nav: - modules/ROOT/nav.adoc diff --git a/documentation/modules/ROOT/assets/images/red_hat_open-hybrid-cloud.png b/documentation/modules/ROOT/assets/images/red_hat_open-hybrid-cloud.png deleted file mode 100644 index 709f8bb..0000000 Binary files a/documentation/modules/ROOT/assets/images/red_hat_open-hybrid-cloud.png and /dev/null differ diff --git a/documentation/modules/ROOT/assets/images/solution-odc.png b/documentation/modules/ROOT/assets/images/solution-odc.png new file mode 100644 index 0000000..2dc3afe Binary files /dev/null and b/documentation/modules/ROOT/assets/images/solution-odc.png differ diff --git a/documentation/modules/ROOT/nav.adoc b/documentation/modules/ROOT/nav.adoc index 436ff5a..577f841 100644 --- a/documentation/modules/ROOT/nav.adoc +++ 
b/documentation/modules/ROOT/nav.adoc @@ -1,6 +1,6 @@ * xref:index.adoc[{counter:module}. Home page] ** xref:index.adoc#use-cases[{module}.{counter:submodule1} Use cases] -** xref:index.adoc#_the_story_behind_this_solution_pattern[{module}.{counter:submodule1} The story behind this solution pattern] +** xref:index.adoc#_the_story[{module}.{counter:submodule1} The story behind this solution pattern] ** xref:index#_the_solution[{module}.{counter:submodule1} The solution] * xref:02-architecture.adoc[{counter:module}. Architecture] @@ -9,17 +9,11 @@ ** xref:02-architecture.adoc#more_tech[{module}.{counter:submodule2}. More about the technology stack] * xref:03-demo.adoc[{counter:module}. See the Solution in Action] -** xref:03-demo.adoc#_demonstration[{module}.{counter:submodule3}. Demonstration] -** xref:03-demo.adoc#_run_the_demonstration[{module}.{counter:submodule3}. Run this demonstration] -*** xref:03-demo.adoc#_before_getting_started[{module}.{counter:submodule3}. Pre-requisites] -*** xref:03-demo.adoc#_installing_the_demo[{module}.{counter:submodule3}. Installing the demo] -*** xref:03-demo.adoc#_walkthrough_guide[{module}.{counter:submodule3}. Walkthrough guide] - -* xref:04-workshop.adoc[{counter:module}. Workshop] -** xref:04-workshop.adoc#_installing_the_workshop_environment[{module}.{counter:submodule4}. Installing the workshop environment] -*** xref:04-workshop.adoc#_before_getting_started[{module}.{counter:submodule4}. Pre-requisites] -*** xref:04-workshop.adoc#_installing_the_environment[{module}.{counter:submodule4}. Installing the environment] -** xref:04-workshop.adoc#deliver_wksp[{module}.{counter:submodule4}. Delivering the workshop] +** xref:03-demo.adoc#_initial_application[{module}.{counter:submodule3}. Initial application] +** xref:03-demo.adoc#_refactoring_plan[{module}.{counter:submodule3}. Refactoring plan] +** xref:03-demo.adoc#_run_this_demonstration[{module}.{counter:submodule3}. 
Run this demonstration] +** xref:03-demo.adoc#_in_depth_refactoring[{module}.{counter:submodule3}. In-depth look at the refactoring] +** xref:03-demo.adoc#_conclusion[{module}.{counter:submodule3}. Conclusion] * xref:developer-resources.adoc[{counter:module}. Developer Resources] diff --git a/documentation/modules/ROOT/pages/01-pattern.adoc b/documentation/modules/ROOT/pages/01-pattern.adoc index 33fca49..be42ca4 100644 --- a/documentation/modules/ROOT/pages/01-pattern.adoc +++ b/documentation/modules/ROOT/pages/01-pattern.adoc @@ -1,7 +1,30 @@ +[#_the_story] == The story behind this solution pattern -A description of the story that was used to build the demo and architectures of this SP. +Cabs is a fictional transportation company. +Engineers at Cabs were struggling with their transactional-style, monolith application. +They recently started an effort to modernize it. +The team saw a https://www.youtube.com/watch?v=Rc5IO6S6ZOk[talk on _Event Mesh_]. +The talk presented the advantages of moving beyond transactional architectures in favor of eventual consistency. +By leveraging event meshes with technologies like Knative, developers can achieve decoupled, reliable microservices without extensive re-engineering. +This solution addresses inefficiencies and aligns distributed systems with real-world business processes. +The team figured out they could leverage the https://martinfowler.com/bliki/CQRS.html[_CQRS_] pattern together with _Knative's Event Mesh_ +to modernize their application in a non-intrusive way. + +[#_the_solution] == The Solution -This is a summary of the solution +The core of this solution is an event mesh -- a dynamic, flexible infrastructure layer that routes, retries, and processes asynchronous events across distributed components. +Using _Knative Eventing_ and _CloudEvents_, this pattern enables: + +- *Reliable delivery* of events with retry mechanisms. +- Simplified *asynchronous workflows*. 
+- *Decoupling* of services to improve scalability, testability and resilience. + +The approach of Cabs engineering team is to extract well-defined services, using the _CQRS_ pattern to identify _Commands_ from _Queries_, and model the +_Commands_ as _Events_. +Those events will be routed to the _Event Mesh_ with regular _HTTP_ messages. +The _CloudEvents_ library ensures the proper serialization of the events to and from the _HTTP_ messages. +The _Event Mesh_ +will handle not only the persistence of the events in-flight, but also all the required logic of retry in case of endpoint failure. diff --git a/documentation/modules/ROOT/pages/02-architecture.adoc b/documentation/modules/ROOT/pages/02-architecture.adoc index fba8860..f6eb7f4 100644 --- a/documentation/modules/ROOT/pages/02-architecture.adoc +++ b/documentation/modules/ROOT/pages/02-architecture.adoc @@ -1,104 +1,259 @@ -= Solution Pattern: Name Template += Building Apps around the Event Mesh :sectnums: :sectlinks: :doctype: book = Architecture -Introduction for the architecture of this solution pattern. +The architecture of an event mesh-enabled system is a paradigm shift from traditional transactional designs to an eventual consistency model. +This design aligns better with real-world processes, where different parts of a system may operate asynchronously yet collaboratively. -== Common Challenges +In this section, we explore the technologies, flow, and structure that make the event mesh architecture resilient, scalable, and developer-friendly. [#tech_stack] == Technology Stack -// Change links and text here as you see fit. 
+Here's the list of technologies used in this solution and its examples: + * Red Hat supported products ** https://www.redhat.com/en/technologies/cloud-computing/openshift[Red Hat OpenShift] -** Red Hat Application Foundation -*** https://access.redhat.com/products/quarkus[Quarkus] -*** https://www.redhat.com/en/technologies/cloud-computing/openshift/openshift-streams-for-apache-kafka[Kafka Streams] -* Other open source products: -** https://www.postgresql.org/[PostgreSQL database] -** https://helm.sh/[Helm] +— Orchestrate containerized applications. +Based on https://kubernetes.io/[Kubernetes]. +** https://www.redhat.com/en/technologies/cloud-computing/openshift/serverless[Red Hat OpenShift Serverless] +— Provides the _Event Mesh_ and _Serverless_ capabilities. +Based on https://knative.dev[Knative] project. +** https://developers.redhat.com/products/streams-for-apache-kafka[Streams for Apache Kafka] +— (Optional) Provides a persistence for _Event Mesh_, likely needed in production. +Based on https://strimzi.io/[Strimzi] project. +* Other open source technologies: +** https://cloudevents.io/[CloudEvents] — Provides a standard for event metadata +** https://opentelemetry.io/[OpenTelemetry] — (Optional) Facilitates tracing for observability. +** Rust and Java — Implementation examples. +_Kubernetes_, _Knative_, _Strimzi_, _CloudEvents_, and _OpenTelemetry_ are https://landscape.cncf.io/[CNCF projects]. [#in_depth] == An in-depth look at the solution's architecture -Technical description including all or some of the following: architecture ir ed diagrams. In-depth details of the decisions made and solutions used. Description of each service and what it is used for. Description of any integration. +Building applications around the Event Mesh is a solution that can be applied both to existing and new projects. +For existing applications, domain boundaries can guide the division into modular components, which may evolve into separate services. 
+These modules will generate commands intended for the _Event Mesh_, which will then route these events to their designated endpoints. +The _Event Mesh_ will allow for such transition, gradually making the system not only more responsive but also better suited to the real-world business logic. + +=== Problem Analysis + +Traditional systems often enforce strict transactional consistency, which can impede application performance and compromise business logic. +For instance, upon the completion of a ride-sharing service at the end-user's destination, the system should reliably capture this real-world event. +The capture should be performed regardless of any potential operational disruptions affecting dependent services (e.g., invoicing). + +In such scenarios, transactional applications, which typically encompass a number of steps in a user workflow, make all the steps undone when an error is returned. +This prevents any data from being changed, and causes the loss of real-world intent of end-users, and results in an adverse user experience and a deviation from the genuine business process, potentially leading to customer dissatisfaction. + +=== Solution Breakdown + +The concept of employing the _Event Mesh_ as a central, reliable hub for dispatching commands, as events, lies at the heart of this solution. +This approach aligns closely with the Command Query Responsibility Segregation (_CQRS_) pattern, which distinctly categorizes commands and queries. +Commands, in this context, are modeled as asynchronous events, designed to undergo processing by the _Event Mesh_. +On the other hand, queries are synchronous operations, safe to retry, ensuring no loss of data integrity due to transient errors. + +The primary responsibility of the _Event Mesh_ is twofold. +Firstly, it persists the incoming events, thereby maintaining a record of changes in the system's state. 
+Secondly, it routes these events to their respective endpoints, ensuring that the appropriate microservices are notified and can subsequently update their internal states based on the event data. + +The mesh's inherent resilience is further bolstered by its built-in retry strategies (linear or exponential backoff), which it employs when encountering operational failures. +This mechanism ensures that the system retries the operation until it succeeds, thus mitigating the risk of data loss or system disruption due to transient issues. + +By integrating the _Event Mesh_ into the system architecture, several architectural benefits are achieved: + +* **Decomposition of the application into independently functioning services**: +This approach facilitates a division of labor, with each service handling specific responsibilities -- the https://en.wikipedia.org/wiki/Domain-driven_design[Domain-driven design] approach fits here quite well. +This not only enhances maintainability but also fosters scalability, as services can be independently scaled based on their demands. + +* **Improved business alignment**: +By embracing an eventual consistency model, the _Event Mesh_ aligns closely with the inherent nature of most real-world business processes. +Unlike traditional transactional systems that strive for immediate, irreversible consistency, the _Event Mesh_ allows for a more flexible and adaptive approach to data consistency. +This results in better alignment with business requirements, as it supports scenarios where multiple services collaborate and synchronize their operations, making the whole state eventually consistent, without the constraint of strict, synchronous consistency. -=== Using images +* **Improved Resilience**: +The _Event Mesh_'s error-handling mechanism, through retries and event persistence, aids in minimizing the impact of failures on the end user. 
+This is crucial as it prevents bugs and outages from becoming visible to the user, thereby preserving the system's perceived responsiveness. -image::red_hat_open-hybrid-cloud.png[width=30%] +* **Enhanced system performance**: +The system becomes more responsive, as the end user no longer needs to wait for multiple, often independent, operations to complete successfully. +The _Event Mesh_'s event-driven model, coupled with the retries and event persistence, ensures that critical state changes are propagated swiftly and reliably, thereby improving the overall user experience. -{empty} +=== _Event Mesh_ Flow -=== Embedding HTML +The event-driven flow enables eventual consistent collaboration and state synchronization between services, fostering a resilient, scalable, and developer-friendly system architecture. -++++ -
-

Embed HTML by surrounding it with with four +s before and after.

- View the ascii doc to learn more -
-++++ +A usual flow may look like: +1. An end-user application sends an _HTTP_ request to the _Event Mesh_. +Such message can be understood as a _Command_ type event. +2. The _Event Mesh_ (Broker) persists the event in a queue (like an Apache Kafka topic, but the implementation is hidden from the user). +After _Event Mesh_ persists safely the data, it returns a successful _HTTP_ response with the `202 Accepted` return code. +At this point, the operation could already be considered successful, from the end-user point of view. +It will eventually settle correctly in all downstream systems. +3. The _Event Mesh_ routes the event to the appropriate endpoint based on the CloudEvent's metadata and configured triggering rules. +4. The endpoints receive the events and process them, updating their internal states and potentially emitting new events for downstream consumers. +The potential events are transmitted to the _Event Mesh_. +5. The dispatch loop continues until the event queue is empty and all the events are processed successfully. +The failures are automatically retried by the _Event Mesh_, with an increasing pauses between retries to avoid overloading the system. 
-=== Different decorators +// TODO: Replace with custom graphics +image::https://img.plantuml.biz/plantuml/svg/TPFDReGW483lFCNKkpw0XytMRDCqxJQRDdq05iULB885j35Dtxs8k2xxSuEPRpvm1jV6KcsxHf07MsE3m51t0gbCLMS5bqZCaSkMQjh0kBL3Yz0gCVWSOK9r9IHFFKeBMpHr0hy4WAccLNAC9Q-IMjuZ55eTKIT0JLWwxBl33Xr2goFr6RyYVuHKIfIe8TbofXKOr3rdQAxa6-tKsi3d17X7Y8MGuqjgwPuQNF1DSKvkYbZw8dl56PU7I3j5yPPsAGZYm5wAtvNb5MUk7qf6xlF4V81hmbdf6nue6y0Cnc9prOQGVMnRhvksqHK3CNzuCUf3B2tLZqnNOIevxBgzuAO676TgPYhJ_53RRELg8OUlrgdH-ybKjm1-XexPkTPoOsTFF1R815OZVUVK84tTlUB273xSmyGRN3oW-zoDPb_0brVDLijJoU4PhG4kAmLqxwgWd58aFjzNdTx1gMCX0XgPqgKXQvIb-_d-0G00[width=100%] + +//// +Editor: https://editor.plantuml.com/uml/TPFDReGW483lFCNKkpw0XytMRDCqxJQRDdq05iULB885j35Dtxs8k2xxSuEPRpvm1jV6KcsxHf07MsE3m51t0gbCLMS5bqZCaSkMQjh0kBL3Yz0gCVWSOK9r9IHFFKeBMpHr0hy4WAccLNAC9Q-IMjuZ55eTKIT0JLWwxBl33Xr2goFr6RyYVuHKIfIe8TbofXKOr3rdQAxa6-tKsi3d17X7Y8MGuqjgwPuQNF1DSKvkYbZw8dl56PU7I3j5yPPsAGZYm5wAtvNb5MUk7qf6xlF4V81hmbdf6nue6y0Cnc9prOQGVMnRhvksqHK3CNzuCUf3B2tLZqnNOIevxBgzuAO676TgPYhJ_53RRELg8OUlrgdH-ybKjm1-XexPkTPoOsTFF1R815OZVUVK84tTlUB273xSmyGRN3oW-zoDPb_0brVDLijJoU4PhG4kAmLqxwgWd58aFjzNdTx1gMCX0XgPqgKXQvIb-_d-0G00 + +@startuml +!theme cerulean-outline +skinparam linetype polyline + +cloud "Event Mesh" { + component "Knative Broker" as Broker + queue "Kafka" as Kafka +} + +folder "Micro services" { + component "Drivers Service" as DriversService + database "Drivers DB" as DriversDB + component "Invoicing Service" as InvoiceService + database "Invoicing DB" as InvoiceDB + component "Notification Service" as NotificationService +} + +component "Legacy system" { + component "Legacy App" as Legacy + database "Legacy DB" as DB +} + +Legacy -down-> Broker: Publish events +Legacy .right.> DB : Update data +Broker .right.> Kafka : Persist events +DriversService .left.> Broker: Publish events +Broker --> DriversService: Route events +Broker --> InvoiceService: Route events +Broker --> NotificationService: Route events +DriversService ..> DriversDB: Gets 
info about drivers +InvoiceService ..> InvoiceDB: Update Invoice records +@enduml +//// + +The diagram illustrates the example flow of events between the applications, the Knative _Event Mesh_, and the datastores which persist settled state of the system. [TIP] ==== -This is a Tip +Notice the applications aren't pulling the events from the queue. +In fact they aren't aware of any. +The _Event Mesh_ is the one controlling the flow, and retrying when needed. + +There are *no additional* libraries needed to consume events from _Event Mesh_. +The _Event Mesh_ pushes the events as _CloudEvents_ encoded as _REST_ messages. ==== [NOTE] ==== -This is a NOTE -==== +The exponential backoff algorithm used by _Event Mesh_ is configurable. +It uses the following formula to calculate the backoff period: `+backoffDelay * 2^+`, where the backoff delay is a base number of seconds, and number of retries is automatically tracked by the _Event Mesh_. -[WARNING] -==== -This is a WARNING +A dead letter sink can also be configured to send events in case they exceed the maximum retry number, which is also configurable. ==== -[IMPORTANT] -==== -This is IMPORTANT -==== +=== _Work Ledger_ analogy + +A good way of thinking about the _Event Mesh_ and its persistent queue backend is the _Work Ledger_ analogy. +Like in the olden days, the clerk kept his to-do work in the _Work Ledger_ (e.g. a tray for paper forms). +Then he was picking the next form, and processing it, making changes within the physical file cabinets. +In case of rare and unexpected issues (e.g. invoicing dept being unavailable), the clerk would just put the data back onto the _Work Ledger_ to be processed later. + +The _Event Mesh_ is processing the data in very similar fashion. +The data is held in the _Event Mesh_ only until the system successfully consumes it. 
+ +=== Differences from the _Event Sourcing_ + +The _Event Mesh_ pattern could be mistaken for _Event Sourcing_, as both are Event-Driven approaches (_EDA_) to application architecture. +However, _Event Mesh_ has a few improvements over the shortcomings of the _Event Sourcing_ approach. + +The data is held in the _Event Mesh_ only until the system successfully consumes it, settling the data in various datastores to a consistent state. +This effectively avoids the need to keep the applications backward compatible with all the events ever emitted. +Introducing breaking changes in the event schema is as easy as making sure to consume all the events of given type from the _Event Mesh_. +This also works for the systems which can't have downtime windows. +The applications could have short-lived backward compatible layers for such situations. +When all the events are processed, the backward compatible code may be removed, simplifying the maintenance. +Because in the long-term, the regular datastores are the source of truth for the system, all traditional techniques for application maintenance apply well. +It is also easier to understand for developers, as it avoids sophisticated event handlers logic, and reconciling into the read database abstraction. -=== Creating tables - -[cols="1a,1a,1a"] +=== Differences from the _Service Mesh_ + +Worth pointing out are the differences from the _Service Mesh_ pattern. +The _Service Mesh_ pattern is intended to improve the resilience of synchronous communications which return the response. +The _Service Mesh_ effectively raises the uptime of the dependent endpoints by retrying and backoff policies. +The uptime can't be raised to 100%, though, so _Service Mesh_ still can lose the messages. 
+ +The table below captures the key differences: + +[cols=".^1,.^2,.^2"] |=== -| *Column A* | Column *A* | _Column C_ -| -* Lorem Ipsum -* Lorem Ipsum +|| Service Mesh | Event Mesh | -* Lorem Ipsum -* Lorem Ipsum +*Similarities* + +2+a| + +* Flexibility +* Robustness +* Decoupling | -* Lorem Ipsum -* Lorem Ipsum +*Differences* +a| +* Synchronous +* Request and response +* Better for queries + +a| +* Asynchronous +* Events +* Better for commands + |=== +=== Supporting Legacy Systems + +One of the strengths of an _Event Mesh_ architecture is its ability to integrate seamlessly with legacy systems, making them more resilient and adaptable. +Legacy applications can be retrofitted to produce and consume events through lightweight adapters. +For instance: + +* A monolithic legacy application can send events for specific operations, instead of handling all logic internally in transactional fashion. +* Event listeners can be introduced incrementally, enabling the legacy app to subscribe to events without refactoring its core logic. -=== Content that can be copied +* This approach decouples old systems from rigid workflows, allowing for gradual modernization while ensuring operational continuity. -Click below to copy the content -[.console-input] -[source,shell script] ----- -oc version #openshift cli client -oc login --token= --server= ----- +=== Improving Resilience in Applications +Traditional systems often rely on synchronous calls and transactions, which can cascade failures across components. +Replacing these with asynchronous event-driven communication reduces dependencies and makes the system https://en.wikipedia.org/wiki/Eventual_consistency[_Eventually Consistent_]. + +For example, invoicing and notification services in a ride-sharing platform can process events independently, ensuring that downtime in one service does not block the entire workflow. 
+ +Retry mechanisms provided by the _Event Mesh_ guarantee that transient failures are handled gracefully without data loss. [#more_tech] -== About the Technology Stack +== More about the Technology Stack + +It's worth noting that _Knative's Event Mesh_ is completely transparent to the applications. +The applications publish and consume events, usually via +_HTTP REST_, and the only thing that is required is the _CloudEvents_ format. + +The _CloudEvents_ format provides a common envelope for events with metadata that every event needs, such as identifier, type, timestamps, or source information. +The format is a _CNCF_ standard supported by a number of projects and doesn't enforce the use of any library. + +This makes the investment in _Knative's Event Mesh_ safe in terms of vendor lock-in. +Architects can be assured that their options remain open and that solutions can be easily reconfigured down the road. -If you want to include more details about the tech stack you used, this is the place. \ No newline at end of file +What's more, relying on well-known and easy-to-deploy _CloudEvents_, typically over _HTTP_, makes testing simple and straightforward. +Developers don't need complex development environments because the _Event Mesh_ integration can be easily tested with regular _REST_ testing that most developers are familiar with. diff --git a/documentation/modules/ROOT/pages/03-demo.adoc b/documentation/modules/ROOT/pages/03-demo.adoc index 2a335cb..7cc670e 100644 --- a/documentation/modules/ROOT/pages/03-demo.adoc +++ b/documentation/modules/ROOT/pages/03-demo.adoc @@ -1,34 +1,919 @@ -= Solution Pattern: Name Template += Building Apps around the Event Mesh :sectnums: :sectlinks: :doctype: book = See the Solution in Action -== Demonstration +Let's take a look at the following example. +We'll be looking at a Cabs ride-sharing application, that resembles real-world solutions of similar kind. +The application is written in a popular Java framework. 
-Include here content related to potential existing demos: blogs, articles, recorded videos, walkthrough guides, tutorials. +[#_initial_application] +== Initial application -[#demo-video] -=== Watch a demonstration +[IMPORTANT] +==== +The following example should be considered as suboptimal, most likely a counterexample! +==== -In this video you can see xpto: +Here's a method from the Cabs application that handles the completion of a ride. -video::3yULVMdqJ98[youtube, width=800, height=480] +[source,java] +---- +@Transactional // <1> +public void completeTransit(Long driverId, UUID requestUUID, Address destinationAddress) { + destinationAddress = addressRepository.save(destinationAddress); // <2> + TransitDetailsDTO transitDetails = transitDetailsFacade.find(requestUUID); + if (!driverService.exists(driverId)) { + throw new IllegalArgumentException("Driver does not exist, id = " + driverId); + } + Address from = addressRepository.getByHash(transitDetails.from.getHash()); + Address to = addressRepository.getByHash(destinationAddress.getHash()); -Next, you can learn how to walkthrough this demo. 
+ Money finalPrice = completeTransitService.completeTransit( + driverId, requestUUID, from, to); // <2> + Money driverFee = driverFeeService.calculateDriverFee(finalPrice, driverId); + driverService.markNotOccupied(driverId); // <2> -== Run the demonstration + transitDetailsFacade.transitCompleted(requestUUID, Instant.now(clock), + finalPrice, driverFee); // <2> + awardsService.registerMiles(transitDetails.client.getId(), transitDetails.transitId); // <2> + invoiceGenerator.generate(finalPrice.toInt(), + transitDetails.client.getName() + " " + + transitDetails.client.getLastName()); // <2> + + eventsEventSender.publish(new TransitCompleted( + transitDetails.client.getId(), transitDetails.transitId, + transitDetails.from.getHash(), destinationAddress.getHash(), + transitDetails.started, Instant.now(clock), Instant.now(clock)) + ); // <2> +} +---- + +[CAUTION] +==== +There are issues with the above method. + +<1> It uses the `+@Transactional+` annotation, and modifies multiple unrelated data stores. +<2> It merges different business domains. +==== + +Similar methods are, unfortunately, quite common in business applications. +At first glance, many developers don't see any problems with similar code. +Let's break down the problems in detail. + +=== Overuse of transactional processing + +The transactional processing has been the cornerstone of many business applications. +However, in most cases, the transactional processing isn't the best fit for real-world processes. + +In our example, when the ride finishes, that's a real-world situation. +However, the example uses the `+@Transactional+` annotation, and operates on a number of unrelated data. +This means that when one of those operations fails, the whole processing will be rolled back. +In effect, the end-user will receive a nasty error message. + +[NOTE] +==== +Outages from the dependent services must not invalidate the main intent. 
+In fact, all the operations in this example could happen independently, and at different, yet reasonable times. +==== + +=== Bundling of different logical domains + +Our example is also very chatty, and hard to understand at first glance. +In fact, this is quite common in similar applications. +The code starts small, easy to understand. +When new features are added, it keeps growing as developers cram new instructions into methods like `+completeTransit+`. + +Of course, the developers could re-architect the code, to extract instructions into separate blocks, but that is just a half-measure. +Still, the application will do all the operations, starting from `+completeTransit+` method at the same time, and within the same _"script"_. + +[#_refactoring_plan] +== Refactoring plan + +In this section, we'll plan the refactoring of the Cabs application. +The refactoring will be limited to make the process easy to understand. +The scope of the refactoring will be the extraction of drivers module, which is already a separate domain in the codebase. +Within the scope of the `+completeTransit+` method, we'll need to shift the way we calculate the fee for the driver. +The calculation will be done asynchronously, and when the driver module publishes the calculation result, it will be saved back into the database. + +The base for the refactoring is the _Event Mesh_ pattern, and the asynchronous communication will be done with _Cloud Events_. + +After the refactoring, the `+completeTransit+` code will use two event types: + +* `+CalculateFee+` -- a command event, to calculate fee for the driver for given ride +* `+DriverFeeCalculated+` -- an information event, fired when the calculator does the logic to calculate the requested fee. + +The diagram below shows the sequence of operations that happen when we initiate refactored `+completeTransit+` code. 
+ +// TODO: Replace with custom graphics +image::https://www.plantuml.com/plantuml/svg/VP1DJiCm58JtFiMZ-rmWYwgqeHkeX2WNUBK7Ok4ubdyYzVQuZKbe5TZ5olTcFiqcHFOnTKOyn1OTIC8d0xPLdwBH5iBb_rfgnpRIwWMVBC_qwDoAED3ul4MUBKSzW9u6vES1eRsYMzz_mT-YZS-W3tJeLUwyOdlW23zeYJkK8vyuZ52p5O9bRk687uTYLgrB4zNqcav6XvPsR6GocTsZQ8d2L1aV3slQzVP3-uuKpCNgB1JkEwQpzI_FcjxoL5XgcUvdMioVL4soi-iuIOQcE5N259RYPgKYMNJ-3lfdkMPRqp7s7lJkjQFBvWihR61Lwimt[width=100%] + +//// +Online editor: +https://www.plantuml.com/plantuml/uml/VP1DJiCm58JtFiMZ-rmWYwgqeHkeX2WNUBK7Ok4ubdyYzVQuZKbe5TZ5olTcFiqcHFOnTKOyn1OTIC8d0xPLdwBH5iBb_rfgnpRIwWMVBC_qwDoAED3ul4MUBKSzW9u6vES1eRsYMzz_mT-YZS-W3tJeLUwyOdlW23zeYJkK8vyuZ52p5O9bRk687uTYLgrB4zNqcav6XvPsR6GocTsZQ8d2L1aV3slQzVP3-uuKpCNgB1JkEwQpzI_FcjxoL5XgcUvdMioVL4soi-iuIOQcE5N259RYPgKYMNJ-3lfdkMPRqp7s7lJkjQFBvWihR61Lwimt + +@startuml +!theme materia-outline +participant "Legacy App" as Legacy +participant "Knative _Event Mesh_" as Broker +participant "Drivers Module" as FeeService +participant "Database" as DB + +activate Legacy +Legacy -> Broker : Publish CalculateFee Event +Broker --> Legacy: Confirm delivery +deactivate Legacy + +Broker -> FeeService: Route CalculateFee Event +activate FeeService +FeeService --> Broker: Publish DriverFeeCalculated Event +deactivate FeeService + +Broker -> Legacy: Route DriverFeeCalculated Event +activate Legacy +Legacy -> DB: Store Trip Data +deactivate Legacy +@enduml +//// + +The diagram illustrates the flow of events between the legacy application, the _Knative Event Mesh_, the fee calculator service, and the datastore. + +[#_run_this_demonstration] +== Run this demonstration + +Next, you can learn how to walk through this demo. === Before getting started -To run this demo, you will need xpto. Adding to that, make sure to have: -* ABC -* XYZ -* XPTO +We'll be using the Red Hat OpenShift Container Platform (OCP) 4.x cluster, so make sure you have it available in your environment. 
+ +You could use the https://developers.redhat.com/products/openshift/overview[Red Hat's Developer Sandbox] to spin up an instance for you. + +Alternatively, you can use the https://developers.redhat.com/products/openshift-local/overview[OpenShift Local] installation. +Make sure to give it enough resources to fit the Serverless Operator and our demo application. === Installing the demo -Installation guide and basic test of the demo installation if needed + +==== Installing the Serverless Operator + +To install the Serverless Operator, follow https://docs.openshift.com/serverless/1.35/install/preparing-serverless-install.html[the documentation steps]. + +The *TL;DR* version would be to apply the following manifest, and wait until the operator is ready: + +[source,yaml] +---- +apiVersion: v1 +kind: Namespace +metadata: + name: openshift-serverless +--- +apiVersion: operators.coreos.com/v1 +kind: OperatorGroup +metadata: + name: openshift-serverless + namespace: openshift-serverless +spec: {} +--- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: serverless-operator + namespace: openshift-serverless +spec: + channel: stable + name: serverless-operator + source: redhat-operators + sourceNamespace: openshift-marketplace +---- + +Here are commands to apply the above manifests. + +[.console-input] +[source,shell] +---- +git clone https://github.com/openshift-knative/cabs-usvc +oc apply -f cabs-usvc/deploy/serverless/operator.yaml +oc wait csv/serverless-operator.v1.35.0 \ + --for 'jsonpath={.status.conditions[?(@.phase == "Succeeded")]}' +---- + +[CAUTION] +==== +Replace the `+v1.35.0+` with the actual version of the Serverless Operator. 
+==== + +Here's the expected output + +[source,shell] +---- +namespace/openshift-serverless created +operatorgroup.operators.coreos.com/openshift-serverless created +subscription.operators.coreos.com/serverless-operator created +clusterserviceversion.operators.coreos.com/serverless-operator.v1.35.0 condition met +---- + +==== Installing the Serving and Eventing components + +To install the Serving and Eventing components, follow https://docs.openshift.com/serverless/1.35/install/installing-knative-serving.html[the Serving documentation steps] and https://docs.openshift.com/serverless/1.35/install/installing-knative-eventing.html[the Eventing documentation steps]. + +Again for *TL;DR* version for small, development purposes, you could apply the following manifests, and wait until the components are ready for operation: + +[source,yaml] +---- +apiVersion: v1 +kind: Namespace +metadata: + name: knative-serving +--- +apiVersion: operator.knative.dev/v1beta1 +kind: KnativeServing +metadata: + name: knative-serving + namespace: knative-serving +spec: + high-availability: + replicas: 1 +--- +apiVersion: v1 +kind: Namespace +metadata: + name: knative-eventing +--- +apiVersion: operator.knative.dev/v1beta1 +kind: KnativeEventing +metadata: + name: knative-eventing + namespace: knative-eventing +spec: + high-availability: + replicas: 1 +---- + +Here are commands to apply the above manifests. + +[.console-input] +[source,shell] +---- +oc apply \ + -f cabs-usvc/deploy/serverless/serving.yaml \ + -f cabs-usvc/deploy/serverless/eventing.yaml + +oc wait knativeserving/knative-serving \ + --namespace knative-serving \ + --for 'condition=Ready=True' +oc wait knativeeventing/knative-eventing \ + --namespace knative-eventing \ + --for 'condition=Ready=True' +---- + +Here's the expected output + +[source,shell] +---- +Warning: resource namespaces/knative-serving is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by oc apply. 
oc apply should only be used on resources created declaratively by either oc create --save-config or oc apply. The missing annotation will be patched automatically. +namespace/knative-serving configured +knativeserving.operator.knative.dev/knative-serving created +Warning: resource namespaces/knative-eventing is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by oc apply. oc apply should only be used on resources created declaratively by either oc create --save-config or oc apply. The missing annotation will be patched automatically. +namespace/knative-eventing configured +knativeeventing.operator.knative.dev/knative-eventing created +knativeserving.operator.knative.dev/knative-serving condition met +knativeeventing.operator.knative.dev/knative-eventing condition met +---- + +==== Installing the demo applications + +To install the Demo application, apply the following manifests. + +* https://github.com/openshift-knative/cabs-usvc/blob/main/deploy/apps/legacy.yaml[The legacy application] +* https://github.com/openshift-knative/cabs-usvc/blob/main/deploy/db/redis.yaml[The Drivers database] +* https://github.com/openshift-knative/cabs-usvc/blob/main/deploy/apps/drivers.yaml[The Drivers module] + +Here are commands to apply the above manifests. 
+ +[.console-input] +[source,shell] +---- +oc create ns demo +oc apply \ + -f cabs-usvc/deploy/db/redis.yaml \ + -f cabs-usvc/deploy/apps/drivers.yaml \ + -f cabs-usvc/deploy/apps/legacy.yaml + +oc wait ksvc/drivers \ + --namespace demo \ + --for condition=Ready=True +oc wait ksvc/legacy \ + --namespace demo \ + --for condition=Ready=True +---- + +Here's the expected output + +[source,shell] +---- +namespace/demo created +pod/redis created +service/redis created +service.serving.knative.dev/drivers created +service.serving.knative.dev/legacy created +service.serving.knative.dev/drivers condition met +service.serving.knative.dev/legacy condition met +---- + +==== Configuring the Event Mesh + +To configure the Event Mesh, apply the following manifests. + +* https://github.com/openshift-knative/cabs-usvc/blob/main/deploy/mesh/broker.yaml[_Broker_] +* https://github.com/openshift-knative/cabs-usvc/blob/main/deploy/mesh/sources.yaml[Sources] +* https://github.com/openshift-knative/cabs-usvc/blob/main/deploy/mesh/triggers.yaml[Triggers] + +Here are commands to apply the above manifests. 
+
+[.console-input]
+[source,shell]
+----
+oc apply \
+  -f cabs-usvc/deploy/mesh/broker.yaml \
+  -f cabs-usvc/deploy/mesh/sources.yaml \
+  -f cabs-usvc/deploy/mesh/triggers.yaml
+
+oc wait broker/default \
+  --namespace demo \
+  --for condition=Ready=True
+oc wait sinkbinding/drivers-binding \
+  --namespace demo \
+  --for condition=Ready=True
+oc wait sinkbinding/legacy-binding \
+  --namespace demo \
+  --for condition=Ready=True
+oc wait trigger/trg-drivers \
+  --namespace demo \
+  --for condition=Ready=True
+oc wait trigger/trg-legacy \
+  --namespace demo \
+  --for condition=Ready=True
+----
+
+Here's the expected output
+
+[source,shell]
+----
+broker.eventing.knative.dev/default created
+sinkbinding.sources.knative.dev/drivers-binding created
+sinkbinding.sources.knative.dev/legacy-binding created
+trigger.eventing.knative.dev/trg-drivers created
+trigger.eventing.knative.dev/trg-legacy created
+broker.eventing.knative.dev/default condition met
+sinkbinding.sources.knative.dev/drivers-binding condition met
+sinkbinding.sources.knative.dev/legacy-binding condition met
+trigger.eventing.knative.dev/trg-drivers condition met
+trigger.eventing.knative.dev/trg-legacy condition met
+----
+
+The OpenShift Container Platform can provide a clear visualization of our deployed solution.
+
+image::solution-odc.png[width=100%]
+
+The console shows two sink bindings on the left, and they are feeding the events from the applications to the _Broker_ (depicted in the center).
+The _Broker_ is the centralized infrastructure piece that ensures a proper decoupling of the services.
+On the right, you could see the two applications deployed as _Knative_ services, and two triggers (as lines) that configure the _Event Mesh_ to feed appropriate events to the applications.

=== Walkthrough guide
-How to run through the demo
+
+With the demo pieces deployed on the cluster, we could go ahead with testing the functionality.
+
+For the sake of brevity, the legacy application, at startup, prepares some development data in the in-memory database it's running on.
+We will leverage that data to complete transit without the hassle of simulating the whole ride.
+
+Because we use serverless deployments, the services could be scaled to zero.
+This fact makes it a bit harder to listen to the application logs.
+We recommend using https://github.com/stern/stern[`+stern+` tool] to easily listen to both apps, even across scale to zero periods.
+
+[.console-input]
+[source,shell]
+----
+stern \
+  --namespace demo \
+  --container user-container \
+  '(legacy|drivers).*'
+----
+
+Alternatively, you can use a regular `+oc+` command and a bit of scripting:
+
+[.console-input]
+[source,shell]
+----
+oc logs \
+  --selector app=legacy \
+  --namespace demo \
+  --follow &
+
+while [ $(oc get pod --namespace demo --selector app=drivers -o name | wc -l) -eq 0 ]; do \
+  sleep 1; done && oc wait pod \
+  --namespace demo \
+  --selector app=drivers \
+  --for condition=Ready=True && \
+  oc logs \
+  --selector app=drivers \
+  --namespace demo \
+  --follow
+----
+
+In the second terminal, call the legacy endpoint by sending a _POST_ message like the following:
+
+[.console-input]
+[source,shell]
+----
+curl -Lk -v -X POST -H 'Content-Type: application/json' \
+  $(oc get ksvc legacy --namespace demo -o jsonpath='{.status.url}')/transits/8/complete \
+  --data-binary @- << EOF
+{
+  "country": "Polska",
+  "city": "Warszawa",
+  "street": "Żytnia",
+  "buildingNumber": 32,
+  "hash": -580606919
+}
+EOF
+----
+
+You should observe that the cURL command succeeded and returned the ride data.
+Moreover, the logs of both applications should be updated.
+
+On the _Legacy_ application you could see the log line, which shows the application is sending the _Cloud Event_ to the _Event Mesh_:
+
+----
+INFO 1 --- [nio-8080-exec-1] i.l.cabs.common.cloudevents.Publisher :
+Publishing event to http://broker-ingress.knative-eventing.svc.cluster.local/demo/default :
+CloudEvent{id='83720fe5-02ee-4a3e-9b22-5c287fb68d10',source=usvc://cabs/legacy,
+type='cabs.drivers.calculate-fee', datacontenttype='application/json',
+subject='4e630a96-4d5c-488c-a53b-9554c0bcb97e',time=2025-02-04T17:32:20.638351262Z,
+data=BytesCloudEventData{value=[123, 34, 100, 114, 105, 118, 101, 114, 45, 105,
+100, 34, 58, 49, 57, 57, 51, 52, 51, 50, 53, 53, 50, 44, 34, 116, 114, 97, 110,
+115, 105, 116, 45, 112, 114, 105, 99, 101, 34, 58, 53, 49, 48, 48, 125]},
+extensions={}}
+----
+
+You can notice the `+cabs.drivers.calculate-fee+` event was later routed to the _Drivers_ service, which calculated the fee.
+After the fee was calculated, the `+cabs.drivers.driver-fee+` event was published back into the _Event Mesh_.
+ +---- +[INFO drivers::app::events] Received event: + CloudEvent: + specversion: '1.0' + id: 'f94792bc-9c38-4db1-8da6-b6a28d1b4847' + type: 'cabs.drivers.calculate-fee' + source: 'usvc://cabs/legacy' + datacontenttype: 'application/json' + subject: '005be37e-8971-4a5b-b5e7-dd18de3c1184' + time: '2025-02-04T17:48:11.641317948+00:00' + knativearrivaltime: '2025-02-04T17:48:11.655926003Z' + Binary data: "{\"driver-id\":1993432552,\"transit-price\":5100}" + +[DEBUG drivers::drivers::service] calculate fee for: Subject { + id: Some("005be37e-8971-4a5b-b5e7-dd18de3c1184"), + entity: CalculateFeeEvent { + driver_id: Identifier(1993432552), + transit_price: Money(5100) } } +[DEBUG drivers::drivers::service] fee value: Money(4856) +[DEBUG drivers::support::cloudevents] sending cabs.drivers.driver-fee event to + http://broker-ingress.knative-eventing.svc.cluster.local/demo/default: + Event { attributes: V10(Attributes { id: "939babd7-6a85-4859-b45b-66087aba9418", + ty: "cabs.drivers.driver-fee", source: "usvc://cabs/drivers", + datacontenttype: Some("application/json"), dataschema: None, + subject: Some("005be37e-8971-4a5b-b5e7-dd18de3c1184"), + time: Some(2025-02-04T17:48:12.897943139Z) }), + data: Some(Json(Object {"driver-id": Number(1993432552), "fee": Number(4856)})), + extensions: {} } +---- + +In the end, the `+cabs.drivers.driver-fee+` event was routed to the _Legacy_ application, by _Event Mesh_. +You could see the evidence of it in the logs. + +---- +INFO 1 --- [nio-8080-exec-2] i.l.c.ride.details.TransitDetailsFacade : + Driver fee calculated for transit 005be37e-8971-4a5b-b5e7-dd18de3c1184: 48.56 +---- + +[#_in_depth_refactoring] +== In-depth look at the refactoring + +In this section, we'll refactor the Cabs application. +The refactoring will be limited to make the process easy to understand. +The scope of the refactoring will be the extraction of drivers module, which is already a separate domain in the codebase. 
+Within the scope of the `+completeTransit+` method, we'll need to shift the way we calculate the fee for the driver.
+The calculation will be done asynchronously, and when the driver module publishes the calculation result, it will be saved back into the database.
+
+The base for the refactoring is the _Event Mesh_ pattern, and the asynchronous communication will be done with _Cloud Events_.
+
+=== Drivers module
+
+The functionality around drivers is already quite separated in the codebase, so it is a good starting point to extract into a separate module.
+The drivers module will become a standalone web service, deployed on the _Kubernetes_ cluster.
+The implementation of the drivers module will be done with _Rust_ for this example.
+
+Here's the _Rust_ code for the calculate fee functionality.
+The entrypoint is the _Cloud Event_ of type `cabs.drivers.calculate-fee` that we expect the _Event Mesh_ will route.
+
+[source,rust]
+----
+impl Service {
+    pub async fn calculate_fee(&mut self, ce: Event) -> Result<()> {
+        let calc_fee_intent = Self::unwrap_calculatefee(ce)?; // <1>
+        let subject = calc_fee_intent.id.clone();
+
+        log::debug!("calculate fee for: {:?}", calc_fee_intent);
+        let drv = self.repo.get(&calc_fee_intent.entity.driver_id).await?;
+
+        let fee = drv.calculate_fee(&calc_fee_intent.entity.transit_price); // <2>
+
+        log::debug!("fee value: {:?}", fee);
+
+        let driverfee_event = DriverFeeEvent {
+            driver_id: calc_fee_intent.entity.driver_id,
+            fee,
+        }; // <3>
+
+        let mut builder = driverfee_event.to_builder(); // <3>
+        if let Some(id) = subject {
+            builder = builder.subject(id);
+        } // <3>
+        let ce = builder.build().map_err(error::ErrorInternalServerError)?; // <3>
+
+        Sender::new(&self.config).send(ce).await?; // <4>
+
+        Ok(())
+    }
+    // [..]
+}
+----
+
+In the above code, we are doing the following:
+
+<1> We are unwrapping _Cloud Event_ envelope into an internal, domain, fee value object.
+<2> We are calculating the fee value using some domain logic.
+<3> We are wrapping the calculated fee value into a new _Cloud Event_. +<4> We are sending the fee, as _Cloud Event_, back to the _Event Mesh_ using _HTTP REST_ client. + +Of course, in order for this method to be called, we need to route the event from the HTTP listener: + +[source,rust] +---- +pub fn routes() -> impl HttpServiceFactory + 'static { + web::resource("/").route(web::post().to(recv)) +} + +async fn recv( + ce: Event, + state: web::Data, + binding: web::Data, +) -> Result { + log::info!("Received event:\n{}", ce); + + let mut svc = service::new(state, binding).await?; + + match ce.ty() { + "cabs.drivers.calculate-fee" => svc.calculate_fee(ce).await, + _ => Err(error::ErrorBadRequest("unsupported event type")), + }?; + + Ok(HttpResponse::Ok().finish()) +} +---- + +[NOTE] +==== +The example above uses a simple switch statement to determine the route for the given type of the event. +In a real application, you would probably use a more complex logic to determine which method should be called. +==== + +Let's see also the _Cloud Event_ sender, that uses the _HTTP REST_ client to send events to the _Event Mesh_: + +[source,rust] +---- +impl Sender { + pub async fn send(&self, ce: Event) -> Result<()> { + log::debug!("sending {} event to {}:\n{:?}", ce.ty(), &self.sink, ce,); + + let response = self + .client + .post(&self.sink) // <1> + .event(ce) + .map_err(error::ErrorInternalServerError)? + .send() + .await + .map_err(error::ErrorInternalServerError)?; + + match response.status().is_success() { + true => Ok(()), + false => { + log::error!("failed to send event: {:#?}", response); + Err(error::ErrorInternalServerError(format!( + "failed to send event: {}", + response.status() + ))) + } + } + } +} +---- + +<1> The client uses _POST_ method, to send the _JSON_ representation of the event to the sink. +The _sink_ is the URL of the target, in this case the url of the _Event Mesh_. 
+
+=== Event Mesh
+
+In this section, we'll use the _Event Mesh_ setup for communication between the extracted Drivers module and the different parts of the application.
+
+Here's the configuration of the _Event Mesh_'s central component, the _Broker_, which will be used in this example.
+The _Broker_ here is the _Knative_ component, and will be deployed in the _Kubernetes_ cluster.
+
+[source,yaml]
+----
+apiVersion: eventing.knative.dev/v1
+kind: Broker
+metadata:
+  name: default
+  namespace: demo
+spec:
+  delivery:
+    backoffDelay: PT0.2S # <1>
+    backoffPolicy: exponential # <2>
+    retry: 10 # <3>
+----
+
+<1> The `+backoffDelay+` is the delay between retries, and we use `+200ms+` in this example.
+<2> The `+backoffPolicy+` is set to `+exponential+`, which means that the delay will be doubled each time.
+<3> The `+retry+` is the number of times we retry before giving up.
+
+[IMPORTANT]
+====
+In our example, the policy is `+exponential+`, and the `+retry+` is 10, which means that after approximately 6 min and 50 sec the event will be dropped (or routed to the `+deadLetterSink+` if configured).
+====
+
+[NOTE]
+====
+A `+deadLetterSink+` option could be configured for the _Broker_ to send the events that failed to be delivered in time to a back-up location.
+Events captured in a back-up location can be re-transmitted into the _Event Mesh_ later by reconfiguring the _Mesh_ (after resolving the outage or deploying a bug fix).
+====
+
+=== Legacy application changes
+
+The last part of the refactoring will be the changes needed in our legacy Java application.
+We need to remove the _Drivers_ logic and send events to the _Event Mesh_ instead.
+We also need to accept new events coming from the _Event Mesh_, as the calculated fee will be transmitted as such.
+
+Here's the refactored code:
+
+[source,java]
+----
+public void completeTransit(UUID requestUUID, AddressDTO destinationAddress) {
+    // ...
+ Money finalPrice = completeTransitService.completeTransit(driverId, requestUUID, from, to); + // ... + driverFeeService.calculateDriverFee(requestUUID, finalPrice, driverId); // <1> + // ... +} + +@EventListener // <2> +public void driverFeeCalculated(DriverFee driverFee) { // <3> + Objects.requireNonNull(driverFee.ctx.getSubject()); + UUID id = UUID.fromString(driverFee.ctx.getSubject()); + transitDetailsFacade.driverFeeCalculated(id, driverFee.data.fee); +} +---- + +[NOTE] +==== +<1> Notice, we are just invoking the `+calculateDriverFee+`, that doesn't return anything. +It's asynchronous. +<2> We are using the `@EventListener` annotation to listen for the domain events within the application. +Don't confuse this with _Cloud Events_ that are sent and received outside the application. +<3> The exact fee is calculated by the _Drivers_ module, and we'll be notified later, with the `+driverFeeCalculated+` method. +==== + +To communicate with the _Event Mesh_, we need to add a new _Cloud Event_ sender and listener. +That's being done similarly, as in the case of _Rust_ application. 
+
+Below, you can see how you may implement the _Cloud Event_ sender:
+
+[source,java]
+----
+@Service
+public class DriverFeeService {
+
+    private final CloudEventSender eventSender;
+
+    @Autowired
+    public DriverFeeService(CloudEventSender eventSender) {
+        this.eventSender = eventSender;
+    }
+
+    public void calculateDriverFee(UUID rideId, Money transitPrice, Long driverId) {
+        eventSender.send(new CalculateFee(
+            rideId,
+            driverId,
+            transitPrice.toInt()
+        ));
+    }
+}
+
+@Service
+public class CloudEventSender {
+
+    private static final Logger log = LoggerFactory.getLogger(CloudEventSender.class);
+
+    private final KnativeConfig knative;
+    private final List<Into<?>> converters;
+
+    @Autowired
+    CloudEventSender(KnativeConfig knative, List<Into<?>> converters) {
+        this.knative = knative;
+        this.converters = converters;
+    }
+
+    public void send(Object event) {
+        try {
+            unsafeSend(event);
+        } catch (IOException e) {
+            throw new UncheckedIOException(e);
+        }
+    }
+
+    private <T> void unsafeSend(T event) throws IOException {
+        Into<T> convert = (Into<T>) converters.stream()
+            .filter(c -> c.accepts(event))
+            .findFirst()
+            .orElseThrow(() -> new IllegalArgumentException(
+                "Cannot find converter for " + event.getClass()));
+        CloudEvent ce = convert.into(event);
+        URL url = knative.getSink();
+        log.info("Publishing event to {} : {}", url, ce);
+        HttpURLConnection http = (HttpURLConnection) url.openConnection();
+        http.setRequestMethod("POST");
+        http.setDoOutput(true);
+        http.setDoInput(true);
+
+        HttpMessageWriter messageWriter = createMessageWriter(http);
+        messageWriter.writeBinary(ce);
+
+        int code = http.getResponseCode();
+        if (code < 200 || code >= 300) {
+            throw new IOException("Unexpected response code " + code);
+        }
+    }
+}
+----
+
+Once again, notice this is just a simple _HTTP_ client doing the _POST_ request, with the body being the JSON representation of the _CloudEvent_.
+
+The last part to see is the _HTTP_ listener on the legacy application side.
+This listener will be responsible for receiving events from _Knative's Event Mesh_ and converting them into our custom event type:
+
+[source,java]
+----
+@RestController
+public class CloudEventReceiver {
+    private static final Logger log = LoggerFactory.getLogger(CloudEventReceiver.class);
+
+    private final EventsPublisher eventsPublisher;
+    private final List<From<?>> froms;
+
+    @Autowired
+    CloudEventReceiver(EventsPublisher eventsPublisher, List<From<?>> froms) {
+        this.eventsPublisher = eventsPublisher;
+        this.froms = froms;
+    }
+
+    @PostMapping("/")
+    public void receive(@RequestBody CloudEvent event) {
+        log.info("Received event: {}", event);
+
+        for (From<?> from : froms) {
+            if (from.matches(event)) {
+                Event ev = from.fromCloudEvent(event); // <1>
+                eventsPublisher.publish(ev); // <2>
+                return;
+            }
+        }
+
+        throw new IllegalStateException("No matching event type consumer found");
+    }
+}
+----
+
+<1> We unwrap the _CloudEvent_ into our domain event type (in the example that's the `+DriverFeeCalculated+` type)
+<2> And publish it within the application, using the framework's _EventsPublisher_ implementation.
+The domain events will be transmitted to the methods annotated with `@EventListener`.
+
+[CAUTION]
+====
+Don't confuse the framework's _EventsPublisher_ with _Cloud Event_ sender and receiver.
+====
+
+=== The wiring of our _Event Mesh_
+
+To complete the solution, we need to configure the _Event Mesh_.
+The configuration describes the rules for receiving and sending events from and to the _Event Mesh_ and the application modules.
+ +Here are the sources in our case: + +[source,yaml] +---- +apiVersion: sources.knative.dev/v1 +kind: SinkBinding +metadata: + name: drivers-binding + namespace: demo +spec: + sink: + ref: + apiVersion: eventing.knative.dev/v1 + kind: Broker + name: default + namespace: demo + subject: + apiVersion: serving.knative.dev/v1 + kind: Service + name: drivers + namespace: demo +--- +apiVersion: sources.knative.dev/v1 +kind: SinkBinding +metadata: + name: legacy-binding + namespace: demo +spec: + sink: + ref: + apiVersion: eventing.knative.dev/v1 + kind: Broker + name: default + namespace: demo + subject: + apiVersion: serving.knative.dev/v1 + kind: Service + name: legacy + namespace: demo +---- + +We are using the _SinkBinding_ resource to bind an event source (the _Service_) with an event sink (_Broker_). +We have two applications that will feed their events into the _Event Mesh_, so we need two _SinkBinding_ resources. + +Lastly, we have to configure the _Broker_ to send events from the _Event Mesh_ to the expected application modules. +We use the _Trigger_ resource for this purpose. + +[source,yaml] +---- +apiVersion: eventing.knative.dev/v1 +kind: Trigger +metadata: + name: trg-drivers + namespace: demo +spec: + broker: default + filter: + attributes: + type: cabs.drivers.calculate-fee # <1> + subscriber: + ref: + apiVersion: serving.knative.dev/v1 + kind: Service + name: drivers + namespace: demo +--- +apiVersion: eventing.knative.dev/v1 +kind: Trigger +metadata: + name: trg-legacy + namespace: demo +spec: + broker: default + filter: + attributes: + type: cabs.drivers.driver-fee # <1> + subscriber: + ref: + apiVersion: serving.knative.dev/v1 + kind: Service + name: legacy + namespace: demo +---- + +<1> Note, we specify the type of the event, as a filter. + +[#_conclusion] +== Conclusion + +Let's step back and take a look at what we have accomplished. + +The refactored application code fragment is now distributed, resilient, and eventually consistent. 
+It will gracefully handle the failures that may happen while calculating the driver's fee. +The _Event Mesh_ will make sure to retry the event delivery, in case of failures on either side. + +We could extend the refactoring, even further, with the same principle, making the whole application modern, responsible, and without incorrect, unnecessary, transactional behavior. + + diff --git a/documentation/modules/ROOT/pages/04-workshop.adoc b/documentation/modules/ROOT/pages/04-workshop.adoc deleted file mode 100644 index 86ce843..0000000 --- a/documentation/modules/ROOT/pages/04-workshop.adoc +++ /dev/null @@ -1,13 +0,0 @@ -= Solution Pattern: Name Template -:sectnums: -:sectlinks: -:doctype: book - -= Workshop - -what is this workshop and what will be learned - -== Installing the workshop environment -=== Before getting started -=== Installing the environment -== Delivering the workshop diff --git a/documentation/modules/ROOT/pages/_attributes.adoc b/documentation/modules/ROOT/pages/_attributes.adoc index ad2bb5a..11fd746 100644 --- a/documentation/modules/ROOT/pages/_attributes.adoc +++ b/documentation/modules/ROOT/pages/_attributes.adoc @@ -1,2 +1,2 @@ :experimental: -:source-highlighter: highlightjs +:source-highlighter: highlightjs \ No newline at end of file diff --git a/documentation/modules/ROOT/pages/developer-resources.adoc b/documentation/modules/ROOT/pages/developer-resources.adoc index 4c8b5f2..4783fa3 100644 --- a/documentation/modules/ROOT/pages/developer-resources.adoc +++ b/documentation/modules/ROOT/pages/developer-resources.adoc @@ -1,11 +1,15 @@ -= Solution Patterns: Template Name += Building Apps around the Event Mesh :sectnums: :sectlinks: :doctype: book -= Developer Resources - -* Add link to the git repo of the code -* Add links to whitepapers, ebooks, learning paths, product pages +== Developer Resources +* https://github.com/openshift-knative/cabs-usvc[Demo source code] — _The example code used in this solution, based on the 
https://github.com/legacyfighter/cabs-java[LegacyFighter Java app]_ +* https://youtu.be/Rc5IO6S6ZOk[Let's get meshy! Microservices are easy with Event Mesh] — _The talk that served a base for this solution_ +* https://www.redhat.com/en/technologies/cloud-computing/openshift/serverless[Red Hat OpenShift Serverless] +* https://knative.dev/docs/eventing/event-mesh/[Knative Event Mesh] +* https://bit.ly/knative-tutorial[Knative Tutorial] +* https://developers.redhat.com/[Red Hat Developer] +* https://docs.openshift.com/serverless/[Red Hat OpenShift Serverless Documentation] diff --git a/documentation/modules/ROOT/pages/index.adoc b/documentation/modules/ROOT/pages/index.adoc index e76e7f1..e367748 100644 --- a/documentation/modules/ROOT/pages/index.adoc +++ b/documentation/modules/ROOT/pages/index.adoc @@ -1,28 +1,43 @@ -= Solution Patterns: Template Name += Building Apps around the Event Mesh :page-layout: home :sectnums: :sectlinks: :doctype: book -A brief introduction of this solution pattern. +Understanding how to structure an application can be challenging. Different +architectural patterns often lack insight into how to build day-to-day solutions +that are often at least as complex as the business they operate in. -_Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum_ +This solution pattern is written to make it obvious and to show simple, yet +elegant, correct, and comprehensive way of building software, either in +greenfield or even in legacy projects. 
This solution leverages the +https://www.redhat.com/en/technologies/cloud-computing/openshift/serverless[Red Hat OpenShift Serverless] +Eventing component, which implements the https://www.redhat.com/en/topics/integration/what-is-an-event-mesh[Event Mesh] pattern. -*Contributors:* _Your Name(s) here (can add links to your profile)_ +*Contributors:* https://github.com/cardil[Chris Suszynski] ++++
++++ [NOTE] ==== -Solutions Patterns help you understand the art of the possible with Red Hat's portfolio, and not intended to be used as is for production environments. You are welcome use any part of this solution pattern for your own workloads. +Solution Patterns help you understand the art of the possible with Red Hat's +portfolio, and not intended to be used as is for production environments. You +are welcome to use any part of this solution pattern for your own workloads. ==== [#use-cases] == Use cases -Common use cases that can be address with this architecture are: +Event mesh pattern simplifies the operational complexity of distributed +applications, making architectures more resilient and scalable. -- xyz -- xyz +Common use cases that can be addressed with this architecture include: + +- *Correctness* and *consistency* across distributed applications and services, + without the hassles of _Event Sourcing_. +- *Modernising* legacy applications by extracting smaller *microservices* into a + distributed and eventually consistent system. +- *Enabling communication* with external services, that can be easily tested + using a common *developer toolkit*. include::01-pattern.adoc[] diff --git a/site.yml b/site.yml index 801df30..965ac1f 100644 --- a/site.yml +++ b/site.yml @@ -4,7 +4,7 @@ runtime: site: title: Solution Patterns for Cloud Native Architectures url: https://redhat-solution-patterns.github.io/solution-patterns/ - start_page: solution-pattern-template::index.adoc + start_page: solution-pattern-event-mesh-for-microservices::index.adoc content: sources: @@ -21,7 +21,7 @@ asciidoc: ui: bundle: - url: https://github.com/redhat-solution-patterns/course-ui/releases/download/v0.1.16/ui-bundle.zip + url: https://github.com/redhat-solution-patterns/course-ui/releases/download/v0.1.17/ui-bundle.zip snapshot: true supplemental_files: - path: ./supplemental-ui