replaced existing library wholesale with Alpakka solution

This commit is contained in:
Aaron Stannard 2019-07-10 09:00:20 -05:00
Родитель aa26d1573c
Коммит 45c1c7750c
Не найден ключ, соответствующий данной подписи
Идентификатор ключа GPG: C9208B815A0785EE
55 изменённых файлов: 2855 добавлений и 318 удалений

99
.editorconfig Normal file
Просмотреть файл

@ -0,0 +1,99 @@
# EditorConfig is awesome:http://EditorConfig.org
# top-most EditorConfig file
root = true
# Don't use tabs for indentation.
[*]
indent_style = space
# (Please don't specify an indent_size here; that has too many unintended consequences.)
# Code files
[*.{cs,csx}]
indent_size = 4
# Xml project files
[*.{csproj}]
indent_size = 2
# Xml config files
[*.{props,targets,ruleset,config,nuspec,resx,vsixmanifest,vsct}]
indent_size = 2
# JSON files
[*.json]
indent_size = 2
# proto
[*.proto]
indent_size = 2
# Dotnet code style settings:
[*.{cs}]
# Avoid "this." if not necessary
dotnet_style_qualification_for_field = false:none
dotnet_style_qualification_for_property = false:none
dotnet_style_qualification_for_method = false:none
dotnet_style_qualification_for_event = false:none
# Use language keywords instead of framework type names for type references
dotnet_style_predefined_type_for_locals_parameters_members = true:warning
dotnet_style_predefined_type_for_member_access = true:warning
# Suggest more modern language features when available
dotnet_style_object_initializer = true:warning
dotnet_style_collection_initializer = true:warning
dotnet_style_coalesce_expression = true:warning
dotnet_style_null_propagation = true:warning
dotnet_style_explicit_tuple_names = true:warning
# Naming
dotnet_naming_rule.async_methods_end_in_async.symbols = any_async_methods
dotnet_naming_rule.async_methods_end_in_async.style = end_in_async
dotnet_naming_rule.async_methods_end_in_async.severity = warning
dotnet_naming_symbols.any_async_methods.applicable_kinds = method
dotnet_naming_symbols.any_async_methods.applicable_accessibilities = *
dotnet_naming_symbols.any_async_methods.required_modifiers = async
dotnet_naming_style.end_in_async.required_suffix = Async
dotnet_naming_style.end_in_async.capitalization = pascal_case
# CSharp code style settings:
[*.cs]
# Prefer "var" everywhere
csharp_style_var_for_built_in_types = true:suggestion
csharp_style_var_when_type_is_apparent = true:suggestion
csharp_style_var_elsewhere = true:suggestion
# Expression-bodied members
csharp_style_expression_bodied_methods = true:none
csharp_style_expression_bodied_constructors = false:none
csharp_style_expression_bodied_operators = true:none
csharp_style_expression_bodied_properties = true:none
csharp_style_expression_bodied_indexers = true:none
csharp_style_expression_bodied_accessors = false:none
# Suggest more modern language features when available
csharp_style_pattern_matching_over_is_with_cast_check = true:warning
csharp_style_pattern_matching_over_as_with_null_check = true:warning
csharp_style_inlined_variable_declaration = true:warning
csharp_prefer_simple_default_expression = true:warning
csharp_style_throw_expression = true:warning
csharp_style_conditional_delegate_call = true:warning
csharp_prefer_braces = false:none
# Formatting
csharp_preserve_single_line_statements = false
# VS 2017 15.5
[*.cs]
csharp_prefer_inferred_tuple_names = true:warning
csharp_prefer_inferred_anonymous_type_member_names = true:warning
csharp_style_pattern_local_over_anonymous_function = true:warning
csharp_style_deconstructed_variable_declaration = true:warning
dotnet_style_prefer_is_null_check_over_reference_equality_method = true:warning
dotnet_style_require_accessibility_modifiers = true:warning

64
.gitattributes поставляемый Normal file
Просмотреть файл

@ -0,0 +1,64 @@
###############################################################################
# Set default behavior to automatically normalize line endings.
###############################################################################
* text=auto
*.csv -text
###############################################################################
# Set default behavior for command prompt diff.
#
# This is need for earlier builds of msysgit that does not have it on by
# default for csharp files.
# Note: This is only used by command line
###############################################################################
#*.cs diff=csharp
###############################################################################
# Set the merge driver for project and solution files
#
# Merging from the command prompt will add diff markers to the files if there
# are conflicts (Merging from VS is not affected by the settings below, in VS
# the diff markers are never inserted). Diff markers may cause the following
# file extensions to fail to load in VS. An alternative would be to treat
# these files as binary and thus will always conflict and require user
# intervention with every merge. To do so, just uncomment the entries below
###############################################################################
#*.sln merge=binary
#*.csproj merge=binary
#*.vbproj merge=binary
#*.vcxproj merge=binary
#*.vcproj merge=binary
#*.dbproj merge=binary
#*.fsproj merge=binary
#*.lsproj merge=binary
#*.wixproj merge=binary
#*.modelproj merge=binary
#*.sqlproj merge=binary
#*.wwaproj merge=binary
###############################################################################
# behavior for image files
#
# image files are treated as binary by default.
###############################################################################
#*.jpg binary
#*.png binary
#*.gif binary
###############################################################################
# diff behavior for common document formats
#
# Convert binary document formats to text before diffing them. This feature
# is only available from the command line. Turn it on by uncommenting the
# entries below.
###############################################################################
#*.doc diff=astextplain
#*.DOC diff=astextplain
#*.docx diff=astextplain
#*.DOCX diff=astextplain
#*.dot diff=astextplain
#*.DOT diff=astextplain
#*.pdf diff=astextplain
#*.PDF diff=astextplain
#*.rtf diff=astextplain
#*.RTF diff=astextplain

3
.gitignore поставляемый
Просмотреть файл

@ -202,4 +202,5 @@ FakesAssemblies/
tools/
build/
.nuget/
.dotnet/
.dotnet/
.idea

Просмотреть файл

@ -9,7 +9,16 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SimpleProducer", "examples\
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Akka.Streams.Kafka", "src\Akka.Streams.Kafka\Akka.Streams.Kafka.csproj", "{21CFA085-3156-4766-9B2E-B5F1747DF87F}"
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Akka.Streams.Kafka.Tests", "src\Akka.Streams.Kafka.Tests\Akka.Streams.Kafka.Tests.csproj", "{8A6E9E88-1BE5-48D0-B603-AEA151D68AB4}"
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Akka.Streams.Kafka.Tests", "src\Akka.Streams.Kafka.Tests\Akka.Streams.Kafka.Tests.csproj", "{8A6E9E88-1BE5-48D0-B603-AEA151D68AB4}"
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "examples", "examples", "{DBBF6380-3734-49B5-8BF6-74A7C33DFA55}"
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{568F6963-99B1-4E1C-8394-52E4064D6F32}"
ProjectSection(SolutionItems) = preProject
.editorconfig = .editorconfig
docker-compose.yml = docker-compose.yml
README.md = README.md
EndProjectSection
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
@ -37,6 +46,10 @@ Global
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
EndGlobalSection
GlobalSection(NestedProjects) = preSolution
{ECDA0400-203E-4647-81B4-224954F79520} = {DBBF6380-3734-49B5-8BF6-74A7C33DFA55}
{F30D604D-22C5-415E-8F10-EBE0534F3885} = {DBBF6380-3734-49B5-8BF6-74A7C33DFA55}
EndGlobalSection
GlobalSection(ExtensibilityGlobals) = postSolution
SolutionGuid = {4B399516-A19F-4B0E-9AA9-CD6197619BE0}
EndGlobalSection

201
LICENSE.md Normal file
Просмотреть файл

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016-2017 Akka.NET Team
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Просмотреть файл

@ -6,6 +6,5 @@
<packageSources>
<clear />
<add key="nuget.org" value="https://api.nuget.org/v3/index.json" />
<add key="kafkanightly" value="https://ci.appveyor.com/nuget/confluent-kafka-dotnet" />
</packageSources>
</configuration>

Просмотреть файл

@ -3,7 +3,7 @@
Akka Streams Kafka is an Akka Streams connector for Apache Kafka.
## Builds
[![Build status](https://ci.appveyor.com/api/projects/status/uveg350ptdydkes9/branch/dev?svg=true)](https://ci.appveyor.com/project/ravengerUA/akka-streams-kafka/branch/dev)
[![Build status](https://ci.appveyor.com/api/projects/status/0glh2fi8uic17vl4/branch/dev?svg=true)](https://ci.appveyor.com/project/akkadotnet-contrib/akka-streams-kafka/branch/dev)
## Producer
@ -22,33 +22,51 @@ var producerSettings = new ProducerSettings<Null, string>(system, null, new Stri
.WithBootstrapServers("localhost:9092");
```
### Producer as a Sink
`Producer.PlainSink` is the easiest way to publish messages. The sink consumes `ProducerRecord` elements which contains a topic name to which the record is being sent.
In addition to programmatic construction of the ProducerSettings it can also be created from configuration (application.conf). By default when creating ProducerSettings with the ActorSystem parameter it uses the config section akka.kafka.producer.
```C#
Source
.From(Enumerable.Range(500, 601))
.Select(c => c.ToString())
.Select(elem => new ProduceRecord<Null, string>("topic1", null, elem))
.RunWith(Producer.PlainSink(producerSettings), materializer);
```
The materialized value of the sink is a `Task` which is completed with result when the stream completes or with exception if an error occurs.
akka.kafka.producer {
# Tuning parameter of how many sends that can run in parallel.
parallelism = 100
### Producer as a Flow
Sometimes there is a need for publishing messages in the middle of the stream processing, not as the last step, and then you can use `Producer.CreateFlow`
# How long to wait for `Producer.Flush`
flush-timeout = 10s
# Fully qualified config path which holds the dispatcher configuration
# to be used by the producer stages. Some blocking may occur.
# When this value is empty, the dispatcher configured for the stream
# will be used.
use-dispatcher = "akka.kafka.default-dispatcher"
}
```
### Producer as a Sink
`KafkaProducer.PlainSink` is the easiest way to publish messages. The sink consumes `MessageAndMeta` elements which contains a topic name to which the record is being sent, an optional partition number, and an optional key and value.
```C#
Source
.From(Enumerable.Range(1, 100))
.Select(c => c.ToString())
.Select(elem => new ProduceRecord<Null, string>("topic1", null, elem))
.Via(Producer.CreateFlow(producerSettings))
.Select(elem => new MessageAndMeta<Null, string> { Topic = "topic1", Message = new Message<Null, string> { Value = elem } })
.RunWith(KafkaProducer.PlainSink(producerSettings), materializer);
```
The materialized value of the sink is a `Task` which is completed with result when the stream completes or with exception if an error occurs.
### Producer as a Flow
Sometimes there is a need for publishing messages in the middle of the stream processing, not as the last step, and then you can use `KafkaProducer.PlainFlow`
```C#
Source
.From(Enumerable.Range(1, 100))
.Select(c => c.ToString())
.Select(elem => new MessageAndMeta<Null, string> { Topic = "topic1", Message = new Message<Null, string> { Value = elem } })
.Via(KafkaProducer.PlainFlow(producerSettings))
.Select(record =>
{
Console.WriteLine($"Producer: {record.Topic}/{record.Partition} {record.Offset}: {record.Value}");
return record;
})
.RunWith(Sink.Ignore<Message<Null, string>>(), materializer);
.RunWith(Sink.Ignore<DeliveryReport<Null, string>>(), materializer);
```
## Consumer
@ -73,7 +91,7 @@ var consumerSettings = ConsumerSettings<Null, string>.Create(system, null, new S
```C#
var subscription = Subscriptions.Assignment(new TopicPartition("akka", 0));
Consumer.PlainSource(consumerSettings, subscription)
KafkaConsumer.PlainSource(consumerSettings, subscription)
.RunForeach(result =>
{
Console.WriteLine($"Consumer: {result.Topic}/{result.Partition} {result.Offset}: {result.Value}");
@ -81,7 +99,7 @@ Consumer.PlainSource(consumerSettings, subscription)
```
### Committable Consumer
The `Consumer.CommittableSource` makes it possible to commit offset positions to Kafka.
The `KafkaConsumer.CommittableSource` makes it possible to commit offset positions to Kafka.
Compared to auto-commit this gives exact control of when a message is considered consumed.
@ -90,7 +108,7 @@ If you need to store offsets in anything other than Kafka, `PlainSource` should
This is useful when “at-least once delivery” is desired, as each message will likely be delivered one time but in failure cases could be duplicated.
```C#
Consumer.CommitableSource(consumerSettings, Subscriptions.Topics("topic1"))
KafkaConsumer.CommitableSource(consumerSettings, Subscriptions.Topics("topic1"))
.SelectAsync(1, elem =>
{
return elem.CommitableOffset.Commit();

30
docker-compose.yml Normal file
Просмотреть файл

@ -0,0 +1,30 @@
version: '2'
services:
zookeeper:
image: confluentinc/cp-zookeeper:4.0.0
ports:
- 32181:32181
environment:
ZOOKEEPER_CLIENT_PORT: 32181
ZOOKEEPER_TICK_TIME: 2000
extra_hosts:
- "moby:127.0.0.1"
- "localhost: 127.0.0.1"
kafka:
image: confluentinc/cp-kafka:4.0.0
ports:
- 29092:29092
depends_on:
- zookeeper
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper:32181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:29092
KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
KAFKA_DELETE_TOPIC_ENABLE: "true"
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
extra_hosts:
- "moby:127.0.0.1"
- "localhost: 127.0.0.1"

Просмотреть файл

@ -3,12 +3,10 @@ using System.Text;
using Akka.Actor;
using Akka.Configuration;
using Akka.Streams;
using Akka.Streams.Dsl;
using Akka.Streams.Kafka.Dsl;
using Akka.Streams.Kafka.Settings;
using Confluent.Kafka;
using Confluent.Kafka.Serialization;
using Consumer = Akka.Streams.Kafka.Dsl.Consumer;
using System.Collections.Generic;
namespace SimpleConsumer
{
@ -25,18 +23,18 @@ namespace SimpleConsumer
var materializer = system.Materializer();
var consumerSettings = ConsumerSettings<Null, string>.Create(system, null, new StringDeserializer(Encoding.UTF8))
.WithBootstrapServers("localhost:9092")
.WithBootstrapServers("localhost:29092")
.WithGroupId("group1");
var subscription = Subscriptions.Topics("akka100");
Consumer.PlainSource(consumerSettings, subscription)
.Throttle(5, TimeSpan.FromSeconds(1), 5, ThrottleMode.Shaping)
KafkaConsumer.PlainSource(consumerSettings, subscription)
.RunForeach(result =>
{
Console.WriteLine($"Consumer: {result.Topic}/{result.Partition} {result.Offset}: {result.Value}");
}, materializer);
Console.ReadLine();
}
}

Просмотреть файл

@ -2,13 +2,9 @@
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>netcoreapp1.6</TargetFramework>
<TargetFramework>netcoreapp2.0</TargetFramework>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="Confluent.Kafka" Version="0.11.3" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\..\src\Akka.Streams.Kafka\Akka.Streams.Kafka.csproj" />
</ItemGroup>

Просмотреть файл

@ -1,16 +1,14 @@
using System;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using Akka.Actor;
using Akka.Configuration;
using Akka.Streams;
using Akka.Streams.Dsl;
using Akka.Streams.Kafka.Messages;
using Akka.Streams.Kafka.Dsl;
using Akka.Streams.Kafka.Settings;
using Confluent.Kafka;
using Confluent.Kafka.Serialization;
using Producer = Akka.Streams.Kafka.Dsl.Producer;
namespace SimpleProducer
{
@ -27,27 +25,19 @@ namespace SimpleProducer
var materializer = system.Materializer();
var producerSettings = ProducerSettings<Null, string>.Create(system, null, new StringSerializer(Encoding.UTF8))
.WithBootstrapServers("localhost:9092");
.WithBootstrapServers("localhost:29092");
// producer as a Sink
//Source
// .From(Enumerable.Range(1, 200))
// .Select(c => c.ToString())
// .Select(elem => new ProduceRecord<Null, string>("akka5", null, elem))
// .RunWith(Producer.PlainSink(producerSettings), materializer);
// producer as a Flow
Source
.Cycle(() => Enumerable.Range(1, 100).GetEnumerator())
.Select(c => c.ToString())
.Select(elem => new ProduceRecord<Null, string>("akka100", null, elem))
.Via(Producer.CreateFlow(producerSettings))
.Select(elem => new MessageAndMeta<Null, string> { Topic = "akka100", Message = new Message<Null, string> { Value = elem }})
.Via(KafkaProducer.PlainFlow(producerSettings))
.Select(record =>
{
Console.WriteLine($"Producer: {record.Topic}/{record.Partition} {record.Offset}: {record.Value}");
return record;
})
.RunWith(Sink.Ignore<Message<Null, string>>(), materializer);
.RunWith(Sink.Ignore<DeliveryReport<Null, string>>(), materializer);
// TODO: producer as a Commitable Sink

Просмотреть файл

@ -2,13 +2,9 @@
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>netcoreapp1.6</TargetFramework>
<TargetFramework>netcoreapp2.0</TargetFramework>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="Confluent.Kafka" Version="0.11.3" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\..\src\Akka.Streams.Kafka\Akka.Streams.Kafka.csproj" />
</ItemGroup>

99
src/.editorconfig Normal file
Просмотреть файл

@ -0,0 +1,99 @@
# EditorConfig is awesome:http://EditorConfig.org
# top-most EditorConfig file
root = true
# Don't use tabs for indentation.
[*]
indent_style = space
# (Please don't specify an indent_size here; that has too many unintended consequences.)
# Code files
[*.{cs,csx}]
indent_size = 4
# Xml project files
[*.{csproj}]
indent_size = 2
# Xml config files
[*.{props,targets,ruleset,config,nuspec,resx,vsixmanifest,vsct}]
indent_size = 2
# JSON files
[*.json]
indent_size = 2
# proto
[*.proto]
indent_size = 2
# Dotnet code style settings:
[*.{cs}]
# Avoid "this." if not necessary
dotnet_style_qualification_for_field = false:none
dotnet_style_qualification_for_property = false:none
dotnet_style_qualification_for_method = false:none
dotnet_style_qualification_for_event = false:none
# Use language keywords instead of framework type names for type references
dotnet_style_predefined_type_for_locals_parameters_members = true:warning
dotnet_style_predefined_type_for_member_access = true:warning
# Suggest more modern language features when available
dotnet_style_object_initializer = true:warning
dotnet_style_collection_initializer = true:warning
dotnet_style_coalesce_expression = true:warning
dotnet_style_null_propagation = true:warning
dotnet_style_explicit_tuple_names = true:warning
# Naming
dotnet_naming_rule.async_methods_end_in_async.symbols = any_async_methods
dotnet_naming_rule.async_methods_end_in_async.style = end_in_async
dotnet_naming_rule.async_methods_end_in_async.severity = warning
dotnet_naming_symbols.any_async_methods.applicable_kinds = method
dotnet_naming_symbols.any_async_methods.applicable_accessibilities = *
dotnet_naming_symbols.any_async_methods.required_modifiers = async
dotnet_naming_style.end_in_async.required_suffix = Async
dotnet_naming_style.end_in_async.capitalization = pascal_case
# CSharp code style settings:
[*.cs]
# Prefer "var" everywhere
csharp_style_var_for_built_in_types = true:suggestion
csharp_style_var_when_type_is_apparent = true:suggestion
csharp_style_var_elsewhere = true:suggestion
# Expression-bodied members
csharp_style_expression_bodied_methods = true:none
csharp_style_expression_bodied_constructors = false:none
csharp_style_expression_bodied_operators = true:none
csharp_style_expression_bodied_properties = true:none
csharp_style_expression_bodied_indexers = true:none
csharp_style_expression_bodied_accessors = false:none
# Suggest more modern language features when available
csharp_style_pattern_matching_over_is_with_cast_check = true:warning
csharp_style_pattern_matching_over_as_with_null_check = true:warning
csharp_style_inlined_variable_declaration = true:warning
csharp_prefer_simple_default_expression = true:warning
csharp_style_throw_expression = true:warning
csharp_style_conditional_delegate_call = true:warning
csharp_prefer_braces = false:none
# Formatting
csharp_preserve_single_line_statements = false
# VS 2017 15.5
[*.cs]
csharp_prefer_inferred_tuple_names = true:warning
csharp_prefer_inferred_anonymous_type_member_names = true:warning
csharp_style_pattern_local_over_anonymous_function = true:warning
csharp_style_deconstructed_variable_declaration = true:warning
dotnet_style_prefer_is_null_check_over_reference_equality_method = true:warning
dotnet_style_require_accessibility_modifiers = true:warning

Просмотреть файл

@ -2,24 +2,20 @@
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>netcoreapp1.1</TargetFramework>
<TargetFramework>netcoreapp2.0</TargetFramework>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="Akka.Streams.TestKit" Version="1.3.5" />
<PackageReference Include="Akka.TestKit.Xunit2" Version="1.3.5" />
<PackageReference Include="FluentAssertions" Version="4.19.4" />
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="15.3.0" />
<PackageReference Include="xunit" Version="2.3.0-*" />
<PackageReference Include="xunit.runner.visualstudio" Version="2.3.0-*" />
<PackageReference Include="FluentAssertions" Version="5.2.0" />
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="15.6.2" />
<PackageReference Include="xunit" Version="2.3.1" />
<PackageReference Include="xunit.runner.visualstudio" Version="2.3.1" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\Akka.Streams.Kafka\Akka.Streams.Kafka.csproj" />
</ItemGroup>
<ItemGroup>
<Service Include="{82a7f48d-3b50-4b1e-b82e-3ada8210c358}" />
</ItemGroup>
</Project>

Просмотреть файл

@ -1,17 +1,15 @@
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using Akka.Configuration;
using Akka.Streams.Dsl;
using Akka.Streams.Kafka.Messages;
using Akka.Streams.Kafka.Dsl;
using Akka.Streams.Kafka.Settings;
using Akka.Streams.TestKit;
using Confluent.Kafka;
using Confluent.Kafka.Serialization;
using FluentAssertions;
using Xunit;
using Xunit.Abstractions;
@ -19,7 +17,7 @@ namespace Akka.Streams.Kafka.Tests.Integration
{
public class CommittableSourceIntegrationTests : Akka.TestKit.Xunit2.TestKit
{
private const string KafkaUrl = "localhost:9092";
private const string KafkaUrl = "localhost:29092";
private const string InitialMsg = "initial msg in topic, required to create the topic before any consumer subscribes to it";
@ -42,9 +40,10 @@ namespace Akka.Streams.Kafka.Tests.Integration
private async Task GivenInitializedTopic(string topic)
{
var producer = ProducerSettings.CreateKafkaProducer();
await producer.ProduceAsync(topic, null, InitialMsg, 0);
producer.Dispose();
using (var producer = ProducerSettings.CreateKafkaProducer())
{
await producer.ProduceAsync(topic, new Message<Null, string> { Value = InitialMsg });
}
}
private ConsumerSettings<Null, string> CreateConsumerSettings(string group)
@ -66,12 +65,12 @@ namespace Akka.Streams.Kafka.Tests.Integration
await Source
.From(Enumerable.Range(1, elementsCount))
.Select(elem => new ProduceRecord<Null, string>(topic1, null, elem.ToString()))
.RunWith(Dsl.Producer.PlainSink(ProducerSettings), _materializer);
.Select(elem => new MessageAndMeta<Null, string> { Topic = topic1, Message = new Message<Null, string> { Value = elem.ToString() } })
.RunWith(KafkaProducer.PlainSink(ProducerSettings), _materializer);
var consumerSettings = CreateConsumerSettings(group1);
var probe = Dsl.Consumer
var probe = KafkaConsumer
.CommittableSource(consumerSettings, Subscriptions.Assignment(new TopicPartition(topic1, 0)))
.Where(c => !c.Record.Value.Equals(InitialMsg))
.Select(c => c.Record.Value)
@ -95,21 +94,19 @@ namespace Akka.Streams.Kafka.Tests.Integration
await Source
.From(Enumerable.Range(1, 100))
.Select(elem => new ProduceRecord<Null, string>(topic1, null, elem.ToString()))
.RunWith(Dsl.Producer.PlainSink(ProducerSettings), _materializer);
.Select(elem => new MessageAndMeta<Null, string> { Topic = topic1, Message = new Message<Null, string> { Value = elem.ToString() } })
.RunWith(KafkaProducer.PlainSink(ProducerSettings), _materializer);
var consumerSettings = CreateConsumerSettings(group1);
var committedElements = new ConcurrentQueue<string>();
var (_, probe1) = Dsl.Consumer.CommittableSource(consumerSettings, Subscriptions.Assignment(new TopicPartition(topic1, 0)))
var (_, probe1) = KafkaConsumer.CommittableSource(consumerSettings, Subscriptions.Assignment(new TopicPartition(topic1, 0)))
.WhereNot(c => c.Record.Value == InitialMsg)
.SelectAsync(10, elem =>
{
return elem.CommitableOffset.Commit().ContinueWith(t =>
{
committedElements.Enqueue(elem.Record.Value);
return Done.Instance;
});
elem.CommitableOffset.Commit();
committedElements.Enqueue(elem.Record.Value);
return Task.FromResult(Done.Instance);
})
.ToMaterialized(this.SinkProbe<Done>(), Keep.Both)
.Run(_materializer);
@ -125,7 +122,7 @@ namespace Akka.Streams.Kafka.Tests.Integration
// Await.result(control.isShutdown, remainingOrDefault)
var probe2 = Dsl.Consumer.CommittableSource(consumerSettings, Subscriptions.Assignment(new TopicPartition(topic1, 0)))
var probe2 = KafkaConsumer.CommittableSource(consumerSettings, Subscriptions.Assignment(new TopicPartition(topic1, 0)))
.Select(_ => _.Record.Value)
.RunWith(this.SinkProbe<string>(), _materializer);
@ -135,8 +132,8 @@ namespace Akka.Streams.Kafka.Tests.Integration
// some concurrent publish
await Source
.From(Enumerable.Range(101, 100))
.Select(elem => new ProduceRecord<Null, string>(topic1, null, elem.ToString()))
.RunWith(Dsl.Producer.PlainSink(ProducerSettings), _materializer);
.Select(elem => new MessageAndMeta<Null, string> { Topic = topic1, Message = new Message<Null, string> { Value = elem.ToString() } })
.RunWith(KafkaProducer.PlainSink(ProducerSettings), _materializer);
probe2.Request(100);
foreach (var i in Enumerable.Range(committedElements.Count + 1, 100).Select(c => c.ToString()))
@ -145,7 +142,7 @@ namespace Akka.Streams.Kafka.Tests.Integration
probe2.Cancel();
// another consumer should see all
var probe3 = Dsl.Consumer.CommittableSource(consumerSettings.WithGroupId(group2), Subscriptions.Assignment(new TopicPartition(topic1, 0)))
var probe3 = KafkaConsumer.CommittableSource(consumerSettings.WithGroupId(group2), Subscriptions.Assignment(new TopicPartition(topic1, 0)))
.WhereNot(c => c.Record.Value == InitialMsg)
.Select(_ => _.Record.Value)
.RunWith(this.SinkProbe<string>(), _materializer);

Просмотреть файл

@ -5,20 +5,20 @@ using System.Text;
using System.Threading.Tasks;
using Akka.Configuration;
using Akka.Streams.Dsl;
using Akka.Streams.Kafka.Messages;
using Akka.Streams.Kafka.Dsl;
using Akka.Streams.Kafka.Settings;
using Akka.Streams.TestKit;
using Confluent.Kafka;
using Confluent.Kafka.Serialization;
using FluentAssertions;
using Xunit;
using Xunit.Abstractions;
using Producer = Akka.Streams.Kafka.Dsl.Producer;
namespace Akka.Streams.Kafka.Tests.Integration
{
public class PlainSinkIntegrationTests : Akka.TestKit.Xunit2.TestKit
{
private const string KafkaUrl = "localhost:9092";
private const string KafkaUrl = "localhost:29092";
private const string InitialMsg = "initial msg in topic, required to create the topic before any consumer subscribes to it";
private readonly ActorMaterializer _materializer;
@ -28,16 +28,18 @@ namespace Akka.Streams.Kafka.Tests.Integration
private string CreateGroup(int number) => $"group-{number}-{Uuid}";
public PlainSinkIntegrationTests(ITestOutputHelper output)
: base(ConfigurationFactory.FromResource<ConsumerSettings<object, object>>("Akka.Streams.Kafka.reference.conf"), null, output)
: base(ConfigurationFactory
.FromResource<ConsumerSettings<object, object>>("Akka.Streams.Kafka.reference.conf"), null, output)
{
_materializer = Sys.Materializer();
}
private async Task GivenInitializedTopic(string topic)
{
var producer = ProducerSettings.CreateKafkaProducer();
await producer.ProduceAsync(topic, null, InitialMsg, 0);
producer.Dispose();
using (var producer = ProducerSettings.CreateKafkaProducer())
{
await producer.ProduceAsync(topic, new Message<Null, string> { Value = InitialMsg });
}
}
private ProducerSettings<Null, string> ProducerSettings =>
@ -67,7 +69,7 @@ namespace Akka.Streams.Kafka.Tests.Integration
var task = new TaskCompletionSource<NotUsed>();
int messagesReceived = 0;
consumer.OnMessage += (sender, message) =>
consumer.OnRecord += (sender, message) =>
{
messagesReceived++;
if (messagesReceived == 100)
@ -77,8 +79,8 @@ namespace Akka.Streams.Kafka.Tests.Integration
await Source
.From(Enumerable.Range(1, 100))
.Select(c => c.ToString())
.Select(elem => new ProduceRecord<Null, string>(topic1, null, elem))
.RunWith(Producer.PlainSink(ProducerSettings), _materializer);
.Select(elem => new MessageAndMeta<Null, string> { Topic = topic1, Message = new Message<Null, string> { Value = elem } })
.RunWith(KafkaProducer.PlainSink(ProducerSettings), _materializer);
var dateTimeStart = DateTime.UtcNow;
@ -95,7 +97,7 @@ namespace Akka.Streams.Kafka.Tests.Integration
messagesReceived.Should().Be(100);
}
[Fact]
[Fact(Skip = "Not implemented yet")]
public async Task PlainSink_should_fail_stage_if_broker_unavailable()
{
var topic1 = CreateTopic(1);
@ -105,14 +107,15 @@ namespace Akka.Streams.Kafka.Tests.Integration
var config = ProducerSettings<Null, string>.Create(Sys, null, new StringSerializer(Encoding.UTF8))
.WithBootstrapServers("localhost:10092");
Action act = () => Source
var probe = Source
.From(Enumerable.Range(1, 100))
.Select(c => c.ToString())
.Select(elem => new ProduceRecord<Null, string>(topic1, null, elem))
.RunWith(Producer.PlainSink(config), _materializer).Wait();
.Select(elem => new MessageAndMeta<Null, string> { Topic = topic1, Message = new Message<Null, string> { Value = elem } })
.Via(KafkaProducer.PlainFlow(config))
.RunWith(this.SinkProbe<DeliveryReport<Null, string>>(), _materializer);
// TODO: find a better way to test FailStage
act.ShouldThrow<AggregateException>().WithInnerException<KafkaException>();
probe.ExpectSubscription();
probe.OnError(new KafkaException(ErrorCode.Local_Transport));
}
}
}

Просмотреть файл

@ -6,14 +6,13 @@ using System.Text;
using System.Threading.Tasks;
using Akka.Configuration;
using Akka.Streams.Dsl;
using Akka.Streams.Kafka.Messages;
using Akka.Streams.Kafka.Dsl;
using Akka.Streams.Kafka.Settings;
using Akka.Streams.Supervision;
using Akka.Streams.TestKit;
using Confluent.Kafka;
using Confluent.Kafka.Serialization;
using FluentAssertions;
using MessagePack.Resolvers;
using Xunit;
using Xunit.Abstractions;
@ -21,7 +20,7 @@ namespace Akka.Streams.Kafka.Tests.Integration
{
public class PlainSourceIntegrationTests : Akka.TestKit.Xunit2.TestKit
{
private const string KafkaUrl = "localhost:9092";
private const string KafkaUrl = "localhost:29092";
private const string InitialMsg = "initial msg in topic, required to create the topic before any consumer subscribes to it";
@ -51,10 +50,11 @@ namespace Akka.Streams.Kafka.Tests.Integration
private async Task GivenInitializedTopic(string topic)
{
var producer = ProducerSettings.CreateKafkaProducer();
await producer.ProduceAsync(topic, null, InitialMsg, 0);
producer.Flush(TimeSpan.FromSeconds(1));
producer.Dispose();
using (var producer = ProducerSettings.CreateKafkaProducer())
{
await producer.ProduceAsync(topic, new Message<Null, string> { Value = InitialMsg });
producer.Flush(TimeSpan.FromSeconds(1));
}
}
private ConsumerSettings<Null, string> CreateConsumerSettings(string group)
@ -69,13 +69,13 @@ namespace Akka.Streams.Kafka.Tests.Integration
{
await Source
.From(range)
.Select(elem => new ProduceRecord<Null, string>(topic, null, elem.ToString()))
.RunWith(Dsl.Producer.PlainSink(producerSettings), _materializer);
.Select(elem => new MessageAndMeta<Null, string> { Topic = topic, Message = new Message<Null, string> { Value = elem.ToString() } })
.RunWith(KafkaProducer.PlainSink(producerSettings), _materializer);
}
private TestSubscriber.Probe<string> CreateProbe(ConsumerSettings<Null, string> consumerSettings, string topic, ISubscription sub)
{
return Dsl.Consumer
return KafkaConsumer
.PlainSource(consumerSettings, sub)
.Where(c => !c.Value.Equals(InitialMsg))
.Select(c => c.Value)
@ -127,7 +127,7 @@ namespace Akka.Streams.Kafka.Tests.Integration
probe.Cancel();
}
[Fact]
[Fact(Skip = "Flaky")]
public async Task PlainSource_consumes_messages_from_KafkaProducer_with_subscribe_to_topic()
{
int elementsCount = 100;
@ -179,7 +179,7 @@ namespace Akka.Streams.Kafka.Tests.Integration
.WithProperty("auto.offset.reset", "earliest")
.WithGroupId(group1);
var probe = Dsl.Consumer
var probe = KafkaConsumer
.PlainSource(settings, Subscriptions.Assignment(new TopicPartition(topic1, 0)))
.WithAttributes(ActorAttributes.CreateSupervisionStrategy(Deciders.StoppingDecider))
.Select(c => c.Value)
@ -209,7 +209,7 @@ namespace Akka.Streams.Kafka.Tests.Integration
.WithProperty("auto.offset.reset", "earliest")
.WithGroupId(group1);
var probe = Dsl.Consumer
var probe = KafkaConsumer
.PlainSource(settings, Subscriptions.Assignment(new TopicPartition(topic1, 0)))
.WithAttributes(ActorAttributes.CreateSupervisionStrategy(Decider))
.Select(c => c.Value)

Просмотреть файл

@ -0,0 +1,56 @@

Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio 15
VisualStudioVersion = 15.0.26730.10
MinimumVisualStudioVersion = 10.0.40219.1
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SimpleConsumer", "examples\SimpleConsumer\SimpleConsumer.csproj", "{ECDA0400-203E-4647-81B4-224954F79520}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SimpleProducer", "examples\SimpleProducer\SimpleProducer.csproj", "{F30D604D-22C5-415E-8F10-EBE0534F3885}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Akka.Streams.Kafka", "src\Akka.Streams.Kafka\Akka.Streams.Kafka.csproj", "{21CFA085-3156-4766-9B2E-B5F1747DF87F}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Akka.Streams.Kafka.Tests", "src\Akka.Streams.Kafka.Tests\Akka.Streams.Kafka.Tests.csproj", "{8A6E9E88-1BE5-48D0-B603-AEA151D68AB4}"
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "examples", "examples", "{DBBF6380-3734-49B5-8BF6-74A7C33DFA55}"
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{568F6963-99B1-4E1C-8394-52E4064D6F32}"
ProjectSection(SolutionItems) = preProject
.editorconfig = .editorconfig
docker-compose.yml = docker-compose.yml
README.md = README.md
EndProjectSection
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
Release|Any CPU = Release|Any CPU
EndGlobalSection
GlobalSection(ProjectConfigurationPlatforms) = postSolution
{ECDA0400-203E-4647-81B4-224954F79520}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{ECDA0400-203E-4647-81B4-224954F79520}.Debug|Any CPU.Build.0 = Debug|Any CPU
{ECDA0400-203E-4647-81B4-224954F79520}.Release|Any CPU.ActiveCfg = Release|Any CPU
{ECDA0400-203E-4647-81B4-224954F79520}.Release|Any CPU.Build.0 = Release|Any CPU
{F30D604D-22C5-415E-8F10-EBE0534F3885}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{F30D604D-22C5-415E-8F10-EBE0534F3885}.Debug|Any CPU.Build.0 = Debug|Any CPU
{F30D604D-22C5-415E-8F10-EBE0534F3885}.Release|Any CPU.ActiveCfg = Release|Any CPU
{F30D604D-22C5-415E-8F10-EBE0534F3885}.Release|Any CPU.Build.0 = Release|Any CPU
{21CFA085-3156-4766-9B2E-B5F1747DF87F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{21CFA085-3156-4766-9B2E-B5F1747DF87F}.Debug|Any CPU.Build.0 = Debug|Any CPU
{21CFA085-3156-4766-9B2E-B5F1747DF87F}.Release|Any CPU.ActiveCfg = Release|Any CPU
{21CFA085-3156-4766-9B2E-B5F1747DF87F}.Release|Any CPU.Build.0 = Release|Any CPU
{8A6E9E88-1BE5-48D0-B603-AEA151D68AB4}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{8A6E9E88-1BE5-48D0-B603-AEA151D68AB4}.Debug|Any CPU.Build.0 = Debug|Any CPU
{8A6E9E88-1BE5-48D0-B603-AEA151D68AB4}.Release|Any CPU.ActiveCfg = Release|Any CPU
{8A6E9E88-1BE5-48D0-B603-AEA151D68AB4}.Release|Any CPU.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
EndGlobalSection
GlobalSection(NestedProjects) = preSolution
{ECDA0400-203E-4647-81B4-224954F79520} = {DBBF6380-3734-49B5-8BF6-74A7C33DFA55}
{F30D604D-22C5-415E-8F10-EBE0534F3885} = {DBBF6380-3734-49B5-8BF6-74A7C33DFA55}
EndGlobalSection
GlobalSection(ExtensibilityGlobals) = postSolution
SolutionGuid = {4B399516-A19F-4B0E-9AA9-CD6197619BE0}
EndGlobalSection
EndGlobal

Просмотреть файл

@ -4,14 +4,15 @@
<AssemblyTitle>Akka.Streams.Kafka</AssemblyTitle>
<Description>Kafka adapter for Akka.NET Streams</Description>
<TargetFramework>netstandard1.6</TargetFramework>
<Copyright>Copyright © 2017 alexvaluyskiy</Copyright>
<Authors>alexvaluyskiy</Authors>
<VersionPrefix>0.2.0</VersionPrefix>
<Copyright>Copyright © 2017-2018 AkkaNetContrib</Copyright>
<Authors>AkkaNetContrib</Authors>
<VersionPrefix>0.5.0</VersionPrefix>
<VersionSuffix>beta</VersionSuffix>
<PackageTags>akka;actors;streams;kafka;reactive</PackageTags>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
<PackageIconUrl>http://getakka.net/images/akkalogo.png</PackageIconUrl>
<PackageProjectUrl>https://github.com/alexvaluyskiy/Akka.Streams.Kafka</PackageProjectUrl>
<PackageLicenseUrl>https://github.com/alexvaluyskiy/Akka.Streams.Kafka/blob/dev/LICENSE</PackageLicenseUrl>
<PackageProjectUrl>https://github.com/AkkaNetContrib/Alpakka</PackageProjectUrl>
<PackageLicenseUrl>https://github.com/AkkaNetContrib/Alpakka/blob/dev/LICENSE</PackageLicenseUrl>
<NetStandardImplicitPackageVersion>1.6.1</NetStandardImplicitPackageVersion>
<NoWarn>$(NoWarn);CS1591</NoWarn>
</PropertyGroup>
@ -22,8 +23,7 @@
<ItemGroup>
<PackageReference Include="Akka.Streams" Version="1.3.5" />
<PackageReference Include="Confluent.Kafka" Version="0.11.3" />
<PackageReference Include="MessagePack" Version="1.7.3.4" />
<PackageReference Include="Confluent.Kafka" Version="1.0.0-experimental-2" />
<PackageReference Include="System.ValueTuple" Version="4.4.0" />
</ItemGroup>

Просмотреть файл

@ -1,25 +0,0 @@
using System;
using System.Collections.Generic;
using System.Text;
using System.Threading.Tasks;
using Akka.Streams.Dsl;
using Akka.Streams.Kafka.Settings;
using Akka.Streams.Kafka.Stages;
using Confluent.Kafka;
using Akka.Streams.Kafka.Messages;
namespace Akka.Streams.Kafka.Dsl
{
public static class Consumer
{
public static Source<Message<K, V>, Task> PlainSource<K, V>(ConsumerSettings<K, V> settings, ISubscription subscription)
{
return Source.FromGraph(new KafkaSourceStage<K, V>(settings, subscription));
}
public static Source<CommittableMessage<K, V>, Task> CommittableSource<K, V>(ConsumerSettings<K, V> settings, ISubscription subscription)
{
return Source.FromGraph(new CommittableConsumerStage<K, V>(settings, subscription));
}
}
}

Просмотреть файл

@ -0,0 +1,43 @@
using System.Threading.Tasks;
using Akka.Streams.Dsl;
using Akka.Streams.Kafka.Settings;
using Akka.Streams.Kafka.Stages;
using Confluent.Kafka;
using Akka.Streams.Kafka.Messages;
namespace Akka.Streams.Kafka.Dsl
{
/// <summary>
/// Akka Stream connector for subscribing to Kafka topics.
/// </summary>
public static class KafkaConsumer
{
/// <summary>
/// The <see cref="PlainSource{K,V}"/> emits <see cref="ConsumerRecord"/> elements (as received from the underlying
/// <see cref="IConsumer{TKey,TValue}"/>). It has no support for committing offsets to Kafka. It can be used when the
/// offset is stored externally or with auto-commit (note that auto-commit is by default disabled).
/// The consumer application doesn't need to use Kafka's built-in offset storage and can store offsets in a store of its own
/// choosing. The primary use case for this is allowing the application to store both the offset and the results of the
/// consumption in the same system in a way that both the results and offsets are stored atomically.This is not always
/// possible, but when it is, it will make the consumption fully atomic and give "exactly once" semantics that are
/// stronger than the "at-least once" semantics you get with Kafka's offset commit functionality.
/// </summary>
public static Source<ConsumerRecord<K, V>, Task> PlainSource<K, V>(ConsumerSettings<K, V> settings, ISubscription subscription)
{
return Source.FromGraph(new KafkaSourceStage<K, V>(settings, subscription));
}
/// <summary>
/// The <see cref="CommittableSource{K,V}"/> makes it possible to commit offset positions to Kafka.
/// This is useful when "at-least once delivery" is desired, as each message will likely be
/// delivered one time but in failure cases could be duplicated.
/// Compared to auto-commit, this gives exact control over when a message is considered consumed.
/// If you need to store offsets in anything other than Kafka, <see cref="PlainSource{K,V}"/> should
/// be used instead of this API.
/// </summary>
public static Source<CommittableMessage<K, V>, Task> CommittableSource<K, V>(ConsumerSettings<K, V> settings, ISubscription subscription)
{
return Source.FromGraph(new CommittableConsumerStage<K, V>(settings, subscription));
}
}
}

Просмотреть файл

@ -0,0 +1,71 @@
using System.Threading.Tasks;
using Akka.Streams.Dsl;
using Akka.Streams.Kafka.Messages;
using Akka.Streams.Kafka.Settings;
using Akka.Streams.Kafka.Stages;
using Confluent.Kafka;
namespace Akka.Streams.Kafka.Dsl
{
/// <summary>
/// Akka Stream connector for publishing messages to Kafka topics.
/// </summary>
public static class KafkaProducer
{
/// <summary>
/// The `PlainSink` can be used for publishing records to Kafka topics.
/// </summary>
public static Sink<MessageAndMeta<TKey, TValue>, Task> PlainSink<TKey, TValue>(ProducerSettings<TKey, TValue> settings)
{
return Flow
.Create<MessageAndMeta<TKey, TValue>>()
.Via(PlainFlow(settings))
.ToMaterialized(Sink.Ignore<DeliveryReport<TKey, TValue>>(), Keep.Right);
}
/// <summary>
/// The `PlainSink` can be used for publishing records to Kafka topics.
/// </summary>
public static Sink<MessageAndMeta<TKey, TValue>, Task> PlainSink<TKey, TValue>(ProducerSettings<TKey, TValue> settings, IProducer<TKey, TValue> producer)
{
return Flow
.Create<MessageAndMeta<TKey, TValue>>()
.Via(PlainFlow(settings, producer))
.ToMaterialized(Sink.Ignore<DeliveryReport<TKey, TValue>>(), Keep.Right);
}
/// <summary>
/// Publish records to Kafka topics and then continue the flow. Possibility to pass through a message, which
/// can for example be a <see cref="CommittedOffsets"/> that can be committed later in the flow.
/// </summary>
public static Flow<MessageAndMeta<TKey, TValue>, DeliveryReport<TKey, TValue>, NotUsed> PlainFlow<TKey, TValue>(ProducerSettings<TKey, TValue> settings)
{
var flow = Flow.FromGraph(new ProducerStage<TKey, TValue>(
settings,
closeProducerOnStop: true,
producerProvider : settings.CreateKafkaProducer))
.SelectAsync(settings.Parallelism, x => x);
return string.IsNullOrEmpty(settings.DispatcherId)
? flow
: flow.WithAttributes(ActorAttributes.CreateDispatcher(settings.DispatcherId));
}
/// <summary>
/// Publish records to Kafka topics and then continue the flow. Possibility to pass through a message, which
/// can for example be a <see cref="CommitableOffset"/> that can be committed later in the flow.
/// </summary>
public static Flow<MessageAndMeta<TKey, TValue>, DeliveryReport<TKey, TValue>, NotUsed> PlainFlow<TKey, TValue>(ProducerSettings<TKey, TValue> settings, IProducer<TKey, TValue> producer)
{
var flow = Flow.FromGraph(new ProducerStage<TKey, TValue>(
settings,
closeProducerOnStop: false,
producerProvider: () => producer))
.SelectAsync(settings.Parallelism, x => x);
return string.IsNullOrEmpty(settings.DispatcherId)
? flow
: flow.WithAttributes(ActorAttributes.CreateDispatcher(settings.DispatcherId));
}
}
}

Просмотреть файл

@ -1,31 +0,0 @@
using System.Threading.Tasks;
using Akka.Streams.Dsl;
using Akka.Streams.Kafka.Messages;
using Akka.Streams.Kafka.Settings;
using Akka.Streams.Kafka.Stages;
using Confluent.Kafka;
namespace Akka.Streams.Kafka.Dsl
{
public static class Producer
{
public static Sink<ProduceRecord<TKey, TValue>, Task> PlainSink<TKey, TValue>(ProducerSettings<TKey, TValue> settings)
{
return Flow
.Create<ProduceRecord<TKey, TValue>>()
.Via(CreateFlow(settings))
.ToMaterialized(Sink.Ignore<Message<TKey, TValue>>(), Keep.Right);
}
// TODO: work on naming
public static Flow<ProduceRecord<TKey, TValue>, Message<TKey, TValue>, NotUsed> CreateFlow<TKey, TValue>(ProducerSettings<TKey, TValue> settings)
{
var flow = Flow.FromGraph(new ProducerStage<TKey, TValue>(settings))
.SelectAsync(settings.Parallelism, x => x);
return string.IsNullOrEmpty(settings.DispatcherId)
? flow
: flow.WithAttributes(ActorAttributes.CreateDispatcher(settings.DispatcherId));
}
}
}

Просмотреть файл

@ -1,29 +1,39 @@
using System;
using System.Collections.Generic;
using System.Text;
using System.Threading.Tasks;
using Akka.Streams.Kafka.Dsl;
using Confluent.Kafka;
namespace Akka.Streams.Kafka.Messages
{
/// <summary>
/// Output element of <see cref="KafkaConsumer.CommittableSource{K,V}"/>.
/// The offset can be committed via the included <see cref="CommitableOffset"/>.
/// </summary>
public sealed class CommittableMessage<K, V>
{
public CommittableMessage(Message<K, V> record, CommitableOffset commitableOffset)
public CommittableMessage(ConsumerRecord<K, V> record, CommitableOffset commitableOffset)
{
Record = record;
CommitableOffset = commitableOffset;
}
public Message<K, V> Record { get; }
public ConsumerRecord<K, V> Record { get; }
public CommitableOffset CommitableOffset { get; }
}
/// <summary>
/// Included in <see cref="CommittableMessage{K,V}"/>. Makes it possible to
/// commit an offset or aggregate several offsets before committing.
/// Note that the offset position that is committed to Kafka will automatically
/// be one more than the `offset` of the message, because the committed offset
/// should be the next message your application will consume,
/// i.e. lastProcessedMessageOffset + 1.
/// </summary>
public class CommitableOffset
{
private readonly Func<Task<CommittedOffsets>> _task;
private readonly Func<CommittedOffsets> _task;
public CommitableOffset(Func<Task<CommittedOffsets>> task, PartitionOffset offset)
public CommitableOffset(Func<CommittedOffsets> task, PartitionOffset offset)
{
_task = task;
Offset = offset;
@ -31,12 +41,15 @@ namespace Akka.Streams.Kafka.Messages
public PartitionOffset Offset { get; }
public Task<CommittedOffsets> Commit()
public CommittedOffsets Commit()
{
return _task();
}
}
/// <summary>
/// Offset position for a groupId, topic, partition.
/// </summary>
public class PartitionOffset
{
public PartitionOffset(string groupId, string topic, int partition, Offset offset)

Просмотреть файл

@ -1,25 +0,0 @@
using System;
using System.Collections.Generic;
using Confluent.Kafka.Serialization;
using MessagePack.Resolvers;
namespace Akka.Streams.Kafka.Messages
{
public sealed class MsgPackDeserializer<T> : ISerializer<T>, IDeserializer<T>
{
public byte[] Serialize(string topic, T data)
{
return MessagePack.MessagePackSerializer.Serialize(data, ContractlessStandardResolver.Instance);
}
public T Deserialize(string topic, byte[] data)
{
return MessagePack.MessagePackSerializer.Deserialize<T>(data, ContractlessStandardResolver.Instance);
}
public IEnumerable<KeyValuePair<string, object>> Configure(IEnumerable<KeyValuePair<string, object>> config, bool isKey)
{
return config;
}
}
}

Просмотреть файл

@ -1,21 +0,0 @@
namespace Akka.Streams.Kafka.Messages
{
public struct ProduceRecord<TKey, TValue>
{
public ProduceRecord(string topic, TKey key, TValue value, int partitionId = -1)
{
Topic = topic;
Key = key;
Value = value;
PartitionId = partitionId;
}
public string Topic { get; }
public TKey Key { get; }
public TValue Value { get; }
public int PartitionId { get; }
}
}

Просмотреть файл

@ -84,7 +84,7 @@ namespace Akka.Streams.Kafka.Settings
dispatcherId: dispatcherId ?? this.DispatcherId,
properties: properties ?? this.Properties);
public Confluent.Kafka.Consumer<TKey, TValue> CreateKafkaConsumer() =>
public Confluent.Kafka.IConsumer<TKey, TValue> CreateKafkaConsumer() =>
new Confluent.Kafka.Consumer<TKey, TValue>(this.Properties, this.KeyDeserializer, this.ValueDeserializer);
}
}

Просмотреть файл

@ -28,6 +28,7 @@ namespace Akka.Streams.Kafka.Settings
{
switch (error.Code)
{
case ErrorCode.Local_Transport:
case ErrorCode.Local_AllBrokersDown:
return false;
}

Просмотреть файл

@ -9,12 +9,13 @@ namespace Akka.Streams.Kafka.Settings
{
public sealed class ProducerSettings<TKey, TValue>
{
public ProducerSettings(ISerializer<TKey> keySerializer, ISerializer<TValue> valueSerializer, int parallelism, string dispatcherId, IImmutableDictionary<string, object> properties)
public ProducerSettings(ISerializer<TKey> keySerializer, ISerializer<TValue> valueSerializer, int parallelism, string dispatcherId, TimeSpan flushTimeout, IImmutableDictionary<string, object> properties)
{
KeySerializer = keySerializer;
ValueSerializer = valueSerializer;
Parallelism = parallelism;
DispatcherId = dispatcherId;
FlushTimeout = flushTimeout;
Properties = properties;
}
@ -22,6 +23,7 @@ namespace Akka.Streams.Kafka.Settings
public ISerializer<TValue> ValueSerializer { get; }
public int Parallelism { get; }
public string DispatcherId { get; }
public TimeSpan FlushTimeout { get; }
public IImmutableDictionary<string, object> Properties { get; }
public ProducerSettings<TKey, TValue> WithBootstrapServers(string bootstrapServers) =>
@ -41,12 +43,14 @@ namespace Akka.Streams.Kafka.Settings
ISerializer<TValue> valueSerializer = null,
int? parallelism = null,
string dispatcherId = null,
TimeSpan? flushTimeout = null,
IImmutableDictionary<string, object> properties = null) =>
new ProducerSettings<TKey, TValue>(
keySerializer: keySerializer ?? this.KeySerializer,
valueSerializer: valueSerializer ?? this.ValueSerializer,
parallelism: parallelism ?? this.Parallelism,
dispatcherId: dispatcherId ?? this.DispatcherId,
flushTimeout: flushTimeout ?? this.FlushTimeout,
properties: properties ?? this.Properties);
public static ProducerSettings<TKey, TValue> Create(ActorSystem system, ISerializer<TKey> keySerializer, ISerializer<TValue> valueSerializer)
@ -66,10 +70,11 @@ namespace Akka.Streams.Kafka.Settings
valueSerializer: valueSerializer,
parallelism: config.GetInt("parallelism", 100),
dispatcherId: config.GetString("use-dispatcher", "akka.kafka.default-dispatcher"),
flushTimeout: config.GetTimeSpan("flush-timeout", TimeSpan.FromSeconds(2)),
properties: ImmutableDictionary<string, object>.Empty);
}
public Confluent.Kafka.Producer<TKey, TValue> CreateKafkaProducer() =>
public Confluent.Kafka.IProducer<TKey, TValue> CreateKafkaProducer() =>
new Confluent.Kafka.Producer<TKey, TValue>(Properties, KeySerializer, ValueSerializer);
}
}

Просмотреть файл

@ -37,9 +37,9 @@ namespace Akka.Streams.Kafka.Stages
private readonly ConsumerSettings<K, V> _settings;
private readonly ISubscription _subscription;
private readonly Outlet<CommittableMessage<K, V>> _out;
private Consumer<K, V> _consumer;
private IConsumer<K, V> _consumer;
private Action<Message<K, V>> _messagesReceived;
private Action<ConsumerRecord<K, V>> _messagesReceived;
private Action<IEnumerable<TopicPartition>> _partitionsAssigned;
private Action<IEnumerable<TopicPartition>> _partitionsRevoked;
private readonly Decider _decider;
@ -47,8 +47,8 @@ namespace Akka.Streams.Kafka.Stages
private const string TimerKey = "PollTimer";
private readonly Queue<CommittableMessage<K, V>> _buffer;
private IEnumerable<TopicPartition> assignedPartitions = null;
private volatile bool isPaused = false;
private IEnumerable<TopicPartition> _assignedPartitions;
private volatile bool _isPaused;
private readonly TaskCompletionSource<NotUsed> _completion;
public KafkaCommittableSourceStage(CommittableConsumerStage<K, V> stage, Attributes attributes, TaskCompletionSource<NotUsed> completion) : base(stage.Shape)
@ -70,11 +70,11 @@ namespace Akka.Streams.Kafka.Stages
}
else
{
if (isPaused)
if (_isPaused)
{
_consumer.Resume(assignedPartitions);
isPaused = false;
Log.Debug($"Polling resumed, buffer is empty");
_consumer.Resume(_assignedPartitions);
_isPaused = false;
Log.Debug("Polling resumed, buffer is empty");
}
PullQueue();
}
@ -88,7 +88,7 @@ namespace Akka.Streams.Kafka.Stages
_consumer = _settings.CreateKafkaConsumer();
Log.Debug($"Consumer started: {_consumer.Name}");
_consumer.OnMessage += HandleOnMessage;
_consumer.OnRecord += HandleOnMessage;
_consumer.OnConsumeError += HandleConsumeError;
_consumer.OnError += HandleOnError;
_consumer.OnPartitionsAssigned += HandleOnPartitionsAssigned;
@ -107,7 +107,7 @@ namespace Akka.Streams.Kafka.Stages
break;
}
_messagesReceived = GetAsyncCallback<Message<K, V>>(MessagesReceived);
_messagesReceived = GetAsyncCallback<ConsumerRecord<K, V>>(MessagesReceived);
_partitionsAssigned = GetAsyncCallback<IEnumerable<TopicPartition>>(PartitionsAssigned);
_partitionsRevoked = GetAsyncCallback<IEnumerable<TopicPartition>>(PartitionsRevoked);
ScheduleRepeatedly(TimerKey, _settings.PollInterval);
@ -115,7 +115,7 @@ namespace Akka.Streams.Kafka.Stages
public override void PostStop()
{
_consumer.OnMessage -= HandleOnMessage;
_consumer.OnRecord -= HandleOnMessage;
_consumer.OnConsumeError -= HandleConsumeError;
_consumer.OnError -= HandleOnError;
_consumer.OnPartitionsAssigned -= HandleOnPartitionsAssigned;
@ -131,9 +131,9 @@ namespace Akka.Streams.Kafka.Stages
// Consumer's events
//
private void HandleOnMessage(object sender, Message<K, V> message) => _messagesReceived.Invoke(message);
private void HandleOnMessage(object sender, ConsumerRecord<K, V> message) => _messagesReceived(message);
private void HandleConsumeError(object sender, Message message)
private void HandleConsumeError(object sender, ConsumerRecord message)
{
Log.Error(message.Error.Reason);
var exception = new SerializationException(message.Error.Reason);
@ -166,23 +166,23 @@ namespace Akka.Streams.Kafka.Stages
private void HandleOnPartitionsAssigned(object sender, List<TopicPartition> list)
{
_partitionsAssigned.Invoke(list);
_partitionsAssigned(list);
}
private void HandleOnPartitionsRevoked(object sender, List<TopicPartition> list)
{
_partitionsRevoked.Invoke(list);
_partitionsRevoked(list);
}
//
// Async callbacks
//
private void MessagesReceived(Message<K, V> message)
private void MessagesReceived(ConsumerRecord<K, V> message)
{
var consumer = _consumer;
var commitableOffset = new CommitableOffset(
() => consumer.CommitAsync(),
() => consumer.Commit(),
new PartitionOffset("groupId", message.Topic, message.Partition, message.Offset));
_buffer.Enqueue(new CommittableMessage<K, V>(message, commitableOffset));
@ -196,25 +196,25 @@ namespace Akka.Streams.Kafka.Stages
{
Log.Debug($"Partitions were assigned: {_consumer.Name}");
_consumer.Assign(partitions);
assignedPartitions = partitions;
_assignedPartitions = partitions;
}
private void PartitionsRevoked(IEnumerable<TopicPartition> partitions)
{
Log.Debug($"Partitions were revoked: {_consumer.Name}");
_consumer.Unassign();
assignedPartitions = null;
_assignedPartitions = null;
}
private void PullQueue()
{
_consumer.Poll(_settings.PollTimeout);
if (!isPaused && _buffer.Count > _settings.BufferSize)
if (!_isPaused && _buffer.Count > _settings.BufferSize)
{
Log.Debug($"Polling paused, buffer is full");
_consumer.Pause(assignedPartitions);
isPaused = true;
_consumer.Pause(_assignedPartitions);
_isPaused = true;
}
}

Просмотреть файл

@ -10,10 +10,10 @@ using System.Runtime.Serialization;
namespace Akka.Streams.Kafka.Stages
{
internal class KafkaSourceStage<K, V> : GraphStageWithMaterializedValue<SourceShape<Message<K, V>>, Task>
internal class KafkaSourceStage<K, V> : GraphStageWithMaterializedValue<SourceShape<ConsumerRecord<K, V>>, Task>
{
public Outlet<Message<K, V>> Out { get; } = new Outlet<Message<K, V>>("kafka.consumer.out");
public override SourceShape<Message<K, V>> Shape { get; }
public Outlet<ConsumerRecord<K, V>> Out { get; } = new Outlet<ConsumerRecord<K, V>>("kafka.consumer.out");
public override SourceShape<ConsumerRecord<K, V>> Shape { get; }
public ConsumerSettings<K, V> Settings { get; }
public ISubscription Subscription { get; }
@ -21,7 +21,7 @@ namespace Akka.Streams.Kafka.Stages
{
Settings = settings;
Subscription = subscription;
Shape = new SourceShape<Message<K, V>>(Out);
Shape = new SourceShape<ConsumerRecord<K, V>>(Out);
Settings = settings;
Subscription = subscription;
}
@ -37,19 +37,19 @@ namespace Akka.Streams.Kafka.Stages
{
private readonly ConsumerSettings<K, V> _settings;
private readonly ISubscription _subscription;
private readonly Outlet<Message<K, V>> _out;
private Consumer<K, V> _consumer;
private readonly Outlet<ConsumerRecord<K, V>> _out;
private IConsumer<K, V> _consumer;
private Action<Message<K, V>> _messagesReceived;
private Action<ConsumerRecord<K, V>> _messagesReceived;
private Action<IEnumerable<TopicPartition>> _partitionsAssigned;
private Action<IEnumerable<TopicPartition>> _partitionsRevoked;
private readonly Decider _decider;
private const string TimerKey = "PollTimer";
private readonly Queue<Message<K, V>> _buffer;
private IEnumerable<TopicPartition> assignedPartitions = null;
private volatile bool isPaused = false;
private readonly Queue<ConsumerRecord<K, V>> _buffer;
private IEnumerable<TopicPartition> _assignedPartitions;
private volatile bool _isPaused;
private readonly TaskCompletionSource<NotUsed> _completion;
public KafkaSourceStageLogic(KafkaSourceStage<K, V> stage, Attributes attributes, TaskCompletionSource<NotUsed> completion) : base(stage.Shape)
@ -58,7 +58,7 @@ namespace Akka.Streams.Kafka.Stages
_subscription = stage.Subscription;
_out = stage.Out;
_completion = completion;
_buffer = new Queue<Message<K, V>>(stage.Settings.BufferSize);
_buffer = new Queue<ConsumerRecord<K, V>>(stage.Settings.BufferSize);
var supervisionStrategy = attributes.GetAttribute<ActorAttributes.SupervisionStrategy>(null);
_decider = supervisionStrategy != null ? supervisionStrategy.Decider : Deciders.ResumingDecider;
@ -71,11 +71,11 @@ namespace Akka.Streams.Kafka.Stages
}
else
{
if (isPaused)
if (_isPaused)
{
_consumer.Resume(assignedPartitions);
isPaused = false;
Log.Debug($"Polling resumed, buffer is empty");
_consumer.Resume(_assignedPartitions);
_isPaused = false;
Log.Debug("Polling resumed, buffer is empty");
}
PullQueue();
}
@ -89,7 +89,7 @@ namespace Akka.Streams.Kafka.Stages
_consumer = _settings.CreateKafkaConsumer();
Log.Debug($"Consumer started: {_consumer.Name}");
_consumer.OnMessage += HandleOnMessage;
_consumer.OnRecord += HandleOnMessage;
_consumer.OnConsumeError += HandleConsumeError;
_consumer.OnError += HandleOnError;
_consumer.OnPartitionsAssigned += HandleOnPartitionsAssigned;
@ -108,7 +108,7 @@ namespace Akka.Streams.Kafka.Stages
break;
}
_messagesReceived = GetAsyncCallback<Message<K, V>>(MessagesReceived);
_messagesReceived = GetAsyncCallback<ConsumerRecord<K, V>>(MessagesReceived);
_partitionsAssigned = GetAsyncCallback<IEnumerable<TopicPartition>>(PartitionsAssigned);
_partitionsRevoked = GetAsyncCallback<IEnumerable<TopicPartition>>(PartitionsRevoked);
ScheduleRepeatedly(TimerKey, _settings.PollInterval);
@ -116,7 +116,7 @@ namespace Akka.Streams.Kafka.Stages
public override void PostStop()
{
_consumer.OnMessage -= HandleOnMessage;
_consumer.OnRecord -= HandleOnMessage;
_consumer.OnConsumeError -= HandleConsumeError;
_consumer.OnError -= HandleOnError;
_consumer.OnPartitionsAssigned -= HandleOnPartitionsAssigned;
@ -132,9 +132,9 @@ namespace Akka.Streams.Kafka.Stages
// Consumer's events
//
private void HandleOnMessage(object sender, Message<K, V> message) => _messagesReceived.Invoke(message);
private void HandleOnMessage(object sender, ConsumerRecord<K, V> message) => _messagesReceived(message);
private void HandleConsumeError(object sender, Message message)
private void HandleConsumeError(object sender, ConsumerRecord message)
{
Log.Error(message.Error.Reason);
var exception = new SerializationException(message.Error.Reason);
@ -167,19 +167,19 @@ namespace Akka.Streams.Kafka.Stages
private void HandleOnPartitionsAssigned(object sender, List<TopicPartition> list)
{
_partitionsAssigned.Invoke(list);
_partitionsAssigned(list);
}
private void HandleOnPartitionsRevoked(object sender, List<TopicPartition> list)
{
_partitionsRevoked.Invoke(list);
_partitionsRevoked(list);
}
//
// Async callbacks
//
private void MessagesReceived(Message<K, V> message)
private void MessagesReceived(ConsumerRecord<K, V> message)
{
_buffer.Enqueue(message);
if (IsAvailable(_out))
@ -192,25 +192,25 @@ namespace Akka.Streams.Kafka.Stages
{
Log.Debug($"Partitions were assigned: {_consumer.Name}");
_consumer.Assign(partitions);
assignedPartitions = partitions;
_assignedPartitions = partitions;
}
private void PartitionsRevoked(IEnumerable<TopicPartition> partitions)
{
Log.Debug($"Partitions were revoked: {_consumer.Name}");
_consumer.Unassign();
assignedPartitions = null;
_assignedPartitions = null;
}
private void PullQueue()
{
_consumer.Poll(_settings.PollTimeout);
if (!isPaused && _buffer.Count > _settings.BufferSize)
if (!_isPaused && _buffer.Count > _settings.BufferSize)
{
Log.Debug($"Polling paused, buffer is full");
_consumer.Pause(assignedPartitions);
isPaused = true;
_consumer.Pause(_assignedPartitions);
_isPaused = true;
}
}

Просмотреть файл

@ -1,27 +1,33 @@
using System;
using System.Linq;
using System.Threading.Tasks;
using Akka.Streams.Kafka.Messages;
using Akka.Streams.Kafka.Settings;
using Akka.Streams.Stage;
using Confluent.Kafka;
using Akka.Streams.Supervision;
using Akka.Util.Internal;
using Confluent.Kafka;
namespace Akka.Streams.Kafka.Stages
{
internal sealed class ProducerStage<K, V> : GraphStage<FlowShape<ProduceRecord<K, V>, Task<Message<K, V>>>>
internal sealed class ProducerStage<K, V> : GraphStage<FlowShape<MessageAndMeta<K, V>, Task<DeliveryReport<K, V>>>>
{
public ProducerSettings<K, V> Settings { get; }
public Inlet<ProduceRecord<K, V>> In { get; } = new Inlet<ProduceRecord<K, V>>("kafka.producer.in");
public Outlet<Task<Message<K, V>>> Out { get; } = new Outlet<Task<Message<K, V>>>("kafka.producer.out");
public bool CloseProducerOnStop { get; }
public Func<IProducer<K, V>> ProducerProvider { get; }
public Inlet<MessageAndMeta<K, V>> In { get; } = new Inlet<MessageAndMeta<K, V>>("kafka.producer.in");
public Outlet<Task<DeliveryReport<K, V>>> Out { get; } = new Outlet<Task<DeliveryReport<K, V>>>("kafka.producer.out");
public ProducerStage(ProducerSettings<K, V> settings)
public ProducerStage(
ProducerSettings<K, V> settings,
bool closeProducerOnStop,
Func<IProducer<K, V>> producerProvider)
{
Settings = settings;
Shape = new FlowShape<ProduceRecord<K, V>, Task<Message<K, V>>>(In, Out);
CloseProducerOnStop = closeProducerOnStop;
ProducerProvider = producerProvider;
Shape = new FlowShape<MessageAndMeta<K, V>, Task<DeliveryReport<K, V>>>(In, Out);
}
public override FlowShape<ProduceRecord<K, V>, Task<Message<K, V>>> Shape { get; }
public override FlowShape<MessageAndMeta<K, V>, Task<DeliveryReport<K, V>>> Shape { get; }
protected override GraphStageLogic CreateLogic(Attributes inheritedAttributes)
{
@ -31,41 +37,74 @@ namespace Akka.Streams.Kafka.Stages
internal sealed class ProducerStageLogic<K, V> : GraphStageLogic
{
private Producer<K, V> _producer;
private readonly ProducerStage<K, V> _stage;
private IProducer<K, V> _producer;
private readonly TaskCompletionSource<NotUsed> _completionState = new TaskCompletionSource<NotUsed>();
private Action<ProduceRecord<K, V>> _sendToProducer;
private readonly ProducerSettings<K, V> _settings;
private Inlet<ProduceRecord<K, V>> In { get; }
private Outlet<Task<Message<K, V>>> Out { get; }
private volatile bool _inIsClosed;
private readonly AtomicCounter _awaitingConfirmation = new AtomicCounter(0);
public ProducerStageLogic(ProducerStage<K, V> stage, Attributes attributes) : base(stage.Shape)
{
In = stage.In;
Out = stage.Out;
_settings = stage.Settings;
_stage = stage;
SetHandler(In,
var supervisionStrategy = attributes.GetAttribute<ActorAttributes.SupervisionStrategy>(null);
var decider = supervisionStrategy != null ? supervisionStrategy.Decider : Deciders.ResumingDecider;
SetHandler(_stage.In,
onPush: () =>
{
var msg = Grab<ProduceRecord<K, V>>(In);
_sendToProducer.Invoke(msg);
var msg = Grab(_stage.In);
var result = new TaskCompletionSource<DeliveryReport<K, V>>();
_producer.Produce(msg.TopicPartition, msg.Message, report =>
{
if (!report.Error.HasError)
{
result.SetResult(report);
}
else
{
var exception = new KafkaException(report.Error);
switch (decider(exception))
{
case Directive.Stop:
if (_stage.CloseProducerOnStop)
{
_producer.Dispose();
}
FailStage(exception);
break;
default:
result.SetException(exception);
break;
}
}
if (_awaitingConfirmation.DecrementAndGet() == 0 && _inIsClosed)
{
CheckForCompletion();
}
});
_awaitingConfirmation.IncrementAndGet();
Push(_stage.Out, result.Task);
},
onUpstreamFinish: () =>
{
_inIsClosed = true;
_completionState.SetResult(NotUsed.Instance);
_producer.Flush(TimeSpan.FromSeconds(2));
CheckForCompletion();
},
onUpstreamFailure: exception =>
{
_inIsClosed = true;
_completionState.SetException(exception);
CheckForCompletion();
});
SetHandler(Out, onPull: () =>
SetHandler(_stage.Out, onPull: () =>
{
TryPull(In);
TryPull(_stage.In);
});
}
@ -73,24 +112,23 @@ namespace Akka.Streams.Kafka.Stages
{
base.PreStart();
_producer = _settings.CreateKafkaProducer();
_producer = _stage.ProducerProvider();
Log.Debug($"Producer started: {_producer.Name}");
_producer.OnError += OnProducerError;
_sendToProducer = msg =>
{
var task = _producer.ProduceAsync(msg.Topic, msg.Key, msg.Value, msg.PartitionId);
Push(Out, task);
};
}
public override void PostStop()
{
_producer.Flush(TimeSpan.FromSeconds(2));
_producer.Dispose();
Log.Debug($"Producer stopped: {_producer.Name}");
Log.Debug("Stage completed");
_producer.OnError -= OnProducerError;
if (_stage.CloseProducerOnStop)
{
_producer.Flush(_stage.Settings.FlushTimeout);
_producer.Dispose();
Log.Debug($"Producer closed: {_producer.Name}");
}
base.PostStop();
}
@ -108,7 +146,7 @@ namespace Akka.Streams.Kafka.Stages
public void CheckForCompletion()
{
if (IsClosed(In))
if (IsClosed(_stage.In) && _awaitingConfirmation.Current == 0)
{
var completionTask = _completionState.Task;

Просмотреть файл

@ -4,6 +4,9 @@
akka.kafka.producer {
# Tuning parameter of how many sends that can run in parallel.
parallelism = 100
# How long to wait for `Producer.Flush`
flush-timeout = 10s
# Fully qualified config path which holds the dispatcher configuration
# to be used by the producer stages. Some blocking may occur.

120
src/README.md Normal file
Просмотреть файл

@ -0,0 +1,120 @@
# Akka Streams Kafka
Akka Streams Kafka is an Akka Streams connector for Apache Kafka.
## Builds
[![Build status](https://ci.appveyor.com/api/projects/status/0glh2fi8uic17vl4/branch/dev?svg=true)](https://ci.appveyor.com/project/akkadotnet-contrib/akka-streams-kafka/branch/dev)
## Producer
A producer publishes messages to Kafka topics. The message itself contains information about what topic and partition to publish to so you can publish to different topics with the same producer.
### Settings
When creating a consumer stream you need to pass in `ProducerSettings` that define things like:
- bootstrap servers of the Kafka cluster
- serializers for the keys and values
- tuning parameters
```C#
var producerSettings = new ProducerSettings<Null, string>(system, null, new StringSerializer(Encoding.UTF8))
.WithBootstrapServers("localhost:9092");
```
In addition to programmatic construction of the ProducerSettings it can also be created from configuration (application.conf). By default when creating ProducerSettings with the ActorSystem parameter it uses the config section akka.kafka.producer.
```
akka.kafka.producer {
# Tuning parameter of how many sends that can run in parallel.
parallelism = 100
# How long to wait for `Producer.Flush`
flush-timeout = 10s
# Fully qualified config path which holds the dispatcher configuration
# to be used by the producer stages. Some blocking may occur.
# When this value is empty, the dispatcher configured for the stream
# will be used.
use-dispatcher = "akka.kafka.default-dispatcher"
}
```
### Producer as a Sink
`KafkaProducer.PlainSink` is the easiest way to publish messages. The sink consumes `MessageAndMeta` elements which contains a topic name to which the record is being sent, an optional partition number, and an optional key and value.
```C#
Source
.From(Enumerable.Range(1, 100))
.Select(c => c.ToString())
.Select(elem => new MessageAndMeta<Null, string> { Topic = "topic1", Message = new Message<Null, string> { Value = elem } })
.RunWith(KafkaProducer.PlainSink(producerSettings), materializer);
```
The materialized value of the sink is a `Task` which is completed with result when the stream completes or with exception if an error occurs.
### Producer as a Flow
Sometimes there is a need for publishing messages in the middle of the stream processing, not as the last step, and then you can use `KafkaProducer.PlainFlow`
```C#
Source
.From(Enumerable.Range(1, 100))
.Select(c => c.ToString())
.Select(elem => new MessageAndMeta<Null, string> { Topic = "topic1", Message = new Message<Null, string> { Value = elem } })
.Via(KafkaProducer.PlainFlow(producerSettings))
.Select(record =>
{
Console.WriteLine($"Producer: {record.Topic}/{record.Partition} {record.Offset}: {record.Value}");
return record;
})
.RunWith(Sink.Ignore<DeliveryReport<Null, string>>(), materializer);
```
## Consumer
A consumer is used for subscribing to Kafka topics.
### Settings
When creating a consumer stream you need to pass in `ConsumerSettings` that define things like:
- bootstrap servers of the Kafka cluster
- group id for the consumer, note that offsets are always committed for a given consumer group
- serializers for the keys and values
- tuning parameters
```C#
var consumerSettings = ConsumerSettings<Null, string>.Create(system, null, new StringDeserializer(Encoding.UTF8))
.WithBootstrapServers("localhost:9092")
.WithGroupId("group1");
```
### Plain Consumer
```C#
var subscription = Subscriptions.Assignment(new TopicPartition("akka", 0));
KafkaConsumer.PlainSource(consumerSettings, subscription)
.RunForeach(result =>
{
Console.WriteLine($"Consumer: {result.Topic}/{result.Partition} {result.Offset}: {result.Value}");
}, materializer);
```
### Committable Consumer
The `KafkaConsumer.CommittableSource` makes it possible to commit offset positions to Kafka.
Compared to auto-commit this gives exact control of when a message is considered consumed.
If you need to store offsets in anything other than Kafka, `PlainSource` should be used instead of this API.
This is useful when “at-least once delivery” is desired, as each message will likely be delivered one time but in failure cases could be duplicated.
```C#
KafkaConsumer.CommitableSource(consumerSettings, Subscriptions.Topics("topic1"))
.SelectAsync(1, elem =>
{
return elem.CommitableOffset.Commit();
})
.RunWith(Sink.Ignore<CommittedOffsets>(), _materializer);
```
The above example uses separate mapAsync stages for processing and committing. This guarantees that for parallelism higher than 1 we will keep correct ordering of messages sent for commit.
Committing the offset for each message as illustrated above is rather slow. It is recommended to batch the commits for better throughput, with the trade-off that more messages may be re-delivered in case of failures.

30
src/docker-compose.yml Normal file
Просмотреть файл

@ -0,0 +1,30 @@
version: '2'
services:
zookeeper:
image: confluentinc/cp-zookeeper:4.0.0
ports:
- 32181:32181
environment:
ZOOKEEPER_CLIENT_PORT: 32181
ZOOKEEPER_TICK_TIME: 2000
extra_hosts:
- "moby:127.0.0.1"
- "localhost: 127.0.0.1"
kafka:
image: confluentinc/cp-kafka:4.0.0
ports:
- 29092:29092
depends_on:
- zookeeper
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper:32181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:29092
KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
KAFKA_DELETE_TOPIC_ENABLE: "true"
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
extra_hosts:
- "moby:127.0.0.1"
- "localhost: 127.0.0.1"

Просмотреть файл

@ -0,0 +1,41 @@
using System;
using System.Text;
using Akka.Actor;
using Akka.Configuration;
using Akka.Streams;
using Akka.Streams.Kafka.Dsl;
using Akka.Streams.Kafka.Settings;
using Confluent.Kafka;
using Confluent.Kafka.Serialization;
namespace SimpleConsumer
{
public class Program
{
public static void Main(string[] args)
{
Config fallbackConfig = ConfigurationFactory.ParseString(@"
akka.suppress-json-serializer-warning=true
akka.loglevel = DEBUG
").WithFallback(ConfigurationFactory.FromResource<ConsumerSettings<object, object>>("Akka.Streams.Kafka.reference.conf"));
var system = ActorSystem.Create("TestKafka", fallbackConfig);
var materializer = system.Materializer();
var consumerSettings = ConsumerSettings<Null, string>.Create(system, null, new StringDeserializer(Encoding.UTF8))
.WithBootstrapServers("localhost:29092")
.WithGroupId("group1");
var subscription = Subscriptions.Topics("akka100");
KafkaConsumer.PlainSource(consumerSettings, subscription)
.RunForeach(result =>
{
Console.WriteLine($"Consumer: {result.Topic}/{result.Partition} {result.Offset}: {result.Value}");
}, materializer);
Console.ReadLine();
}
}
}

Просмотреть файл

@ -0,0 +1,12 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>netcoreapp2.0</TargetFramework>
</PropertyGroup>
<ItemGroup>
<ProjectReference Include="..\..\src\Akka.Streams.Kafka\Akka.Streams.Kafka.csproj" />
</ItemGroup>
</Project>

Просмотреть файл

@ -0,0 +1,49 @@
using System;
using System.Linq;
using System.Text;
using Akka.Actor;
using Akka.Configuration;
using Akka.Streams;
using Akka.Streams.Dsl;
using Akka.Streams.Kafka.Dsl;
using Akka.Streams.Kafka.Settings;
using Confluent.Kafka;
using Confluent.Kafka.Serialization;
namespace SimpleProducer
{
public class Program
{
public static void Main(string[] args)
{
Config fallbackConfig = ConfigurationFactory.ParseString(@"
akka.suppress-json-serializer-warning=true
akka.loglevel = DEBUG
").WithFallback(ConfigurationFactory.FromResource<ConsumerSettings<object, object>>("Akka.Streams.Kafka.reference.conf"));
var system = ActorSystem.Create("TestKafka", fallbackConfig);
var materializer = system.Materializer();
var producerSettings = ProducerSettings<Null, string>.Create(system, null, new StringSerializer(Encoding.UTF8))
.WithBootstrapServers("localhost:29092");
Source
.Cycle(() => Enumerable.Range(1, 100).GetEnumerator())
.Select(c => c.ToString())
.Select(elem => new MessageAndMeta<Null, string> { Topic = "akka100", Message = new Message<Null, string> { Value = elem }})
.Via(KafkaProducer.PlainFlow(producerSettings))
.Select(record =>
{
Console.WriteLine($"Producer: {record.Topic}/{record.Partition} {record.Offset}: {record.Value}");
return record;
})
.RunWith(Sink.Ignore<DeliveryReport<Null, string>>(), materializer);
// TODO: producer as a Commitable Sink
// TODO: Sharing KafkaProducer
Console.ReadLine();
}
}
}

Просмотреть файл

@ -0,0 +1,12 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>netcoreapp2.0</TargetFramework>
</PropertyGroup>
<ItemGroup>
<ProjectReference Include="..\..\src\Akka.Streams.Kafka\Akka.Streams.Kafka.csproj" />
</ItemGroup>
</Project>

Просмотреть файл

@ -0,0 +1,21 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<TargetFramework>netcoreapp2.0</TargetFramework>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="Akka.Streams.TestKit" Version="1.3.5" />
<PackageReference Include="Akka.TestKit.Xunit2" Version="1.3.5" />
<PackageReference Include="FluentAssertions" Version="5.2.0" />
<PackageReference Include="Microsoft.NET.Test.Sdk" Version="15.6.2" />
<PackageReference Include="xunit" Version="2.3.1" />
<PackageReference Include="xunit.runner.visualstudio" Version="2.3.1" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\Akka.Streams.Kafka\Akka.Streams.Kafka.csproj" />
</ItemGroup>
</Project>

Просмотреть файл

@ -0,0 +1,157 @@
using System;
using System.Collections.Concurrent;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using Akka.Configuration;
using Akka.Streams.Dsl;
using Akka.Streams.Kafka.Dsl;
using Akka.Streams.Kafka.Settings;
using Akka.Streams.TestKit;
using Confluent.Kafka;
using Confluent.Kafka.Serialization;
using Xunit;
using Xunit.Abstractions;
namespace Akka.Streams.Kafka.Tests.Integration
{
public class CommittableSourceIntegrationTests : Akka.TestKit.Xunit2.TestKit
{
private const string KafkaUrl = "localhost:29092";
private const string InitialMsg = "initial msg in topic, required to create the topic before any consumer subscribes to it";
private readonly ActorMaterializer _materializer;
public CommittableSourceIntegrationTests(ITestOutputHelper output)
: base(ConfigurationFactory.FromResource<ConsumerSettings<object, object>>("Akka.Streams.Kafka.reference.conf"), null, output)
{
_materializer = Sys.Materializer();
}
private string Uuid { get; } = Guid.NewGuid().ToString();
private string CreateTopic(int number) => $"topic-{number}-{Uuid}";
private string CreateGroup(int number) => $"group-{number}-{Uuid}";
private ProducerSettings<Null, string> ProducerSettings =>
ProducerSettings<Null, string>.Create(Sys, null, new StringSerializer(Encoding.UTF8))
.WithBootstrapServers(KafkaUrl);
private async Task GivenInitializedTopic(string topic)
{
using (var producer = ProducerSettings.CreateKafkaProducer())
{
await producer.ProduceAsync(topic, new Message<Null, string> { Value = InitialMsg });
}
}
private ConsumerSettings<Null, string> CreateConsumerSettings(string group)
{
return ConsumerSettings<Null, string>.Create(Sys, null, new StringDeserializer(Encoding.UTF8))
.WithBootstrapServers(KafkaUrl)
.WithProperty("auto.offset.reset", "earliest")
.WithGroupId(group);
}
[Fact]
public async Task CommitableSource_consumes_messages_from_Producer_without_commits()
{
int elementsCount = 100;
var topic1 = CreateTopic(1);
var group1 = CreateGroup(1);
await GivenInitializedTopic(topic1);
await Source
.From(Enumerable.Range(1, elementsCount))
.Select(elem => new MessageAndMeta<Null, string> { Topic = topic1, Message = new Message<Null, string> { Value = elem.ToString() } })
.RunWith(KafkaProducer.PlainSink(ProducerSettings), _materializer);
var consumerSettings = CreateConsumerSettings(group1);
var probe = KafkaConsumer
.CommittableSource(consumerSettings, Subscriptions.Assignment(new TopicPartition(topic1, 0)))
.Where(c => !c.Record.Value.Equals(InitialMsg))
.Select(c => c.Record.Value)
.RunWith(this.SinkProbe<string>(), _materializer);
probe.Request(elementsCount);
foreach (var i in Enumerable.Range(1, elementsCount).Select(c => c.ToString()))
probe.ExpectNext(i, TimeSpan.FromSeconds(10));
probe.Cancel();
}
[Fact]
public async Task CommitableSource_resume_from_commited_offset()
{
var topic1 = CreateTopic(1);
var group1 = CreateGroup(1);
var group2 = CreateGroup(2);
await GivenInitializedTopic(topic1);
await Source
.From(Enumerable.Range(1, 100))
.Select(elem => new MessageAndMeta<Null, string> { Topic = topic1, Message = new Message<Null, string> { Value = elem.ToString() } })
.RunWith(KafkaProducer.PlainSink(ProducerSettings), _materializer);
var consumerSettings = CreateConsumerSettings(group1);
var committedElements = new ConcurrentQueue<string>();
var (_, probe1) = KafkaConsumer.CommittableSource(consumerSettings, Subscriptions.Assignment(new TopicPartition(topic1, 0)))
.WhereNot(c => c.Record.Value == InitialMsg)
.SelectAsync(10, elem =>
{
elem.CommitableOffset.Commit();
committedElements.Enqueue(elem.Record.Value);
return Task.FromResult(Done.Instance);
})
.ToMaterialized(this.SinkProbe<Done>(), Keep.Both)
.Run(_materializer);
probe1.Request(25);
foreach (var _ in Enumerable.Range(1, 25))
{
probe1.ExpectNext(Done.Instance, TimeSpan.FromSeconds(10));
}
probe1.Cancel();
// Await.result(control.isShutdown, remainingOrDefault)
var probe2 = KafkaConsumer.CommittableSource(consumerSettings, Subscriptions.Assignment(new TopicPartition(topic1, 0)))
.Select(_ => _.Record.Value)
.RunWith(this.SinkProbe<string>(), _materializer);
// Note that due to buffers and SelectAsync(10) the committed offset is more
// than 26, and that is not wrong
// some concurrent publish
await Source
.From(Enumerable.Range(101, 100))
.Select(elem => new MessageAndMeta<Null, string> { Topic = topic1, Message = new Message<Null, string> { Value = elem.ToString() } })
.RunWith(KafkaProducer.PlainSink(ProducerSettings), _materializer);
probe2.Request(100);
foreach (var i in Enumerable.Range(committedElements.Count + 1, 100).Select(c => c.ToString()))
probe2.ExpectNext(i, TimeSpan.FromSeconds(10));
probe2.Cancel();
// another consumer should see all
var probe3 = KafkaConsumer.CommittableSource(consumerSettings.WithGroupId(group2), Subscriptions.Assignment(new TopicPartition(topic1, 0)))
.WhereNot(c => c.Record.Value == InitialMsg)
.Select(_ => _.Record.Value)
.RunWith(this.SinkProbe<string>(), _materializer);
probe3.Request(100);
foreach (var i in Enumerable.Range(1, 100).Select(c => c.ToString()))
probe3.ExpectNext(i, TimeSpan.FromSeconds(10));
probe3.Cancel();
}
}
}

Просмотреть файл

@ -0,0 +1,121 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using Akka.Configuration;
using Akka.Streams.Dsl;
using Akka.Streams.Kafka.Dsl;
using Akka.Streams.Kafka.Settings;
using Akka.Streams.TestKit;
using Confluent.Kafka;
using Confluent.Kafka.Serialization;
using FluentAssertions;
using Xunit;
using Xunit.Abstractions;
namespace Akka.Streams.Kafka.Tests.Integration
{
public class PlainSinkIntegrationTests : Akka.TestKit.Xunit2.TestKit
{
private const string KafkaUrl = "localhost:29092";
private const string InitialMsg = "initial msg in topic, required to create the topic before any consumer subscribes to it";
private readonly ActorMaterializer _materializer;
private string Uuid { get; } = Guid.NewGuid().ToString();
private string CreateTopic(int number) => $"topic-{number}-{Uuid}";
private string CreateGroup(int number) => $"group-{number}-{Uuid}";
public PlainSinkIntegrationTests(ITestOutputHelper output)
: base(ConfigurationFactory
.FromResource<ConsumerSettings<object, object>>("Akka.Streams.Kafka.reference.conf"), null, output)
{
_materializer = Sys.Materializer();
}
private async Task GivenInitializedTopic(string topic)
{
using (var producer = ProducerSettings.CreateKafkaProducer())
{
await producer.ProduceAsync(topic, new Message<Null, string> { Value = InitialMsg });
}
}
private ProducerSettings<Null, string> ProducerSettings =>
ProducerSettings<Null, string>.Create(Sys, null, new StringSerializer(Encoding.UTF8))
.WithBootstrapServers(KafkaUrl);
private ConsumerSettings<Null, string> CreateConsumerSettings(string group)
{
return ConsumerSettings<Null, string>.Create(Sys, null, new StringDeserializer(Encoding.UTF8))
.WithBootstrapServers(KafkaUrl)
.WithProperty("auto.offset.reset", "earliest")
.WithGroupId(group);
}
[Fact]
public async Task PlainSink_should_publish_100_elements_to_Kafka_producer()
{
var topic1 = CreateTopic(1);
var group1 = CreateGroup(1);
await GivenInitializedTopic(topic1);
var consumerSettings = CreateConsumerSettings(group1);
var consumer = consumerSettings.CreateKafkaConsumer();
consumer.Assign(new List<TopicPartition> { new TopicPartition(topic1, 0) });
var task = new TaskCompletionSource<NotUsed>();
int messagesReceived = 0;
consumer.OnRecord += (sender, message) =>
{
messagesReceived++;
if (messagesReceived == 100)
task.SetResult(NotUsed.Instance);
};
await Source
.From(Enumerable.Range(1, 100))
.Select(c => c.ToString())
.Select(elem => new MessageAndMeta<Null, string> { Topic = topic1, Message = new Message<Null, string> { Value = elem } })
.RunWith(KafkaProducer.PlainSink(ProducerSettings), _materializer);
var dateTimeStart = DateTime.UtcNow;
bool CheckTimeout(TimeSpan timeout)
{
return dateTimeStart.AddSeconds(timeout.TotalSeconds) > DateTime.UtcNow;
}
while (!task.Task.IsCompleted && CheckTimeout(TimeSpan.FromMinutes(1)))
{
consumer.Poll(TimeSpan.FromSeconds(1));
}
messagesReceived.Should().Be(100);
}
[Fact(Skip = "Not implemented yet")]
public async Task PlainSink_should_fail_stage_if_broker_unavailable()
{
var topic1 = CreateTopic(1);
await GivenInitializedTopic(topic1);
var config = ProducerSettings<Null, string>.Create(Sys, null, new StringSerializer(Encoding.UTF8))
.WithBootstrapServers("localhost:10092");
var probe = Source
.From(Enumerable.Range(1, 100))
.Select(c => c.ToString())
.Select(elem => new MessageAndMeta<Null, string> { Topic = topic1, Message = new Message<Null, string> { Value = elem } })
.Via(KafkaProducer.PlainFlow(config))
.RunWith(this.SinkProbe<DeliveryReport<Null, string>>(), _materializer);
probe.ExpectSubscription();
probe.OnError(new KafkaException(ErrorCode.Local_Transport));
}
}
}

Просмотреть файл

@ -0,0 +1,223 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.Serialization;
using System.Text;
using System.Threading.Tasks;
using Akka.Configuration;
using Akka.Streams.Dsl;
using Akka.Streams.Kafka.Dsl;
using Akka.Streams.Kafka.Settings;
using Akka.Streams.Supervision;
using Akka.Streams.TestKit;
using Confluent.Kafka;
using Confluent.Kafka.Serialization;
using FluentAssertions;
using Xunit;
using Xunit.Abstractions;
namespace Akka.Streams.Kafka.Tests.Integration
{
public class PlainSourceIntegrationTests : Akka.TestKit.Xunit2.TestKit
{
private const string KafkaUrl = "localhost:29092";
private const string InitialMsg = "initial msg in topic, required to create the topic before any consumer subscribes to it";
private readonly ActorMaterializer _materializer;
public static Config Default()
{
return ConfigurationFactory.ParseString("akka.loglevel = DEBUG")
.WithFallback(ConfigurationFactory.FromResource<ConsumerSettings<object, object>>(
"Akka.Streams.Kafka.reference.conf"));
}
public PlainSourceIntegrationTests(ITestOutputHelper output)
: base(Default(), nameof(PlainSourceIntegrationTests), output)
{
_materializer = Sys.Materializer();
}
private string Uuid { get; } = Guid.NewGuid().ToString();
private string CreateTopic(int number) => $"topic-{number}-{Uuid}";
private string CreateGroup(int number) => $"group-{number}-{Uuid}";
private ProducerSettings<Null, string> ProducerSettings =>
ProducerSettings<Null, string>.Create(Sys, null, new StringSerializer(Encoding.UTF8))
.WithBootstrapServers(KafkaUrl);
private async Task GivenInitializedTopic(string topic)
{
using (var producer = ProducerSettings.CreateKafkaProducer())
{
await producer.ProduceAsync(topic, new Message<Null, string> { Value = InitialMsg });
producer.Flush(TimeSpan.FromSeconds(1));
}
}
private ConsumerSettings<Null, string> CreateConsumerSettings(string group)
{
return ConsumerSettings<Null, string>.Create(Sys, null, new StringDeserializer(Encoding.UTF8))
.WithBootstrapServers(KafkaUrl)
.WithProperty("auto.offset.reset", "earliest")
.WithGroupId(group);
}
private async Task Produce(string topic, IEnumerable<int> range, ProducerSettings<Null, string> producerSettings)
{
await Source
.From(range)
.Select(elem => new MessageAndMeta<Null, string> { Topic = topic, Message = new Message<Null, string> { Value = elem.ToString() } })
.RunWith(KafkaProducer.PlainSink(producerSettings), _materializer);
}
private TestSubscriber.Probe<string> CreateProbe(ConsumerSettings<Null, string> consumerSettings, string topic, ISubscription sub)
{
return KafkaConsumer
.PlainSource(consumerSettings, sub)
.Where(c => !c.Value.Equals(InitialMsg))
.Select(c => c.Value)
.RunWith(this.SinkProbe<string>(), _materializer);
}
[Fact]
public async Task PlainSource_consumes_messages_from_KafkaProducer_with_topicPartition_assignment()
{
int elementsCount = 100;
var topic1 = CreateTopic(1);
var group1 = CreateGroup(1);
await GivenInitializedTopic(topic1);
await Produce(topic1, Enumerable.Range(1, elementsCount), ProducerSettings);
var consumerSettings = CreateConsumerSettings(group1);
var probe = CreateProbe(consumerSettings, topic1, Subscriptions.Assignment(new TopicPartition(topic1, 0)));
probe.Request(elementsCount);
foreach (var i in Enumerable.Range(1, elementsCount).Select(c => c.ToString()))
probe.ExpectNext(i, TimeSpan.FromSeconds(10));
probe.Cancel();
}
[Fact]
public async Task PlainSource_consumes_messages_from_KafkaProducer_with_topicPartitionOffset_assignment()
{
int elementsCount = 100;
int offset = 50;
var topic1 = CreateTopic(1);
var group1 = CreateGroup(1);
await GivenInitializedTopic(topic1);
await Produce(topic1, Enumerable.Range(1, elementsCount), ProducerSettings);
var consumerSettings = CreateConsumerSettings(group1);
var probe = CreateProbe(consumerSettings, topic1, Subscriptions.AssignmentWithOffset(new TopicPartitionOffset(topic1, 0, new Offset(offset))));
probe.Request(elementsCount);
foreach (var i in Enumerable.Range(offset, elementsCount - offset).Select(c => c.ToString()))
probe.ExpectNext(i, TimeSpan.FromSeconds(10));
probe.Cancel();
}
[Fact(Skip = "Flaky")]
public async Task PlainSource_consumes_messages_from_KafkaProducer_with_subscribe_to_topic()
{
int elementsCount = 100;
var topic1 = CreateTopic(1);
var group1 = CreateGroup(1);
await GivenInitializedTopic(topic1);
await Produce(topic1, Enumerable.Range(1, elementsCount), ProducerSettings);
var consumerSettings = CreateConsumerSettings(group1);
var probe = CreateProbe(consumerSettings, topic1, Subscriptions.Topics(topic1));
probe.Request(elementsCount);
foreach (var i in Enumerable.Range(1, elementsCount).Select(c => c.ToString()))
probe.ExpectNext(i, TimeSpan.FromSeconds(10));
probe.Cancel();
}
[Fact]
public async Task PlainSource_should_fail_stage_if_broker_unavailable()
{
var topic1 = CreateTopic(1);
var group1 = CreateGroup(1);
await GivenInitializedTopic(topic1);
var config = ConsumerSettings<Null, string>.Create(Sys, null, new StringDeserializer(Encoding.UTF8))
.WithBootstrapServers("localhost:10092")
.WithGroupId(group1);
var probe = CreateProbe(config, topic1, Subscriptions.Assignment(new TopicPartition(topic1, 0)));
probe.Request(1).ExpectError().Should().BeOfType<KafkaException>();
}
[Fact]
public async Task PlainSource_should_stop_on_deserialization_errors()
{
int elementsCount = 10;
var topic1 = CreateTopic(1);
var group1 = CreateGroup(1);
await Produce(topic1, Enumerable.Range(1, elementsCount), ProducerSettings);
var settings = ConsumerSettings<Null, int>.Create(Sys, null, new IntDeserializer())
.WithBootstrapServers(KafkaUrl)
.WithProperty("auto.offset.reset", "earliest")
.WithGroupId(group1);
var probe = KafkaConsumer
.PlainSource(settings, Subscriptions.Assignment(new TopicPartition(topic1, 0)))
.WithAttributes(ActorAttributes.CreateSupervisionStrategy(Deciders.StoppingDecider))
.Select(c => c.Value)
.RunWith(this.SinkProbe<int>(), _materializer);
var error = probe.Request(elementsCount).ExpectEvent(TimeSpan.FromSeconds(10));
error.Should().BeOfType<TestSubscriber.OnError>();
((TestSubscriber.OnError)error).Cause.Should().BeOfType<SerializationException>();
probe.Cancel();
}
[Fact]
public async Task PlainSource_should_resume_on_deserialization_errors()
{
Directive Decider(Exception cause) => cause is SerializationException
? Directive.Resume
: Directive.Stop;
int elementsCount = 10;
var topic1 = CreateTopic(1);
var group1 = CreateGroup(1);
await Produce(topic1, Enumerable.Range(1, elementsCount), ProducerSettings);
var settings = ConsumerSettings<Null, int>.Create(Sys, null, new IntDeserializer())
.WithBootstrapServers(KafkaUrl)
.WithProperty("auto.offset.reset", "earliest")
.WithGroupId(group1);
var probe = KafkaConsumer
.PlainSource(settings, Subscriptions.Assignment(new TopicPartition(topic1, 0)))
.WithAttributes(ActorAttributes.CreateSupervisionStrategy(Decider))
.Select(c => c.Value)
.RunWith(this.SinkProbe<int>(), _materializer);
probe.Request(elementsCount);
probe.ExpectNoMsg(TimeSpan.FromSeconds(10));
probe.Cancel();
}
}
}

Просмотреть файл

@ -0,0 +1,30 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<AssemblyTitle>Akka.Streams.Kafka</AssemblyTitle>
<Description>Kafka adapter for Akka.NET Streams</Description>
<TargetFramework>netstandard1.6</TargetFramework>
<Copyright>Copyright © 2017-2018 AkkaNetContrib</Copyright>
<Authors>AkkaNetContrib</Authors>
<VersionPrefix>0.5.0</VersionPrefix>
<VersionSuffix>beta</VersionSuffix>
<PackageTags>akka;actors;streams;kafka;reactive</PackageTags>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
<PackageIconUrl>http://getakka.net/images/akkalogo.png</PackageIconUrl>
<PackageProjectUrl>https://github.com/AkkaNetContrib/Alpakka</PackageProjectUrl>
<PackageLicenseUrl>https://github.com/AkkaNetContrib/Alpakka/blob/dev/LICENSE</PackageLicenseUrl>
<NetStandardImplicitPackageVersion>1.6.1</NetStandardImplicitPackageVersion>
<NoWarn>$(NoWarn);CS1591</NoWarn>
</PropertyGroup>
<ItemGroup>
<EmbeddedResource Include="reference.conf" />
</ItemGroup>
<ItemGroup>
<PackageReference Include="Akka.Streams" Version="1.3.5" />
<PackageReference Include="Confluent.Kafka" Version="1.0.0-experimental-2" />
<PackageReference Include="System.ValueTuple" Version="4.4.0" />
</ItemGroup>
</Project>

Просмотреть файл

@ -0,0 +1,43 @@
using System.Threading.Tasks;
using Akka.Streams.Dsl;
using Akka.Streams.Kafka.Settings;
using Akka.Streams.Kafka.Stages;
using Confluent.Kafka;
using Akka.Streams.Kafka.Messages;
namespace Akka.Streams.Kafka.Dsl
{
/// <summary>
/// Akka Stream connector for subscribing to Kafka topics.
/// </summary>
public static class KafkaConsumer
{
/// <summary>
/// The <see cref="PlainSource{K,V}"/> emits <see cref="ConsumerRecord"/> elements (as received from the underlying
/// <see cref="IConsumer{TKey,TValue}"/>). It has no support for committing offsets to Kafka. It can be used when the
/// offset is stored externally or with auto-commit (note that auto-commit is by default disabled).
/// The consumer application doesn't need to use Kafka's built-in offset storage and can store offsets in a store of its own
/// choosing. The primary use case for this is allowing the application to store both the offset and the results of the
/// consumption in the same system in a way that both the results and offsets are stored atomically.This is not always
/// possible, but when it is, it will make the consumption fully atomic and give "exactly once" semantics that are
/// stronger than the "at-least once" semantics you get with Kafka's offset commit functionality.
/// </summary>
public static Source<ConsumerRecord<K, V>, Task> PlainSource<K, V>(ConsumerSettings<K, V> settings, ISubscription subscription)
{
return Source.FromGraph(new KafkaSourceStage<K, V>(settings, subscription));
}
/// <summary>
/// The <see cref="CommittableSource{K,V}"/> makes it possible to commit offset positions to Kafka.
/// This is useful when "at-least once delivery" is desired, as each message will likely be
/// delivered one time but in failure cases could be duplicated.
/// Compared to auto-commit, this gives exact control over when a message is considered consumed.
/// If you need to store offsets in anything other than Kafka, <see cref="PlainSource{K,V}"/> should
/// be used instead of this API.
/// </summary>
public static Source<CommittableMessage<K, V>, Task> CommittableSource<K, V>(ConsumerSettings<K, V> settings, ISubscription subscription)
{
return Source.FromGraph(new CommittableConsumerStage<K, V>(settings, subscription));
}
}
}

Просмотреть файл

@ -0,0 +1,71 @@
using System.Threading.Tasks;
using Akka.Streams.Dsl;
using Akka.Streams.Kafka.Messages;
using Akka.Streams.Kafka.Settings;
using Akka.Streams.Kafka.Stages;
using Confluent.Kafka;
namespace Akka.Streams.Kafka.Dsl
{
/// <summary>
/// Akka Stream connector for publishing messages to Kafka topics.
/// </summary>
public static class KafkaProducer
{
/// <summary>
/// The `PlainSink` can be used for publishing records to Kafka topics.
/// </summary>
public static Sink<MessageAndMeta<TKey, TValue>, Task> PlainSink<TKey, TValue>(ProducerSettings<TKey, TValue> settings)
{
return Flow
.Create<MessageAndMeta<TKey, TValue>>()
.Via(PlainFlow(settings))
.ToMaterialized(Sink.Ignore<DeliveryReport<TKey, TValue>>(), Keep.Right);
}
/// <summary>
/// The `PlainSink` can be used for publishing records to Kafka topics.
/// </summary>
public static Sink<MessageAndMeta<TKey, TValue>, Task> PlainSink<TKey, TValue>(ProducerSettings<TKey, TValue> settings, IProducer<TKey, TValue> producer)
{
return Flow
.Create<MessageAndMeta<TKey, TValue>>()
.Via(PlainFlow(settings, producer))
.ToMaterialized(Sink.Ignore<DeliveryReport<TKey, TValue>>(), Keep.Right);
}
/// <summary>
/// Publish records to Kafka topics and then continue the flow. Possibility to pass through a message, which
/// can for example be a <see cref="CommittedOffsets"/> that can be committed later in the flow.
/// </summary>
public static Flow<MessageAndMeta<TKey, TValue>, DeliveryReport<TKey, TValue>, NotUsed> PlainFlow<TKey, TValue>(ProducerSettings<TKey, TValue> settings)
{
var flow = Flow.FromGraph(new ProducerStage<TKey, TValue>(
settings,
closeProducerOnStop: true,
producerProvider : settings.CreateKafkaProducer))
.SelectAsync(settings.Parallelism, x => x);
return string.IsNullOrEmpty(settings.DispatcherId)
? flow
: flow.WithAttributes(ActorAttributes.CreateDispatcher(settings.DispatcherId));
}
/// <summary>
/// Publish records to Kafka topics and then continue the flow. Possibility to pass through a message, which
/// can for example be a <see cref="CommitableOffset"/> that can be committed later in the flow.
/// </summary>
public static Flow<MessageAndMeta<TKey, TValue>, DeliveryReport<TKey, TValue>, NotUsed> PlainFlow<TKey, TValue>(ProducerSettings<TKey, TValue> settings, IProducer<TKey, TValue> producer)
{
var flow = Flow.FromGraph(new ProducerStage<TKey, TValue>(
settings,
closeProducerOnStop: false,
producerProvider: () => producer))
.SelectAsync(settings.Parallelism, x => x);
return string.IsNullOrEmpty(settings.DispatcherId)
? flow
: flow.WithAttributes(ActorAttributes.CreateDispatcher(settings.DispatcherId));
}
}
}

Просмотреть файл

@ -0,0 +1,71 @@
using System;
using Akka.Streams.Kafka.Dsl;
using Confluent.Kafka;
namespace Akka.Streams.Kafka.Messages
{
/// <summary>
/// Output element of <see cref="KafkaConsumer.CommittableSource{K,V}"/>.
/// The offset can be committed via the included <see cref="CommitableOffset"/>.
/// </summary>
public sealed class CommittableMessage<K, V>
{
public CommittableMessage(ConsumerRecord<K, V> record, CommitableOffset commitableOffset)
{
Record = record;
CommitableOffset = commitableOffset;
}
public ConsumerRecord<K, V> Record { get; }
public CommitableOffset CommitableOffset { get; }
}
/// <summary>
/// Included in <see cref="CommittableMessage{K,V}"/>. Makes it possible to
/// commit an offset or aggregate several offsets before committing.
/// Note that the offset position that is committed to Kafka will automatically
/// be one more than the `offset` of the message, because the committed offset
/// should be the next message your application will consume,
/// i.e. lastProcessedMessageOffset + 1.
/// </summary>
public class CommitableOffset
{
private readonly Func<CommittedOffsets> _task;
public CommitableOffset(Func<CommittedOffsets> task, PartitionOffset offset)
{
_task = task;
Offset = offset;
}
public PartitionOffset Offset { get; }
public CommittedOffsets Commit()
{
return _task();
}
}
/// <summary>
/// Offset position for a groupId, topic, partition.
/// </summary>
public class PartitionOffset
{
public PartitionOffset(string groupId, string topic, int partition, Offset offset)
{
GroupId = groupId;
Topic = topic;
Partition = partition;
Offset = offset;
}
public string GroupId { get; }
public string Topic { get; }
public int Partition { get; }
public Offset Offset { get; }
}
}

Просмотреть файл

@ -0,0 +1,90 @@
using System;
using System.Collections.Immutable;
using Akka.Actor;
using Akka.Configuration;
using Confluent.Kafka.Serialization;
namespace Akka.Streams.Kafka.Settings
{
public sealed class ConsumerSettings<TKey, TValue>
{
public static ConsumerSettings<TKey, TValue> Create(ActorSystem system, IDeserializer<TKey> keyDeserializer, IDeserializer<TValue> valueDeserializer)
{
var config = system.Settings.Config.GetConfig("akka.kafka.consumer");
return Create(config, keyDeserializer, valueDeserializer);
}
public static ConsumerSettings<TKey, TValue> Create(Config config, IDeserializer<TKey> keyDeserializer, IDeserializer<TValue> valueDeserializer)
{
if (config == null) throw new ArgumentNullException(nameof(config), "Kafka config for Akka.NET consumer was not provided");
return new ConsumerSettings<TKey, TValue>(
keyDeserializer: keyDeserializer,
valueDeserializer: valueDeserializer,
pollInterval: config.GetTimeSpan("poll-interval", TimeSpan.FromMilliseconds(50)),
pollTimeout: config.GetTimeSpan("poll-timeout", TimeSpan.FromMilliseconds(50)),
bufferSize: config.GetInt("buffer-size", 50),
dispatcherId: config.GetString("use-dispatcher", "akka.kafka.default-dispatcher"),
properties: ImmutableDictionary<string, object>.Empty);
}
public object this[string propertyKey] => this.Properties.GetValueOrDefault(propertyKey);
public IDeserializer<TKey> KeyDeserializer { get; }
public IDeserializer<TValue> ValueDeserializer { get; }
public TimeSpan PollInterval { get; }
public TimeSpan PollTimeout { get; }
public int BufferSize { get; }
public string DispatcherId { get; }
public IImmutableDictionary<string, object> Properties { get; }
public ConsumerSettings(IDeserializer<TKey> keyDeserializer, IDeserializer<TValue> valueDeserializer, TimeSpan pollInterval, TimeSpan pollTimeout, int bufferSize, string dispatcherId, IImmutableDictionary<string, object> properties)
{
KeyDeserializer = keyDeserializer;
ValueDeserializer = valueDeserializer;
PollInterval = pollInterval;
PollTimeout = pollTimeout;
BufferSize = bufferSize;
DispatcherId = dispatcherId;
Properties = properties;
}
public ConsumerSettings<TKey, TValue> WithBootstrapServers(string bootstrapServers) =>
Copy(properties: Properties.SetItem("bootstrap.servers", bootstrapServers));
public ConsumerSettings<TKey, TValue> WithClientId(string clientId) =>
Copy(properties: Properties.SetItem("client.id", clientId));
public ConsumerSettings<TKey, TValue> WithGroupId(string groupId) =>
Copy(properties: Properties.SetItem("group.id", groupId));
public ConsumerSettings<TKey, TValue> WithProperty(string key, object value) =>
Copy(properties: Properties.SetItem(key, value));
public ConsumerSettings<TKey, TValue> WithPollInterval(TimeSpan pollInterval) => Copy(pollInterval: pollInterval);
public ConsumerSettings<TKey, TValue> WithPollTimeout(TimeSpan pollTimeout) => Copy(pollTimeout: pollTimeout);
public ConsumerSettings<TKey, TValue> WithDispatcher(string dispatcherId) => Copy(dispatcherId: dispatcherId);
private ConsumerSettings<TKey, TValue> Copy(
IDeserializer<TKey> keyDeserializer = null,
IDeserializer<TValue> valueDeserializer = null,
TimeSpan? pollInterval = null,
TimeSpan? pollTimeout = null,
int? bufferSize = null,
string dispatcherId = null,
IImmutableDictionary<string, object> properties = null) =>
new ConsumerSettings<TKey, TValue>(
keyDeserializer: keyDeserializer ?? this.KeyDeserializer,
valueDeserializer: valueDeserializer ?? this.ValueDeserializer,
pollInterval: pollInterval ?? this.PollInterval,
pollTimeout: pollTimeout ?? this.PollTimeout,
bufferSize: bufferSize ?? this.BufferSize,
dispatcherId: dispatcherId ?? this.DispatcherId,
properties: properties ?? this.Properties);
public Confluent.Kafka.IConsumer<TKey, TValue> CreateKafkaConsumer() =>
new Confluent.Kafka.Consumer<TKey, TValue>(this.Properties, this.KeyDeserializer, this.ValueDeserializer);
}
}

Просмотреть файл

@ -0,0 +1,39 @@
using Confluent.Kafka;
namespace Akka.Streams.Kafka.Settings
{
public static class KafkaExtensions
{
public static bool IsBrokerErrorRetriable(Error error)
{
switch (error.Code)
{
case ErrorCode.InvalidMsg:
case ErrorCode.UnknownTopicOrPart:
case ErrorCode.LeaderNotAvailable:
case ErrorCode.NotLeaderForPartition:
case ErrorCode.RequestTimedOut:
case ErrorCode.GroupLoadInProress:
case ErrorCode.GroupCoordinatorNotAvailable:
case ErrorCode.NotCoordinatorForGroup:
case ErrorCode.NotEnoughReplicas:
case ErrorCode.NotEnoughReplicasAfterAppend:
return true;
}
return false;
}
public static bool IsLocalErrorRetriable(Error error)
{
switch (error.Code)
{
case ErrorCode.Local_Transport:
case ErrorCode.Local_AllBrokersDown:
return false;
}
return true;
}
}
}

Просмотреть файл

@ -0,0 +1,80 @@
using System;
using System.Collections.Generic;
using System.Collections.Immutable;
using Akka.Actor;
using Akka.Configuration;
using Confluent.Kafka.Serialization;
namespace Akka.Streams.Kafka.Settings
{
public sealed class ProducerSettings<TKey, TValue>
{
public ProducerSettings(ISerializer<TKey> keySerializer, ISerializer<TValue> valueSerializer, int parallelism, string dispatcherId, TimeSpan flushTimeout, IImmutableDictionary<string, object> properties)
{
KeySerializer = keySerializer;
ValueSerializer = valueSerializer;
Parallelism = parallelism;
DispatcherId = dispatcherId;
FlushTimeout = flushTimeout;
Properties = properties;
}
public ISerializer<TKey> KeySerializer { get; }
public ISerializer<TValue> ValueSerializer { get; }
public int Parallelism { get; }
public string DispatcherId { get; }
public TimeSpan FlushTimeout { get; }
public IImmutableDictionary<string, object> Properties { get; }
public ProducerSettings<TKey, TValue> WithBootstrapServers(string bootstrapServers) =>
WithProperty("bootstrap.servers", bootstrapServers);
public ProducerSettings<TKey, TValue> WithProperty(string key, object value) =>
Copy(properties: Properties.SetItem(key, value));
public ProducerSettings<TKey, TValue> WithParallelism(int parallelism) =>
Copy(parallelism: parallelism);
public ProducerSettings<TKey, TValue> WithDispatcher(string dispatcherId) =>
Copy(dispatcherId: dispatcherId);
private ProducerSettings<TKey, TValue> Copy(
ISerializer<TKey> keySerializer = null,
ISerializer<TValue> valueSerializer = null,
int? parallelism = null,
string dispatcherId = null,
TimeSpan? flushTimeout = null,
IImmutableDictionary<string, object> properties = null) =>
new ProducerSettings<TKey, TValue>(
keySerializer: keySerializer ?? this.KeySerializer,
valueSerializer: valueSerializer ?? this.ValueSerializer,
parallelism: parallelism ?? this.Parallelism,
dispatcherId: dispatcherId ?? this.DispatcherId,
flushTimeout: flushTimeout ?? this.FlushTimeout,
properties: properties ?? this.Properties);
public static ProducerSettings<TKey, TValue> Create(ActorSystem system, ISerializer<TKey> keySerializer, ISerializer<TValue> valueSerializer)
{
if (system == null) throw new ArgumentNullException(nameof(system));
var config = system.Settings.Config.GetConfig("akka.kafka.producer");
return Create(config, keySerializer, valueSerializer);
}
public static ProducerSettings<TKey, TValue> Create(Config config, ISerializer<TKey> keySerializer, ISerializer<TValue> valueSerializer)
{
if (config == null) throw new ArgumentNullException(nameof(config), "Kafka config for Akka.NET producer was not provided");
return new ProducerSettings<TKey, TValue>(
keySerializer: keySerializer,
valueSerializer: valueSerializer,
parallelism: config.GetInt("parallelism", 100),
dispatcherId: config.GetString("use-dispatcher", "akka.kafka.default-dispatcher"),
flushTimeout: config.GetTimeSpan("flush-timeout", TimeSpan.FromSeconds(2)),
properties: ImmutableDictionary<string, object>.Empty);
}
public Confluent.Kafka.IProducer<TKey, TValue> CreateKafkaProducer() =>
new Confluent.Kafka.Producer<TKey, TValue>(Properties, KeySerializer, ValueSerializer);
}
}

Просмотреть файл

@ -0,0 +1,51 @@
using System.Collections.Immutable;
using Confluent.Kafka;
namespace Akka.Streams.Kafka.Settings
{
public interface ISubscription { }
public interface IManualSubscription : ISubscription { }
public interface IAutoSubscription : ISubscription { }
internal sealed class TopicSubscription : IAutoSubscription
{
public TopicSubscription(IImmutableSet<string> topics)
{
Topics = topics;
}
public IImmutableSet<string> Topics { get; }
}
internal sealed class Assignment : IManualSubscription
{
public Assignment(IImmutableSet<TopicPartition> topicPartitions)
{
TopicPartitions = topicPartitions;
}
public IImmutableSet<TopicPartition> TopicPartitions { get; }
}
internal sealed class AssignmentWithOffset : IManualSubscription
{
public AssignmentWithOffset(IImmutableSet<TopicPartitionOffset> topicPartitions)
{
TopicPartitions = topicPartitions;
}
public IImmutableSet<TopicPartitionOffset> TopicPartitions { get; }
}
public static class Subscriptions
{
public static IAutoSubscription Topics(params string[] topics) =>
new TopicSubscription(topics.ToImmutableHashSet());
public static IManualSubscription Assignment(params TopicPartition[] topicPartitions) =>
new Assignment(topicPartitions.ToImmutableHashSet());
public static IManualSubscription AssignmentWithOffset(params TopicPartitionOffset[] topicPartitions) =>
new AssignmentWithOffset(topicPartitions.ToImmutableHashSet());
}
}

Просмотреть файл

@ -0,0 +1,223 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
using Akka.Streams.Kafka.Messages;
using Akka.Streams.Kafka.Settings;
using Akka.Streams.Stage;
using Confluent.Kafka;
using Akka.Streams.Supervision;
using System.Runtime.Serialization;
namespace Akka.Streams.Kafka.Stages
{
internal class CommittableConsumerStage<K, V> : GraphStageWithMaterializedValue<SourceShape<CommittableMessage<K, V>>, Task>
{
public Outlet<CommittableMessage<K, V>> Out { get; } = new Outlet<CommittableMessage<K, V>>("kafka.commitable.consumer.out");
public override SourceShape<CommittableMessage<K, V>> Shape { get; }
public ConsumerSettings<K, V> Settings { get; }
public ISubscription Subscription { get; }
public CommittableConsumerStage(ConsumerSettings<K, V> settings, ISubscription subscription)
{
Settings = settings;
Subscription = subscription;
Shape = new SourceShape<CommittableMessage<K, V>>(Out);
}
public override ILogicAndMaterializedValue<Task> CreateLogicAndMaterializedValue(Attributes inheritedAttributes)
{
var completion = new TaskCompletionSource<NotUsed>();
return new LogicAndMaterializedValue<Task>(new KafkaCommittableSourceStage<K, V>(this, inheritedAttributes, completion), completion.Task);
}
}
internal class KafkaCommittableSourceStage<K, V> : TimerGraphStageLogic
{
private readonly ConsumerSettings<K, V> _settings;
private readonly ISubscription _subscription;
private readonly Outlet<CommittableMessage<K, V>> _out;
private IConsumer<K, V> _consumer;
private Action<ConsumerRecord<K, V>> _messagesReceived;
private Action<IEnumerable<TopicPartition>> _partitionsAssigned;
private Action<IEnumerable<TopicPartition>> _partitionsRevoked;
private readonly Decider _decider;
private const string TimerKey = "PollTimer";
private readonly Queue<CommittableMessage<K, V>> _buffer;
private IEnumerable<TopicPartition> _assignedPartitions;
private volatile bool _isPaused;
private readonly TaskCompletionSource<NotUsed> _completion;
public KafkaCommittableSourceStage(CommittableConsumerStage<K, V> stage, Attributes attributes, TaskCompletionSource<NotUsed> completion) : base(stage.Shape)
{
_settings = stage.Settings;
_subscription = stage.Subscription;
_out = stage.Out;
_completion = completion;
_buffer = new Queue<CommittableMessage<K, V>>(stage.Settings.BufferSize);
var supervisionStrategy = attributes.GetAttribute<ActorAttributes.SupervisionStrategy>(null);
_decider = supervisionStrategy != null ? supervisionStrategy.Decider : Deciders.ResumingDecider;
SetHandler(_out, onPull: () =>
{
if (_buffer.Count > 0)
{
Push(_out, _buffer.Dequeue());
}
else
{
if (_isPaused)
{
_consumer.Resume(_assignedPartitions);
_isPaused = false;
Log.Debug("Polling resumed, buffer is empty");
}
PullQueue();
}
});
}
public override void PreStart()
{
base.PreStart();
_consumer = _settings.CreateKafkaConsumer();
Log.Debug($"Consumer started: {_consumer.Name}");
_consumer.OnRecord += HandleOnMessage;
_consumer.OnConsumeError += HandleConsumeError;
_consumer.OnError += HandleOnError;
_consumer.OnPartitionsAssigned += HandleOnPartitionsAssigned;
_consumer.OnPartitionsRevoked += HandleOnPartitionsRevoked;
switch (_subscription)
{
case TopicSubscription ts:
_consumer.Subscribe(ts.Topics);
break;
case Assignment a:
_consumer.Assign(a.TopicPartitions);
break;
case AssignmentWithOffset awo:
_consumer.Assign(awo.TopicPartitions);
break;
}
_messagesReceived = GetAsyncCallback<ConsumerRecord<K, V>>(MessagesReceived);
_partitionsAssigned = GetAsyncCallback<IEnumerable<TopicPartition>>(PartitionsAssigned);
_partitionsRevoked = GetAsyncCallback<IEnumerable<TopicPartition>>(PartitionsRevoked);
ScheduleRepeatedly(TimerKey, _settings.PollInterval);
}
public override void PostStop()
{
_consumer.OnRecord -= HandleOnMessage;
_consumer.OnConsumeError -= HandleConsumeError;
_consumer.OnError -= HandleOnError;
_consumer.OnPartitionsAssigned -= HandleOnPartitionsAssigned;
_consumer.OnPartitionsRevoked -= HandleOnPartitionsRevoked;
Log.Debug($"Consumer stopped: {_consumer.Name}");
_consumer.Dispose();
base.PostStop();
}
//
// Consumer's events
//
private void HandleOnMessage(object sender, ConsumerRecord<K, V> message) => _messagesReceived(message);
private void HandleConsumeError(object sender, ConsumerRecord message)
{
Log.Error(message.Error.Reason);
var exception = new SerializationException(message.Error.Reason);
switch (_decider(exception))
{
case Directive.Stop:
// Throw
_completion.TrySetException(exception);
FailStage(exception);
break;
case Directive.Resume:
// keep going
break;
case Directive.Restart:
// keep going
break;
}
}
private void HandleOnError(object sender, Error error)
{
Log.Error(error.Reason);
if (!KafkaExtensions.IsBrokerErrorRetriable(error) && !KafkaExtensions.IsLocalErrorRetriable(error))
{
var exception = new KafkaException(error);
FailStage(exception);
}
}
private void HandleOnPartitionsAssigned(object sender, List<TopicPartition> list)
{
_partitionsAssigned(list);
}
private void HandleOnPartitionsRevoked(object sender, List<TopicPartition> list)
{
_partitionsRevoked(list);
}
//
// Async callbacks
//
private void MessagesReceived(ConsumerRecord<K, V> message)
{
var consumer = _consumer;
var commitableOffset = new CommitableOffset(
() => consumer.Commit(),
new PartitionOffset("groupId", message.Topic, message.Partition, message.Offset));
_buffer.Enqueue(new CommittableMessage<K, V>(message, commitableOffset));
if (IsAvailable(_out))
{
Push(_out, _buffer.Dequeue());
}
}
private void PartitionsAssigned(IEnumerable<TopicPartition> partitions)
{
Log.Debug($"Partitions were assigned: {_consumer.Name}");
_consumer.Assign(partitions);
_assignedPartitions = partitions;
}
private void PartitionsRevoked(IEnumerable<TopicPartition> partitions)
{
Log.Debug($"Partitions were revoked: {_consumer.Name}");
_consumer.Unassign();
_assignedPartitions = null;
}
private void PullQueue()
{
_consumer.Poll(_settings.PollTimeout);
if (!_isPaused && _buffer.Count > _settings.BufferSize)
{
Log.Debug($"Polling paused, buffer is full");
_consumer.Pause(_assignedPartitions);
_isPaused = true;
}
}
protected override void OnTimer(object timerKey) => PullQueue();
}
}

Просмотреть файл

@ -0,0 +1,219 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
using Akka.Streams.Kafka.Settings;
using Akka.Streams.Stage;
using Confluent.Kafka;
using Akka.Streams.Supervision;
using System.Runtime.Serialization;
namespace Akka.Streams.Kafka.Stages
{
internal class KafkaSourceStage<K, V> : GraphStageWithMaterializedValue<SourceShape<ConsumerRecord<K, V>>, Task>
{
public Outlet<ConsumerRecord<K, V>> Out { get; } = new Outlet<ConsumerRecord<K, V>>("kafka.consumer.out");
public override SourceShape<ConsumerRecord<K, V>> Shape { get; }
public ConsumerSettings<K, V> Settings { get; }
public ISubscription Subscription { get; }
public KafkaSourceStage(ConsumerSettings<K, V> settings, ISubscription subscription)
{
Settings = settings;
Subscription = subscription;
Shape = new SourceShape<ConsumerRecord<K, V>>(Out);
Settings = settings;
Subscription = subscription;
}
public override ILogicAndMaterializedValue<Task> CreateLogicAndMaterializedValue(Attributes inheritedAttributes)
{
var completion = new TaskCompletionSource<NotUsed>();
return new LogicAndMaterializedValue<Task>(new KafkaSourceStageLogic<K, V>(this, inheritedAttributes, completion), completion.Task);
}
}
internal class KafkaSourceStageLogic<K, V> : TimerGraphStageLogic
{
private readonly ConsumerSettings<K, V> _settings;
private readonly ISubscription _subscription;
private readonly Outlet<ConsumerRecord<K, V>> _out;
private IConsumer<K, V> _consumer;
private Action<ConsumerRecord<K, V>> _messagesReceived;
private Action<IEnumerable<TopicPartition>> _partitionsAssigned;
private Action<IEnumerable<TopicPartition>> _partitionsRevoked;
private readonly Decider _decider;
private const string TimerKey = "PollTimer";
private readonly Queue<ConsumerRecord<K, V>> _buffer;
private IEnumerable<TopicPartition> _assignedPartitions;
private volatile bool _isPaused;
private readonly TaskCompletionSource<NotUsed> _completion;
public KafkaSourceStageLogic(KafkaSourceStage<K, V> stage, Attributes attributes, TaskCompletionSource<NotUsed> completion) : base(stage.Shape)
{
_settings = stage.Settings;
_subscription = stage.Subscription;
_out = stage.Out;
_completion = completion;
_buffer = new Queue<ConsumerRecord<K, V>>(stage.Settings.BufferSize);
var supervisionStrategy = attributes.GetAttribute<ActorAttributes.SupervisionStrategy>(null);
_decider = supervisionStrategy != null ? supervisionStrategy.Decider : Deciders.ResumingDecider;
SetHandler(_out, onPull:() =>
{
if (_buffer.Count > 0)
{
Push(_out, _buffer.Dequeue());
}
else
{
if (_isPaused)
{
_consumer.Resume(_assignedPartitions);
_isPaused = false;
Log.Debug("Polling resumed, buffer is empty");
}
PullQueue();
}
});
}
public override void PreStart()
{
base.PreStart();
_consumer = _settings.CreateKafkaConsumer();
Log.Debug($"Consumer started: {_consumer.Name}");
_consumer.OnRecord += HandleOnMessage;
_consumer.OnConsumeError += HandleConsumeError;
_consumer.OnError += HandleOnError;
_consumer.OnPartitionsAssigned += HandleOnPartitionsAssigned;
_consumer.OnPartitionsRevoked += HandleOnPartitionsRevoked;
switch (_subscription)
{
case TopicSubscription ts:
_consumer.Subscribe(ts.Topics);
break;
case Assignment a:
_consumer.Assign(a.TopicPartitions);
break;
case AssignmentWithOffset awo:
_consumer.Assign(awo.TopicPartitions);
break;
}
_messagesReceived = GetAsyncCallback<ConsumerRecord<K, V>>(MessagesReceived);
_partitionsAssigned = GetAsyncCallback<IEnumerable<TopicPartition>>(PartitionsAssigned);
_partitionsRevoked = GetAsyncCallback<IEnumerable<TopicPartition>>(PartitionsRevoked);
ScheduleRepeatedly(TimerKey, _settings.PollInterval);
}
public override void PostStop()
{
_consumer.OnRecord -= HandleOnMessage;
_consumer.OnConsumeError -= HandleConsumeError;
_consumer.OnError -= HandleOnError;
_consumer.OnPartitionsAssigned -= HandleOnPartitionsAssigned;
_consumer.OnPartitionsRevoked -= HandleOnPartitionsRevoked;
Log.Debug($"Consumer stopped: {_consumer.Name}");
_consumer.Dispose();
base.PostStop();
}
//
// Consumer's events
//
private void HandleOnMessage(object sender, ConsumerRecord<K, V> message) => _messagesReceived(message);
private void HandleConsumeError(object sender, ConsumerRecord message)
{
Log.Error(message.Error.Reason);
var exception = new SerializationException(message.Error.Reason);
switch (_decider(exception))
{
case Directive.Stop:
// Throw
_completion.TrySetException(exception);
FailStage(exception);
break;
case Directive.Resume:
// keep going
break;
case Directive.Restart:
// keep going
break;
}
}
private void HandleOnError(object sender, Error error)
{
Log.Error(error.Reason);
if (!KafkaExtensions.IsBrokerErrorRetriable(error) && !KafkaExtensions.IsLocalErrorRetriable(error))
{
var exception = new KafkaException(error);
FailStage(exception);
}
}
private void HandleOnPartitionsAssigned(object sender, List<TopicPartition> list)
{
_partitionsAssigned(list);
}
private void HandleOnPartitionsRevoked(object sender, List<TopicPartition> list)
{
_partitionsRevoked(list);
}
//
// Async callbacks
//
private void MessagesReceived(ConsumerRecord<K, V> message)
{
_buffer.Enqueue(message);
if (IsAvailable(_out))
{
Push(_out, _buffer.Dequeue());
}
}
private void PartitionsAssigned(IEnumerable<TopicPartition> partitions)
{
Log.Debug($"Partitions were assigned: {_consumer.Name}");
_consumer.Assign(partitions);
_assignedPartitions = partitions;
}
private void PartitionsRevoked(IEnumerable<TopicPartition> partitions)
{
Log.Debug($"Partitions were revoked: {_consumer.Name}");
_consumer.Unassign();
_assignedPartitions = null;
}
private void PullQueue()
{
_consumer.Poll(_settings.PollTimeout);
if (!_isPaused && _buffer.Count > _settings.BufferSize)
{
Log.Debug($"Polling paused, buffer is full");
_consumer.Pause(_assignedPartitions);
_isPaused = true;
}
}
protected override void OnTimer(object timerKey) => PullQueue();
}
}

Просмотреть файл

@ -0,0 +1,164 @@
using System;
using System.Threading.Tasks;
using Akka.Streams.Kafka.Settings;
using Akka.Streams.Stage;
using Akka.Streams.Supervision;
using Akka.Util.Internal;
using Confluent.Kafka;
namespace Akka.Streams.Kafka.Stages
{
internal sealed class ProducerStage<K, V> : GraphStage<FlowShape<MessageAndMeta<K, V>, Task<DeliveryReport<K, V>>>>
{
public ProducerSettings<K, V> Settings { get; }
public bool CloseProducerOnStop { get; }
public Func<IProducer<K, V>> ProducerProvider { get; }
public Inlet<MessageAndMeta<K, V>> In { get; } = new Inlet<MessageAndMeta<K, V>>("kafka.producer.in");
public Outlet<Task<DeliveryReport<K, V>>> Out { get; } = new Outlet<Task<DeliveryReport<K, V>>>("kafka.producer.out");
public ProducerStage(
ProducerSettings<K, V> settings,
bool closeProducerOnStop,
Func<IProducer<K, V>> producerProvider)
{
Settings = settings;
CloseProducerOnStop = closeProducerOnStop;
ProducerProvider = producerProvider;
Shape = new FlowShape<MessageAndMeta<K, V>, Task<DeliveryReport<K, V>>>(In, Out);
}
public override FlowShape<MessageAndMeta<K, V>, Task<DeliveryReport<K, V>>> Shape { get; }
protected override GraphStageLogic CreateLogic(Attributes inheritedAttributes)
{
return new ProducerStageLogic<K, V>(this, inheritedAttributes);
}
}
internal sealed class ProducerStageLogic<K, V> : GraphStageLogic
{
private readonly ProducerStage<K, V> _stage;
private IProducer<K, V> _producer;
private readonly TaskCompletionSource<NotUsed> _completionState = new TaskCompletionSource<NotUsed>();
private volatile bool _inIsClosed;
private readonly AtomicCounter _awaitingConfirmation = new AtomicCounter(0);
public ProducerStageLogic(ProducerStage<K, V> stage, Attributes attributes) : base(stage.Shape)
{
_stage = stage;
var supervisionStrategy = attributes.GetAttribute<ActorAttributes.SupervisionStrategy>(null);
var decider = supervisionStrategy != null ? supervisionStrategy.Decider : Deciders.ResumingDecider;
SetHandler(_stage.In,
onPush: () =>
{
var msg = Grab(_stage.In);
var result = new TaskCompletionSource<DeliveryReport<K, V>>();
_producer.Produce(msg.TopicPartition, msg.Message, report =>
{
if (!report.Error.HasError)
{
result.SetResult(report);
}
else
{
var exception = new KafkaException(report.Error);
switch (decider(exception))
{
case Directive.Stop:
if (_stage.CloseProducerOnStop)
{
_producer.Dispose();
}
FailStage(exception);
break;
default:
result.SetException(exception);
break;
}
}
if (_awaitingConfirmation.DecrementAndGet() == 0 && _inIsClosed)
{
CheckForCompletion();
}
});
_awaitingConfirmation.IncrementAndGet();
Push(_stage.Out, result.Task);
},
onUpstreamFinish: () =>
{
_inIsClosed = true;
_completionState.SetResult(NotUsed.Instance);
CheckForCompletion();
},
onUpstreamFailure: exception =>
{
_inIsClosed = true;
_completionState.SetException(exception);
CheckForCompletion();
});
SetHandler(_stage.Out, onPull: () =>
{
TryPull(_stage.In);
});
}
public override void PreStart()
{
base.PreStart();
_producer = _stage.ProducerProvider();
Log.Debug($"Producer started: {_producer.Name}");
_producer.OnError += OnProducerError;
}
public override void PostStop()
{
Log.Debug("Stage completed");
_producer.OnError -= OnProducerError;
if (_stage.CloseProducerOnStop)
{
_producer.Flush(_stage.Settings.FlushTimeout);
_producer.Dispose();
Log.Debug($"Producer closed: {_producer.Name}");
}
base.PostStop();
}
private void OnProducerError(object sender, Error error)
{
Log.Error(error.Reason);
if (!KafkaExtensions.IsBrokerErrorRetriable(error) && !KafkaExtensions.IsLocalErrorRetriable(error))
{
var exception = new KafkaException(error);
FailStage(exception);
}
}
public void CheckForCompletion()
{
if (IsClosed(_stage.In) && _awaitingConfirmation.Current == 0)
{
var completionTask = _completionState.Task;
if (completionTask.IsFaulted || completionTask.IsCanceled)
{
FailStage(completionTask.Exception);
}
else if (completionTask.IsCompleted)
{
CompleteStage();
}
}
}
}
}

Просмотреть файл

@ -0,0 +1,42 @@
# Properties for akka.kafka.ProducerSettings can be
# defined in this section or a configuration section with
# the same layout.
akka.kafka.producer {
# Tuning parameter of how many sends that can run in parallel.
parallelism = 100
# How long to wait for `Producer.Flush`
flush-timeout = 10s
# Fully qualified config path which holds the dispatcher configuration
# to be used by the producer stages. Some blocking may occur.
# When this value is empty, the dispatcher configured for the stream
# will be used.
use-dispatcher = "akka.kafka.default-dispatcher"
}
# Properties for akka.kafka.ConsumerSettings can be
# defined in this section or a configuration section with
# the same layout.
akka.kafka.consumer {
# Tuning property of scheduled polls.
poll-interval = 50ms
# Tuning property of the `KafkaConsumer.Poll` parameter.
# Note that non-zero value means that blocking of the thread that
# is executing the stage will be blocked.
poll-timeout = 50ms
buffer-size = 128
# Fully qualified config path which holds the dispatcher configuration
# to be used by the KafkaConsumerActor. Some blocking may occur.
use-dispatcher = "akka.kafka.default-dispatcher"
}
# The dispatcher that will be used by default by consumer and
# producer stages.
akka.kafka.default-dispatcher {
type = "Dispatcher"
executor = "default-executor"
}