diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 00000000..e69de29b diff --git a/404.html b/404.html new file mode 100644 index 00000000..ab503d44 --- /dev/null +++ b/404.html @@ -0,0 +1,2715 @@ + + + + + + + + + + + + + + + + + + + + + PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
404 - Not found

+ + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/Admin/AlterConfigsOptions/index.html b/api/RdKafka/Admin/AlterConfigsOptions/index.html new file mode 100644 index 00000000..5ae146bc --- /dev/null +++ b/api/RdKafka/Admin/AlterConfigsOptions/index.html @@ -0,0 +1,2890 @@ + + + + + + + + + + + + + + + + + + + + + + + + + AlterConfigsOptions - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Class AlterConfigsOptions

+

Class \RdKafka\Admin\AlterConfigsOptions

+ +

Methods

+

__construct()

+
public __construct ( 
+    \RdKafka $kafka
+ ): 
+
+
+
Parameters
+
kafka \RdKafka
+
+

Test Coverage 💚

+
    +
  • 💚 + Lines: 100% (1 / 1)
  • +
  • 💚 + Methods: 100% (1 / 1)
  • +
+ + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/Admin/Client/index.html b/api/RdKafka/Admin/Client/index.html new file mode 100644 index 00000000..03bc02ba --- /dev/null +++ b/api/RdKafka/Admin/Client/index.html @@ -0,0 +1,3455 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Client - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Class Client

+

Class \RdKafka\Admin\Client

+

Methods

+

alterConfigs()

+
public alterConfigs ( 
+    \RdKafka\Admin\ConfigResource[] $resources, 
+    ?\RdKafka\Admin\AlterConfigsOptions $options = null
+ ): \RdKafka\Admin\ConfigResourceResult[]
+
+
+
Parameters
+
resources \RdKafka\Admin\ConfigResource[]
+
options ?\RdKafka\Admin\AlterConfigsOptions
+
Returns
+
\RdKafka\Admin\ConfigResourceResult[]
+
+

createPartitions()

+
public createPartitions ( 
+    \RdKafka\Admin\NewPartitions[] $partitions, 
+    ?\RdKafka\Admin\CreatePartitionsOptions $options = null
+ ): \RdKafka\Admin\TopicResult[]
+
+
+
Parameters
+
partitions \RdKafka\Admin\NewPartitions[]
+
options ?\RdKafka\Admin\CreatePartitionsOptions
+
Returns
+
\RdKafka\Admin\TopicResult[]
+
+

createTopics()

+
public createTopics ( 
+    \RdKafka\Admin\NewTopic[] $topics, 
+    ?\RdKafka\Admin\CreateTopicsOptions $options = null
+ ): \RdKafka\Admin\TopicResult[]
+
+
+
Parameters
+
topics \RdKafka\Admin\NewTopic[]
+
options ?\RdKafka\Admin\CreateTopicsOptions
+
Returns
+
\RdKafka\Admin\TopicResult[]
+
+

deleteConsumerGroupOffsets()

+
public deleteConsumerGroupOffsets ( 
+    \RdKafka\Admin\DeleteConsumerGroupOffsets $offsets, 
+    ?\RdKafka\Admin\DeleteConsumerGroupOffsetsOptions $options = null
+ ): \RdKafka\Admin\GroupResult[]
+
+
+
Parameters
+
offsets \RdKafka\Admin\DeleteConsumerGroupOffsets
+
options ?\RdKafka\Admin\DeleteConsumerGroupOffsetsOptions
+
Returns
+
\RdKafka\Admin\GroupResult[]
+
+

deleteGroups()

+
public deleteGroups ( 
+    \RdKafka\Admin\DeleteGroup[] $groups, 
+    ?\RdKafka\Admin\DeleteGroupsOptions $options = null
+ ): \RdKafka\Admin\GroupResult[]
+
+
+
Parameters
+
groups \RdKafka\Admin\DeleteGroup[]
+
options ?\RdKafka\Admin\DeleteGroupsOptions
+
Returns
+
\RdKafka\Admin\GroupResult[]
+
+

deleteRecords()

+
public deleteRecords ( 
+    \RdKafka\Admin\DeleteRecords[] $records, 
+    ?\RdKafka\Admin\DeleteRecordsOptions $options = null
+ ): \RdKafka\TopicPartition[]
+
+
+
Parameters
+
records \RdKafka\Admin\DeleteRecords[]
+
options ?\RdKafka\Admin\DeleteRecordsOptions
+
Returns
+
\RdKafka\TopicPartition[]
+
+

deleteTopics()

+
public deleteTopics ( 
+    \RdKafka\Admin\DeleteTopic[] $topics, 
+    ?\RdKafka\Admin\DeleteTopicsOptions $options = null
+ ): \RdKafka\Admin\TopicResult[]
+
+
+
Parameters
+
topics \RdKafka\Admin\DeleteTopic[]
+
options ?\RdKafka\Admin\DeleteTopicsOptions
+
Returns
+
\RdKafka\Admin\TopicResult[]
+
+

describeConfigs()

+
public describeConfigs ( 
+    \RdKafka\Admin\ConfigResource[] $resources, 
+    ?\RdKafka\Admin\DescribeConfigsOptions $options = null
+ ): \RdKafka\Admin\ConfigResourceResult[]
+
+
+
Parameters
+
resources \RdKafka\Admin\ConfigResource[]
+
options ?\RdKafka\Admin\DescribeConfigsOptions
+
Returns
+
\RdKafka\Admin\ConfigResourceResult[]
+
+

fromConf()

+
public static fromConf ( 
+    \RdKafka\Conf $conf
+ ): self
+
+
+
Parameters
+
conf \RdKafka\Conf
+
Returns
+
self
+
+

fromConsumer()

+
public static fromConsumer ( 
+    \RdKafka\Consumer $consumer
+ ): self
+
+
+
Parameters
+
consumer \RdKafka\Consumer
+
Returns
+
self
+
+

fromProducer()

+
public static fromProducer ( 
+    \RdKafka\Producer $producer
+ ): self
+
+
+
Parameters
+
producer \RdKafka\Producer
+
Returns
+
self
+
+

getMetadata()

+
public getMetadata ( 
+    bool $all_topics, 
+    ?\RdKafka\Topic $only_topic, 
+    int $timeout_ms
+ ): \RdKafka\Metadata
+
+
+
Parameters
+
all_topics bool
+
only_topic ?\RdKafka\Topic
+
timeout_ms int
+
Returns
+
\RdKafka\Metadata
+
+

newAlterConfigsOptions()

+
public newAlterConfigsOptions (  ): \RdKafka\Admin\AlterConfigsOptions
+
+
+
Returns
+
\RdKafka\Admin\AlterConfigsOptions
+
+

newCreatePartitionsOptions()

+
public newCreatePartitionsOptions (  ): \RdKafka\Admin\CreatePartitionsOptions
+
+
+
Returns
+
\RdKafka\Admin\CreatePartitionsOptions
+
+

newCreateTopicsOptions()

+
public newCreateTopicsOptions (  ): \RdKafka\Admin\CreateTopicsOptions
+
+
+
Returns
+
\RdKafka\Admin\CreateTopicsOptions
+
+

newDeleteConsumerGroupOffsetsOptions()

+
public newDeleteConsumerGroupOffsetsOptions (  ): \RdKafka\Admin\DeleteConsumerGroupOffsetsOptions
+
+
+
Returns
+
\RdKafka\Admin\DeleteConsumerGroupOffsetsOptions
+
+

newDeleteGroupsOptions()

+
public newDeleteGroupsOptions (  ): \RdKafka\Admin\DeleteGroupsOptions
+
+
+
Returns
+
\RdKafka\Admin\DeleteGroupsOptions
+
+

newDeleteRecordsOptions()

+
public newDeleteRecordsOptions (  ): \RdKafka\Admin\DeleteRecordsOptions
+
+
+
Returns
+
\RdKafka\Admin\DeleteRecordsOptions
+
+

newDeleteTopicsOptions()

+
public newDeleteTopicsOptions (  ): \RdKafka\Admin\DeleteTopicsOptions
+
+
+
Returns
+
\RdKafka\Admin\DeleteTopicsOptions
+
+

newDescribeConfigsOptions()

+
public newDescribeConfigsOptions (  ): \RdKafka\Admin\DescribeConfigsOptions
+
+
+
Returns
+
\RdKafka\Admin\DescribeConfigsOptions
+
+

setWaitForResultEventTimeout()

+
public setWaitForResultEventTimeout ( 
+    int $timeoutMs
+ ): void
+
+
+
Parameters
+
timeoutMs int
+
+

Test Coverage 💛

+
    +
  • 💛 + Lines: 83.51% (162 / 194)
  • +
  • 💛 + Methods: 79.17% (19 / 24)
  • +
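
A minimal usage sketch based on the methods documented above; the broker address, topic name, partition/replication counts, and timeout are placeholder assumptions:

$conf = new \RdKafka\Conf();
$conf->set('bootstrap.servers', 'kafka:9092'); // assumed broker address
$client = \RdKafka\Admin\Client::fromConf($conf);
$client->setWaitForResultEventTimeout(10000); // wait up to 10s for admin result events

// Create a topic with 3 partitions and replication factor 1, then inspect the TopicResult list.
$results = $client->createTopics([new \RdKafka\Admin\NewTopic('example-topic', 3, 1)]);
foreach ($results as $result) {
    printf("%s: %s\n", $result->name, $result->errorString ?? 'created');
}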
+ + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/Admin/ConfigEntry/index.html b/api/RdKafka/Admin/ConfigEntry/index.html new file mode 100644 index 00000000..f474aaf3 --- /dev/null +++ b/api/RdKafka/Admin/ConfigEntry/index.html @@ -0,0 +1,3086 @@ + + + + + + + + + + + + + + + + + + + + + + + + + ConfigEntry - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Class ConfigEntry

+

Class \RdKafka\Admin\ConfigEntry

+

Properties

+

isDefault

+
 public bool isDefault
+
+

isReadOnly

+
 public bool isReadOnly
+
+

isSensitive

+
 public bool isSensitive
+
+

isSynonym

+
 public bool isSynonym
+
+

name

+
 public string name
+
+

source

+
 public int source
+
+

synonyms

+
 public \RdKafka\Admin\ConfigEntry[] synonyms
+
+

value

+
 public ?string value
+
+

Methods

+

__construct()

+
public __construct ( 
+    \FFI\CData $entry
+ ): 
+
+
+
Parameters
+
entry \FFI\CData
+
+

Test Coverage 💚

+
    +
  • 💚 + Lines: 100% (13 / 13)
  • +
  • 💚 + Methods: 100% (1 / 1)
  • +
+ + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/Admin/ConfigResource/index.html b/api/RdKafka/Admin/ConfigResource/index.html new file mode 100644 index 00000000..6be4f5cd --- /dev/null +++ b/api/RdKafka/Admin/ConfigResource/index.html @@ -0,0 +1,2964 @@ + + + + + + + + + + + + + + + + + + + + + + + + + ConfigResource - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Class ConfigResource

+

Class \RdKafka\Admin\ConfigResource

+

Methods

+

__construct()

+
public __construct ( 
+    int $type, 
+    string $name
+ ): 
+
+
+
Parameters
+
type int
+
name string
+
+

__destruct()

+
public __destruct (  ): 
+
+

getCData()

+
public getCData (  ): \FFI\CData
+
+
+
Returns
+
\FFI\CData
+
+

setConfig()

+
public setConfig ( 
+    string $name, 
+    string $value
+ ): void
+
+
+
Parameters
+
name string
+
value string
+
+

Test Coverage 💚

+
    +
  • 💚 + Lines: 92.31% (12 / 13)
  • +
  • 💛 + Methods: 75% (3 / 4)
  • +
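
An illustrative sketch of using a ConfigResource with the admin client's alterConfigs()/describeConfigs(); the RD_KAFKA_RESOURCE_TOPIC constant, the retention.ms property, and the topic name are assumptions drawn from librdkafka:

$resource = new \RdKafka\Admin\ConfigResource(RD_KAFKA_RESOURCE_TOPIC, 'example-topic'); // constant assumed
$resource->setConfig('retention.ms', '86400000'); // broker-side topic property
$results = $client->alterConfigs([$resource]);    // $client: \RdKafka\Admin\Client (see the Client page)

// Read the configuration back with a fresh resource (no setConfig needed for describe).
$described = $client->describeConfigs([new \RdKafka\Admin\ConfigResource(RD_KAFKA_RESOURCE_TOPIC, 'example-topic')]);
foreach ($described[0]->configs as $entry) {
    printf("%s = %s\n", $entry->name, $entry->value ?? '(null)');
}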
+ + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/Admin/ConfigResourceResult/index.html b/api/RdKafka/Admin/ConfigResourceResult/index.html new file mode 100644 index 00000000..c6b015da --- /dev/null +++ b/api/RdKafka/Admin/ConfigResourceResult/index.html @@ -0,0 +1,3023 @@ + + + + + + + + + + + + + + + + + + + + + + + + + ConfigResourceResult - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Class ConfigResourceResult

+

Class \RdKafka\Admin\ConfigResourceResult

+

Properties

+

configs

+
 public \RdKafka\Admin\ConfigEntry[] configs
+
+

error

+
 public int error
+
+

errorString

+
 public ?string errorString
+
+

name

+
 public string name
+
+

type

+
 public int type
+
+

Methods

+

__construct()

+
public __construct ( 
+    \FFI\CData $result
+ ): 
+
+
+
Parameters
+
result \FFI\CData
+
+

Test Coverage 💚

+
    +
  • 💚 + Lines: 100% (10 / 10)
  • +
  • 💚 + Methods: 100% (1 / 1)
  • +
+ + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/Admin/CreatePartitionsOptions/index.html b/api/RdKafka/Admin/CreatePartitionsOptions/index.html new file mode 100644 index 00000000..8f8b0fe9 --- /dev/null +++ b/api/RdKafka/Admin/CreatePartitionsOptions/index.html @@ -0,0 +1,2890 @@ + + + + + + + + + + + + + + + + + + + + + + + + + CreatePartitionsOptions - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Class CreatePartitionsOptions

+

Class \RdKafka\Admin\CreatePartitionsOptions

+ +

Methods

+

__construct()

+
public __construct ( 
+    \RdKafka $kafka
+ ): 
+
+
+
Parameters
+
kafka \RdKafka
+
+

Test Coverage 💚

+
    +
  • 💚 + Lines: 100% (1 / 1)
  • +
  • 💚 + Methods: 100% (1 / 1)
  • +
+ + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/Admin/CreateTopicsOptions/index.html b/api/RdKafka/Admin/CreateTopicsOptions/index.html new file mode 100644 index 00000000..fe39308a --- /dev/null +++ b/api/RdKafka/Admin/CreateTopicsOptions/index.html @@ -0,0 +1,2890 @@ + + + + + + + + + + + + + + + + + + + + + + + + + CreateTopicsOptions - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Class CreateTopicsOptions

+

Class \RdKafka\Admin\CreateTopicsOptions

+ +

Methods

+

__construct()

+
public __construct ( 
+    \RdKafka $kafka
+ ): 
+
+
+
Parameters
+
kafka \RdKafka
+
+

Test Coverage 💚

+
    +
  • 💚 + Lines: 100% (1 / 1)
  • +
  • 💚 + Methods: 100% (1 / 1)
  • +
+ + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/Admin/DeleteConsumerGroupOffsets/index.html b/api/RdKafka/Admin/DeleteConsumerGroupOffsets/index.html new file mode 100644 index 00000000..9956e877 --- /dev/null +++ b/api/RdKafka/Admin/DeleteConsumerGroupOffsets/index.html @@ -0,0 +1,2935 @@ + + + + + + + + + + + + + + + + + + + + + + + + + DeleteConsumerGroupOffsets - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Class DeleteConsumerGroupOffsets

+

Class \RdKafka\Admin\DeleteConsumerGroupOffsets

+

Methods

+

__construct()

+
public __construct ( 
+    string $group, 
+    \RdKafka\TopicPartition $partitions
+ ): 
+
+
+
Parameters
+
group string
+
partitions \RdKafka\TopicPartition
+
+

__destruct()

+
public __destruct (  ): 
+
+

getCData()

+
public getCData (  ): \FFI\CData
+
+
+
Returns
+
\FFI\CData
+
+

Test Coverage ❤️

+
    +
  • ❤️ + Lines: 0% (0 / 10)
  • +
  • ❤️ + Methods: 0% (0 / 3)
  • +
+ + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/Admin/DeleteConsumerGroupOffsetsOptions/index.html b/api/RdKafka/Admin/DeleteConsumerGroupOffsetsOptions/index.html new file mode 100644 index 00000000..60146e5b --- /dev/null +++ b/api/RdKafka/Admin/DeleteConsumerGroupOffsetsOptions/index.html @@ -0,0 +1,2890 @@ + + + + + + + + + + + + + + + + + + + + + + + + + DeleteConsumerGroupOffsetsOptions - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Class DeleteConsumerGroupOffsetsOptions

+

Class \RdKafka\Admin\DeleteConsumerGroupOffsetsOptions

+ +

Methods

+

__construct()

+
public __construct ( 
+    \RdKafka $kafka
+ ): 
+
+
+
Parameters
+
kafka \RdKafka
+
+

Test Coverage ❤️

+
    +
  • ❤️ + Lines: 0% (0 / 1)
  • +
  • ❤️ + Methods: 0% (0 / 1)
  • +
+ + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/Admin/DeleteGroup/index.html b/api/RdKafka/Admin/DeleteGroup/index.html new file mode 100644 index 00000000..82cc7e00 --- /dev/null +++ b/api/RdKafka/Admin/DeleteGroup/index.html @@ -0,0 +1,2933 @@ + + + + + + + + + + + + + + + + + + + + + + + + + DeleteGroup - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Class DeleteGroup

+

Class \RdKafka\Admin\DeleteGroup

+

Methods

+

__construct()

+
public __construct ( 
+    string $name
+ ): 
+
+
+
Parameters
+
name string
+
+

__destruct()

+
public __destruct (  ): 
+
+

getCData()

+
public getCData (  ): \FFI\CData
+
+
+
Returns
+
\FFI\CData
+
+

Test Coverage ❤️

+
    +
  • ❤️ + Lines: 0% (0 / 8)
  • +
  • ❤️ + Methods: 0% (0 / 3)
  • +
+ + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/Admin/DeleteGroupsOptions/index.html b/api/RdKafka/Admin/DeleteGroupsOptions/index.html new file mode 100644 index 00000000..4ec5dcc3 --- /dev/null +++ b/api/RdKafka/Admin/DeleteGroupsOptions/index.html @@ -0,0 +1,2890 @@ + + + + + + + + + + + + + + + + + + + + + + + + + DeleteGroupsOptions - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Class DeleteGroupsOptions

+

Class \RdKafka\Admin\DeleteGroupsOptions

+ +

Methods

+

__construct()

+
public __construct ( 
+    \RdKafka $kafka
+ ): 
+
+
+
Parameters
+
kafka \RdKafka
+
+

Test Coverage ❤️

+
    +
  • ❤️ + Lines: 0% (0 / 1)
  • +
  • ❤️ + Methods: 0% (0 / 1)
  • +
+ + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/Admin/DeleteRecords/index.html b/api/RdKafka/Admin/DeleteRecords/index.html new file mode 100644 index 00000000..456ea808 --- /dev/null +++ b/api/RdKafka/Admin/DeleteRecords/index.html @@ -0,0 +1,2933 @@ + + + + + + + + + + + + + + + + + + + + + + + + + DeleteRecords - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Class DeleteRecords

+

Class \RdKafka\Admin\DeleteRecords

+

Methods

+

__construct()

+
public __construct ( 
+    \RdKafka\TopicPartition $beforeOffsets
+ ): 
+
+
+
Parameters
+
beforeOffsets \RdKafka\TopicPartition
+
+

__destruct()

+
public __destruct (  ): 
+
+

getCData()

+
public getCData (  ): \FFI\CData
+
+
+
Returns
+
\FFI\CData
+
+

Test Coverage 🧡

+
    +
  • 🧡 + Lines: 70% (7 / 10)
  • +
  • ❤️ + Methods: 33.33% (1 / 3)
  • +
+ + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/Admin/DeleteRecordsOptions/index.html b/api/RdKafka/Admin/DeleteRecordsOptions/index.html new file mode 100644 index 00000000..503b944a --- /dev/null +++ b/api/RdKafka/Admin/DeleteRecordsOptions/index.html @@ -0,0 +1,2890 @@ + + + + + + + + + + + + + + + + + + + + + + + + + DeleteRecordsOptions - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Class DeleteRecordsOptions

+

Class \RdKafka\Admin\DeleteRecordsOptions

+ +

Methods

+

__construct()

+
public __construct ( 
+    \RdKafka $kafka
+ ): 
+
+
+
Parameters
+
kafka \RdKafka
+
+

Test Coverage 💚

+
    +
  • 💚 + Lines: 100% (1 / 1)
  • +
  • 💚 + Methods: 100% (1 / 1)
  • +
+ + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/Admin/DeleteTopic/index.html b/api/RdKafka/Admin/DeleteTopic/index.html new file mode 100644 index 00000000..8880b4dd --- /dev/null +++ b/api/RdKafka/Admin/DeleteTopic/index.html @@ -0,0 +1,2933 @@ + + + + + + + + + + + + + + + + + + + + + + + + + DeleteTopic - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Class DeleteTopic

+

Class \RdKafka\Admin\DeleteTopic

+

Methods

+

__construct()

+
public __construct ( 
+    string $name
+ ): 
+
+
+
Parameters
+
name string
+
+

__destruct()

+
public __destruct (  ): 
+
+

getCData()

+
public getCData (  ): \FFI\CData
+
+
+
Returns
+
\FFI\CData
+
+

Test Coverage 🧡

+
    +
  • 🧡 + Lines: 62.5% (5 / 8)
  • +
  • ❤️ + Methods: 33.33% (1 / 3)
  • +
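
A short sketch of deleting a topic through the admin client documented earlier; the topic name is a placeholder:

// DeleteTopic objects are passed in batches to Client::deleteTopics().
$results = $client->deleteTopics([new \RdKafka\Admin\DeleteTopic('example-topic')]); // $client: \RdKafka\Admin\Client
foreach ($results as $result) {
    printf("%s: %s\n", $result->name, $result->errorString ?? 'deleted');
}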
+ + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/Admin/DeleteTopicsOptions/index.html b/api/RdKafka/Admin/DeleteTopicsOptions/index.html new file mode 100644 index 00000000..58ab7461 --- /dev/null +++ b/api/RdKafka/Admin/DeleteTopicsOptions/index.html @@ -0,0 +1,2890 @@ + + + + + + + + + + + + + + + + + + + + + + + + + DeleteTopicsOptions - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Class DeleteTopicsOptions

+

Class \RdKafka\Admin\DeleteTopicsOptions

+ +

Methods

+

__construct()

+
public __construct ( 
+    \RdKafka $kafka
+ ): 
+
+
+
Parameters
+
kafka \RdKafka
+
+

Test Coverage 💚

+
    +
  • 💚 + Lines: 100% (1 / 1)
  • +
  • 💚 + Methods: 100% (1 / 1)
  • +
+ + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/Admin/DescribeConfigsOptions/index.html b/api/RdKafka/Admin/DescribeConfigsOptions/index.html new file mode 100644 index 00000000..89e2ad47 --- /dev/null +++ b/api/RdKafka/Admin/DescribeConfigsOptions/index.html @@ -0,0 +1,2890 @@ + + + + + + + + + + + + + + + + + + + + + + + + + DescribeConfigsOptions - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Class DescribeConfigsOptions

+

Class \RdKafka\Admin\DescribeConfigsOptions

+ +

Methods

+

__construct()

+
public __construct ( 
+    \RdKafka $kafka
+ ): 
+
+
+
Parameters
+
kafka \RdKafka
+
+

Test Coverage 💚

+
    +
  • 💚 + Lines: 100% (1 / 1)
  • +
  • 💚 + Methods: 100% (1 / 1)
  • +
+ + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/Admin/GroupResult/index.html b/api/RdKafka/Admin/GroupResult/index.html new file mode 100644 index 00000000..96d9d53c --- /dev/null +++ b/api/RdKafka/Admin/GroupResult/index.html @@ -0,0 +1,3002 @@ + + + + + + + + + + + + + + + + + + + + + + + + + GroupResult - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Class GroupResult

+

Class \RdKafka\Admin\GroupResult

+

Properties

+

error

+
 public int error
+
+

errorString

+
 public ?string errorString
+
+

name

+
 public ?string name
+
+

partitions

+
 public array partitions
+
+

Methods

+

__construct()

+
public __construct ( 
+    \FFI\CData $result
+ ): 
+
+
+
Parameters
+
result \FFI\CData
+
+

Test Coverage ❤️

+
    +
  • ❤️ + Lines: 0% (0 / 7)
  • +
  • ❤️ + Methods: 0% (0 / 1)
  • +
+ + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/Admin/NewPartitions/index.html b/api/RdKafka/Admin/NewPartitions/index.html new file mode 100644 index 00000000..9a09dc11 --- /dev/null +++ b/api/RdKafka/Admin/NewPartitions/index.html @@ -0,0 +1,2964 @@ + + + + + + + + + + + + + + + + + + + + + + + + + NewPartitions - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Class NewPartitions

+

Class \RdKafka\Admin\NewPartitions

+

Methods

+

__construct()

+
public __construct ( 
+    string $topicName, 
+    int $new_total_cnt
+ ): 
+
+
+
Parameters
+
topicName string
+
new_total_cnt int
+
+

__destruct()

+
public __destruct (  ): 
+
+

getCData()

+
public getCData (  ): \FFI\CData
+
+
+
Returns
+
\FFI\CData
+
+

setReplicaAssignment()

+
public setReplicaAssignment ( 
+    int $new_partition_id, 
+    int[] $broker_ids
+ ): void
+
+
+
Parameters
+
new_partition_id int
+
broker_ids int[]
+
+

Test Coverage 💚

+
    +
  • 💚 + Lines: 91.89% (34 / 37)
  • +
  • ❤️ + Methods: 25% (1 / 4)
  • +
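
An illustrative sketch of growing a topic's partition count via Client::createPartitions(); the topic name, counts, and broker ids are placeholder assumptions:

$partitions = new \RdKafka\Admin\NewPartitions('example-topic', 12); // grow to 12 partitions in total
$partitions->setReplicaAssignment(0, [1, 2, 3]); // optional: broker ids for the first newly added partition

$results = $client->createPartitions([$partitions]); // $client: \RdKafka\Admin\Client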
+ + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/Admin/NewTopic/index.html b/api/RdKafka/Admin/NewTopic/index.html new file mode 100644 index 00000000..b98c1441 --- /dev/null +++ b/api/RdKafka/Admin/NewTopic/index.html @@ -0,0 +1,2995 @@ + + + + + + + + + + + + + + + + + + + + + + + + + NewTopic - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Class NewTopic

+

Class \RdKafka\Admin\NewTopic

+

Methods

+

__construct()

+
public __construct ( 
+    string $name, 
+    int $num_partitions, 
+    int $replication_factor
+ ): 
+
+
+
Parameters
+
name string
+
num_partitions int
+
replication_factor int
+
+

__destruct()

+
public __destruct (  ): 
+
+

getCData()

+
public getCData (  ): \FFI\CData
+
+
+
Returns
+
\FFI\CData
+
+

setConfig()

+
public setConfig ( 
+    string $name, 
+    string $value
+ ): void
+
+
+
Parameters
+
name string
+
value string
+
+

setReplicaAssignment()

+
public setReplicaAssignment ( 
+    int $partition_id, 
+    array $broker_ids
+ ): void
+
+
+
Parameters
+
partition_id int
+
broker_ids array
+
+

Test Coverage 💛

+
    +
  • 💛 + Lines: 85.37% (35 / 41)
  • +
  • ❤️ + Methods: 20% (1 / 5)
  • +
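
A minimal sketch of describing a new topic before handing it to Client::createTopics(); cleanup.policy is a standard broker topic property, the remaining values are placeholders:

$topic = new \RdKafka\Admin\NewTopic('example-topic', 6, 3); // 6 partitions, replication factor 3
$topic->setConfig('cleanup.policy', 'compact'); // per-topic broker config override

$results = $client->createTopics([$topic]); // $client: \RdKafka\Admin\Client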
+ + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/Admin/Options/index.html b/api/RdKafka/Admin/Options/index.html new file mode 100644 index 00000000..42bbb724 --- /dev/null +++ b/api/RdKafka/Admin/Options/index.html @@ -0,0 +1,3072 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Options - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+ +
+
+ + + +
+
+ + + + + + + +

Class Options

+

abstract Class \RdKafka\Admin\Options

+

Methods

+

__construct()

+
public __construct ( 
+    \RdKafka $kafka, 
+    int $for_api
+ ): 
+
+
+
Parameters
+
kafka \RdKafka
+
for_api int
+
+

__destruct()

+
public __destruct (  ): 
+
+

getCData()

+
public getCData (  ): \FFI\CData
+
+
+
Returns
+
\FFI\CData
+
+

setBrokerId()

+
public setBrokerId ( 
+    int $broker_id
+ ): void
+
+
+
Parameters
+
broker_id int
+
+

setOperationTimeout()

+
public setOperationTimeout ( 
+    int $timeout_ms
+ ): void
+
+
+
Parameters
+
timeout_ms int
+
+

setRequestTimeout()

+
public setRequestTimeout ( 
+    int $timeout_ms
+ ): void
+
+
+
Parameters
+
timeout_ms int
+
+

setValidateOnly()

+
public setValidateOnly ( 
+    bool $true_or_false
+ ): void
+
+
+
Parameters
+
true_or_false bool
+
+

Test Coverage 💛

+
    +
  • 💛 + Lines: 90% (36 / 40)
  • +
  • ❤️ + Methods: 42.86% (3 / 7)
  • +
+

Extended by

+ + + + + + + + + + + + + + + + + +
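
Concrete option objects are normally obtained from the admin client's new…Options() factory methods rather than constructed directly; a hedged sketch with assumed timeout values:

$options = $client->newCreateTopicsOptions(); // $client: \RdKafka\Admin\Client
$options->setRequestTimeout(30000);   // overall request timeout (ms)
$options->setOperationTimeout(60000); // broker-side completion timeout (ms)
$options->setValidateOnly(true);      // dry run: validate only, do not create

$results = $client->createTopics($topics, $options); // $topics: \RdKafka\Admin\NewTopic[]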
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/Admin/TopicResult/index.html b/api/RdKafka/Admin/TopicResult/index.html new file mode 100644 index 00000000..8f8bdd0c --- /dev/null +++ b/api/RdKafka/Admin/TopicResult/index.html @@ -0,0 +1,2981 @@ + + + + + + + + + + + + + + + + + + + + + + + + + TopicResult - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Class TopicResult

+

Class \RdKafka\Admin\TopicResult

+

Properties

+

error

+
 public int error
+
+

errorString

+
 public ?string errorString
+
+

name

+
 public string name
+
+

Methods

+

__construct()

+
public __construct ( 
+    \FFI\CData $result
+ ): 
+
+
+
Parameters
+
result \FFI\CData
+
+

Test Coverage 💚

+
    +
  • 💚 + Lines: 100% (3 / 3)
  • +
  • 💚 + Methods: 100% (1 / 1)
  • +
+ + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/Conf/index.html b/api/RdKafka/Conf/index.html new file mode 100644 index 00000000..e3c1e02c --- /dev/null +++ b/api/RdKafka/Conf/index.html @@ -0,0 +1,3277 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Conf - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Class Conf

+

Class \RdKafka\Conf

+ +

Methods

+

__construct()

+
public __construct (  ): 
+
+

__destruct()

+
public __destruct (  ): 
+
+

dump()

+
public dump (  ): array
+
+
+
Returns
+
array
+
+

get()

+
public get ( 
+    string $name
+ ): string|null
+
+
+
Parameters
+
name string
+
Returns
+
string|null
+
+

getCData()

+
public getCData (  ): \FFI\CData
+
+
+
Returns
+
\FFI\CData
+
+

set()

+
public set ( 
+    string $name, 
+    string $value
+ ): void
+
+

Setting non-string values, such as callbacks or a default_topic_conf TopicConf object, is not supported.

+

For callbacks and default_topic_conf use corresponding methods directly.

+
+
Parameters
+
name string
+
value string
+
+ +

setDefaultTopicConf()

+
public setDefaultTopicConf ( 
+    \RdKafka\TopicConf $topic_conf
+ ): void
+
+
+
Parameters
+
topic_conf \RdKafka\TopicConf
+
+
+

Deprecated

+

Set a custom TopicConf explicitly in Producer::newTopic(), Consumer::newTopic(), or KafkaConsumer::newTopic().

+
+

Note: Topic config properties can also be set directly via Conf.

+ +

setDrMsgCb()

+
public setDrMsgCb ( 
+    callable $callback
+ ): void
+
+
+
Parameters
+
callback callable function(Producer $producer, Message $message, ?mixed $opaque = null)
+
+

setErrorCb()

+
public setErrorCb ( 
+    callable $callback
+ ): void
+
+
+
Parameters
+
callback callable function($consumerOrProducer, int $err, string $reason, ?mixed $opaque = null)
+
+

setLogCb()

+
public setLogCb ( 
+    ?callable $callback
+ ): void
+
+
+
Parameters
+
callback ?callable function($consumerOrProducer, int $level, string $facility, string $message) or null to disable logging
+
+

setOffsetCommitCb()

+
public setOffsetCommitCb ( 
+    callable $callback
+ ): void
+
+
+
Parameters
+
callback callable function(KafkaConsumer $consumer, int $err, array $topicPartitions, ?mixed $opaque = null)
+
+

setOpaque()

+
public setOpaque ( 
+    mixed $opaque
+ ): void
+
+
+
Parameters
+
opaque mixed
+
+

setRebalanceCb()

+
public setRebalanceCb ( 
+    callable $callback
+ ): void
+
+
+
Parameters
+
callback callable function(KafkaConsumer $consumer, int $err, array $topicPartitions, ?mixed $opaque = null)
+
+

setStatsCb()

+
public setStatsCb ( 
+    callable $callback
+ ): void
+
+
+
Parameters
+
callback callable function($consumerOrProducer, string $json, int $jsonLength, ?mixed $opaque = null)
+
+

Test Coverage 💚

+
    +
  • 💚 + Lines: 95.31% (61 / 64)
  • +
  • 💛 + Methods: 85.71% (12 / 14)
  • +
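
A short sketch combining set() with the callback setters documented above; the broker address, the RD_KAFKA_RESP_ERR_NO_ERROR constant, and the Message fields used inside the delivery callback are assumptions:

$conf = new \RdKafka\Conf();
$conf->set('bootstrap.servers', 'kafka:9092'); // any librdkafka string property

// Delivery reports for produced messages.
$conf->setDrMsgCb(function (\RdKafka\Producer $producer, \RdKafka\Message $message): void {
    if ($message->err !== RD_KAFKA_RESP_ERR_NO_ERROR) { // constant and Message::$err assumed from librdkafka
        error_log('Delivery failed: ' . $message->errstr());
    }
});

// Client-level errors.
$conf->setErrorCb(function ($consumerOrProducer, int $err, string $reason): void {
    error_log(sprintf('Kafka error %d: %s', $err, $reason));
});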
+ + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/Consumer/index.html b/api/RdKafka/Consumer/index.html new file mode 100644 index 00000000..643306f3 --- /dev/null +++ b/api/RdKafka/Consumer/index.html @@ -0,0 +1,3027 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Consumer - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Class Consumer

+

Class \RdKafka\Consumer

+ +

Methods

+

__construct()

+
public __construct ( 
+    ?\RdKafka\Conf $conf = null
+ ): 
+
+
+
Parameters
+
conf ?\RdKafka\Conf
+
+

addBrokers()

+
public addBrokers ( 
+    string $broker_list
+ ): int
+
+
+
Parameters
+
broker_list string
+
Returns
+
int
+
+

getOutQLen()

+
public getOutQLen (  ): int
+
+
+
Returns
+
int
+
+

newQueue()

+
public newQueue (  ): \RdKafka\Queue
+
+
+
Returns
+
\RdKafka\Queue
+
+

newTopic()

+
public newTopic ( 
+    string $topic_name, 
+    ?\RdKafka\TopicConf $topic_conf = null
+ ): \RdKafka\ConsumerTopic
+
+
+
Parameters
+
topic_name string
+
topic_conf ?\RdKafka\TopicConf
+
Returns
+
\RdKafka\ConsumerTopic
+
+

poll()

+
public poll ( 
+    int $timeout_ms
+ ): int
+
+
+
Parameters
+
timeout_ms int
+
Returns
+
int
+
+

Test Coverage 💚

+
    +
  • 💚 + Lines: 100% (6 / 6)
  • +
  • 💚 + Methods: 100% (6 / 6)
  • +
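
A hedged sketch of the low-level consumer based on the methods above; the broker list and topic name are placeholders:

$consumer = new \RdKafka\Consumer(); // the Conf argument is optional for this low-level client
$consumer->addBrokers('kafka-1:9092,kafka-2:9092'); // returns the number of brokers added (assumed addresses)

$topic = $consumer->newTopic('example-topic'); // \RdKafka\ConsumerTopic, see the ConsumerTopic page
$queue = $consumer->newQueue(); // optional: fan several topic/partition streams into one queue

$consumer->poll(0); // serve callbacks and internal events; call this regularly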
+ + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/ConsumerTopic/index.html b/api/RdKafka/ConsumerTopic/index.html new file mode 100644 index 00000000..063ccfd4 --- /dev/null +++ b/api/RdKafka/ConsumerTopic/index.html @@ -0,0 +1,3132 @@ + + + + + + + + + + + + + + + + + + + + + + + + + ConsumerTopic - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+ +
+ + + +
+
+ + + + + + + +

Class ConsumerTopic

+

Class \RdKafka\ConsumerTopic

+ +

Methods

+

__construct()

+
public __construct ( 
+    \RdKafka\Consumer $consumer, 
+    string $name, 
+    ?\RdKafka\TopicConf $conf = null
+ ): 
+
+
+
Parameters
+
consumer \RdKafka\Consumer
+
name string
+
conf ?\RdKafka\TopicConf
+
+

__destruct()

+
public __destruct (  ): 
+
+

consume()

+
public consume ( 
+    int $partition, 
+    int $timeout_ms
+ ): ?\RdKafka\Message
+
+
+
Parameters
+
partition int
+
timeout_ms int
+
Returns
+
?\RdKafka\Message
+
+

consumeBatch()

+
public consumeBatch ( 
+    int $partition, 
+    int $timeout_ms, 
+    int $batch_size
+ ): \RdKafka\Message[]
+
+
+
Parameters
+
partition int
+
timeout_ms int
+
batch_size int
+
Returns
+
\RdKafka\Message[]
+
+

consumeCallback()

+
public consumeCallback ( 
+    int $partition, 
+    int $timeout_ms, 
+    callable $callback, 
+    mixed $opaque = null
+ ): int
+
+
+
Parameters
+
partition int
+
timeout_ms int
+
callback callable
+
opaque mixed
+
Returns
+
int
+
+
+

Deprecated

+

since librdkafka 1.4.0

+
+

consumeQueueStart()

+
public consumeQueueStart ( 
+    int $partition, 
+    int $offset, 
+    \RdKafka\Queue $queue
+ ): void
+
+
+
Parameters
+
partition int
+
offset int
+
queue \RdKafka\Queue
+
+

consumeStart()

+
public consumeStart ( 
+    int $partition, 
+    int $offset
+ ): void
+
+
+
Parameters
+
partition int
+
offset int
+
+

consumeStop()

+
public consumeStop ( 
+    int $partition
+ ): void
+
+
+
Parameters
+
partition int
+
+

offsetStore()

+
public offsetStore ( 
+    int $partition, 
+    int $offset
+ ): void
+
+
+
Parameters
+
partition int
+
offset int
+
+

Test Coverage 💛

+
    +
  • 💛 + Lines: 74.55% (82 / 110)
  • +
  • ❤️ + Methods: 27.27% (3 / 11)
  • +
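
An illustrative batch-consumption sketch; the RD_KAFKA_OFFSET_BEGINNING constant is an assumption carried over from librdkafka, and the topic name is a placeholder:

// $consumer is an \RdKafka\Consumer (see the Consumer page).
$topic = $consumer->newTopic('example-topic');

$topic->consumeStart(0, RD_KAFKA_OFFSET_BEGINNING); // partition 0; offset constant assumed
$messages = $topic->consumeBatch(0, 1000, 100);     // wait up to 1s for at most 100 messages
foreach ($messages as $message) {
    // process the \RdKafka\Message instances
}
$topic->consumeStop(0);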
+ + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/Event/index.html b/api/RdKafka/Event/index.html new file mode 100644 index 00000000..a6e4675c --- /dev/null +++ b/api/RdKafka/Event/index.html @@ -0,0 +1,3056 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Event - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Class Event

+

Class \RdKafka\Event

+

Methods

+

__construct()

+
public __construct ( 
+    \FFI\CData $event
+ ): 
+
+
+
Parameters
+
event \FFI\CData
+
+

__destruct()

+
public __destruct (  ): 
+
+

error()

+
public error (  ): int
+
+
+
Returns
+
int
+
+

errorIsFatal()

+
public errorIsFatal (  ): bool
+
+
+
Returns
+
bool
+
+

errorString()

+
public errorString (  ): string
+
+
+
Returns
+
string
+
+

getCData()

+
public getCData (  ): mixed
+
+
+
Returns
+
mixed
+
+

name()

+
public name (  ): string
+
+
+
Returns
+
string
+
+

type()

+
public type (  ): int
+
+
+
Returns
+
int
+
+

Test Coverage 💛

+
    +
  • 💛 + Lines: 90% (9 / 10)
  • +
  • 💛 + Methods: 87.5% (7 / 8)
  • +
+ + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/Exception/index.html b/api/RdKafka/Exception/index.html new file mode 100644 index 00000000..bbce50e4 --- /dev/null +++ b/api/RdKafka/Exception/index.html @@ -0,0 +1,2943 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Exception - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Class Exception

+

Class \RdKafka\Exception

+
    +
  • extends \Exception
  • +
+

Methods

+

__construct()

+
public __construct ( 
+    mixed $message = '', 
+    mixed $code, 
+    ?\Throwable $previous = null
+ ): 
+
+
+
Parameters
+
message mixed
+
code mixed
+
previous ?\Throwable
+
+

fromError()

+
public static fromError ( 
+    int $code
+ ): self
+
+
+
Parameters
+
code int
+
Returns
+
self
+
+

Test Coverage 💚

+
    +
  • 💚 + Lines: 100% (2 / 2)
  • +
  • 💚 + Methods: 100% (2 / 2)
  • +
+

Extended by

+ + + + + + + + + + + + + + + + + +
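
A brief sketch of catching the exception around an admin call and of the fromError() factory; the error constant is an assumption taken from librdkafka:

try {
    $results = $client->createTopics([new \RdKafka\Admin\NewTopic('example-topic', 1, 1)]);
} catch (\RdKafka\Exception $e) {
    error_log('Admin request failed: ' . $e->getMessage());
}

// Building an exception from a librdkafka error code (constant assumed):
$e = \RdKafka\Exception::fromError(RD_KAFKA_RESP_ERR__TIMED_OUT);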
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/FFI/CallbackProxy/index.html b/api/RdKafka/FFI/CallbackProxy/index.html new file mode 100644 index 00000000..301872af --- /dev/null +++ b/api/RdKafka/FFI/CallbackProxy/index.html @@ -0,0 +1,2918 @@ + + + + + + + + + + + + + + + + + + + + + + + + + CallbackProxy - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Class CallbackProxy

+

abstract Class \RdKafka\FFI\CallbackProxy

+

Methods

+

create()

+
public static create ( 
+    callable $callback
+ ): \Closure
+
+
+
Parameters
+
callback callable
+
Returns
+
\Closure
+
+

Test Coverage 💚

+
    +
  • 💚 + Lines: 100% (4 / 4)
  • +
  • 💚 + Methods: 100% (2 / 2)
  • +
+

Extended by

+ + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/FFI/ConsumeCallbackProxy/index.html b/api/RdKafka/FFI/ConsumeCallbackProxy/index.html new file mode 100644 index 00000000..03e79ee2 --- /dev/null +++ b/api/RdKafka/FFI/ConsumeCallbackProxy/index.html @@ -0,0 +1,2892 @@ + + + + + + + + + + + + + + + + + + + + + + + + + ConsumeCallbackProxy - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Class ConsumeCallbackProxy

+

Class \RdKafka\FFI\ConsumeCallbackProxy

+ +

Methods

+

__invoke()

+
public __invoke ( 
+    \FFI\CData $nativeMessage, 
+    ?\FFI\CData $opaque = null
+ ): void
+
+
+
Parameters
+
nativeMessage \FFI\CData
+
opaque ?\FFI\CData
+
+

Test Coverage 💚

+
    +
  • 💚 + Lines: 100% (4 / 4)
  • +
  • 💚 + Methods: 100% (1 / 1)
  • +
+ + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/FFI/DrMsgCallbackProxy/index.html b/api/RdKafka/FFI/DrMsgCallbackProxy/index.html new file mode 100644 index 00000000..57655ef8 --- /dev/null +++ b/api/RdKafka/FFI/DrMsgCallbackProxy/index.html @@ -0,0 +1,2894 @@ + + + + + + + + + + + + + + + + + + + + + + + + + DrMsgCallbackProxy - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Class DrMsgCallbackProxy

+

Class \RdKafka\FFI\DrMsgCallbackProxy

+ +

Methods

+

__invoke()

+
public __invoke ( 
+    \FFI\CData $producer, 
+    \FFI\CData $nativeMessage, 
+    ?\FFI\CData $opaque = null
+ ): void
+
+
+
Parameters
+
producer \FFI\CData
+
nativeMessage \FFI\CData
+
opaque ?\FFI\CData
+
+

Test Coverage 💚

+
    +
  • 💚 + Lines: 100% (5 / 5)
  • +
  • 💚 + Methods: 100% (1 / 1)
  • +
+ + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/FFI/ErrorCallbackProxy/index.html b/api/RdKafka/FFI/ErrorCallbackProxy/index.html new file mode 100644 index 00000000..93f15678 --- /dev/null +++ b/api/RdKafka/FFI/ErrorCallbackProxy/index.html @@ -0,0 +1,2896 @@ + + + + + + + + + + + + + + + + + + + + + + + + + ErrorCallbackProxy - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Class ErrorCallbackProxy

+

Class \RdKafka\FFI\ErrorCallbackProxy

+ +

Methods

+

__invoke()

+
public __invoke ( 
+    \FFI\CData $consumerOrProducer, 
+    int $err, 
+    string $reason, 
+    ?\FFI\CData $opaque = null
+ ): void
+
+
+
Parameters
+
consumerOrProducer \FFI\CData
+
err int
+
reason string
+
opaque ?\FFI\CData
+
+

Test Coverage 💚

+
    +
  • 💚 + Lines: 100% (6 / 6)
  • +
  • 💚 + Methods: 100% (1 / 1)
  • +
+ + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/FFI/Library/index.html b/api/RdKafka/FFI/Library/index.html new file mode 100644 index 00000000..17d26c66 --- /dev/null +++ b/api/RdKafka/FFI/Library/index.html @@ -0,0 +1,22509 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Library - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Class Library

+

Class \RdKafka\FFI\Library

+ +

This class is for internal use. It provides access to the low level interface to librdkafka.

+

Best practice is to use high level interfaces like \RdKafka\Conf, \RdKafka\Producer, …

+ +

Constants

+

PHP_LIBRARY_VERSION

+
public PHP_LIBRARY_VERSION = '0.6.0'
+
+

VERSION_AUTODETECT

+
public VERSION_AUTODETECT = ''
+
+

VERSION_LATEST

+
public VERSION_LATEST = '2.3.0'
+
+

Methods

+

cast()

+
public static cast ( 
+    mixed $type, 
+    \FFI\CData $ptr
+ ): \FFI\CData
+
+
+
Parameters
+
type mixed
+
ptr \FFI\CData
+
Returns
+
\FFI\CData
+
+

getClientVersion()

+
public static getClientVersion (  ): string
+
+

The client version exposed to the brokers.

+

The version has the format v{phpLibraryVersion}-v{bindingVersion}-librdkafka-v{librdkafkaVersion} (e.g. v0.1.0-dev-v1.5.2-librdkafka-v1.5.2).

+
+
Returns
+
string
+
+

getFFI()

+
public static getFFI (  ): \FFI
+
+
+
Returns
+
\FFI
+
+

getLibrary()

+
public static getLibrary (  ): string
+
+
+
Returns
+
string
+
+

getLibraryVersion()

+
public static getLibraryVersion (  ): string
+
+

The version of librdkafka

+
+
Returns
+
string
+
+

getVersion()

+
public static getVersion (  ): string
+
+

The version of the current binding with librdkafka

+
+
Returns
+
string
+
+

hasMethod()

+
public static hasMethod ( 
+    string $name
+ ): bool
+
+

Whether the method is supported by the current binding version.

+
+
Parameters
+
name string
+
Returns
+
bool
+
+

init()

+
public static init ( 
+    string $version = self::VERSION_AUTODETECT, 
+    string $scope = 'RdKafka', 
+    ?string $library = null, 
+    ?string $cdef = null
+ ): void
+
+
+
Parameters
+
version string
+
scope string
+
library ?string
+
cdef ?string
+
+

new()

+
public static new ( 
+    string|\FFI\CData|mixed $type, 
+    bool $owned = true, 
+    bool $persistent = false
+ ): \FFI\CData
+
+
+
Parameters
+
type string|\FFI\CData|mixed
+
owned bool
+
persistent bool
+
Returns
+
\FFI\CData
+
+

preload()

+
public static preload ( 
+    string $version = self::VERSION_AUTODETECT, 
+    string $scope = 'RdKafka', 
+    ?string $library = null, 
+    ?string $cdef = null
+ ): \FFI
+
+
+
Parameters
+
version string
+
scope string
+
library ?string
+
cdef ?string
+
Returns
+
\FFI
+
+
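
A small sketch of explicit initialization and version introspection using the methods above; note that the high-level classes are the recommended entry point, so calling init() directly is optional:

// Initialize the FFI bindings; the default autodetects the installed librdkafka version.
\RdKafka\FFI\Library::init();
// Library::preload() accepts the same arguments and is intended for an opcache.preload script (assumption).

printf(
    "librdkafka %s / binding %s / client version %s\n",
    \RdKafka\FFI\Library::getLibraryVersion(),
    \RdKafka\FFI\Library::getVersion(),
    \RdKafka\FFI\Library::getClientVersion()
);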

rd_kafka_AclBindingFilter_new()

+
public static rd_kafka_AclBindingFilter_new ( 
+    int $restype, 
+    string|null $name, 
+    int $resource_pattern_type, 
+    string|null $principal, 
+    string|null $host, 
+    int $operation, 
+    int $permission_type, 
+    \FFI\CData|null $errstr, 
+    int|null $errstr_size
+ ): \FFI\CData|null
+
+

Create a new AclBindingFilter object. This object is later passed to rd_kafka_DescribeAcls() or rd_kafka_DeleteAcls() in order to filter the acls to retrieve or to delete. Use the same rd_kafka_AclBinding functions to query or destroy it.

+ +
+
Parameters
+
restype int rd_kafka_ResourceType_t - The ResourceType or RD_KAFKA_RESOURCE_ANY if not filtering by this field.
+
name string|null const char* - The resource name or NULL if not filtering by this field.
+
resource_pattern_type int rd_kafka_ResourcePatternType_t - The pattern type or RD_KAFKA_RESOURCE_PATTERN_ANY if not filtering by this field.
+
principal string|null const char* - A principal or NULL if not filtering by this field.
+
host string|null const char* - A hostname or IP, or NULL if not filtering by this field.
+
operation int rd_kafka_AclOperation_t - A Kafka operation or RD_KAFKA_ACL_OPERATION_ANY if not filtering by this field.
+
permission_type int rd_kafka_AclPermissionType_t - A Kafka permission type or RD_KAFKA_ACL_PERMISSION_TYPE_ANY if not filtering by this field.
+
errstr \FFI\CData|null char* - An error string for returning errors or NULL to not use it.
+
errstr_size int|null size_t - The errstr size or 0 to not use it.
+
Returns
+
\FFI\CData|null rd_kafka_AclBindingFilter_t* - a new allocated AclBindingFilter object, or NULL if the input parameters are invalid. Use rd_kafka_AclBinding_destroy() to free object when done.
+
+ +

rd_kafka_AclBinding_destroy()

+
public static rd_kafka_AclBinding_destroy ( 
+    \FFI\CData|null $acl_binding
+ ): void
+
+
+
Parameters
+
acl_binding \FFI\CData|null rd_kafka_AclBinding_t*
+
+

rd_kafka_AclBinding_destroy_array()

+
public static rd_kafka_AclBinding_destroy_array ( 
+    \FFI\CData|null $acl_bindings, 
+    int|null $acl_bindings_cnt
+ ): void
+
+
+
Parameters
+
acl_bindings \FFI\CData|null rd_kafka_AclBinding_t**
+
acl_bindings_cnt int|null size_t
+
+

rd_kafka_AclBinding_error()

+
public static rd_kafka_AclBinding_error ( 
+    \FFI\CData|null $acl
+ ): \FFI\CData|null
+
+
+
Parameters
+
acl \FFI\CData|null const rd_kafka_AclBinding_t* - )
+
Returns
+
\FFI\CData|null const rd_kafka_error_t* - the error object for the given acl binding, or NULL on success.
+
+ +

rd_kafka_AclBinding_host()

+
public static rd_kafka_AclBinding_host ( 
+    \FFI\CData|null $acl
+ ): string|null
+
+
Remarks
lifetime of the returned string is the same as the acl.
+ +
+
Parameters
+
acl \FFI\CData|null const rd_kafka_AclBinding_t* - )
+
Returns
+
string|null const char* - the host for the given acl binding.
+
+ +

rd_kafka_AclBinding_name()

+
public static rd_kafka_AclBinding_name ( 
+    \FFI\CData|null $acl
+ ): string|null
+
+
Remarks
lifetime of the returned string is the same as the acl.
+ +
+
Parameters
+
acl \FFI\CData|null const rd_kafka_AclBinding_t* - )
+
Returns
+
string|null const char* - the resource name for the given acl binding.
+
+ +

rd_kafka_AclBinding_new()

+
public static rd_kafka_AclBinding_new ( 
+    int $restype, 
+    string|null $name, 
+    int $resource_pattern_type, 
+    string|null $principal, 
+    string|null $host, 
+    int $operation, 
+    int $permission_type, 
+    \FFI\CData|null $errstr, 
+    int|null $errstr_size
+ ): \FFI\CData|null
+
+

Create a new AclBinding object. This object is later passed to rd_kafka_CreateAcls().

+ +
+
Parameters
+
restype int rd_kafka_ResourceType_t - The ResourceType.
+
name string|null const char* - The resource name.
+
resource_pattern_type int rd_kafka_ResourcePatternType_t - The pattern type.
+
principal string|null const char* - A principal, following the kafka specification.
+
host string|null const char* - A hostname or IP address.
+
operation int rd_kafka_AclOperation_t - A Kafka operation.
+
permission_type int rd_kafka_AclPermissionType_t - A Kafka permission type.
+
errstr \FFI\CData|null char* - An error string for returning errors or NULL to not use it.
+
errstr_size int|null size_t - The errstr size or 0 to not use it.
+
Returns
+
\FFI\CData|null rd_kafka_AclBinding_t* - a new allocated AclBinding object, or NULL if the input parameters are invalid. Use rd_kafka_AclBinding_destroy() to free object when done.
+
+ +

rd_kafka_AclBinding_operation()

+
public static rd_kafka_AclBinding_operation ( 
+    \FFI\CData|null $acl
+ ): int
+
+
+
Parameters
+
acl \FFI\CData|null const rd_kafka_AclBinding_t* - )
+
Returns
+
int rd_kafka_AclOperation_t - the acl operation for the given acl binding.
+
+ +

rd_kafka_AclBinding_permission_type()

+
public static rd_kafka_AclBinding_permission_type ( 
+    \FFI\CData|null $acl
+ ): int
+
+
+
Parameters
+
acl \FFI\CData|null const rd_kafka_AclBinding_t* - )
+
Returns
+
int rd_kafka_AclPermissionType_t - the permission type for the given acl binding.
+
+ +

rd_kafka_AclBinding_principal()

+
public static rd_kafka_AclBinding_principal ( 
+    \FFI\CData|null $acl
+ ): string|null
+
+
Remarks
lifetime of the returned string is the same as the acl.
+ +
+
Parameters
+
acl \FFI\CData|null const rd_kafka_AclBinding_t* - )
+
Returns
+
string|null const char* - the principal for the given acl binding.
+
+ +

rd_kafka_AclBinding_resource_pattern_type()

+
public static rd_kafka_AclBinding_resource_pattern_type ( 
+    \FFI\CData|null $acl
+ ): int
+
+
+
Parameters
+
acl \FFI\CData|null const rd_kafka_AclBinding_t* - )
+
Returns
+
int rd_kafka_ResourcePatternType_t - the resource pattern type for the given acl binding.
+
+ +

rd_kafka_AclBinding_restype()

+
public static rd_kafka_AclBinding_restype ( 
+    \FFI\CData|null $acl
+ ): int
+
+
+
Parameters
+
acl \FFI\CData|null const rd_kafka_AclBinding_t* - )
+
Returns
+
int rd_kafka_ResourceType_t - the resource type for the given acl binding.
+
+ +

rd_kafka_AclOperation_name()

+
public static rd_kafka_AclOperation_name ( 
+    int $acl_operation
+ ): string|null
+
+
+
Parameters
+
acl_operation int rd_kafka_AclOperation_t - )
+
Returns
+
string|null const char* - a string representation of the acl_operation
+
+ +

rd_kafka_AclPermissionType_name()

+
public static rd_kafka_AclPermissionType_name ( 
+    int $acl_permission_type
+ ): string|null
+
+
+
Parameters
+
acl_permission_type int rd_kafka_AclPermissionType_t - )
+
Returns
+
string|null const char* - a string representation of the acl_permission_type
+
+ +

rd_kafka_AdminOptions_destroy()

+
public static rd_kafka_AdminOptions_destroy ( 
+    \FFI\CData|null $options
+ ): void
+
+
+
Parameters
+
options \FFI\CData|null rd_kafka_AdminOptions_t*
+
+

rd_kafka_AdminOptions_new()

+
public static rd_kafka_AdminOptions_new ( 
+    \FFI\CData|null $rk, 
+    int $for_api
+ ): \FFI\CData|null
+
+

Create a new AdminOptions object.

+
   The options object is not modified by the Admin API request APIs,
+   (e.g. CreateTopics) and may be reused for multiple calls.
+
+
+ +
+
Parameters
+
rk \FFI\CData|null rd_kafka_t* - Client instance.
+
for_api int rd_kafka_admin_op_t - Specifies what Admin API this AdminOptions object will be used for, which will enforce what AdminOptions_set_..() calls may be used based on the API, causing unsupported set..() calls to fail. Specifying RD_KAFKA_ADMIN_OP_ANY disables the enforcement allowing any option to be set, even if the option is not used in a future call to an Admin API method.
+
Returns
+
\FFI\CData|null rd_kafka_AdminOptions_t* - a new AdminOptions object (which must be freed with rd_kafka_AdminOptions_destroy()), or NULL if for_api was set to an unknown API op type.
+
+ +

rd_kafka_AdminOptions_set_broker()

+
public static rd_kafka_AdminOptions_set_broker ( 
+    \FFI\CData|null $options, 
+    int|null $broker_id, 
+    \FFI\CData|null $errstr, 
+    int|null $errstr_size
+ ): int
+
+

Override what broker the Admin request will be sent to.

+

By default, Admin requests are sent to the controller broker, with the following exceptions:

+
    +
  • AlterConfigs with a BROKER resource are sent to the broker id set as the resource name.
  • +
  • IncrementalAlterConfigs with a BROKER resource are sent to the broker id set as the resource name.
  • +
  • DescribeConfigs with a BROKER resource are sent to the broker id set as the resource name.
  • +
+ +
Remarks
This API should typically not be used, but serves as a workaround if new resource types are to the broker that the client does not know where to send.
+ +
+
Parameters
+
options \FFI\CData|null rd_kafka_AdminOptions_t* - Admin Options.
+
broker_id int|null int32_t - The broker to send the request to.
+
errstr \FFI\CData|null char* - A human readable error string (nul-terminated) is written to this location that must be of at least errstr_size bytes. The errstr is only written in case of error. - Writable size in errstr.
+
errstr_size int|null size_t
+
Returns
+
int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on failure in which case an error string will be written errstr.
+
+ +

rd_kafka_AdminOptions_set_include_authorized_operations()

+
public static rd_kafka_AdminOptions_set_include_authorized_operations ( 
+    \FFI\CData|null $options, 
+    int|null $true_or_false
+ ): \FFI\CData|null
+
+
+
Parameters
+
options \FFI\CData|null rd_kafka_AdminOptions_t*
+
true_or_false int|null int
+
Returns
+
\FFI\CData|null rd_kafka_error_t*
+
+

rd_kafka_AdminOptions_set_isolation_level()

+
public static rd_kafka_AdminOptions_set_isolation_level ( 
+    \FFI\CData|null $options, 
+    int $value
+ ): \FFI\CData|null
+
+
+
Parameters
+
options \FFI\CData|null rd_kafka_AdminOptions_t*
+
value int rd_kafka_IsolationLevel_t
+
Returns
+
\FFI\CData|null rd_kafka_error_t*
+
+

rd_kafka_AdminOptions_set_match_consumer_group_states()

+
public static rd_kafka_AdminOptions_set_match_consumer_group_states ( 
+    \FFI\CData|null $options, 
+    \FFI\CData|null $consumer_group_states, 
+    int|null $consumer_group_states_cnt
+ ): \FFI\CData|null
+
+

Set consumer group states to query for.

+ +
Remarks
This option is valid for ListConsumerGroups.
+ +
+
Parameters
+
options \FFI\CData|null rd_kafka_AdminOptions_t* - Admin options.
+
consumer_group_states \FFI\CData|null const rd_kafka_consumer_group_state_t* - Array of consumer group states.
+
consumer_group_states_cnt int|null size_t - Size of the consumer_group_states array.
+
Returns
+
\FFI\CData|null rd_kafka_error_t* - NULL on success, a new error instance that must be released with rd_kafka_error_destroy() in case of error.
+
+ +

rd_kafka_AdminOptions_set_opaque()

+
public static rd_kafka_AdminOptions_set_opaque ( 
+    \FFI\CData|null $options, 
+    \FFI\CData|object|string|null $opaque
+ ): void
+
+
+
Parameters
+
options \FFI\CData|null rd_kafka_AdminOptions_t*
+
opaque \FFI\CData|object|string|null void*
+
+

rd_kafka_AdminOptions_set_operation_timeout()

+
public static rd_kafka_AdminOptions_set_operation_timeout ( 
+    \FFI\CData|null $options, 
+    int|null $timeout_ms, 
+    \FFI\CData|null $errstr, 
+    int|null $errstr_size
+ ): int
+
+

Sets the broker's operation timeout, such as the timeout for CreateTopics to complete the creation of topics on the controller before returning a result to the application.

+

CreateTopics: values <= 0 will return immediately after triggering topic creation, while > 0 will wait this long for topic creation to propagate in cluster. Default: 60 seconds.

+

DeleteTopics: same semantics as CreateTopics. CreatePartitions: same semantics as CreateTopics.

+ +
Remarks
This option is valid for CreateTopics, DeleteTopics, CreatePartitions, and DeleteRecords.
+ +
+
Parameters
+
options \FFI\CData|null rd_kafka_AdminOptions_t* - Admin options.
+
timeout_ms int|null int - Timeout in milliseconds.
+
errstr \FFI\CData|null char* - A human readable error string (nul-terminated) is written to this location that must be of at least errstr_size bytes. The errstr is only written in case of error.
+
errstr_size int|null size_t - Writable size in errstr.
+
Returns
+
int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success, or RD_KAFKA_RESP_ERR__INVALID_ARG if timeout was out of range, in which case an error string will be written to errstr.
+
+ +
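For example (illustrative, same placeholder assumptions as above), waiting up to 30 seconds for topic creation to propagate:

$errstr = \FFI::new('char[512]');
$err = Library::rd_kafka_AdminOptions_set_operation_timeout(
    $options,
    30 * 1000,                                    // 30s broker-side operation timeout
    $errstr,
    \FFI::sizeof($errstr)
);
if ($err !== RD_KAFKA_RESP_ERR_NO_ERROR) {
    // e.g. RD_KAFKA_RESP_ERR__INVALID_ARG if the timeout is out of range
    throw new \RuntimeException(\FFI::string($errstr));
}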

rd_kafka_AdminOptions_set_request_timeout()

+
public static rd_kafka_AdminOptions_set_request_timeout ( 
+    \FFI\CData|null $options, 
+    int|null $timeout_ms, 
+    \FFI\CData|null $errstr, 
+    int|null $errstr_size
+ ): int
+
+

Sets the overall request timeout, including broker lookup, request transmission, operation time on broker, and response.

+ +
Remarks
This option is valid for all Admin API requests.
+ +
+
Parameters
+
options \FFI\CData|null rd_kafka_AdminOptions_t* - Admin options.
+
timeout_ms int|null int - Timeout in milliseconds. Defaults to socket.timeout.ms.
+
errstr \FFI\CData|null char* - A human readable error string (nul-terminated) is written to this location that must be of at least errstr_size bytes. The errstr is only written in case of error.
+
errstr_size int|null size_t - Writable size in errstr.
+
Returns
+
int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success, or RD_KAFKA_RESP_ERR__INVALID_ARG if timeout was out of range, in which case an error string will be written to errstr.
+
+ +

rd_kafka_AdminOptions_set_require_stable_offsets()

+
public static rd_kafka_AdminOptions_set_require_stable_offsets ( 
+    \FFI\CData|null $options, 
+    int|null $true_or_false
+ ): \FFI\CData|null
+
+

Whether broker should return stable offsets (transaction-committed).

+ +
Remarks
This option is valid for ListConsumerGroupOffsets.
+ +
+
Parameters
+
options \FFI\CData|null rd_kafka_AdminOptions_t* - Admin options.
+
true_or_false int|null int - Defaults to false.
+
Returns
+
\FFI\CData|null rd_kafka_error_t* - NULL on success, a new error instance that must be released with rd_kafka_error_destroy() in case of error.
+
+ +

rd_kafka_AdminOptions_set_validate_only()

+
public static rd_kafka_AdminOptions_set_validate_only ( 
+    \FFI\CData|null $options, 
+    int|null $true_or_false, 
+    \FFI\CData|null $errstr, 
+    int|null $errstr_size
+ ): int
+
+

Tell broker to only validate the request, without performing the requested operation (create topics, etc).

+ +
Remarks
This option is valid for CreateTopics, CreatePartitions, AlterConfigs.
+ +
+
Parameters
+
options \FFI\CData|null rd_kafka_AdminOptions_t* - Admin options.
+
true_or_false int|null int - Defaults to false.
+
errstr \FFI\CData|null char* - A human readable error string (nul-terminated) is written to this location that must be of at least errstr_size bytes. The errstr is only written in case of error.
+
errstr_size int|null size_t - Writable size in errstr.
+
Returns
+
int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success, or an error code on failure in which case an error string will be written to errstr.
+
+ +

rd_kafka_AlterConfigs()

+
public static rd_kafka_AlterConfigs ( 
+    \FFI\CData|null $rk, 
+    \FFI\CData|null $configs, 
+    int|null $config_cnt, 
+    \FFI\CData|null $options, 
+    \FFI\CData|null $rkqu
+ ): void
+
+

Update the configuration for the specified resources. Updates are not transactional so they may succeed for a subset of the provided resources while the others fail. The configuration for a particular resource is updated atomically, replacing values using the provided ConfigEntrys and reverting unspecified ConfigEntrys to their default values.

+
Remarks
Requires broker version >=0.11.0.0
+
Warning
AlterConfigs will replace all existing configuration for the provided resources with the new configuration given, reverting all other configuration to their default values.
+
Remarks
Multiple resources and resource types may be set, but at most one resource of type RD_KAFKA_RESOURCE_BROKER is allowed per call since these resource requests must be sent to the broker specified in the resource.
+
Deprecated:
Use rd_kafka_IncrementalAlterConfigs().
+ +
+
Parameters
+
rk \FFI\CData|null rd_kafka_t*
+
configs \FFI\CData|null rd_kafka_ConfigResource_t**
+
config_cnt int|null size_t
+
options \FFI\CData|null const rd_kafka_AdminOptions_t*
+
rkqu \FFI\CData|null rd_kafka_queue_t*
+
+ +

rd_kafka_AlterConfigs_result_resources()

+
public static rd_kafka_AlterConfigs_result_resources ( 
+    \FFI\CData|null $result, 
+    \FFI\CData|null $cntp
+ ): \FFI\CData|null
+
+

Get an array of resource results from an AlterConfigs result.

+

Use rd_kafka_ConfigResource_error() and rd_kafka_ConfigResource_error_string() to extract per-resource error results on the returned array elements.

+

The returned object life-times are the same as the result object.

+ +
+
Parameters
+
result \FFI\CData|null const rd_kafka_AlterConfigs_result_t* - Result object to get resource results from.
+
cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
+
Returns
+
\FFI\CData|null const rd_kafka_ConfigResource_t** - an array of ConfigResource elements, or NULL if not available.
+
+ +
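A sketch of the size_t out-parameter pattern shared by this and the other ..._result_...() accessors; $result is assumed to have been extracted from an RD_KAFKA_EVENT_ALTERCONFIGS_RESULT event, and Library remains the placeholder binder class.

$cnt = \FFI::new('size_t');
$resources = Library::rd_kafka_AlterConfigs_result_resources($result, \FFI::addr($cnt));

for ($i = 0; $i < $cnt->cdata; $i++) {
    $resource = $resources[$i];
    if (Library::rd_kafka_ConfigResource_error($resource) !== RD_KAFKA_RESP_ERR_NO_ERROR) {
        printf(
            "%s: %s\n",
            Library::rd_kafka_ConfigResource_name($resource),
            Library::rd_kafka_ConfigResource_error_string($resource)
        );
    }
}
// do not free $resources: it shares the lifetime of $result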

rd_kafka_AlterConsumerGroupOffsets()

+
public static rd_kafka_AlterConsumerGroupOffsets ( 
+    \FFI\CData|null $rk, 
+    \FFI\CData|null $alter_grpoffsets, 
+    int|null $alter_grpoffsets_cnt, 
+    \FFI\CData|null $options, 
+    \FFI\CData|null $rkqu
+ ): void
+
+

Alter committed offsets for a set of partitions in a consumer group. This will succeed at the partition level only if the group is not actively subscribed to the corresponding topic.

+ +
Remarks
The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT
+
+The current implementation only supports one group per invocation.
+ +
+
Parameters
+
rk \FFI\CData|null rd_kafka_t* - Client instance.
+
alter_grpoffsets \FFI\CData|null rd_kafka_AlterConsumerGroupOffsets_t** - Array of group committed offsets to alter. MUST only be one single element.
+
alter_grpoffsets_cnt int|null size_t - Number of elements in alter_grpoffsets array. MUST always be 1.
+
options \FFI\CData|null const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults.
+
rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to emit result on.
+
+ +

rd_kafka_AlterConsumerGroupOffsets_destroy()

+
public static rd_kafka_AlterConsumerGroupOffsets_destroy ( 
+    \FFI\CData|null $alter_grpoffsets
+ ): void
+
+
+
Parameters
+
alter_grpoffsets \FFI\CData|null rd_kafka_AlterConsumerGroupOffsets_t*
+
+

rd_kafka_AlterConsumerGroupOffsets_destroy_array()

+
public static rd_kafka_AlterConsumerGroupOffsets_destroy_array ( 
+    \FFI\CData|null $alter_grpoffsets, 
+    int|null $alter_grpoffset_cnt
+ ): void
+
+
+
Parameters
+
alter_grpoffsets \FFI\CData|null rd_kafka_AlterConsumerGroupOffsets_t**
+
alter_grpoffset_cnt int|null size_t
+
+

rd_kafka_AlterConsumerGroupOffsets_new()

+
public static rd_kafka_AlterConsumerGroupOffsets_new ( 
+    string|null $group_id, 
+    \FFI\CData|null $partitions
+ ): \FFI\CData|null
+
+

Create a new AlterConsumerGroupOffsets object. This object is later passed to rd_kafka_AlterConsumerGroupOffsets().

+ +
+
Parameters
+
group_id string|null const char* - Consumer group id.
+
partitions \FFI\CData|null const rd_kafka_topic_partition_list_t* - Partitions to alter committed offsets for. Only the topic and partition fields are used.
+
Returns
+
\FFI\CData|null rd_kafka_AlterConsumerGroupOffsets_t* - a new allocated AlterConsumerGroupOffsets object. Use rd_kafka_AlterConsumerGroupOffsets_destroy() to free object when done.
+
+ +

rd_kafka_AlterConsumerGroupOffsets_result_groups()

+
public static rd_kafka_AlterConsumerGroupOffsets_result_groups ( 
+    \FFI\CData|null $result, 
+    \FFI\CData|null $cntp
+ ): \FFI\CData|null
+
+

Get an array of results from an AlterConsumerGroupOffsets result.

+

The returned groups life-time is the same as the result object.

+ +
Remarks
The lifetime of the returned memory is the same as the lifetime of the result object.
+ +
+
Parameters
+
result \FFI\CData|null const rd_kafka_AlterConsumerGroupOffsets_result_t* - Result to get group results from.
+
cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
+
Returns
+
\FFI\CData|null const rd_kafka_group_result_t**
+
+ +

rd_kafka_AlterUserScramCredentials()

+
public static rd_kafka_AlterUserScramCredentials ( 
+    \FFI\CData|null $rk, 
+    \FFI\CData|null $alterations, 
+    int|null $alteration_cnt, 
+    \FFI\CData|null $options, 
+    \FFI\CData|null $rkqu
+ ): void
+
+

Alter SASL/SCRAM credentials. This operation is supported by brokers with version 2.7.0 or higher.

+
Remarks
For upsertions to be processed, librdkafka must be built with OpenSSL support. It is needed to calculate the HMAC.
+ +
+
Parameters
+
rk \FFI\CData|null rd_kafka_t* - Client instance.
+
alterations \FFI\CData|null rd_kafka_UserScramCredentialAlteration_t** - The alterations to be applied.
+
alteration_cnt int|null size_t - Number of elements in alterations array.
+
options \FFI\CData|null const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults.
+
rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to emit result on.
+
+ +

rd_kafka_AlterUserScramCredentials_result_response_error()

+
public static rd_kafka_AlterUserScramCredentials_result_response_error ( 
+    \FFI\CData|null $response
+ ): \FFI\CData|null
+
+
+
Parameters
+
response \FFI\CData|null const rd_kafka_AlterUserScramCredentials_result_response_t*
+
Returns
+
\FFI\CData|null const rd_kafka_error_t*
+
+

rd_kafka_AlterUserScramCredentials_result_response_user()

+
public static rd_kafka_AlterUserScramCredentials_result_response_user ( 
+    \FFI\CData|null $response
+ ): string|null
+
+
+
Parameters
+
response \FFI\CData|null const rd_kafka_AlterUserScramCredentials_result_response_t*
+
Returns
+
string|null const char*
+
+

rd_kafka_AlterUserScramCredentials_result_responses()

+
public static rd_kafka_AlterUserScramCredentials_result_responses ( 
+    \FFI\CData|null $result, 
+    \FFI\CData|null $cntp
+ ): \FFI\CData|null
+
+

Get an array of responses from an AlterUserScramCredentials result.

+

The returned value life-time is the same as the result object.

+ +
+
Parameters
+
result \FFI\CData|null const rd_kafka_AlterUserScramCredentials_result_t* - Result to get responses from.
+
cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
+
Returns
+
\FFI\CData|null const rd_kafka_AlterUserScramCredentials_result_response_t**
+
+ +

rd_kafka_ConfigEntry_is_default()

+
public static rd_kafka_ConfigEntry_is_default ( 
+    \FFI\CData|null $entry
+ ): int|null
+
+
Remarks
Shall only be used on a DescribeConfigs result, otherwise returns -1.
+ +
+
Parameters
+
entry \FFI\CData|null const rd_kafka_ConfigEntry_t* - )
+
Returns
+
int|null int - 1 if the config property is set to its default value on the broker, else 0.
+
+ +

rd_kafka_ConfigEntry_is_read_only()

+
public static rd_kafka_ConfigEntry_is_read_only ( 
+    \FFI\CData|null $entry
+ ): int|null
+
+
Remarks
Shall only be used on a DescribeConfigs result, otherwise returns -1.
+ +
+
Parameters
+
entry \FFI\CData|null const rd_kafka_ConfigEntry_t* - )
+
Returns
+
int|null int - 1 if the config property is read-only on the broker, else 0.
+
+ +

rd_kafka_ConfigEntry_is_sensitive()

+
public static rd_kafka_ConfigEntry_is_sensitive ( 
+    \FFI\CData|null $entry
+ ): int|null
+
+
Remarks
An application should take care not to include the value of sensitive configuration entries in its output.
+
+Shall only be used on a DescribeConfigs result, otherwise returns -1.
+ +
+
Parameters
+
entry \FFI\CData|null const rd_kafka_ConfigEntry_t* - )
+
Returns
+
int|null int - 1 if the config property contains sensitive information (such as security configuration), else 0.
+
+ +

rd_kafka_ConfigEntry_is_synonym()

+
public static rd_kafka_ConfigEntry_is_synonym ( 
+    \FFI\CData|null $entry
+ ): int|null
+
+
+
Parameters
+
entry \FFI\CData|null const rd_kafka_ConfigEntry_t* - )
+
Returns
+
int|null int - 1 if this entry is a synonym, else 0.
+
+ +

rd_kafka_ConfigEntry_name()

+
public static rd_kafka_ConfigEntry_name ( 
+    \FFI\CData|null $entry
+ ): string|null
+
+
+
Parameters
+
entry \FFI\CData|null const rd_kafka_ConfigEntry_t* - )
+
Returns
+
string|null const char* - the configuration property name
+
+ +

rd_kafka_ConfigEntry_source()

+
public static rd_kafka_ConfigEntry_source ( 
+    \FFI\CData|null $entry
+ ): int
+
+
+
Parameters
+
entry \FFI\CData|null const rd_kafka_ConfigEntry_t* - )
+
Returns
+
int rd_kafka_ConfigSource_t - the config source.
+
+ +

rd_kafka_ConfigEntry_synonyms()

+
public static rd_kafka_ConfigEntry_synonyms ( 
+    \FFI\CData|null $entry, 
+    \FFI\CData|null $cntp
+ ): \FFI\CData|null
+
+
Remarks
The lifetime of the returned entry is the same as conf.
+
+Shall only be used on a DescribeConfigs result, otherwise returns NULL.
+ +
+
Parameters
+
entry \FFI\CData|null const rd_kafka_ConfigEntry_t* - Entry to get synonyms for.
+
cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
+
Returns
+
\FFI\CData|null const rd_kafka_ConfigEntry_t** - the synonym config entry array.
+
+ +

rd_kafka_ConfigEntry_value()

+
public static rd_kafka_ConfigEntry_value ( 
+    \FFI\CData|null $entry
+ ): string|null
+
+
+
Parameters
+
entry \FFI\CData|null const rd_kafka_ConfigEntry_t* - )
+
Returns
+
string|null const char* - the configuration value, may be NULL for sensitive or unset properties.
+
+ +

rd_kafka_ConfigResource_add_incremental_config()

+
public static rd_kafka_ConfigResource_add_incremental_config ( 
+    \FFI\CData|null $config, 
+    string|null $name, 
+    int $op_type, 
+    string|null $value
+ ): \FFI\CData|null
+
+

Add the value of the configuration entry for a subsequent incremental alter config operation. APPEND and SUBTRACT are possible for list-type configuration entries only.

+ +
+
Parameters
+
config \FFI\CData|null rd_kafka_ConfigResource_t* - ConfigResource to add config property to.
+
name string|null const char* - Configuration name, depends on resource type.
+
op_type int rd_kafka_AlterConfigOpType_t - Operation type, one of rd_kafka_AlterConfigOpType_t.
+
value string|null const char* - Configuration value, depends on resource type and name. Set to NULL, only with op_type set to DELETE, to revert the configuration value to its default.
+
Returns
+
\FFI\CData|null rd_kafka_error_t* - NULL on success, or an rd_kafka_error_t * with the corresponding error code and string. Error ownership belongs to the caller. Possible error codes:
    +
  • RD_KAFKA_RESP_ERR__INVALID_ARG on invalid input.
  • +
+ +
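An illustrative sketch (same Library placeholder as in the earlier examples) that stages an incremental SET on a topic resource before calling rd_kafka_IncrementalAlterConfigs():

$config = Library::rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_TOPIC, 'my-topic');

// RD_KAFKA_ALTER_CONFIG_OP_TYPE_SET is the librdkafka op type enum value,
// assumed to be exposed as a constant by the binding.
$error = Library::rd_kafka_ConfigResource_add_incremental_config(
    $config,
    'retention.ms',
    RD_KAFKA_ALTER_CONFIG_OP_TYPE_SET,
    '86400000'
);
if ($error !== null) {
    // inspect the error, then release it; ownership belongs to the caller
}

// ... pass $config to rd_kafka_IncrementalAlterConfigs() ...
Library::rd_kafka_ConfigResource_destroy($config);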

    rd_kafka_ConfigResource_configs()

    +
    public static rd_kafka_ConfigResource_configs ( 
    +    \FFI\CData|null $config, 
    +    \FFI\CData|null $cntp
    + ): \FFI\CData|null
    +
    +

    Get an array of config entries from a ConfigResource object.

    +

    The returned object life-times are the same as the config object.

    + +
    +
    Parameters
    +
    config \FFI\CData|null const rd_kafka_ConfigResource_t* - ConfigResource to get configs from.
    +
    cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_ConfigEntry_t**
    +
    + +

    rd_kafka_ConfigResource_destroy()

    +
    public static rd_kafka_ConfigResource_destroy ( 
    +    \FFI\CData|null $config
    + ): void
    +
    +
    +
    Parameters
    +
    config \FFI\CData|null rd_kafka_ConfigResource_t*
    +
    +

    rd_kafka_ConfigResource_destroy_array()

    +
    public static rd_kafka_ConfigResource_destroy_array ( 
    +    \FFI\CData|null $config, 
    +    int|null $config_cnt
    + ): void
    +
    +
    +
    Parameters
    +
    config \FFI\CData|null rd_kafka_ConfigResource_t**
    +
    config_cnt int|null size_t
    +
    +

    rd_kafka_ConfigResource_error()

    +
    public static rd_kafka_ConfigResource_error ( 
    +    \FFI\CData|null $config
    + ): int
    +
    +
    +
    Parameters
    +
    config \FFI\CData|null const rd_kafka_ConfigResource_t* - )
    +
    Returns
    +
    int rd_kafka_resp_err_t - the error for this resource from an AlterConfigs request
    +
    + +

    rd_kafka_ConfigResource_error_string()

    +
    public static rd_kafka_ConfigResource_error_string ( 
    +    \FFI\CData|null $config
    + ): string|null
    +
    +
    +
    Parameters
    +
    config \FFI\CData|null const rd_kafka_ConfigResource_t* - )
    +
    Returns
    +
    string|null const char* - the error string for this resource from an AlterConfigs request, or NULL if no error.
    +
    + +

    rd_kafka_ConfigResource_name()

    +
    public static rd_kafka_ConfigResource_name ( 
    +    \FFI\CData|null $config
    + ): string|null
    +
    +
    +
    Parameters
    +
    config \FFI\CData|null const rd_kafka_ConfigResource_t* - )
    +
    Returns
    +
    string|null const char* - the name for config
    +
    + +

    rd_kafka_ConfigResource_new()

    +
    public static rd_kafka_ConfigResource_new ( 
    +    int $restype, 
    +    string|null $resname
    + ): \FFI\CData|null
    +
    +

    Create new ConfigResource object.

    + +
    +
    Parameters
    +
    restype int rd_kafka_ResourceType_t - The resource type (e.g., RD_KAFKA_RESOURCE_TOPIC)
    +
    resname string|null const char* - The resource name (e.g., the topic name)
    +
    Returns
    +
    \FFI\CData|null rd_kafka_ConfigResource_t* - a newly allocated object
    +
    + +

    rd_kafka_ConfigResource_set_config()

    +
    public static rd_kafka_ConfigResource_set_config ( 
    +    \FFI\CData|null $config, 
    +    string|null $name, 
    +    string|null $value
    + ): int
    +
    +

    Set configuration name value pair.

    + +

    This will overwrite the current value.

    + +
    +
    Parameters
    +
    config \FFI\CData|null rd_kafka_ConfigResource_t* - ConfigResource to set config property on.
    +
    name string|null const char* - Configuration name, depends on resource type.
    +
    value string|null const char* - Configuration value, depends on resource type and name. Set to NULL to revert configuration value to default.
    +
    Returns
    +
    int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR if config was added to resource, or RD_KAFKA_RESP_ERR__INVALID_ARG on invalid input.
    +
    + +
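A short illustrative sketch combining rd_kafka_ConfigResource_new() and rd_kafka_ConfigResource_set_config(); Library is still a placeholder for the binder class.

$config = Library::rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_TOPIC, 'my-topic');

$err = Library::rd_kafka_ConfigResource_set_config($config, 'cleanup.policy', 'compact');
if ($err !== RD_KAFKA_RESP_ERR_NO_ERROR) {
    throw new \RuntimeException('Invalid config name/value');
}

// ... pass $config to rd_kafka_AlterConfigs() or rd_kafka_DescribeConfigs() ...
Library::rd_kafka_ConfigResource_destroy($config);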

    rd_kafka_ConfigResource_type()

    +
    public static rd_kafka_ConfigResource_type ( 
    +    \FFI\CData|null $config
    + ): int
    +
    +
    +
    Parameters
    +
    config \FFI\CData|null const rd_kafka_ConfigResource_t* - )
    +
    Returns
    +
    int rd_kafka_ResourceType_t - the ResourceType for config
    +
    + +

    rd_kafka_ConfigSource_name()

    +
    public static rd_kafka_ConfigSource_name ( 
    +    int $confsource
    + ): string|null
    +
    +
    +
    Parameters
    +
    confsource int rd_kafka_ConfigSource_t - )
    +
    Returns
    +
    string|null const char* - a string representation of the confsource.
    +
    + +

    rd_kafka_ConsumerGroupDescription_authorized_operations()

    +
    public static rd_kafka_ConsumerGroupDescription_authorized_operations ( 
    +    \FFI\CData|null $grpdesc, 
    +    \FFI\CData|null $cntp
    + ): \FFI\CData|null
    +
    +
    +
    Parameters
    +
    grpdesc \FFI\CData|null const rd_kafka_ConsumerGroupDescription_t*
    +
    cntp \FFI\CData|null size_t*
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_AclOperation_t*
    +
    +

    rd_kafka_ConsumerGroupDescription_coordinator()

    +
    public static rd_kafka_ConsumerGroupDescription_coordinator ( 
    +    \FFI\CData|null $grpdesc
    + ): \FFI\CData|null
    +
    +

    Gets the coordinator for the grpdesc group.

    + +
    Remarks
    The lifetime of the returned memory is the same as the lifetime of the grpdesc object.
    + +
    +
    Parameters
    +
    grpdesc \FFI\CData|null const rd_kafka_ConsumerGroupDescription_t* - ) - The group description.
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_Node_t* - The group coordinator.
    +
    + +

    rd_kafka_ConsumerGroupDescription_error()

    +
    public static rd_kafka_ConsumerGroupDescription_error ( 
    +    \FFI\CData|null $grpdesc
    + ): \FFI\CData|null
    +
    +

    Gets the error for the grpdesc group.

    + +
    Remarks
    The lifetime of the returned memory is the same as the lifetime of the grpdesc object.
    + +
    +
    Parameters
    +
    grpdesc \FFI\CData|null const rd_kafka_ConsumerGroupDescription_t* - ) - The group description.
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_error_t* - The group description error.
    +
    + +

    rd_kafka_ConsumerGroupDescription_group_id()

    +
    public static rd_kafka_ConsumerGroupDescription_group_id ( 
    +    \FFI\CData|null $grpdesc
    + ): string|null
    +
    +

    Gets the group id for the grpdesc group.

    + +
    Remarks
    The lifetime of the returned memory is the same as the lifetime of the grpdesc object.
    + +
    +
    Parameters
    +
    grpdesc \FFI\CData|null const rd_kafka_ConsumerGroupDescription_t* - ) - The group description.
    +
    Returns
    +
    string|null const char* - The group id.
    +
    + +

    rd_kafka_ConsumerGroupDescription_is_simple_consumer_group()

    +
    public static rd_kafka_ConsumerGroupDescription_is_simple_consumer_group ( 
    +    \FFI\CData|null $grpdesc
    + ): int|null
    +
    +

    Is the grpdesc group a simple consumer group.

    + +
    +
    Parameters
    +
    grpdesc \FFI\CData|null const rd_kafka_ConsumerGroupDescription_t* - ) - The group description.
    +
    Returns
    +
    int|null int - 1 if the group is a simple consumer group, else 0.
    +
    + +

    rd_kafka_ConsumerGroupDescription_member()

    +
    public static rd_kafka_ConsumerGroupDescription_member ( 
    +    \FFI\CData|null $grpdesc, 
    +    int|null $idx
    + ): \FFI\CData|null
    +
    +

    Gets a member of grpdesc group.

    + +
    Remarks
    The lifetime of the returned memory is the same as the lifetime of the grpdesc object.
    + +
    +
    Parameters
    +
    grpdesc \FFI\CData|null const rd_kafka_ConsumerGroupDescription_t* - The group description.
    +
    idx int|null size_t - The member idx.
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_MemberDescription_t* - A member at index idx, or NULL if idx is out of range.
    +
    + +

    rd_kafka_ConsumerGroupDescription_member_count()

    +
    public static rd_kafka_ConsumerGroupDescription_member_count ( 
    +    \FFI\CData|null $grpdesc
    + ): int|null
    +
    +

    Gets the members count of grpdesc group.

    + +
    +
    Parameters
    +
    grpdesc \FFI\CData|null const rd_kafka_ConsumerGroupDescription_t* - ) - The group description.
    +
    Returns
    +
    int|null size_t - The member count.
    +
    + +

    rd_kafka_ConsumerGroupDescription_partition_assignor()

    +
    public static rd_kafka_ConsumerGroupDescription_partition_assignor ( 
    +    \FFI\CData|null $grpdesc
    + ): string|null
    +
    +

    Gets the partition assignor for the grpdesc group.

    + +
    Remarks
    The lifetime of the returned memory is the same as the lifetime of the grpdesc object.
    + +
    +
    Parameters
    +
    grpdesc \FFI\CData|null const rd_kafka_ConsumerGroupDescription_t* - ) - The group description.
    +
    Returns
    +
    string|null const char* - The partition assignor.
    +
    + +

    rd_kafka_ConsumerGroupDescription_state()

    +
    public static rd_kafka_ConsumerGroupDescription_state ( 
    +    \FFI\CData|null $grpdesc
    + ): int
    +
    +

    Gets state for the grpdesc group.

    + +
    +
    Parameters
    +
    grpdesc \FFI\CData|null const rd_kafka_ConsumerGroupDescription_t* - ) - The group description.
    +
    Returns
    +
    int rd_kafka_consumer_group_state_t - A group state.
    +
    + +

    rd_kafka_ConsumerGroupListing_group_id()

    +
    public static rd_kafka_ConsumerGroupListing_group_id ( 
    +    \FFI\CData|null $grplist
    + ): string|null
    +
    +

    Gets the group id for the grplist group.

    + +
    Remarks
    The lifetime of the returned memory is the same as the lifetime of the grplist object.
    + +
    +
    Parameters
    +
    grplist \FFI\CData|null const rd_kafka_ConsumerGroupListing_t* - ) - The group listing.
    +
    Returns
    +
    string|null const char* - The group id.
    +
    + +

    rd_kafka_ConsumerGroupListing_is_simple_consumer_group()

    +
    public static rd_kafka_ConsumerGroupListing_is_simple_consumer_group ( 
    +    \FFI\CData|null $grplist
    + ): int|null
    +
    +

    Is the grplist group a simple consumer group.

    + +
    +
    Parameters
    +
    grplist \FFI\CData|null const rd_kafka_ConsumerGroupListing_t* - ) - The group listing.
    +
    Returns
    +
    int|null int - 1 if the group is a simple consumer group, else 0.
    +
    + +

    rd_kafka_ConsumerGroupListing_state()

    +
    public static rd_kafka_ConsumerGroupListing_state ( 
    +    \FFI\CData|null $grplist
    + ): int
    +
    +

    Gets state for the grplist group.

    + +
    +
    Parameters
    +
    grplist \FFI\CData|null const rd_kafka_ConsumerGroupListing_t* - ) - The group listing.
    +
    Returns
    +
    int rd_kafka_consumer_group_state_t - A group state.
    +
    + +

    rd_kafka_CreateAcls()

    +
    public static rd_kafka_CreateAcls ( 
    +    \FFI\CData|null $rk, 
    +    \FFI\CData|null $new_acls, 
    +    int|null $new_acls_cnt, 
    +    \FFI\CData|null $options, 
    +    \FFI\CData|null $rkqu
    + ): void
    +
    +

    Create acls as specified by the new_acls array of size new_acls_cnt elements.

    + +

    Supported admin options:

    +
      +
    • rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms
    • +
    +
    Remarks
    The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_CREATEACLS_RESULT
    + +
    +
    Parameters
    +
    rk \FFI\CData|null rd_kafka_t* - Client instance.
    +
    new_acls \FFI\CData|null rd_kafka_AclBinding_t** - Array of new acls to create.
    +
    new_acls_cnt int|null size_t - Number of elements in new_acls array.
    +
    options \FFI\CData|null const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults.
    +
    rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to emit result on.
    +
    + +

    rd_kafka_CreateAcls_result_acls()

    +
    public static rd_kafka_CreateAcls_result_acls ( 
    +    \FFI\CData|null $result, 
    +    \FFI\CData|null $cntp
    + ): \FFI\CData|null
    +
    +

    Get an array of acl results from a CreateAcls result.

    +

    The returned acl result life-time is the same as the result object.

    + +
    +
    Parameters
    +
    result \FFI\CData|null const rd_kafka_CreateAcls_result_t* - CreateAcls result to get acl results from.
    +
    cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_acl_result_t**
    +
    + +

    rd_kafka_CreatePartitions()

    +
    public static rd_kafka_CreatePartitions ( 
    +    \FFI\CData|null $rk, 
    +    \FFI\CData|null $new_parts, 
    +    int|null $new_parts_cnt, 
    +    \FFI\CData|null $options, 
    +    \FFI\CData|null $rkqu
    + ): void
    +
    +

    Create additional partitions for the given topics, as specified by the new_parts array of size new_parts_cnt elements.

    + +

    Supported admin options:

    +
      +
    • rd_kafka_AdminOptions_set_validate_only() - default false
    • +
    • rd_kafka_AdminOptions_set_operation_timeout() - default 60 seconds
    • +
    • rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms
    • +
    +
    Remarks
    The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT
    + +
    +
    Parameters
    +
    rk \FFI\CData|null rd_kafka_t* - Client instance.
    +
    new_parts \FFI\CData|null rd_kafka_NewPartitions_t** - Array of topics for which new partitions are to be created.
    +
    new_parts_cnt int|null size_t - Number of elements in new_parts array.
    +
    options \FFI\CData|null const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults.
    +
    rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to emit result on.
    +
    + +

    rd_kafka_CreatePartitions_result_topics()

    +
    public static rd_kafka_CreatePartitions_result_topics ( 
    +    \FFI\CData|null $result, 
    +    \FFI\CData|null $cntp
    + ): \FFI\CData|null
    +
    +

    Get an array of topic results from a CreatePartitions result.

    +

    The returned topics life-time is the same as the result object.

    + +
    +
    Parameters
    +
    result \FFI\CData|null const rd_kafka_CreatePartitions_result_t* - Result to get topic results from.
    +
    cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_topic_result_t**
    +
    + +

    rd_kafka_CreateTopics()

    +
    public static rd_kafka_CreateTopics ( 
    +    \FFI\CData|null $rk, 
    +    \FFI\CData|null $new_topics, 
    +    int|null $new_topic_cnt, 
    +    \FFI\CData|null $options, 
    +    \FFI\CData|null $rkqu
    + ): void
    +
    +

    Create topics in cluster as specified by the new_topics array of size new_topic_cnt elements.

    + +

    Supported admin options:

    +
      +
    • rd_kafka_AdminOptions_set_validate_only() - default false
    • +
    • rd_kafka_AdminOptions_set_operation_timeout() - default 60 seconds
    • +
    • rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms
    • +
    +
    Remarks
    The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_CREATETOPICS_RESULT
    + +
    +
    Parameters
    +
    rk \FFI\CData|null rd_kafka_t* - Client instance.
    +
    new_topics \FFI\CData|null rd_kafka_NewTopic_t** - Array of new topics to create.
    +
    new_topic_cnt int|null size_t - Number of elements in new_topics array.
    +
    options \FFI\CData|null const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults.
    +
    rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to emit result on.
    +
    + +
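A fuller sketch of the asynchronous call pattern shared by the admin functions on this page. It assumes $rk is an rd_kafka_t* handle, that Library is the placeholder binder class from the earlier examples, and that a Library::new() helper can allocate librdkafka C types (an assumption about the binding, not something this page documents).

$errstr = \FFI::new('char[512]');
$queue  = Library::rd_kafka_queue_new($rk);

// one new topic: 3 partitions, replication factor 1
$topic = Library::rd_kafka_NewTopic_new('my-topic', 3, 1, $errstr, \FFI::sizeof($errstr));

// hypothetical helper building the rd_kafka_NewTopic_t* array expected by the C API
$topics = Library::new('rd_kafka_NewTopic_t*[1]');
$topics[0] = $topic;

Library::rd_kafka_CreateTopics($rk, $topics, 1, null, $queue);

// wait for the RD_KAFKA_EVENT_CREATETOPICS_RESULT event
// (production code should check for a null event after a timeout)
$event  = Library::rd_kafka_queue_poll($queue, 10 * 1000);
$result = Library::rd_kafka_event_CreateTopics_result($event);

$cnt = \FFI::new('size_t');
$topicResults = Library::rd_kafka_CreateTopics_result_topics($result, \FFI::addr($cnt));
for ($i = 0; $i < $cnt->cdata; $i++) {
    printf(
        "%s: %s\n",
        Library::rd_kafka_topic_result_name($topicResults[$i]),
        Library::rd_kafka_topic_result_error_string($topicResults[$i])
    );
}

Library::rd_kafka_event_destroy($event);
Library::rd_kafka_NewTopic_destroy($topic);
Library::rd_kafka_queue_destroy($queue);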

    rd_kafka_CreateTopics_result_topics()

    +
    public static rd_kafka_CreateTopics_result_topics ( 
    +    \FFI\CData|null $result, 
    +    \FFI\CData|null $cntp
    + ): \FFI\CData|null
    +
    +

    Get an array of topic results from a CreateTopics result.

    +

    The returned topics life-time is the same as the result object.

    + +
    +
    Parameters
    +
    result \FFI\CData|null const rd_kafka_CreateTopics_result_t* - Result to get topics from.
    +
    cntp \FFI\CData|null size_t* - Updated to the number of elements in the array.
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_topic_result_t**
    +
    + +

    rd_kafka_DeleteAcls()

    +
    public static rd_kafka_DeleteAcls ( 
    +    \FFI\CData|null $rk, 
    +    \FFI\CData|null $del_acls, 
    +    int|null $del_acls_cnt, 
    +    \FFI\CData|null $options, 
    +    \FFI\CData|null $rkqu
    + ): void
    +
    +

    Delete acls matching the filters provided in the del_acls array of size del_acls_cnt.

    + +

    Supported admin options:

    +
      +
    • rd_kafka_AdminOptions_set_operation_timeout() - default 0
    • +
    +
    Remarks
    The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_DELETEACLS_RESULT
    + +
    +
    Parameters
    +
    rk \FFI\CData|null rd_kafka_t* - Client instance.
    +
    del_acls \FFI\CData|null rd_kafka_AclBindingFilter_t** - Filters for the acls to delete.
    +
    del_acls_cnt int|null size_t - Number of elements in del_acls array.
    +
    options \FFI\CData|null const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults.
    +
    rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to emit result on.
    +
    + +

    rd_kafka_DeleteAcls_result_response_error()

    +
    public static rd_kafka_DeleteAcls_result_response_error ( 
    +    \FFI\CData|null $result_response
    + ): \FFI\CData|null
    +
    +
    +
    Parameters
    +
    result_response \FFI\CData|null const rd_kafka_DeleteAcls_result_response_t* - )
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_error_t* - the error object for the given DeleteAcls result response, or NULL on success.
    +
    + +

    rd_kafka_DeleteAcls_result_response_matching_acls()

    +
    public static rd_kafka_DeleteAcls_result_response_matching_acls ( 
    +    \FFI\CData|null $result_response, 
    +    \FFI\CData|null $matching_acls_cntp
    + ): \FFI\CData|null
    +
    +
    Remarks
    lifetime of the returned acl bindings is the same as the result_response.
    + +
    +
    Parameters
    +
    result_response \FFI\CData|null const rd_kafka_DeleteAcls_result_response_t*
    +
    matching_acls_cntp \FFI\CData|null size_t*
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_AclBinding_t** - the matching acls array for the given DeleteAcls result response.
    +
    + +

    rd_kafka_DeleteAcls_result_responses()

    +
    public static rd_kafka_DeleteAcls_result_responses ( 
    +    \FFI\CData|null $result, 
    +    \FFI\CData|null $cntp
    + ): \FFI\CData|null
    +
    +

    Get an array of DeleteAcls result responses from a DeleteAcls result.

    +

    The returned responses life-time is the same as the result object.

    + +
    +
    Parameters
    +
    result \FFI\CData|null const rd_kafka_DeleteAcls_result_t* - DeleteAcls result to get responses from.
    +
    cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_DeleteAcls_result_response_t**
    +
    + +

    rd_kafka_DeleteConsumerGroupOffsets()

    +
    public static rd_kafka_DeleteConsumerGroupOffsets ( 
    +    \FFI\CData|null $rk, 
    +    \FFI\CData|null $del_grpoffsets, 
    +    int|null $del_grpoffsets_cnt, 
    +    \FFI\CData|null $options, 
    +    \FFI\CData|null $rkqu
    + ): void
    +
    +

    Delete committed offsets for a set of partitions in a consumer group. This will succeed at the partition level only if the group is not actively subscribed to the corresponding topic.

    + +
    Remarks
    The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT
    +
    +The current implementation only supports one group per invocation.
    + +
    +
    Parameters
    +
    rk \FFI\CData|null rd_kafka_t* - Client instance.
    +
    del_grpoffsets \FFI\CData|null rd_kafka_DeleteConsumerGroupOffsets_t** - Array of group committed offsets to delete. MUST only be one single element.
    +
    del_grpoffsets_cnt int|null size_t - Number of elements in del_grpoffsets array. MUST always be 1.
    +
    options \FFI\CData|null const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults.
    +
    rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to emit result on.
    +
    + +

    rd_kafka_DeleteConsumerGroupOffsets_destroy()

    +
    public static rd_kafka_DeleteConsumerGroupOffsets_destroy ( 
    +    \FFI\CData|null $del_grpoffsets
    + ): void
    +
    +
    +
    Parameters
    +
    del_grpoffsets \FFI\CData|null rd_kafka_DeleteConsumerGroupOffsets_t*
    +
    +

    rd_kafka_DeleteConsumerGroupOffsets_destroy_array()

    +
    public static rd_kafka_DeleteConsumerGroupOffsets_destroy_array ( 
    +    \FFI\CData|null $del_grpoffsets, 
    +    int|null $del_grpoffset_cnt
    + ): void
    +
    +
    +
    Parameters
    +
    del_grpoffsets \FFI\CData|null rd_kafka_DeleteConsumerGroupOffsets_t**
    +
    del_grpoffset_cnt int|null size_t
    +
    +

    rd_kafka_DeleteConsumerGroupOffsets_new()

    +
    public static rd_kafka_DeleteConsumerGroupOffsets_new ( 
    +    string|null $group, 
    +    \FFI\CData|null $partitions
    + ): \FFI\CData|null
    +
    +

    Create a new DeleteConsumerGroupOffsets object. This object is later passed to rd_kafka_DeleteConsumerGroupOffsets().

    + +
    +
    Parameters
    +
    group string|null const char* - Consumer group id.
    +
    partitions \FFI\CData|null const rd_kafka_topic_partition_list_t* - Partitions to delete committed offsets for. Only the topic and partition fields are used.
    +
    Returns
    +
    \FFI\CData|null rd_kafka_DeleteConsumerGroupOffsets_t* - a new allocated DeleteConsumerGroupOffsets object. Use rd_kafka_DeleteConsumerGroupOffsets_destroy() to free object when done.
    +
    + +
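An illustrative sketch (placeholder Library class as before) building the single DeleteConsumerGroupOffsets element that rd_kafka_DeleteConsumerGroupOffsets() expects:

// only the topic and partition fields of the list are read
$partitions = Library::rd_kafka_topic_partition_list_new(1);
Library::rd_kafka_topic_partition_list_add($partitions, 'my-topic', 0);

$delOffsets = Library::rd_kafka_DeleteConsumerGroupOffsets_new('my-group', $partitions);

// ... pass a one-element array of $delOffsets to rd_kafka_DeleteConsumerGroupOffsets() ...

Library::rd_kafka_DeleteConsumerGroupOffsets_destroy($delOffsets);
Library::rd_kafka_topic_partition_list_destroy($partitions);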

    rd_kafka_DeleteConsumerGroupOffsets_result_groups()

    +
    public static rd_kafka_DeleteConsumerGroupOffsets_result_groups ( 
    +    \FFI\CData|null $result, 
    +    \FFI\CData|null $cntp
    + ): \FFI\CData|null
    +
    +

    Get an array of results from a DeleteConsumerGroupOffsets result.

    +

    The returned groups life-time is the same as the result object.

    + +
    +
    Parameters
    +
    result \FFI\CData|null const rd_kafka_DeleteConsumerGroupOffsets_result_t* - Result to get group results from.
    +
    cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_group_result_t**
    +
    + +

    rd_kafka_DeleteGroup_destroy()

    +
    public static rd_kafka_DeleteGroup_destroy ( 
    +    \FFI\CData|null $del_group
    + ): void
    +
    +
    +
    Parameters
    +
    del_group \FFI\CData|null rd_kafka_DeleteGroup_t*
    +
    +

    rd_kafka_DeleteGroup_destroy_array()

    +
    public static rd_kafka_DeleteGroup_destroy_array ( 
    +    \FFI\CData|null $del_groups, 
    +    int|null $del_group_cnt
    + ): void
    +
    +
    +
    Parameters
    +
    del_groups \FFI\CData|null rd_kafka_DeleteGroup_t**
    +
    del_group_cnt int|null size_t
    +
    +

    rd_kafka_DeleteGroup_new()

    +
    public static rd_kafka_DeleteGroup_new ( 
    +    string|null $group
    + ): \FFI\CData|null
    +
    +

    Create a new DeleteGroup object. This object is later passed to rd_kafka_DeleteGroups().

    + +
    +
    Parameters
    +
    group string|null const char* - ) - Name of group to delete.
    +
    Returns
    +
    \FFI\CData|null rd_kafka_DeleteGroup_t* - a new allocated DeleteGroup object. Use rd_kafka_DeleteGroup_destroy() to free object when done.
    +
    + +

    rd_kafka_DeleteGroups()

    +
    public static rd_kafka_DeleteGroups ( 
    +    \FFI\CData|null $rk, 
    +    \FFI\CData|null $del_groups, 
    +    int|null $del_group_cnt, 
    +    \FFI\CData|null $options, 
    +    \FFI\CData|null $rkqu
    + ): void
    +
    +

    Delete groups from cluster as specified by the del_groups array of size del_group_cnt elements.

    + +
    Remarks
    The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_DELETEGROUPS_RESULT
    +
    +This function is called deleteConsumerGroups in the Java client.
    + +
    +
    Parameters
    +
    rk \FFI\CData|null rd_kafka_t* - Client instance.
    +
    del_groups \FFI\CData|null rd_kafka_DeleteGroup_t** - Array of groups to delete.
    +
    del_group_cnt int|null size_t - Number of elements in del_groups array.
    +
    options \FFI\CData|null const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults.
    +
    rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to emit result on.
    +
    + +
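A brief sketch of deleting a consumer group, under the same assumptions as the CreateTopics sketch above (including the hypothetical Library::new() typed-array helper):

$queue = Library::rd_kafka_queue_new($rk);

$group  = Library::rd_kafka_DeleteGroup_new('my-group');
$groups = Library::new('rd_kafka_DeleteGroup_t*[1]'); // hypothetical typed-array helper
$groups[0] = $group;

Library::rd_kafka_DeleteGroups($rk, $groups, 1, null, $queue);

// ... poll $queue for the RD_KAFKA_EVENT_DELETEGROUPS_RESULT event ...

Library::rd_kafka_DeleteGroup_destroy($group);
Library::rd_kafka_queue_destroy($queue);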

    rd_kafka_DeleteGroups_result_groups()

    +
    public static rd_kafka_DeleteGroups_result_groups ( 
    +    \FFI\CData|null $result, 
    +    \FFI\CData|null $cntp
    + ): \FFI\CData|null
    +
    +

    Get an array of group results from a DeleteGroups result.

    +

    The returned groups life-time is the same as the result object.

    + +
    +
    Parameters
    +
    result \FFI\CData|null const rd_kafka_DeleteGroups_result_t* - Result to get group results from.
    +
    cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_group_result_t**
    +
    + +

    rd_kafka_DeleteRecords()

    +
    public static rd_kafka_DeleteRecords ( 
    +    \FFI\CData|null $rk, 
    +    \FFI\CData|null $del_records, 
    +    int|null $del_record_cnt, 
    +    \FFI\CData|null $options, 
    +    \FFI\CData|null $rkqu
    + ): void
    +
    +

    Delete records (messages) in topic partitions older than the offsets provided.

    + +

    Supported admin options:

    +
      +
    • rd_kafka_AdminOptions_set_operation_timeout() - default 60 seconds. Controls how long the brokers will wait for records to be deleted.
    • +
    • rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms. Controls how long rdkafka will wait for the request to complete.
    • +
    +
    Remarks
    The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_DELETERECORDS_RESULT
    + +
    +
    Parameters
    +
    rk \FFI\CData|null rd_kafka_t* - Client instance.
    +
    del_records \FFI\CData|null rd_kafka_DeleteRecords_t** - The offsets to delete (up to). Currently only one DeleteRecords_t (but containing multiple offsets) is supported.
    +
    del_record_cnt int|null size_t - The number of elements in del_records, must be 1.
    +
    options \FFI\CData|null const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults.
    +
    rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to emit result on.
    +
    + +

    rd_kafka_DeleteRecords_destroy()

    +
    public static rd_kafka_DeleteRecords_destroy ( 
    +    \FFI\CData|null $del_records
    + ): void
    +
    +
    +
    Parameters
    +
    del_records \FFI\CData|null rd_kafka_DeleteRecords_t*
    +
    +

    rd_kafka_DeleteRecords_destroy_array()

    +
    public static rd_kafka_DeleteRecords_destroy_array ( 
    +    \FFI\CData|null $del_records, 
    +    int|null $del_record_cnt
    + ): void
    +
    +
    +
    Parameters
    +
    del_records \FFI\CData|null rd_kafka_DeleteRecords_t**
    +
    del_record_cnt int|null size_t
    +
    +

    rd_kafka_DeleteRecords_new()

    +
    public static rd_kafka_DeleteRecords_new ( 
    +    \FFI\CData|null $before_offsets
    + ): \FFI\CData|null
    +
    +

    Create a new DeleteRecords object. This object is later passed to rd_kafka_DeleteRecords().

    +

    before_offsets must contain topic, partition, and offset, where offset is the offset before which the messages will be deleted (exclusive). Set offset to RD_KAFKA_OFFSET_END (high-watermark) in order to delete all data in the partition.

    + +
    +
    Parameters
    +
    before_offsets \FFI\CData|null const rd_kafka_topic_partition_list_t* - ) - For each partition delete all messages up to but not including the specified offset.
    +
    Returns
    +
    \FFI\CData|null rd_kafka_DeleteRecords_t* - a new allocated DeleteRecords object. Use rd_kafka_DeleteRecords_destroy() to free object when done.
    +
    + +
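For example (illustrative; Library as before, RD_KAFKA_OFFSET_END assumed to be exposed as a constant by the binding), truncating one partition up to offset 1000 and another one completely:

$offsets = Library::rd_kafka_topic_partition_list_new(2);

$tp = Library::rd_kafka_topic_partition_list_add($offsets, 'my-topic', 0);
$tp->offset = 1000;                      // delete everything before offset 1000

$tp = Library::rd_kafka_topic_partition_list_add($offsets, 'my-topic', 1);
$tp->offset = RD_KAFKA_OFFSET_END;       // delete all data in partition 1

$delRecords = Library::rd_kafka_DeleteRecords_new($offsets);

// ... pass a one-element array of $delRecords to rd_kafka_DeleteRecords() ...

Library::rd_kafka_DeleteRecords_destroy($delRecords);
Library::rd_kafka_topic_partition_list_destroy($offsets);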

    rd_kafka_DeleteRecords_result_offsets()

    +
    public static rd_kafka_DeleteRecords_result_offsets ( 
    +    \FFI\CData|null $result
    + ): \FFI\CData|null
    +
    +

    Get a list of topic and partition results from a DeleteRecords result. The returned objects will contain topic, partition, offset and err. offset will be set to the post-deletion low-watermark (smallest available offset of all live replicas). err will be set per-partition if deletion failed.

    +

    The returned object's life-time is the same as the result object.

    + +
    +
    Parameters
    +
    result \FFI\CData|null const rd_kafka_DeleteRecords_result_t* - )
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_topic_partition_list_t*
    +
    + +

    rd_kafka_DeleteTopic_destroy()

    +
    public static rd_kafka_DeleteTopic_destroy ( 
    +    \FFI\CData|null $del_topic
    + ): void
    +
    +
    +
    Parameters
    +
    del_topic \FFI\CData|null rd_kafka_DeleteTopic_t*
    +
    +

    rd_kafka_DeleteTopic_destroy_array()

    +
    public static rd_kafka_DeleteTopic_destroy_array ( 
    +    \FFI\CData|null $del_topics, 
    +    int|null $del_topic_cnt
    + ): void
    +
    +
    +
    Parameters
    +
    del_topics \FFI\CData|null rd_kafka_DeleteTopic_t**
    +
    del_topic_cnt int|null size_t
    +
    +

    rd_kafka_DeleteTopic_new()

    +
    public static rd_kafka_DeleteTopic_new ( 
    +    string|null $topic
    + ): \FFI\CData|null
    +
    +

    Create a new DeleteTopic object. This object is later passed to rd_kafka_DeleteTopics().

    + +
    +
    Parameters
    +
    topic string|null const char* - ) - Topic name to delete.
    +
    Returns
    +
    \FFI\CData|null rd_kafka_DeleteTopic_t* - a new allocated DeleteTopic object. Use rd_kafka_DeleteTopic_destroy() to free object when done.
    +
    + +

    rd_kafka_DeleteTopics()

    +
    public static rd_kafka_DeleteTopics ( 
    +    \FFI\CData|null $rk, 
    +    \FFI\CData|null $del_topics, 
    +    int|null $del_topic_cnt, 
    +    \FFI\CData|null $options, 
    +    \FFI\CData|null $rkqu
    + ): void
    +
    +

    Delete topics from cluster as specified by the topics array of size topic_cnt elements.

    + +
    Remarks
    The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_DELETETOPICS_RESULT
    + +
    +
    Parameters
    +
    rk \FFI\CData|null rd_kafka_t* - Client instance.
    +
    del_topics \FFI\CData|null rd_kafka_DeleteTopic_t** - Array of topics to delete.
    +
    del_topic_cnt int|null size_t - Number of elements in topics array.
    +
    options \FFI\CData|null const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults.
    +
    rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to emit result on.
    +
    + +
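A short sketch mirroring the CreateTopics example above (same assumptions, including the hypothetical Library::new() helper):

$queue  = Library::rd_kafka_queue_new($rk);
$topic  = Library::rd_kafka_DeleteTopic_new('my-topic');
$topics = Library::new('rd_kafka_DeleteTopic_t*[1]');
$topics[0] = $topic;

Library::rd_kafka_DeleteTopics($rk, $topics, 1, null, $queue);

// ... poll $queue for the RD_KAFKA_EVENT_DELETETOPICS_RESULT event, then inspect
// rd_kafka_DeleteTopics_result_topics() as shown for CreateTopics ...

Library::rd_kafka_DeleteTopic_destroy($topic);
Library::rd_kafka_queue_destroy($queue);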

    rd_kafka_DeleteTopics_result_topics()

    +
    public static rd_kafka_DeleteTopics_result_topics ( 
    +    \FFI\CData|null $result, 
    +    \FFI\CData|null $cntp
    + ): \FFI\CData|null
    +
    +

    Get an array of topic results from a DeleteTopics result.

    +

    The returned topics life-time is the same as the result object.

    + +
    +
    Parameters
    +
    result \FFI\CData|null const rd_kafka_DeleteTopics_result_t* - Result to get topic results from.
    +
    cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_topic_result_t**
    +
    + +

    rd_kafka_DescribeAcls()

    +
    public static rd_kafka_DescribeAcls ( 
    +    \FFI\CData|null $rk, 
    +    \FFI\CData|null $acl_filter, 
    +    \FFI\CData|null $options, 
    +    \FFI\CData|null $rkqu
    + ): void
    +
    +

    Describe acls matching the filter provided in acl_filter.

    + +

    Supported admin options:

    +
      +
    • rd_kafka_AdminOptions_set_operation_timeout() - default 0
    • +
    +
    Remarks
    The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_DESCRIBEACLS_RESULT
    + +
    +
    Parameters
    +
    rk \FFI\CData|null rd_kafka_t* - Client instance.
    +
    acl_filter \FFI\CData|null rd_kafka_AclBindingFilter_t* - Filter for the returned acls.
    +
    options \FFI\CData|null const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults.
    +
    rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to emit result on.
    +
    + +

    rd_kafka_DescribeAcls_result_acls()

    +
    public static rd_kafka_DescribeAcls_result_acls ( 
    +    \FFI\CData|null $result, 
    +    \FFI\CData|null $cntp
    + ): \FFI\CData|null
    +
    +

    Get an array of resource results from a DescribeAcls result.

    +

    DescribeAcls - describe access control lists.

    +

    The returned resources life-time is the same as the result object.

    + +
    +
    Parameters
    +
    result \FFI\CData|null const rd_kafka_DescribeAcls_result_t* - DescribeAcls result to get acls from.
    +
    cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_AclBinding_t**
    +
    + +

    rd_kafka_DescribeCluster()

    +
    public static rd_kafka_DescribeCluster ( 
    +    \FFI\CData|null $rk, 
    +    \FFI\CData|null $options, 
    +    \FFI\CData|null $rkqu
    + ): void
    +
    +
    +
    Parameters
    +
    rk \FFI\CData|null rd_kafka_t*
    +
    options \FFI\CData|null const rd_kafka_AdminOptions_t*
    +
    rkqu \FFI\CData|null rd_kafka_queue_t*
    +
    +

    rd_kafka_DescribeCluster_result_authorized_operations()

    +
    public static rd_kafka_DescribeCluster_result_authorized_operations ( 
    +    \FFI\CData|null $result, 
    +    \FFI\CData|null $cntp
    + ): \FFI\CData|null
    +
    +
    +
    Parameters
    +
    result \FFI\CData|null const rd_kafka_DescribeCluster_result_t*
    +
    cntp \FFI\CData|null size_t*
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_AclOperation_t*
    +
    +

    rd_kafka_DescribeCluster_result_cluster_id()

    +
    public static rd_kafka_DescribeCluster_result_cluster_id ( 
    +    \FFI\CData|null $result
    + ): string|null
    +
    +
    +
    Parameters
    +
    result \FFI\CData|null const rd_kafka_DescribeCluster_result_t*
    +
    Returns
    +
    string|null const char*
    +
    +

    rd_kafka_DescribeCluster_result_controller()

    +
    public static rd_kafka_DescribeCluster_result_controller ( 
    +    \FFI\CData|null $result
    + ): \FFI\CData|null
    +
    +
    +
    Parameters
    +
    result \FFI\CData|null const rd_kafka_DescribeCluster_result_t*
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_Node_t*
    +
    +

    rd_kafka_DescribeCluster_result_nodes()

    +
    public static rd_kafka_DescribeCluster_result_nodes ( 
    +    \FFI\CData|null $result, 
    +    \FFI\CData|null $cntp
    + ): \FFI\CData|null
    +
    +
    +
    Parameters
    +
    result \FFI\CData|null const rd_kafka_DescribeCluster_result_t*
    +
    cntp \FFI\CData|null size_t*
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_Node_t**
    +
    +

    rd_kafka_DescribeConfigs()

    +
    public static rd_kafka_DescribeConfigs ( 
    +    \FFI\CData|null $rk, 
    +    \FFI\CData|null $configs, 
    +    int|null $config_cnt, 
    +    \FFI\CData|null $options, 
    +    \FFI\CData|null $rkqu
    + ): void
    +
    +

    Get configuration for the specified resources in configs.

    +

    The returned configuration includes default values and the rd_kafka_ConfigEntry_is_default() or rd_kafka_ConfigEntry_source() methods may be used to distinguish them from user supplied values.

    +

    The value of config entries where rd_kafka_ConfigEntry_is_sensitive() is true will always be NULL to avoid disclosing sensitive information, such as security settings.

    +

    Configuration entries where rd_kafka_ConfigEntry_is_read_only() is true can't be updated (with rd_kafka_AlterConfigs()).

    +

    Synonym configuration entries are returned if the broker supports it (broker version >= 1.1.0). See rd_kafka_ConfigEntry_synonyms().

    +
    Remarks
    Requires broker version >=0.11.0.0
    +
    +Multiple resources and resource types may be requested, but at most one resource of type RD_KAFKA_RESOURCE_BROKER is allowed per call since these resource requests must be sent to the broker specified in the resource.
    + +
    +
    Parameters
    +
    rk \FFI\CData|null rd_kafka_t*
    +
    configs \FFI\CData|null rd_kafka_ConfigResource_t**
    +
    config_cnt int|null size_t
    +
    options \FFI\CData|null const rd_kafka_AdminOptions_t*
    +
    rkqu \FFI\CData|null rd_kafka_queue_t*
    +
    + +
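An illustrative sketch (same placeholder assumptions) reading back the effective configuration of a topic:

$queue  = Library::rd_kafka_queue_new($rk);
$config = Library::rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_TOPIC, 'my-topic');
$configs = Library::new('rd_kafka_ConfigResource_t*[1]'); // hypothetical typed-array helper
$configs[0] = $config;

Library::rd_kafka_DescribeConfigs($rk, $configs, 1, null, $queue);

$event  = Library::rd_kafka_queue_poll($queue, 10 * 1000); // assume the result arrives in time
$result = Library::rd_kafka_event_DescribeConfigs_result($event);

$resCnt = \FFI::new('size_t');
$resources = Library::rd_kafka_DescribeConfigs_result_resources($result, \FFI::addr($resCnt));

$entryCnt = \FFI::new('size_t');
$entries = Library::rd_kafka_ConfigResource_configs($resources[0], \FFI::addr($entryCnt));
for ($i = 0; $i < $entryCnt->cdata; $i++) {
    printf(
        "%s = %s\n",
        Library::rd_kafka_ConfigEntry_name($entries[$i]),
        Library::rd_kafka_ConfigEntry_value($entries[$i]) ?? '(null)' // NULL for sensitive entries
    );
}

Library::rd_kafka_event_destroy($event);
Library::rd_kafka_ConfigResource_destroy($config);
Library::rd_kafka_queue_destroy($queue);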

    rd_kafka_DescribeConfigs_result_resources()

    +
    public static rd_kafka_DescribeConfigs_result_resources ( 
    +    \FFI\CData|null $result, 
    +    \FFI\CData|null $cntp
    + ): \FFI\CData|null
    +
    +

    Get an array of resource results from a DescribeConfigs result.

    +

    The returned resources life-time is the same as the result object.

    + +
    +
    Parameters
    +
    result \FFI\CData|null const rd_kafka_DescribeConfigs_result_t* - Result object to get resource results from.
    +
    cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_ConfigResource_t**
    +
    + +

    rd_kafka_DescribeConsumerGroups()

    +
    public static rd_kafka_DescribeConsumerGroups ( 
    +    \FFI\CData|null $rk, 
    +    \FFI\CData|null $groups, 
    +    int|null $groups_cnt, 
    +    \FFI\CData|null $options, 
    +    \FFI\CData|null $rkqu
    + ): void
    +
    +

    Describe groups from cluster as specified by the groups array of size groups_cnt elements.

    + +
    Remarks
    The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT
    + +
    +
    Parameters
    +
    rk \FFI\CData|null rd_kafka_t* - Client instance.
    +
    groups \FFI\CData|null const char** - Array of groups to describe.
    +
    groups_cnt int|null size_t - Number of elements in groups array.
    +
    options \FFI\CData|null const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults.
    +
    rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to emit result on.
    +
    + +

    rd_kafka_DescribeConsumerGroups_result_groups()

    +
    public static rd_kafka_DescribeConsumerGroups_result_groups ( 
    +    \FFI\CData|null $result, 
    +    \FFI\CData|null $cntp
    + ): \FFI\CData|null
    +
    +

    Get an array of group results from a DescribeConsumerGroups result.

    +

    The returned groups life-time is the same as the result object.

    + +
    Remarks
    The lifetime of the returned memory is the same as the lifetime of the result object.
    + +
    +
    Parameters
    +
    result \FFI\CData|null const rd_kafka_DescribeConsumerGroups_result_t* - Result to get group results from.
    +
    cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_ConsumerGroupDescription_t**
    +
    + +

    rd_kafka_DescribeTopics()

    +
    public static rd_kafka_DescribeTopics ( 
    +    \FFI\CData|null $rk, 
    +    \FFI\CData|null $topics, 
    +    \FFI\CData|null $options, 
    +    \FFI\CData|null $rkqu
    + ): void
    +
    +
    +
    Parameters
    +
    rk \FFI\CData|null rd_kafka_t*
    +
    topics \FFI\CData|null const rd_kafka_TopicCollection_t*
    +
    options \FFI\CData|null const rd_kafka_AdminOptions_t*
    +
    rkqu \FFI\CData|null rd_kafka_queue_t*
    +
    +

    rd_kafka_DescribeTopics_result_topics()

    +
    public static rd_kafka_DescribeTopics_result_topics ( 
    +    \FFI\CData|null $result, 
    +    \FFI\CData|null $cntp
    + ): \FFI\CData|null
    +
    +
    +
    Parameters
    +
    result \FFI\CData|null const rd_kafka_DescribeTopics_result_t*
    +
    cntp \FFI\CData|null size_t*
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_TopicDescription_t**
    +
    +

    rd_kafka_DescribeUserScramCredentials()

    +
    public static rd_kafka_DescribeUserScramCredentials ( 
    +    \FFI\CData|null $rk, 
    +    \FFI\CData|null $users, 
    +    int|null $user_cnt, 
    +    \FFI\CData|null $options, 
    +    \FFI\CData|null $rkqu
    + ): void
    +
    +

    Describe SASL/SCRAM credentials. This operation is supported by brokers with version 2.7.0 or higher.

    + +
    +
    Parameters
    +
    rk \FFI\CData|null rd_kafka_t* - Client instance.
    +
    users \FFI\CData|null const char** - The users for which credentials are to be described. All users’ credentials are described if NULL.
    +
    user_cnt int|null size_t - Number of elements in users array.
    +
    options \FFI\CData|null const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults.
    +
    rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to emit result on.
    +
    + +

    rd_kafka_DescribeUserScramCredentials_result_descriptions()

    +
    public static rd_kafka_DescribeUserScramCredentials_result_descriptions ( 
    +    \FFI\CData|null $result, 
    +    \FFI\CData|null $cntp
    + ): \FFI\CData|null
    +
    +

    Get an array of descriptions from a DescribeUserScramCredentials result.

    +

    The returned value life-time is the same as the result object.

    + +
    +
    Parameters
    +
    result \FFI\CData|null const rd_kafka_DescribeUserScramCredentials_result_t* - Result to get descriptions from.
    +
    cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_UserScramCredentialsDescription_t**
    +
    + +

    rd_kafka_IncrementalAlterConfigs()

    +
    public static rd_kafka_IncrementalAlterConfigs ( 
    +    \FFI\CData|null $rk, 
    +    \FFI\CData|null $configs, 
    +    int|null $config_cnt, 
    +    \FFI\CData|null $options, 
    +    \FFI\CData|null $rkqu
    + ): void
    +
    +

    Incrementally update the configuration for the specified resources. Updates are not transactional, so they may succeed for some resources while failing for others. The configs for a particular resource are updated atomically, executing the corresponding incremental operations on the provided configurations.

    +
    Remarks
    Requires broker version >=2.3.0
    +
    +Multiple resources and resource types may be set, but at most one resource of type RD_KAFKA_RESOURCE_BROKER is allowed per call since these resource requests must be sent to the broker specified in the resource. Broker option will be ignored in this case.
    + +
    +
    Parameters
    +
    rk \FFI\CData|null rd_kafka_t* - Client instance.
    +
    configs \FFI\CData|null rd_kafka_ConfigResource_t** - Array of config entries to alter.
    +
    config_cnt int|null size_t - Number of elements in configs array.
    +
    options \FFI\CData|null const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults.
    +
    rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to emit result on.
    +
    + +

    rd_kafka_IncrementalAlterConfigs_result_resources()

    +
    public static rd_kafka_IncrementalAlterConfigs_result_resources ( 
    +    \FFI\CData|null $result, 
    +    \FFI\CData|null $cntp
    + ): \FFI\CData|null
    +
    +

    Get an array of resource results from a IncrementalAlterConfigs result.

    +

    Use rd_kafka_ConfigResource_error() and rd_kafka_ConfigResource_error_string() to extract per-resource error results on the returned array elements.

    +

    The returned object life-times are the same as the result object.

    + +
    +
    Parameters
    +
    result \FFI\CData|null const rd_kafka_IncrementalAlterConfigs_result_t* - Result object to get resource results from.
    +
    cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_ConfigResource_t** - an array of ConfigResource elements, or NULL if not available.
    +
    + +
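
    As noted above, per-resource outcomes are read from the returned array with rd_kafka_ConfigResource_error() and rd_kafka_ConfigResource_error_string(). A hedged sketch of that read loop, assuming the binding class is \RdKafka\FFI\Library (illustrative name) and that $result is the const rd_kafka_IncrementalAlterConfigs_result_t* obtained from the result event:

        <?php
        // Sketch only: $result and the Library class name are assumptions.
        use RdKafka\FFI\Library;

        $cnt = \FFI::new('size_t');
        $resources = Library::rd_kafka_IncrementalAlterConfigs_result_resources($result, \FFI::addr($cnt));

        for ($i = 0; $i < $cnt->cdata; $i++) {
            $resource = $resources[$i];
            // Per-resource error check; 0 corresponds to RD_KAFKA_RESP_ERR_NO_ERROR.
            if (Library::rd_kafka_ConfigResource_error($resource) !== 0) {
                echo Library::rd_kafka_ConfigResource_error_string($resource), PHP_EOL;
            }
        }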

    rd_kafka_ListConsumerGroupOffsets()

    +
    public static rd_kafka_ListConsumerGroupOffsets ( 
    +    \FFI\CData|null $rk, 
    +    \FFI\CData|null $list_grpoffsets, 
    +    int|null $list_grpoffsets_cnt, 
    +    \FFI\CData|null $options, 
    +    \FFI\CData|null $rkqu
    + ): void
    +
    +

    List committed offsets for a set of partitions in a consumer group.

    + +
    Remarks
    The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT
    +
    +The current implementation only supports one group per invocation.
    + +
    +
    Parameters
    +
    rk \FFI\CData|null rd_kafka_t* - Client instance.
    +
    list_grpoffsets \FFI\CData|null rd_kafka_ListConsumerGroupOffsets_t** - Array of group committed offsets to list. MUST only be one single element.
    +
    list_grpoffsets_cnt int|null size_t - Number of elements in list_grpoffsets array. MUST always be 1.
    +
    options \FFI\CData|null const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults.
    +
    rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to emit result on.
    +
    + +

    rd_kafka_ListConsumerGroupOffsets_destroy()

    +
    public static rd_kafka_ListConsumerGroupOffsets_destroy ( 
    +    \FFI\CData|null $list_grpoffsets
    + ): void
    +
    +
    +
    Parameters
    +
    list_grpoffsets \FFI\CData|null rd_kafka_ListConsumerGroupOffsets_t*
    +
    +

    rd_kafka_ListConsumerGroupOffsets_destroy_array()

    +
    public static rd_kafka_ListConsumerGroupOffsets_destroy_array ( 
    +    \FFI\CData|null $list_grpoffsets, 
    +    int|null $list_grpoffset_cnt
    + ): void
    +
    +
    +
    Parameters
    +
    list_grpoffsets \FFI\CData|null rd_kafka_ListConsumerGroupOffsets_t**
    +
    list_grpoffset_cnt int|null size_t
    +
    +

    rd_kafka_ListConsumerGroupOffsets_new()

    +
    public static rd_kafka_ListConsumerGroupOffsets_new ( 
    +    string|null $group_id, 
    +    \FFI\CData|null $partitions
    + ): \FFI\CData|null
    +
    +

    Create a new ListConsumerGroupOffsets object. This object is later passed to rd_kafka_ListConsumerGroupOffsets().

    + +
    +
    Parameters
    +
    group_id string|null const char* - Consumer group id.
    +
    partitions \FFI\CData|null const rd_kafka_topic_partition_list_t* - Partitions to list committed offsets for. Only the topic and partition fields are used.
    +
    Returns
    +
    \FFI\CData|null rd_kafka_ListConsumerGroupOffsets_t* - a new allocated ListConsumerGroupOffsets object. Use rd_kafka_ListConsumerGroupOffsets_destroy() to free object when done.
    +
    + +

    rd_kafka_ListConsumerGroupOffsets_result_groups()

    +
    public static rd_kafka_ListConsumerGroupOffsets_result_groups ( 
    +    \FFI\CData|null $result, 
    +    \FFI\CData|null $cntp
    + ): \FFI\CData|null
    +
    +

    Get an array of results from a ListConsumerGroupOffsets result.

    +

    The returned groups life-time is the same as the result object.

    + +
    Remarks
    The lifetime of the returned memory is the same as the lifetime of the result object.
    + +
    +
    Parameters
    +
    result \FFI\CData|null const rd_kafka_ListConsumerGroupOffsets_result_t* - Result to get group results from.
    +
    cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_group_result_t**
    +
    + +
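
    Putting these pieces together, a hedged sketch of requesting committed offsets for a single group is shown below. It assumes the binding class \RdKafka\FFI\Library (illustrative name), existing $rk (rd_kafka_t*) and $queue (rd_kafka_queue_t*) handles, and librdkafka's topic partition list helpers (rd_kafka_topic_partition_list_new/_add/_destroy), which are not documented on this page.

        <?php
        // Sketch only: Library, $rk, $queue and the partition list helpers are assumptions.
        use RdKafka\FFI\Library;

        // Partitions to query - only the topic and partition fields are used.
        $partitions = Library::rd_kafka_topic_partition_list_new(1);
        Library::rd_kafka_topic_partition_list_add($partitions, 'my-topic', 0);

        $request = Library::rd_kafka_ListConsumerGroupOffsets_new('my-group', $partitions);

        // The request array MUST contain exactly one element (see the remark above);
        // FFI::addr() turns the single pointer into the expected pointer-to-pointer.
        Library::rd_kafka_ListConsumerGroupOffsets($rk, \FFI::addr($request), 1, null, $queue);

        // ...poll $queue for the RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT event,
        // then read it with rd_kafka_ListConsumerGroupOffsets_result_groups()...

        Library::rd_kafka_ListConsumerGroupOffsets_destroy($request);
        Library::rd_kafka_topic_partition_list_destroy($partitions);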

    rd_kafka_ListConsumerGroups()

    +
    public static rd_kafka_ListConsumerGroups ( 
    +    \FFI\CData|null $rk, 
    +    \FFI\CData|null $options, 
    +    \FFI\CData|null $rkqu
    + ): void
    +
    +

    List the consumer groups available in the cluster.

    + +
    Remarks
    The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT
    + +
    +
    Parameters
    +
    rk \FFI\CData|null rd_kafka_t* - Client instance.
    +
    options \FFI\CData|null const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults.
    +
    rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to emit result on.
    +
    + +

    rd_kafka_ListConsumerGroups_result_errors()

    +
    public static rd_kafka_ListConsumerGroups_result_errors ( 
    +    \FFI\CData|null $result, 
    +    \FFI\CData|null $cntp
    + ): \FFI\CData|null
    +
    +

    Get an array of errors from a ListConsumerGroups call result.

    +

    The returned errors life-time is the same as the result object.

    + +
    Remarks
    The lifetime of the returned memory is the same as the lifetime of the result object.
    + +
    +
    Parameters
    +
    result \FFI\CData|null const rd_kafka_ListConsumerGroups_result_t* - ListConsumerGroups result.
    +
    cntp \FFI\CData|null size_t* - Is updated to the number of elements in the array.
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_error_t** - Array of errors in result.
    +
    + +

    rd_kafka_ListConsumerGroups_result_valid()

    +
    public static rd_kafka_ListConsumerGroups_result_valid ( 
    +    \FFI\CData|null $result, 
    +    \FFI\CData|null $cntp
    + ): \FFI\CData|null
    +
    +

    Get an array of valid list groups from a ListConsumerGroups result.

    +

    The returned groups life-time is the same as the result object.

    + +
    Remarks
    The lifetime of the returned memory is the same as the lifetime of the result object.
    + +
    +
    Parameters
    +
    result \FFI\CData|null const rd_kafka_ListConsumerGroups_result_t* - Result to get group results from.
    +
    cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_ConsumerGroupListing_t**
    +
    + +
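
    A short sketch of issuing the request and reading both result arrays, under the same assumptions as the other examples on this page (binding class name \RdKafka\FFI\Library, existing $rk and $queue handles, $result taken from the RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT event):

        <?php
        // Sketch only: Library, $rk, $queue and $result are assumptions.
        use RdKafka\FFI\Library;

        Library::rd_kafka_ListConsumerGroups($rk, null, $queue);

        // ...after polling $queue and extracting $result from the result event:
        $validCnt = \FFI::new('size_t');
        $errorCnt = \FFI::new('size_t');
        $listings = Library::rd_kafka_ListConsumerGroups_result_valid($result, \FFI::addr($validCnt));
        $errors   = Library::rd_kafka_ListConsumerGroups_result_errors($result, \FFI::addr($errorCnt));

        printf("%d groups listed, %d broker-level errors\n", $validCnt->cdata, $errorCnt->cdata);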

    rd_kafka_ListOffsets()

    +
    public static rd_kafka_ListOffsets ( 
    +    \FFI\CData|null $rk, 
    +    \FFI\CData|null $topic_partitions, 
    +    \FFI\CData|null $options, 
    +    \FFI\CData|null $rkqu
    + ): void
    +
    +
    +
    Parameters
    +
    rk \FFI\CData|null rd_kafka_t*
    +
    topic_partitions \FFI\CData|null rd_kafka_topic_partition_list_t*
    +
    options \FFI\CData|null const rd_kafka_AdminOptions_t*
    +
    rkqu \FFI\CData|null rd_kafka_queue_t*
    +
    +

    rd_kafka_ListOffsetsResultInfo_timestamp()

    +
    public static rd_kafka_ListOffsetsResultInfo_timestamp ( 
    +    \FFI\CData|null $result_info
    + ): int|null
    +
    +
    +
    Parameters
    +
    result_info \FFI\CData|null const rd_kafka_ListOffsetsResultInfo_t*
    +
    Returns
    +
    int|null int64_t
    +
    +

    rd_kafka_ListOffsetsResultInfo_topic_partition()

    +
    public static rd_kafka_ListOffsetsResultInfo_topic_partition ( 
    +    \FFI\CData|null $result_info
    + ): \FFI\CData|null
    +
    +
    +
    Parameters
    +
    result_info \FFI\CData|null const rd_kafka_ListOffsetsResultInfo_t*
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_topic_partition_t*
    +
    +

    rd_kafka_ListOffsets_result_infos()

    +
    public static rd_kafka_ListOffsets_result_infos ( 
    +    \FFI\CData|null $result, 
    +    \FFI\CData|null $cntp
    + ): \FFI\CData|null
    +
    +
    +
    Parameters
    +
    result \FFI\CData|null const rd_kafka_ListOffsets_result_t*
    +
    cntp \FFI\CData|null size_t*
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_ListOffsetsResultInfo_t**
    +
    +

    rd_kafka_MemberAssignment_partitions()

    +
    public static rd_kafka_MemberAssignment_partitions ( 
    +    \FFI\CData|null $assignment
    + ): \FFI\CData|null
    +
    +

    Gets assigned partitions of a member assignment.

    + +
    Remarks
    The lifetime of the returned memory is the same as the lifetime of the assignment object.
    + +
    +
    Parameters
    +
    assignment \FFI\CData|null const rd_kafka_MemberAssignment_t* - ) - The group member assignment.
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_topic_partition_list_t* - The assigned partitions.
    +
    + +

    rd_kafka_MemberDescription_assignment()

    +
    public static rd_kafka_MemberDescription_assignment ( 
    +    \FFI\CData|null $member
    + ): \FFI\CData|null
    +
    +

    Gets assignment of member.

    + +
    Remarks
    The lifetime of the returned memory is the same as the lifetime of the member object.
    + +
    +
    Parameters
    +
    member \FFI\CData|null const rd_kafka_MemberDescription_t* - ) - The group member.
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_MemberAssignment_t* - The member assignment.
    +
    + +

    rd_kafka_MemberDescription_client_id()

    +
    public static rd_kafka_MemberDescription_client_id ( 
    +    \FFI\CData|null $member
    + ): string|null
    +
    +

    Gets client id of member.

    + +
    Remarks
    The lifetime of the returned memory is the same as the lifetime of the member object.
    + +
    +
    Parameters
    +
    member \FFI\CData|null const rd_kafka_MemberDescription_t* - ) - The group member.
    +
    Returns
    +
    string|null const char* - The client id.
    +
    + +

    rd_kafka_MemberDescription_consumer_id()

    +
    public static rd_kafka_MemberDescription_consumer_id ( 
    +    \FFI\CData|null $member
    + ): string|null
    +
    +

    Gets consumer id of member.

    + +
    Remarks
    The lifetime of the returned memory is the same as the lifetime of the member object.
    + +
    +
    Parameters
    +
    member \FFI\CData|null const rd_kafka_MemberDescription_t* - ) - The group member.
    +
    Returns
    +
    string|null const char* - The consumer id.
    +
    + +

    rd_kafka_MemberDescription_group_instance_id()

    +
    public static rd_kafka_MemberDescription_group_instance_id ( 
    +    \FFI\CData|null $member
    + ): string|null
    +
    +

    Gets group instance id of member.

    + +
    Remarks
    The lifetime of the returned memory is the same as the lifetime of the member object.
    + +
    +
    Parameters
    +
    member \FFI\CData|null const rd_kafka_MemberDescription_t* - ) - The group member.
    +
    Returns
    +
    string|null const char* - The group instance id, or NULL if not available.
    +
    + +

    rd_kafka_MemberDescription_host()

    +
    public static rd_kafka_MemberDescription_host ( 
    +    \FFI\CData|null $member
    + ): string|null
    +
    +

    Gets host of member.

    + +
    Remarks
    The lifetime of the returned memory is the same as the lifetime of the member object.
    + +
    +
    Parameters
    +
    member \FFI\CData|null const rd_kafka_MemberDescription_t* - ) - The group member.
    +
    Returns
    +
    string|null const char* - The host.
    +
    + +

    rd_kafka_NewPartitions_destroy()

    +
    public static rd_kafka_NewPartitions_destroy ( 
    +    \FFI\CData|null $new_parts
    + ): void
    +
    +
    +
    Parameters
    +
    new_parts \FFI\CData|null rd_kafka_NewPartitions_t*
    +
    +

    rd_kafka_NewPartitions_destroy_array()

    +
    public static rd_kafka_NewPartitions_destroy_array ( 
    +    \FFI\CData|null $new_parts, 
    +    int|null $new_parts_cnt
    + ): void
    +
    +
    +
    Parameters
    +
    new_parts \FFI\CData|null rd_kafka_NewPartitions_t**
    +
    new_parts_cnt int|null size_t
    +
    +

    rd_kafka_NewPartitions_new()

    +
    public static rd_kafka_NewPartitions_new ( 
    +    string|null $topic, 
    +    int|null $new_total_cnt, 
    +    \FFI\CData|null $errstr, 
    +    int|null $errstr_size
    + ): \FFI\CData|null
    +
    +

    Create a new NewPartitions. This object is later passed to rd_kafka_CreatePartitions() to increase the number of partitions to new_total_cnt for an existing topic.

    + +
    +
    Parameters
    +
    topic string|null const char* - Topic name to create more partitions for.
    +
    new_total_cnt int|null size_t - Increase the topic’s partition count to this value.
    +
    errstr \FFI\CData|null char* - A human readable error string (nul-terminated) is written to this location that must be of at least errstr_size bytes. The errstr is only written in case of error.
    +
    errstr_size int|null size_t - Writable size in errstr.
    +
    Returns
    +
    \FFI\CData|null rd_kafka_NewPartitions_t* - a new allocated NewPartitions object, or NULL if the input parameters are invalid. Use rd_kafka_NewPartitions_destroy() to free object when done.
    +
    + +
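
    A hedged sketch of growing a topic to 12 partitions. The errstr buffer handling follows the description above; the binding class name \RdKafka\FFI\Library and the $rk/$queue handles are assumptions, and rd_kafka_CreatePartitions() is the admin call this object is built for.

        <?php
        // Sketch only: Library, $rk and $queue are assumptions.
        use RdKafka\FFI\Library;

        $errstr = \FFI::new('char[512]');
        $newParts = Library::rd_kafka_NewPartitions_new('my-topic', 12, $errstr, 512);
        if ($newParts === null) {
            // errstr is only written on error.
            throw new \RuntimeException(\FFI::string($errstr));
        }

        Library::rd_kafka_CreatePartitions($rk, \FFI::addr($newParts), 1, null, $queue);

        // Free the request object once the result has been handled.
        Library::rd_kafka_NewPartitions_destroy($newParts);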

    rd_kafka_NewPartitions_set_replica_assignment()

    +
    public static rd_kafka_NewPartitions_set_replica_assignment ( 
    +    \FFI\CData|null $new_parts, 
    +    int|null $new_partition_idx, 
    +    \FFI\CData|null $broker_ids, 
    +    int|null $broker_id_cnt, 
    +    \FFI\CData|null $errstr, 
    +    int|null $errstr_size
    + ): int
    +
    +

    Set the replica (broker id) assignment for new_partition_idx to the replica set in broker_ids (of broker_id_cnt elements).

    +
    Remarks
    An application must either set the replica assignment for all new partitions, or none.
    +
    +If called, this function must be called consecutively for each new partition being created, where new_partition_idx 0 is the first new partition, 1 is the second, and so on.
    +
    +broker_id_cnt should match the topic's replication factor.
    +
    +Use rd_kafka_metadata() to retrieve the list of brokers in the cluster.
    + +
    See also
    rd_kafka_AdminOptions_set_validate_only()
    + +
    +
    Parameters
    +
    new_parts \FFI\CData|null rd_kafka_NewPartitions_t*
    +
    new_partition_idx int|null int32_t
    +
    broker_ids \FFI\CData|null int32_t*
    +
    broker_id_cnt int|null size_t
    +
    errstr \FFI\CData|null char*
    +
    errstr_size int|null size_t
    +
    Returns
    +
    int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success, or an error code if the arguments were invalid.
    +
    + +

    rd_kafka_NewTopic_destroy()

    +
    public static rd_kafka_NewTopic_destroy ( 
    +    \FFI\CData|null $new_topic
    + ): void
    +
    +
    +
    Parameters
    +
    new_topic \FFI\CData|null rd_kafka_NewTopic_t*
    +
    +

    rd_kafka_NewTopic_destroy_array()

    +
    public static rd_kafka_NewTopic_destroy_array ( 
    +    \FFI\CData|null $new_topics, 
    +    int|null $new_topic_cnt
    + ): void
    +
    +
    +
    Parameters
    +
    new_topics \FFI\CData|null rd_kafka_NewTopic_t**
    +
    new_topic_cnt int|null size_t
    +
    +

    rd_kafka_NewTopic_new()

    +
    public static rd_kafka_NewTopic_new ( 
    +    string|null $topic, 
    +    int|null $num_partitions, 
    +    int|null $replication_factor, 
    +    \FFI\CData|null $errstr, 
    +    int|null $errstr_size
    + ): \FFI\CData|null
    +
    +

    Create a new NewTopic object. This object is later passed to rd_kafka_CreateTopics().

    + +
    +
    Parameters
    +
    topic string|null const char* - Topic name to create.
    +
    num_partitions int|null int - Number of partitions in topic, or -1 to use the broker’s default partition count (>= 2.4.0).
    +
    replication_factor int|null int - Default replication factor for the topic’s partitions, or -1 to use the broker’s default replication factor (>= 2.4.0) or if set_replica_assignment() will be used.
    +
    errstr \FFI\CData|null char* - A human readable error string (nul-terminated) is written to this location that must be of at least errstr_size bytes. The errstr is only written in case of error.
    +
    errstr_size int|null size_t - Writable size in errstr.
    +
    Returns
    +
    \FFI\CData|null rd_kafka_NewTopic_t* - a new allocated NewTopic object, or NULL if the input parameters are invalid. Use rd_kafka_NewTopic_destroy() to free object when done.
    +
    + +

    rd_kafka_NewTopic_set_config()

    +
    public static rd_kafka_NewTopic_set_config ( 
    +    \FFI\CData|null $new_topic, 
    +    string|null $name, 
    +    string|null $value
    + ): int
    +
    +

    Set (broker-side) topic configuration name/value pair.

    +
    Remarks
    The name and value are not validated by the client, the validation takes place on the broker.
    + +
    See also
    rd_kafka_AdminOptions_set_validate_only()
    +
    +http://kafka.apache.org/documentation.html#topicconfigs
    + +
    +
    Parameters
    +
    new_topic \FFI\CData|null rd_kafka_NewTopic_t*
    +
    name string|null const char*
    +
    value string|null const char*
    +
    Returns
    +
    int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success, or an error code if the arguments were invalid.
    +
    + +
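
    A hedged sketch combining rd_kafka_NewTopic_new() and rd_kafka_NewTopic_set_config() to create a compacted topic, again assuming the binding class \RdKafka\FFI\Library and existing $rk/$queue handles; rd_kafka_CreateTopics() is the admin call named in the NewTopic_new() description above.

        <?php
        // Sketch only: Library, $rk and $queue are assumptions.
        use RdKafka\FFI\Library;

        $errstr = \FFI::new('char[512]');
        $topic = Library::rd_kafka_NewTopic_new('my-topic', 3, 1, $errstr, 512);
        if ($topic === null) {
            throw new \RuntimeException(\FFI::string($errstr));
        }

        // Broker-side topic config - validated by the broker, not by the client.
        Library::rd_kafka_NewTopic_set_config($topic, 'cleanup.policy', 'compact');

        Library::rd_kafka_CreateTopics($rk, \FFI::addr($topic), 1, null, $queue);

        // Free the request object once the result has been handled.
        Library::rd_kafka_NewTopic_destroy($topic);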

    rd_kafka_NewTopic_set_replica_assignment()

    +
    public static rd_kafka_NewTopic_set_replica_assignment ( 
    +    \FFI\CData|null $new_topic, 
    +    int|null $partition, 
    +    \FFI\CData|null $broker_ids, 
    +    int|null $broker_id_cnt, 
    +    \FFI\CData|null $errstr, 
    +    int|null $errstr_size
    + ): int
    +
    +

    Set the replica (broker) assignment for partition to the replica set in broker_ids (of broker_id_cnt elements).

    +
    Remarks
    When this method is used, rd_kafka_NewTopic_new() must have been called with a replication_factor of -1.
    +
    +An application must either set the replica assignment for all new partitions, or none.
    +
    +If called, this function must be called consecutively for each partition, starting at 0.
    +
    +Use rd_kafka_metadata() to retrieve the list of brokers in the cluster.
    + +
    See also
    rd_kafka_AdminOptions_set_validate_only()
    + +
    +
    Parameters
    +
    new_topic \FFI\CData|null rd_kafka_NewTopic_t*
    +
    partition int|null int32_t
    +
    broker_ids \FFI\CData|null int32_t*
    +
    broker_id_cnt int|null size_t
    +
    errstr \FFI\CData|null char*
    +
    errstr_size int|null size_t
    +
    Returns
    +
    int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success, or an error code if the arguments were invalid.
    +
    + +

    rd_kafka_Node_host()

    +
    public static rd_kafka_Node_host ( 
    +    \FFI\CData|null $node
    + ): string|null
    +
    +

    Get the host of node.

    + +
    Remarks
    The lifetime of the returned memory is the same as the lifetime of the node object.
    + +
    +
    Parameters
    +
    node \FFI\CData|null const rd_kafka_Node_t* - ) - The Node instance.
    +
    Returns
    +
    string|null const char* - The node host.
    +
    + +

    rd_kafka_Node_id()

    +
    public static rd_kafka_Node_id ( 
    +    \FFI\CData|null $node
    + ): int|null
    +
    +

    Get the id of node.

    + +
    +
    Parameters
    +
    node \FFI\CData|null const rd_kafka_Node_t* - ) - The Node instance.
    +
    Returns
    +
    int|null int - The node id.
    +
    + +

    rd_kafka_Node_port()

    +
    public static rd_kafka_Node_port ( 
    +    \FFI\CData|null $node
    + ): int|null
    +
    +

    Get the port of node.

    + +
    +
    Parameters
    +
    node \FFI\CData|null const rd_kafka_Node_t* - ) - The Node instance.
    +
    Returns
    +
    int|null uint16_t - The node port.
    +
    + +

    rd_kafka_Node_rack()

    +
    public static rd_kafka_Node_rack ( 
    +    \FFI\CData|null $node
    + ): string|null
    +
    +
    +
    Parameters
    +
    node \FFI\CData|null const rd_kafka_Node_t*
    +
    Returns
    +
    string|null const char*
    +
    +

    rd_kafka_ResourcePatternType_name()

    +
    public static rd_kafka_ResourcePatternType_name ( 
    +    int $resource_pattern_type
    + ): string|null
    +
    +
    +
    Parameters
    +
    resource_pattern_type int rd_kafka_ResourcePatternType_t - )
    +
    Returns
    +
    string|null const char* - a string representation of the resource_pattern_type
    +
    + +

    rd_kafka_ResourceType_name()

    +
    public static rd_kafka_ResourceType_name ( 
    +    int $restype
    + ): string|null
    +
    +
    +
    Parameters
    +
    restype int rd_kafka_ResourceType_t - )
    +
    Returns
    +
    string|null const char* - a string representation of the restype
    +
    + +

    rd_kafka_ScramCredentialInfo_iterations()

    +
    public static rd_kafka_ScramCredentialInfo_iterations ( 
    +    \FFI\CData|null $scram_credential_info
    + ): int|null
    +
    +
    +
    Parameters
    +
    scram_credential_info \FFI\CData|null const rd_kafka_ScramCredentialInfo_t*
    +
    Returns
    +
    int|null int32_t
    +
    +

    rd_kafka_ScramCredentialInfo_mechanism()

    +
    public static rd_kafka_ScramCredentialInfo_mechanism ( 
    +    \FFI\CData|null $scram_credential_info
    + ): int
    +
    +
    +
    Parameters
    +
    scram_credential_info \FFI\CData|null const rd_kafka_ScramCredentialInfo_t*
    +
    Returns
    +
    int rd_kafka_ScramMechanism_t
    +
    +

    rd_kafka_TopicCollection_destroy()

    +
    public static rd_kafka_TopicCollection_destroy ( 
    +    \FFI\CData|null $topics
    + ): void
    +
    +
    +
    Parameters
    +
    topics \FFI\CData|null rd_kafka_TopicCollection_t*
    +
    +

    rd_kafka_TopicCollection_of_topic_names()

    +
    public static rd_kafka_TopicCollection_of_topic_names ( 
    +    \FFI\CData|null $topics, 
    +    int|null $topics_cnt
    + ): \FFI\CData|null
    +
    +
    +
    Parameters
    +
    topics \FFI\CData|null const char**
    +
    topics_cnt int|null size_t
    +
    Returns
    +
    \FFI\CData|null rd_kafka_TopicCollection_t*
    +
    +

    rd_kafka_TopicDescription_authorized_operations()

    +
    public static rd_kafka_TopicDescription_authorized_operations ( 
    +    \FFI\CData|null $topicdesc, 
    +    \FFI\CData|null $cntp
    + ): \FFI\CData|null
    +
    +
    +
    Parameters
    +
    topicdesc \FFI\CData|null const rd_kafka_TopicDescription_t*
    +
    cntp \FFI\CData|null size_t*
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_AclOperation_t*
    +
    +

    rd_kafka_TopicDescription_error()

    +
    public static rd_kafka_TopicDescription_error ( 
    +    \FFI\CData|null $topicdesc
    + ): \FFI\CData|null
    +
    +
    +
    Parameters
    +
    topicdesc \FFI\CData|null const rd_kafka_TopicDescription_t*
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_error_t*
    +
    +

    rd_kafka_TopicDescription_is_internal()

    +
    public static rd_kafka_TopicDescription_is_internal ( 
    +    \FFI\CData|null $topicdesc
    + ): int|null
    +
    +
    +
    Parameters
    +
    topicdesc \FFI\CData|null const rd_kafka_TopicDescription_t*
    +
    Returns
    +
    int|null int
    +
    +

    rd_kafka_TopicDescription_name()

    +
    public static rd_kafka_TopicDescription_name ( 
    +    \FFI\CData|null $topicdesc
    + ): string|null
    +
    +
    +
    Parameters
    +
    topicdesc \FFI\CData|null const rd_kafka_TopicDescription_t*
    +
    Returns
    +
    string|null const char*
    +
    +

    rd_kafka_TopicDescription_partitions()

    +
    public static rd_kafka_TopicDescription_partitions ( 
    +    \FFI\CData|null $topicdesc, 
    +    \FFI\CData|null $cntp
    + ): \FFI\CData|null
    +
    +
    +
    Parameters
    +
    topicdesc \FFI\CData|null const rd_kafka_TopicDescription_t*
    +
    cntp \FFI\CData|null size_t*
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_TopicPartitionInfo_t**
    +
    +

    rd_kafka_TopicDescription_topic_id()

    +
    public static rd_kafka_TopicDescription_topic_id ( 
    +    \FFI\CData|null $topicdesc
    + ): \FFI\CData|null
    +
    +
    +
    Parameters
    +
    topicdesc \FFI\CData|null const rd_kafka_TopicDescription_t*
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_Uuid_t*
    +
    +

    rd_kafka_TopicPartitionInfo_isr()

    +
    public static rd_kafka_TopicPartitionInfo_isr ( 
    +    \FFI\CData|null $partition, 
    +    \FFI\CData|null $cntp
    + ): \FFI\CData|null
    +
    +
    +
    Parameters
    +
    partition \FFI\CData|null const rd_kafka_TopicPartitionInfo_t*
    +
    cntp \FFI\CData|null size_t*
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_Node_t**
    +
    +

    rd_kafka_TopicPartitionInfo_leader()

    +
    public static rd_kafka_TopicPartitionInfo_leader ( 
    +    \FFI\CData|null $partition
    + ): \FFI\CData|null
    +
    +
    +
    Parameters
    +
    partition \FFI\CData|null const rd_kafka_TopicPartitionInfo_t*
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_Node_t*
    +
    +

    rd_kafka_TopicPartitionInfo_partition()

    +
    public static rd_kafka_TopicPartitionInfo_partition ( 
    +    \FFI\CData|null $partition
    + ): int|null
    +
    +
    +
    Parameters
    +
    partition \FFI\CData|null const rd_kafka_TopicPartitionInfo_t*
    +
    Returns
    +
    int|null const int
    +
    +

    rd_kafka_TopicPartitionInfo_replicas()

    +
    public static rd_kafka_TopicPartitionInfo_replicas ( 
    +    \FFI\CData|null $partition, 
    +    \FFI\CData|null $cntp
    + ): \FFI\CData|null
    +
    +
    +
    Parameters
    +
    partition \FFI\CData|null const rd_kafka_TopicPartitionInfo_t*
    +
    cntp \FFI\CData|null size_t*
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_Node_t**
    +
    +

    rd_kafka_UserScramCredentialAlteration_destroy()

    +
    public static rd_kafka_UserScramCredentialAlteration_destroy ( 
    +    \FFI\CData|null $alteration
    + ): void
    +
    +
    +
    Parameters
    +
    alteration \FFI\CData|null rd_kafka_UserScramCredentialAlteration_t*
    +
    +

    rd_kafka_UserScramCredentialAlteration_destroy_array()

    +
    public static rd_kafka_UserScramCredentialAlteration_destroy_array ( 
    +    \FFI\CData|null $alterations, 
    +    int|null $alteration_cnt
    + ): void
    +
    +
    +
    Parameters
    +
    alterations \FFI\CData|null rd_kafka_UserScramCredentialAlteration_t**
    +
    alteration_cnt int|null size_t
    +
    +

    rd_kafka_UserScramCredentialDeletion_new()

    +
    public static rd_kafka_UserScramCredentialDeletion_new ( 
    +    string|null $username, 
    +    int $mechanism
    + ): \FFI\CData|null
    +
    +

    Allocates a new UserScramCredentialDeletion given its fields.

    + +
    +
    Parameters
    +
    username string|null const char* - The username (not empty).
    +
    mechanism int rd_kafka_ScramMechanism_t - SASL/SCRAM mechanism.
    +
    Returns
    +
    \FFI\CData|null rd_kafka_UserScramCredentialAlteration_t* - A newly created instance of rd_kafka_UserScramCredentialAlteration_t. Ownership belongs to the caller, use rd_kafka_UserScramCredentialAlteration_destroy to destroy.
    +
    + +

    rd_kafka_UserScramCredentialUpsertion_new()

    +
    public static rd_kafka_UserScramCredentialUpsertion_new ( 
    +    string|null $username, 
    +    int $mechanism, 
    +    int|null $iterations, 
    +    \FFI\CData|null $password, 
    +    int|null $password_size, 
    +    \FFI\CData|null $salt, 
    +    int|null $salt_size
    + ): \FFI\CData|null
    +
    +

    Allocates a new UserScramCredentialUpsertion given its fields. If salt isn't given, a 64-byte salt is generated using OpenSSL RAND_priv_bytes, if available.

    + +
    Remarks
    A random salt is generated only when salt is NULL and OpenSSL >= 1.1.1 is available. Otherwise salt is a required parameter.
    + +
    +
    Parameters
    +
    username string|null const char* - The username (not empty).
    +
    mechanism int rd_kafka_ScramMechanism_t - SASL/SCRAM mechanism.
    +
    iterations int|null int32_t - SASL/SCRAM iterations.
    +
    password \FFI\CData|null const unsigned char* - Password bytes (not empty).
    +
    password_size int|null size_t - Size of password (greater than 0).
    +
    salt \FFI\CData|null const unsigned char* - Salt bytes (optional).
    +
    salt_size int|null size_t - Size of salt (optional).
    +
    Returns
    +
    \FFI\CData|null rd_kafka_UserScramCredentialAlteration_t* - A newly created instance of rd_kafka_UserScramCredentialAlteration_t. Ownership belongs to the caller, use rd_kafka_UserScramCredentialAlteration_destroy to destroy.
    +
    + +
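
    A hedged sketch of upserting a SCRAM credential. The mechanism value and the rd_kafka_AlterUserScramCredentials() admin call are taken from upstream librdkafka and are assumptions here, as are the \RdKafka\FFI\Library class name and the $rk/$queue handles.

        <?php
        // Sketch only: Library, $rk, $queue, the admin call and the enum value are assumptions.
        use RdKafka\FFI\Library;

        $password = 'secret';
        $passwordBuf = \FFI::new('unsigned char[' . strlen($password) . ']');
        \FFI::memcpy($passwordBuf, $password, strlen($password));

        $alteration = Library::rd_kafka_UserScramCredentialUpsertion_new(
            'alice',
            1,                  // assumed value of RD_KAFKA_SCRAM_MECHANISM_SHA_256
            8192,               // iterations
            $passwordBuf,
            strlen($password),
            null,               // salt: auto-generated when OpenSSL >= 1.1.1 is available
            0
        );

        Library::rd_kafka_AlterUserScramCredentials($rk, \FFI::addr($alteration), 1, null, $queue);
        Library::rd_kafka_UserScramCredentialAlteration_destroy($alteration);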

    rd_kafka_UserScramCredentialsDescription_error()

    +
    public static rd_kafka_UserScramCredentialsDescription_error ( 
    +    \FFI\CData|null $description
    + ): \FFI\CData|null
    +
    +
    +
    Parameters
    +
    description \FFI\CData|null const rd_kafka_UserScramCredentialsDescription_t*
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_error_t*
    +
    +

    rd_kafka_UserScramCredentialsDescription_scramcredentialinfo()

    +
    public static rd_kafka_UserScramCredentialsDescription_scramcredentialinfo ( 
    +    \FFI\CData|null $description, 
    +    int|null $idx
    + ): \FFI\CData|null
    +
    +
    +
    Parameters
    +
    description \FFI\CData|null const rd_kafka_UserScramCredentialsDescription_t*
    +
    idx int|null size_t
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_ScramCredentialInfo_t*
    +
    +

    rd_kafka_UserScramCredentialsDescription_scramcredentialinfo_count()

    +
    public static rd_kafka_UserScramCredentialsDescription_scramcredentialinfo_count ( 
    +    \FFI\CData|null $description
    + ): int|null
    +
    +
    +
    Parameters
    +
    description \FFI\CData|null const rd_kafka_UserScramCredentialsDescription_t*
    +
    Returns
    +
    int|null size_t
    +
    +

    rd_kafka_UserScramCredentialsDescription_user()

    +
    public static rd_kafka_UserScramCredentialsDescription_user ( 
    +    \FFI\CData|null $description
    + ): string|null
    +
    +
    +
    Parameters
    +
    description \FFI\CData|null const rd_kafka_UserScramCredentialsDescription_t*
    +
    Returns
    +
    string|null const char*
    +
    +

    rd_kafka_Uuid_base64str()

    +
    public static rd_kafka_Uuid_base64str ( 
    +    \FFI\CData|null $uuid
    + ): string|null
    +
    +
    +
    Parameters
    +
    uuid \FFI\CData|null const rd_kafka_Uuid_t*
    +
    Returns
    +
    string|null const char*
    +
    +

    rd_kafka_Uuid_copy()

    +
    public static rd_kafka_Uuid_copy ( 
    +    \FFI\CData|null $uuid
    + ): \FFI\CData|null
    +
    +
    +
    Parameters
    +
    uuid \FFI\CData|null const rd_kafka_Uuid_t*
    +
    Returns
    +
    \FFI\CData|null rd_kafka_Uuid_t*
    +
    +

    rd_kafka_Uuid_destroy()

    +
    public static rd_kafka_Uuid_destroy ( 
    +    \FFI\CData|null $uuid
    + ): void
    +
    +
    +
    Parameters
    +
    uuid \FFI\CData|null rd_kafka_Uuid_t*
    +
    +

    rd_kafka_Uuid_least_significant_bits()

    +
    public static rd_kafka_Uuid_least_significant_bits ( 
    +    \FFI\CData|null $uuid
    + ): int|null
    +
    +
    +
    Parameters
    +
    uuid \FFI\CData|null const rd_kafka_Uuid_t*
    +
    Returns
    +
    int|null int64_t
    +
    +

    rd_kafka_Uuid_most_significant_bits()

    +
    public static rd_kafka_Uuid_most_significant_bits ( 
    +    \FFI\CData|null $uuid
    + ): int|null
    +
    +
    +
    Parameters
    +
    uuid \FFI\CData|null const rd_kafka_Uuid_t*
    +
    Returns
    +
    int|null int64_t
    +
    +

    rd_kafka_Uuid_new()

    +
    public static rd_kafka_Uuid_new ( 
    +    int|null $most_significant_bits, 
    +    int|null $least_significant_bits
    + ): \FFI\CData|null
    +
    +
    +
    Parameters
    +
    most_significant_bits int|null int64_t
    +
    least_significant_bits int|null int64_t
    +
    Returns
    +
    \FFI\CData|null rd_kafka_Uuid_t*
    +
    +

    rd_kafka_abort_transaction()

    +
    public static rd_kafka_abort_transaction ( 
    +    \FFI\CData|null $rk, 
    +    int|null $timeout_ms
    + ): \FFI\CData|null
    +
    +

    Aborts the ongoing transaction.

    +

    This function should also be used to recover from non-fatal abortable transaction errors.

    +

    Any outstanding messages will be purged and fail with RD_KAFKA_RESP_ERR__PURGE_INFLIGHT or RD_KAFKA_RESP_ERR__PURGE_QUEUE. See rd_kafka_purge() for details.

    +
    +
    Remarks
    It is strongly recommended to always pass -1 (remaining transaction time) as the timeout_ms. Using other values risks internal state desynchronization in case any of the underlying protocol requests fail.
    +
    +This function will block until all outstanding messages are purged and the transaction abort request has been successfully handled by the transaction coordinator, or until timeout_ms expires, whichever comes first. On timeout the application may call the function again. If the application has enabled RD_KAFKA_EVENT_DR it must serve the event queue in a separate thread since rd_kafka_flush() will not serve delivery reports in this mode.
    +
    +This call is resumable when a retriable timeout error is returned. Calling the function again will resume the operation that is progressing in the background.
    + +
    Remarks
    The returned error object (if not NULL) must be destroyed with rd_kafka_error_destroy().
    + +
    +
    Parameters
    +
    rk \FFI\CData|null rd_kafka_t* - Producer instance.
    +
    timeout_ms int|null int - The maximum time to block. On timeout the operation may continue in the background, depending on state, and it is okay to call this function again. Pass -1 to use the remaining transaction timeout, this is the recommended use.
    +
    Returns
    +
    \FFI\CData|null rd_kafka_error_t* - NULL on success or an error object on failure. Check whether the returned error object permits retrying by calling rd_kafka_error_is_retriable(), or whether a fatal error has been raised by calling rd_kafka_error_is_fatal(). Error codes: RD_KAFKA_RESP_ERR__STATE if not currently in a transaction, RD_KAFKA_RESP_ERR__TIMED_OUT if the transaction could not be completely committed within timeout_ms, this is a retriable error as the commit continues in the background, RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH if the current producer transaction has been fenced by a newer producer instance, RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED if the producer is no longer authorized to perform transactional operations, RD_KAFKA_RESP_ERR__NOT_CONFIGURED if transactions have not been configured for the producer instance, RD_KAFKA_RESP_ERR__INVALID_ARG if rk is not a producer instance. Other error codes not listed here may be returned, depending on broker version.
    +
    + +

    rd_kafka_acl_result_error()

    +
    public static rd_kafka_acl_result_error ( 
    +    \FFI\CData|null $aclres
    + ): \FFI\CData|null
    +
    +
    +
    Parameters
    +
    aclres \FFI\CData|null const rd_kafka_acl_result_t* - )
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_error_t* - the error object for the given acl result, or NULL on success.
    +
    + +

    rd_kafka_assign()

    +
    public static rd_kafka_assign ( 
    +    \FFI\CData|null $rk, 
    +    \FFI\CData|null $partitions
    + ): int
    +
    +

    Atomic assignment of partitions to consume.

    +

    The new partitions will replace the existing assignment.

    +

    A zero-length partitions list is treated as a valid, albeit empty, assignment that maintains internal state, while a NULL value for partitions will reset and clear the internal state.

    +

    When used from a rebalance callback, the application should pass the partition list passed to the callback (or a copy of it) even if the list is empty (i.e. should not pass NULL in this case) so as to maintain internal join state. This is not strictly required - the application may adjust the assignment provided by the group. However, this is rarely useful in practice.

    + +
    +
    Parameters
    +
    rk \FFI\CData|null rd_kafka_t*
    +
    partitions \FFI\CData|null const rd_kafka_topic_partition_list_t*
    +
    Returns
    +
    int rd_kafka_resp_err_t - An error code indicating if the new assignment was applied or not. RD_KAFKA_RESP_ERR__FATAL is returned if the consumer has raised a fatal error.
    +
    + +

    rd_kafka_assignment()

    +
    public static rd_kafka_assignment ( 
    +    \FFI\CData|null $rk, 
    +    \FFI\CData|null $partitions
    + ): int
    +
    +

    Returns the current partition assignment as set by rd_kafka_assign() or rd_kafka_incremental_assign().

    + +
    Remarks
    The application is responsible for calling rd_kafka_topic_partition_list_destroy on the returned list.
    +
    +This assignment represents the partitions assigned through the assign functions and not the partitions assigned to this consumer instance by the consumer group leader. They are usually the same following a rebalance but not necessarily since an application is free to assign any partitions.
    + +
    +
    Parameters
    +
    rk \FFI\CData|null rd_kafka_t*
    +
    partitions \FFI\CData|null rd_kafka_topic_partition_list_t**
    +
    Returns
    +
    int rd_kafka_resp_err_t - An error code on failure, otherwise partitions is updated to point to a newly allocated partition list (possibly empty).
    +
    + +

    rd_kafka_assignment_lost()

    +
    public static rd_kafka_assignment_lost ( 
    +    \FFI\CData|null $rk
    + ): int|null
    +
    +

    Check whether the consumer considers the current assignment to have been lost involuntarily. This method is only applicable for use with a high-level subscribing consumer. Assignments are revoked immediately when determined to have been lost, so this method is only useful when reacting to a RD_KAFKA_EVENT_REBALANCE event or from within a rebalance_cb. Partitions that have been lost may already be owned by other members in the group and therefore committing offsets, for example, may fail.

    +
    Remarks
    Calling rd_kafka_assign(), rd_kafka_incremental_assign() or rd_kafka_incremental_unassign() resets this flag.
    + +
    +
    Parameters
    +
    rk \FFI\CData|null rd_kafka_t* - )
    +
    Returns
    +
    int|null int - Returns 1 if the current partition assignment is considered lost, 0 otherwise.
    +
    + +

    rd_kafka_begin_transaction()

    +
    public static rd_kafka_begin_transaction ( 
    +    \FFI\CData|null $rk
    + ): \FFI\CData|null
    +
    +

    Begin a new transaction.

    +

    rd_kafka_init_transactions() must have been called successfully (once) before this function is called.

    +

    Upon successful return from this function the application has to perform at least one of the following operations within transaction.timeout.ms to avoid timing out the transaction on the broker:

    +
      +
    • rd_kafka_produce() (et.al)
    • +
    • rd_kafka_send_offsets_to_transaction()
    • +
    • rd_kafka_commit_transaction()
    • +
    • rd_kafka_abort_transaction()
    • +
    +

    Any messages produced, offsets sent (rd_kafka_send_offsets_to_transaction()), etc., after the successful return of this function will be part of the transaction and committed or aborted atomically.

    +

    Finish the transaction by calling rd_kafka_commit_transaction() or abort the transaction by calling rd_kafka_abort_transaction().

    + +
    Remarks
    With the transactional producer, rd_kafka_produce(), rd_kafka_producev(), et al., are only allowed during an ongoing transaction, as started with this function. Any produce call outside an ongoing transaction, or for a failed transaction, will fail.
    +
    +The returned error object (if not NULL) must be destroyed with rd_kafka_error_destroy().
    + +
    +
    Parameters
    +
    rk \FFI\CData|null rd_kafka_t* - ) - Producer instance.
    +
    Returns
    +
    \FFI\CData|null rd_kafka_error_t* - NULL on success or an error object on failure. Check whether a fatal error has been raised by calling rd_kafka_error_is_fatal(). Error codes: RD_KAFKA_RESP_ERR__STATE if a transaction is already in progress or upon fatal error, RD_KAFKA_RESP_ERR__NOT_CONFIGURED if transactions have not been configured for the producer instance, RD_KAFKA_RESP_ERR__INVALID_ARG if rk is not a producer instance. Other error codes not listed here may be returned, depending on broker version.
    +
    + +
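
    A minimal sketch of the transactional flow described above (begin, produce, then commit or abort). It assumes the binding class \RdKafka\FFI\Library, a $producer handle configured with transactional.id, and that rd_kafka_init_transactions() has already completed successfully.

        <?php
        // Sketch only: Library and $producer are assumptions.
        use RdKafka\FFI\Library;

        $error = Library::rd_kafka_begin_transaction($producer);
        if ($error !== null) {
            $message = Library::rd_kafka_error_string($error);
            Library::rd_kafka_error_destroy($error);
            throw new \RuntimeException($message);
        }

        // ...rd_kafka_produce()/rd_kafka_producev() calls go here; they are only
        // valid inside the transaction started above...

        $error = Library::rd_kafka_commit_transaction($producer, -1 /* remaining txn time */);
        if ($error !== null) {
            // A full implementation would check rd_kafka_error_txn_requires_abort()
            // and rd_kafka_error_is_retriable() before deciding how to proceed.
            Library::rd_kafka_error_destroy($error);
            Library::rd_kafka_abort_transaction($producer, -1);
        }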

    rd_kafka_brokers_add()

    +
    public static rd_kafka_brokers_add ( 
    +    \FFI\CData|null $rk, 
    +    string|null $brokerlist
    + ): int|null
    +
    +

    Adds one or more brokers to the kafka handle's list of initial bootstrap brokers.

    +

    Additional brokers will be discovered automatically as soon as rdkafka connects to a broker by querying the broker metadata.

    +

    If a broker name resolves to multiple addresses (and possibly address families) all will be used for connection attempts in round-robin fashion.

    +

    brokerlist is a comma-separated list of brokers in the format <broker1>,<broker2>,... where each broker is given in either host or URL form: <host>[:<port>] or <proto>://<host>[:<port>], with <proto> being one of PLAINTEXT, SSL, SASL, or SASL_PLAINTEXT. The two formats can be mixed, but ultimately the value of the security.protocol config property decides what brokers are allowed.

    +

    Examples: brokerlist = "broker1:10000,broker2" or brokerlist = "SSL://broker3:9000,ssl://broker2"

    + +
    Remarks
    Brokers may also be defined with the metadata.broker.list or bootstrap.servers configuration property (preferred method).
    +
    Deprecated:
    Set bootstrap servers with the bootstrap.servers configuration property.
    + +
    +
    Parameters
    +
    rk \FFI\CData|null rd_kafka_t*
    +
    brokerlist string|null const char*
    +
    Returns
    +
    int|null int - the number of brokers successfully added.
    +
    + +

    rd_kafka_clusterid()

    +
    public static rd_kafka_clusterid ( 
    +    \FFI\CData|null $rk, 
    +    int|null $timeout_ms
    + ): \FFI\CData|null
    +
    +

    Returns the ClusterId as reported in broker metadata.

    + +
    Remarks
    Requires broker version >=0.10.0 and api.version.request=true.
    +
    +The application must free the returned pointer using rd_kafka_mem_free().
    + +
    +
    Parameters
    +
    rk \FFI\CData|null rd_kafka_t* - Client instance.
    +
    timeout_ms int|null int - If there is no cached value from metadata retrieval then this specifies the maximum amount of time (in milliseconds) the call will block waiting for metadata to be retrieved. Use 0 for non-blocking calls.
    +
    Returns
    +
    \FFI\CData|null char* - a newly allocated string containing the ClusterId, or NULL if no ClusterId could be retrieved in the allotted timespan.
    +
    + +
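
    Because the returned pointer is allocated by librdkafka, the PHP side must convert it to a string and release it with rd_kafka_mem_free(), as the remark above states. A short sketch, with the usual assumptions ($rk handle, \RdKafka\FFI\Library class name):

        <?php
        // Sketch only: Library and $rk are assumptions.
        use RdKafka\FFI\Library;

        $clusterId = Library::rd_kafka_clusterid($rk, 5000 /* wait up to 5 s for metadata */);
        if ($clusterId !== null) {
            echo \FFI::string($clusterId), PHP_EOL;
            Library::rd_kafka_mem_free($rk, $clusterId);  // release the librdkafka-allocated string
        }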

    rd_kafka_commit()

    +
    public static rd_kafka_commit ( 
    +    \FFI\CData|null $rk, 
    +    \FFI\CData|null $offsets, 
    +    int|null $async
    + ): int
    +
    +

    Commit offsets on broker for the provided list of partitions.

    +

    offsets should contain topic, partition, offset and possibly metadata. The offset should be the offset where consumption will resume, i.e., the last processed offset + 1. If offsets is NULL the current partition assignment will be used instead.

    +

    If async is false this operation will block until the broker offset commit is done, returning the resulting success or error code.

    +

    If a rd_kafka_conf_set_offset_commit_cb() offset commit callback has been configured the callback will be enqueued for a future call to rd_kafka_poll(), rd_kafka_consumer_poll() or similar.

    + +
    +
    Parameters
    +
    rk \FFI\CData|null rd_kafka_t*
    +
    offsets \FFI\CData|null const rd_kafka_topic_partition_list_t*
    +
    async int|null int
    +
    Returns
    +
    int rd_kafka_resp_err_t - An error code indicating whether the commit was successful, or successfully scheduled if asynchronous, or failed. RD_KAFKA_RESP_ERR__FATAL is returned if the consumer has raised a fatal error.
    +
    + +
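
    A short sketch of a blocking commit of the current assignment (offsets = NULL, async = 0), with the usual assumptions ($consumer handle, \RdKafka\FFI\Library class name); rd_kafka_err2str() is librdkafka's error-to-string helper and is not documented on this page.

        <?php
        // Sketch only: Library, $consumer and rd_kafka_err2str() are assumptions.
        use RdKafka\FFI\Library;

        // NULL offsets = commit the current partition assignment; async = 0 blocks
        // until the broker has acknowledged the commit.
        $err = Library::rd_kafka_commit($consumer, null, 0);
        if ($err !== 0) { // 0 corresponds to RD_KAFKA_RESP_ERR_NO_ERROR
            echo Library::rd_kafka_err2str($err), PHP_EOL;
        }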

    rd_kafka_commit_message()

    +
    public static rd_kafka_commit_message ( 
    +    \FFI\CData|null $rk, 
    +    \FFI\CData|null $rkmessage, 
    +    int|null $async
    + ): int
    +
    +

    Commit message's offset on broker for the message's partition. The committed offset is the message's offset + 1.

    +
    See also
    rd_kafka_commit
    + +
    +
    Parameters
    +
    rk \FFI\CData|null rd_kafka_t*
    +
    rkmessage \FFI\CData|null const rd_kafka_message_t*
    +
    async int|null int
    +
    Returns
    +
    int rd_kafka_resp_err_t
    +
    + +

    rd_kafka_commit_queue()

    +
    public static rd_kafka_commit_queue ( 
    +    \FFI\CData|null $rk, 
    +    \FFI\CData|null $offsets, 
    +    \FFI\CData|null $rkqu, 
    +    \FFI\CData|\Closure $cb, 
    +    \FFI\CData|object|string|null $opaque
    + ): int
    +
    +

    Commit offsets on broker for the provided list of partitions.

    +

    See rd_kafka_commit for offsets semantics.

    +

    The result of the offset commit will be posted on the provided rkqu queue.

    +

    If the application uses one of the poll APIs (rd_kafka_poll(), rd_kafka_consumer_poll(), rd_kafka_queue_poll(), ..) to serve the queue the cb callback is required.

    +

    The commit_opaque argument is passed to the callback as commit_opaque, or if using the event API the callback is ignored and the offset commit result will be returned as an RD_KAFKA_EVENT_COMMIT event and the commit_opaque value will be available with rd_kafka_event_opaque().

    +

    If rkqu is NULL a temporary queue will be created and the callback will be served by this call.

    +
    See also
    rd_kafka_commit()
    +
    +rd_kafka_conf_set_offset_commit_cb()
    + +
    +
    Parameters
    +
    rk \FFI\CData|null rd_kafka_t*
    +
    offsets \FFI\CData|null const rd_kafka_topic_partition_list_t*
    +
    rkqu \FFI\CData|null rd_kafka_queue_t*
    +
    cb \FFI\CData|\Closure void(*)(rd_kafka_t*, rd_kafka_resp_err_t, rd_kafka_topic_partition_list_t*, void*)
    +
    opaque \FFI\CData|object|string|null void*
    +
    Returns
    +
    int rd_kafka_resp_err_t
    +
    + +

    rd_kafka_commit_transaction()

    +
    public static rd_kafka_commit_transaction ( 
    +    \FFI\CData|null $rk, 
    +    int|null $timeout_ms
    + ): \FFI\CData|null
    +
    +

    Commit the current transaction (as started with rd_kafka_begin_transaction()).

    +

    Any outstanding messages will be flushed (delivered) before actually committing the transaction.

    +

    If any of the outstanding messages fail permanently the current transaction will enter the abortable error state and this function will return an abortable error; in this case the application must call rd_kafka_abort_transaction() before attempting a new transaction with rd_kafka_begin_transaction().

    + +
    Remarks
    It is strongly recommended to always pass -1 (remaining transaction time) as the timeout_ms. Using other values risks internal state desynchronization in case any of the underlying protocol requests fail.
    +
    +This function will block until all outstanding messages are delivered and the transaction commit request has been successfully handled by the transaction coordinator, or until timeout_ms expires, whichever comes first. On timeout the application may call the function again.
    +
    +Will automatically call rd_kafka_flush() to ensure all queued messages are delivered before attempting to commit the transaction. If the application has enabled RD_KAFKA_EVENT_DR it must serve the event queue in a separate thread since rd_kafka_flush() will not serve delivery reports in this mode.
    +
    +This call is resumable when a retriable timeout error is returned. Calling the function again will resume the operation that is progressing in the background.
    + +
    Remarks
    The returned error object (if not NULL) must be destroyed with rd_kafka_error_destroy().
    + +
    +
    Parameters
    +
    rk \FFI\CData|null rd_kafka_t* - Producer instance.
    +
    timeout_ms int|null int - The maximum time to block. On timeout the operation may continue in the background, depending on state, and it is okay to call this function again. Pass -1 to use the remaining transaction timeout, this is the recommended use.
    +
    Returns
    +
    \FFI\CData|null rd_kafka_error_t* - NULL on success or an error object on failure. Check whether the returned error object permits retrying by calling rd_kafka_error_is_retriable(), or whether an abortable or fatal error has been raised by calling rd_kafka_error_txn_requires_abort() or rd_kafka_error_is_fatal() respectively. Error codes: RD_KAFKA_RESP_ERR__STATE if not currently in a transaction, RD_KAFKA_RESP_ERR__TIMED_OUT if the transaction could not be completely committed within timeout_ms, this is a retriable error as the commit continues in the background, RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH if the current producer transaction has been fenced by a newer producer instance, RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED if the producer is no longer authorized to perform transactional operations, RD_KAFKA_RESP_ERR__NOT_CONFIGURED if transactions have not been configured for the producer instance, RD_KAFKA_RESP_ERR__INVALID_ARG if rk is not a producer instance. Other error codes not listed here may be returned, depending on broker version.
    +
    + +

    rd_kafka_committed()

    +
    public static rd_kafka_committed ( 
    +    \FFI\CData|null $rk, 
    +    \FFI\CData|null $partitions, 
    +    int|null $timeout_ms
    + ): int
    +
    +

    Retrieve committed offsets for topics+partitions.

    +

    The offset field of each requested partition will either be set to the stored offset or to RD_KAFKA_OFFSET_INVALID in case there was no stored offset for that partition.

    +

    Committed offsets will be returned according to the isolation.level configuration property, if set to read_committed (default) then only stable offsets for fully committed transactions will be returned, while read_uncommitted may return offsets for not yet committed transactions.

    + +
    +
    Parameters
    +
    rk \FFI\CData|null rd_kafka_t*
    +
    partitions \FFI\CData|null rd_kafka_topic_partition_list_t*
    +
    timeout_ms int|null int
    +
    Returns
    +
    int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success, in which case the offset or err field of each partition’s element is filled in with the stored offset, or a partition-specific error. Else returns an error code.
    +
    + +
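
    A hedged sketch of querying the committed offset of a single partition. The topic partition list helpers are part of librdkafka's public API but are not documented on this page; the \RdKafka\FFI\Library class name and the $consumer handle are assumptions.

        <?php
        // Sketch only: Library, $consumer and the partition list helpers are assumptions.
        use RdKafka\FFI\Library;

        $partitions = Library::rd_kafka_topic_partition_list_new(1);
        Library::rd_kafka_topic_partition_list_add($partitions, 'my-topic', 0);

        $err = Library::rd_kafka_committed($consumer, $partitions, 10000 /* timeout ms */);
        if ($err === 0) { // RD_KAFKA_RESP_ERR_NO_ERROR
            // offset now holds the stored offset, or RD_KAFKA_OFFSET_INVALID if
            // nothing has been committed for this partition.
            echo $partitions->elems[0]->offset, PHP_EOL;
        }

        Library::rd_kafka_topic_partition_list_destroy($partitions);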

    rd_kafka_conf()

    +
    public static rd_kafka_conf ( 
    +    \FFI\CData|null $rk
    + ): \FFI\CData|null
    +
    +
    Remarks
    the returned object is read-only and its lifetime is the same as the rd_kafka_t object.
    + +
    +
    Parameters
    +
    rk \FFI\CData|null rd_kafka_t* - )
    +
    Returns
    +
    \FFI\CData|null const rd_kafka_conf_t* - the configuration object used by an rd_kafka_t instance. For use with rd_kafka_conf_get(), et.al., to extract configuration properties from a running client.
    +
    + +

    rd_kafka_conf_destroy()

    +
    public static rd_kafka_conf_destroy ( 
    +    \FFI\CData|null $conf
    + ): void
    +
    +
    +
    Parameters
    +
    conf \FFI\CData|null rd_kafka_conf_t*
    +
    +

    rd_kafka_conf_dump()

    +
    public static rd_kafka_conf_dump ( 
    +    \FFI\CData|null $conf, 
    +    \FFI\CData|null $cntp
    + ): \FFI\CData|null
    +
    +

    Dump the configuration properties and values of conf to an array with "key", "value" pairs.

    +

    The number of entries in the array is returned in *cntp.

    +

    The dump must be freed with rd_kafka_conf_dump_free().

    + +
    +
    Parameters
    +
    conf \FFI\CData|null rd_kafka_conf_t*
    +
    cntp \FFI\CData|null size_t*
    +
    Returns
    +
    \FFI\CData|null const char**
    +
    + +

    rd_kafka_conf_dump_free()

    +
    public static rd_kafka_conf_dump_free ( 
    +    \FFI\CData|null $arr, 
    +    int|null $cnt
    + ): void
    +
    +
    +
    Parameters
    +
    arr \FFI\CData|null const char**
    +
    cnt int|null size_t
    +
    +
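
    Since the dump is a flat const char** of alternating "key", "value" entries, it maps naturally onto a PHP array. A short sketch, assuming an existing $conf (rd_kafka_conf_t*) and the \RdKafka\FFI\Library class name:

        <?php
        // Sketch only: Library and $conf are assumptions.
        use RdKafka\FFI\Library;

        $cnt = \FFI::new('size_t');
        $dump = Library::rd_kafka_conf_dump($conf, \FFI::addr($cnt));

        $properties = [];
        for ($i = 0; $i + 1 < $cnt->cdata; $i += 2) {
            $properties[\FFI::string($dump[$i])] = \FFI::string($dump[$i + 1]);
        }

        Library::rd_kafka_conf_dump_free($dump, $cnt->cdata);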

    rd_kafka_conf_dup()

    +
    public static rd_kafka_conf_dup ( 
    +    \FFI\CData|null $conf
    + ): \FFI\CData|null
    +
    +

    Creates a copy/duplicate of configuration object conf.

    +
    Remarks
    Interceptors are NOT copied to the new configuration object.
    +
    See also
    rd_kafka_interceptor_f_on_conf_dup
    + +
    +
    Parameters
    +
    conf \FFI\CData|null const rd_kafka_conf_t* - )
    +
    Returns
    +
    \FFI\CData|null rd_kafka_conf_t*
    +
    + +

    rd_kafka_conf_dup_filter()

    +
    public static rd_kafka_conf_dup_filter ( 
    +    \FFI\CData|null $conf, 
    +    int|null $filter_cnt, 
    +    \FFI\CData|null $filter
    + ): \FFI\CData|null
    +
    +
    +
    Parameters
    +
    conf \FFI\CData|null const rd_kafka_conf_t*
    +
    filter_cnt int|null size_t
    +
    filter \FFI\CData|null const char**
    +
    Returns
    +
    \FFI\CData|null rd_kafka_conf_t*
    +
    +

    rd_kafka_conf_enable_sasl_queue()

    +
    public static rd_kafka_conf_enable_sasl_queue ( 
    +    \FFI\CData|null $conf, 
    +    int|null $enable
    + ): void
    +
    +

    Enable/disable creation of a queue specific to SASL events and callbacks.

    +

    For SASL mechanisms that trigger callbacks (currently OAUTHBEARER) this configuration API allows an application to get a dedicated queue for the SASL events/callbacks. After enabling the queue with this API the application can retrieve the queue by calling rd_kafka_queue_get_sasl() on the client instance. This queue may then be served directly by the application (with rd_kafka_queue_poll(), et.al) or forwarded to another queue, such as the background queue.

    +

    A convenience function is available to automatically forward the SASL queue to librdkafka's background thread, see rd_kafka_sasl_background_callbacks_enable().

    +

    By default (enable = 0) the main queue (as served by rd_kafka_poll(), et.al.) is used for SASL callbacks.

    +
    Remarks
    The SASL queue is currently only used by the SASL OAUTHBEARER mechanism's token_refresh_cb().
    +
    See also
    rd_kafka_queue_get_sasl()
    +
    +rd_kafka_sasl_background_callbacks_enable()
    + +
    +
    Parameters
    +
    conf \FFI\CData|null rd_kafka_conf_t*
    +
    enable int|null int
    +
    + +

    rd_kafka_conf_get()

    +
    public static rd_kafka_conf_get ( 
    +    \FFI\CData|null $conf, 
    +    string|null $name, 
    +    \FFI\CData|null $dest, 
    +    \FFI\CData|null $dest_size
    + ): int
    +
    +

    Retrieve configuration value for property name.

    +

    If dest is non-NULL the value will be written to dest with at most dest_size.

    +

    *dest_size is updated to the full length of the value, thus if *dest_size initially is smaller than the full length the application may reallocate dest to fit the returned *dest_size and try again.

    +

    If dest is NULL only the full length of the value is returned.

    +

    Fallthrough: Topic-level configuration properties from the default_topic_conf may be retrieved using this interface.

    + +
    +
    Parameters
    +
    conf \FFI\CData|null const rd_kafka_conf_t*
    +
    name string|null const char*
    +
    dest \FFI\CData|null char*
    +
    dest_size \FFI\CData|null size_t*
    +
    Returns
    +
    int rd_kafka_conf_res_t - RD_KAFKA_CONF_OK if the property name matched, else RD_KAFKA_CONF_UNKNOWN.
    +
+ +
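The two-pass sizing described above can be sketched in C as follows (the property name "client.id" is only an example; an existing conf object is assumed):

size_t sz = 0;
rd_kafka_conf_get(conf, "client.id", NULL, &sz);        /* query full length */
char *val = malloc(sz);
if (rd_kafka_conf_get(conf, "client.id", val, &sz) == RD_KAFKA_CONF_OK)
    printf("client.id = %s\n", val);
free(val);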

    rd_kafka_conf_get_default_topic_conf()

    +
    public static rd_kafka_conf_get_default_topic_conf ( 
    +    \FFI\CData|null $conf
    + ): \FFI\CData|null
    +
    +

    Gets the default topic configuration as previously set with rd_kafka_conf_set_default_topic_conf() or that was implicitly created by configuring a topic-level property on the global conf object.

    + +
    Warning
    The returned topic configuration object is owned by the conf object. It may be modified but not destroyed and its lifetime is the same as the conf object or the next call to rd_kafka_conf_set_default_topic_conf().
    + +
    +
    Parameters
    +
    conf \FFI\CData|null rd_kafka_conf_t* - )
    +
    Returns
    +
    \FFI\CData|null rd_kafka_topic_conf_t* - the conf’s default topic configuration (if any), or NULL.
    +
    + +

    rd_kafka_conf_interceptor_add_on_conf_destroy()

    +
    public static rd_kafka_conf_interceptor_add_on_conf_destroy ( 
    +    \FFI\CData|null $conf, 
    +    string|null $ic_name, 
    +    \FFI\CData|\Closure $on_conf_destroy, 
    +    \FFI\CData|object|string|null $ic_opaque
    + ): int
    +
    +

    Append an on_conf_destroy() interceptor.

    + +
    Remarks
    Multiple on_conf_destroy() interceptors are allowed to be added to the same configuration object.
    + +
    +
    Parameters
    +
    conf \FFI\CData|null rd_kafka_conf_t* - Configuration object.
    +
    ic_name string|null const char* - Interceptor name, used in logging.
    +
    on_conf_destroy \FFI\CData|\Closure rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_conf_destroy_t*)(void*) - Function pointer.
    +
    ic_opaque \FFI\CData|object|string|null void* - Opaque value that will be passed to the function.
    +
    Returns
    +
    int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR
    +
    + +

    rd_kafka_conf_interceptor_add_on_conf_dup()

    +
    public static rd_kafka_conf_interceptor_add_on_conf_dup ( 
    +    \FFI\CData|null $conf, 
    +    string|null $ic_name, 
    +    \FFI\CData|\Closure $on_conf_dup, 
    +    \FFI\CData|object|string|null $ic_opaque
    + ): int
    +
    +

    Append an on_conf_dup() interceptor.

    + +
    +
    Parameters
    +
    conf \FFI\CData|null rd_kafka_conf_t* - Configuration object.
    +
    ic_name string|null const char* - Interceptor name, used in logging.
    +
    on_conf_dup \FFI\CData|\Closure rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_conf_dup_t*)(rd_kafka_conf_t*, const rd_kafka_conf_t*, size_t, const char**, void*) - Function pointer.
    +
    ic_opaque \FFI\CData|object|string|null void* - Opaque value that will be passed to the function.
    +
    Returns
    +
    int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing interceptor with the same ic_name and function has already been added to conf.
    +
    + +

    rd_kafka_conf_interceptor_add_on_conf_set()

    +
    public static rd_kafka_conf_interceptor_add_on_conf_set ( 
    +    \FFI\CData|null $conf, 
    +    string|null $ic_name, 
    +    \FFI\CData|\Closure $on_conf_set, 
    +    \FFI\CData|object|string|null $ic_opaque
    + ): int
    +
    +

    Append an on_conf_set() interceptor.

    + +
    +
    Parameters
    +
    conf \FFI\CData|null rd_kafka_conf_t* - Configuration object.
    +
    ic_name string|null const char* - Interceptor name, used in logging.
    +
    on_conf_set \FFI\CData|\Closure rd_kafka_conf_res_t(rd_kafka_interceptor_f_on_conf_set_t*)(rd_kafka_conf_t*, const char*, const char*, char*, size_t, void*) - Function pointer.
    +
    ic_opaque \FFI\CData|object|string|null void* - Opaque value that will be passed to the function.
    +
    Returns
    +
    int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing interceptor with the same ic_name and function has already been added to conf.
    +
    + +

    rd_kafka_conf_interceptor_add_on_new()

    +
    public static rd_kafka_conf_interceptor_add_on_new ( 
    +    \FFI\CData|null $conf, 
    +    string|null $ic_name, 
    +    \FFI\CData|\Closure $on_new, 
    +    \FFI\CData|object|string|null $ic_opaque
    + ): int
    +
    +

    Append an on_new() interceptor.

    + +
    Remarks
Since the on_new() interceptor is added to the configuration object it may be copied by rd_kafka_conf_dup(). An interceptor implementation must thus be able to handle the same interceptor, ic_opaque tuple being used by multiple client instances.
    +
    +An interceptor plugin should check the return value to make sure it has not already been added.
    + +
    +
    Parameters
    +
    conf \FFI\CData|null rd_kafka_conf_t* - Configuration object.
    +
    ic_name string|null const char* - Interceptor name, used in logging.
    +
    on_new \FFI\CData|\Closure rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_new_t*)(rd_kafka_t*, const rd_kafka_conf_t*, void*, char*, size_t) - Function pointer.
    +
    ic_opaque \FFI\CData|object|string|null void* - Opaque value that will be passed to the function.
    +
    Returns
    +
    int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing interceptor with the same ic_name and function has already been added to conf.
    +
    + +

    rd_kafka_conf_new()

    +
    public static rd_kafka_conf_new (  ): \FFI\CData|null
    +
    +

    Create configuration object.

    +

When providing your own configuration to the rd_kafka_*_new_*() calls, the rd_kafka_conf_t object needs to be created with this function, which will set up the defaults. I.e.:

    +
    rd_kafka_conf_t *myconf;
    +rd_kafka_conf_res_t res;
    +
    +myconf = rd_kafka_conf_new();
    +res = rd_kafka_conf_set(myconf, "socket.timeout.ms", "600",
    +                        errstr, sizeof(errstr));
    +if (res != RD_KAFKA_CONF_OK)
    +   die("%s\n", errstr);
    +
    +rk = rd_kafka_new(..., myconf);
    +

    Please see CONFIGURATION.md for the default settings or use rd_kafka_conf_properties_show() to provide the information at runtime.

    +

    The properties are identical to the Apache Kafka configuration properties whenever possible.

    +
    Remarks
    A successful call to rd_kafka_new() will assume ownership of the conf object and rd_kafka_conf_destroy() must not be called.
    + +
    See also
    rd_kafka_new(), rd_kafka_conf_set(), rd_kafka_conf_destroy()
    + +
    +
    Returns
    +
    \FFI\CData|null rd_kafka_conf_t* - ) - A new rd_kafka_conf_t object with defaults set.
    +
    + +

    rd_kafka_conf_properties_show()

    +
    public static rd_kafka_conf_properties_show ( 
    +    \FFI\CData|null $fp
    + ): void
    +
    +

    Prints a table to fp of all supported configuration properties, their default values as well as a description.

    +
    Remarks
All properties and values are shown, even those that have been disabled at build time due to missing dependencies.
    + +
    +
    Parameters
    +
    fp \FFI\CData|null FILE* - )
    +
    + +

    rd_kafka_conf_set()

    +
    public static rd_kafka_conf_set ( 
    +    \FFI\CData|null $conf, 
    +    string|null $name, 
    +    string|null $value, 
    +    \FFI\CData|null $errstr, 
    +    int|null $errstr_size
    + ): int
    +
    +

    Sets a configuration property.

    +

    conf must have been previously created with rd_kafka_conf_new().

    +

    Fallthrough: Topic-level configuration properties may be set using this interface in which case they are applied on the default_topic_conf. If no default_topic_conf has been set one will be created. Any subsequent rd_kafka_conf_set_default_topic_conf() calls will replace the current default topic configuration.

    + +
    Remarks
    Setting properties or values that were disabled at build time due to missing dependencies will return RD_KAFKA_CONF_INVALID.
    + +
    +
    Parameters
    +
    conf \FFI\CData|null rd_kafka_conf_t*
    +
    name string|null const char*
    +
    value string|null const char*
    +
    errstr \FFI\CData|null char*
    +
    errstr_size int|null size_t
    +
    Returns
    +
    int rd_kafka_conf_res_t - rd_kafka_conf_res_t to indicate success or failure. In case of failure errstr is updated to contain a human readable error string.
    +
    + +

    rd_kafka_conf_set_background_event_cb()

    +
    public static rd_kafka_conf_set_background_event_cb ( 
    +    \FFI\CData|null $conf, 
    +    \FFI\CData|\Closure $event_cb
    + ): void
    +
    +

    Generic event callback to be used with the event API to trigger callbacks for rd_kafka_event_t objects from a background thread serving the background queue.

    +

    How to use:

    +
      +
1. First set the event callback on the configuration object with this function, followed by creating an rd_kafka_t instance with rd_kafka_new().
2. Get the instance's background queue with rd_kafka_queue_get_background() and pass it as the reply/response queue to an API that takes an event queue, such as rd_kafka_CreateTopics().
3. As the response event is ready and enqueued on the background queue the event callback will be triggered from the background thread.
4. Prior to destroying the client instance, lose your reference to the background queue by calling rd_kafka_queue_destroy().
    +

    The application must destroy the rkev passed to event cb using rd_kafka_event_destroy().

    +

    The event_cb opaque argument is the opaque set with rd_kafka_conf_set_opaque().

    +
    Remarks
    This callback is a specialized alternative to the poll-based event API described in the Event interface section.
    +
    +The event_cb will be called spontaneously from a background thread completely managed by librdkafka. Take care to perform proper locking of application objects.
    +
    Warning
    The application MUST NOT call rd_kafka_destroy() from the event callback.
    +
    See also
    rd_kafka_queue_get_background
    + +
    +
    Parameters
    +
    conf \FFI\CData|null rd_kafka_conf_t*
    +
event_cb \FFI\CData|\Closure void(*)(rd_kafka_t*, rd_kafka_event_t*, void*)
    +
    + +
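A condensed C sketch of steps 1-4 above (my_event_cb is an application-defined name, and error handling is omitted for brevity):

static void my_event_cb(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque) {
    /* inspect rd_kafka_event_type(rkev) and handle the event ... */
    rd_kafka_event_destroy(rkev);          /* the application must destroy rkev */
}

rd_kafka_conf_set_background_event_cb(conf, my_event_cb);
rd_kafka_t *rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
rd_kafka_queue_t *bgq = rd_kafka_queue_get_background(rk);
/* ... pass bgq as the reply queue to e.g. rd_kafka_CreateTopics() ... */
rd_kafka_queue_destroy(bgq);               /* drop the reference before destroying rk */
rd_kafka_destroy(rk);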

    rd_kafka_conf_set_closesocket_cb()

    +
    public static rd_kafka_conf_set_closesocket_cb ( 
    +    \FFI\CData|null $conf, 
    +    \FFI\CData|\Closure $closesocket_cb
    + ): void
    +
    +

    Set close socket callback.

    +

    Close a socket (optionally opened with socket_cb()).

    +

    The callback's opaque argument is the opaque set with rd_kafka_conf_set_opaque().

    +
    Remarks
    The callback will be called from an internal librdkafka thread.
    + +
    +
    Parameters
    +
    conf \FFI\CData|null rd_kafka_conf_t*
    +
closesocket_cb \FFI\CData|\Closure int(*)(int, void*)
    +
    + +

    rd_kafka_conf_set_connect_cb()

    +
    public static rd_kafka_conf_set_connect_cb ( 
    +    \FFI\CData|null $conf, 
    +    \FFI\CData|\Closure $connect_cb
    + ): void
    +
    +

    Set connect callback.

    +

    The connect callback is responsible for connecting socket sockfd to peer address addr. The id field contains the broker identifier.

    +

    connect_cb shall return 0 on success (socket connected) or an error number (errno) on error.

    +

    The callback's opaque argument is the opaque set with rd_kafka_conf_set_opaque().

    +
    Remarks
    The callback will be called from an internal librdkafka thread.
    + +
    +
    Parameters
    +
    conf \FFI\CData|null rd_kafka_conf_t*
    +
connect_cb \FFI\CData|\Closure int(*)(int, const struct sockaddr*, int, const char*, void*)
    +
    + +

    rd_kafka_conf_set_consume_cb()

    +
    public static rd_kafka_conf_set_consume_cb ( 
    +    \FFI\CData|null $conf, 
    +    \FFI\CData|\Closure $consume_cb
    + ): void
    +
    +

    Consumer: Set consume callback for use with rd_kafka_consumer_poll()

    +

    The consume_cb opaque argument is the opaque set with rd_kafka_conf_set_opaque().

    + +
    +
    Parameters
    +
    conf \FFI\CData|null rd_kafka_conf_t*
    +
consume_cb \FFI\CData|\Closure void(*)(rd_kafka_message_t*, void*)
    +
    + +

    rd_kafka_conf_set_default_topic_conf()

    +
    public static rd_kafka_conf_set_default_topic_conf ( 
    +    \FFI\CData|null $conf, 
    +    \FFI\CData|null $tconf
    + ): void
    +
    +

    Sets the default topic configuration to use for automatically subscribed topics (e.g., through pattern-matched topics). The topic config object is not usable after this call.

    +
    Warning
    Any topic configuration settings that have been set on the global rd_kafka_conf_t object will be overwritten by this call since the implicitly created default topic config object is replaced by the user-supplied one.
    +
    Deprecated:
    Set default topic level configuration on the global rd_kafka_conf_t object instead.
    + +
    +
    Parameters
    +
    conf \FFI\CData|null rd_kafka_conf_t*
    +
    tconf \FFI\CData|null rd_kafka_topic_conf_t*
    +
    + +

    rd_kafka_conf_set_dr_cb()

    +
    public static rd_kafka_conf_set_dr_cb ( 
    +    \FFI\CData|null $conf, 
    +    \FFI\CData|\Closure $dr_cb
    + ): void
    +
    +
    Deprecated:
    See rd_kafka_conf_set_dr_msg_cb()
    + +
    +
    Parameters
    +
    conf \FFI\CData|null rd_kafka_conf_t*
    +
dr_cb \FFI\CData|\Closure void(*)(rd_kafka_t*, void*, size_t, rd_kafka_resp_err_t, void*, void*)
    +
    + +

    rd_kafka_conf_set_dr_msg_cb()

    +
    public static rd_kafka_conf_set_dr_msg_cb ( 
    +    \FFI\CData|null $conf, 
    +    \FFI\CData|\Closure $dr_msg_cb
    + ): void
    +
    +

    Producer: Set delivery report callback in provided conf object.

    +

    The delivery report callback will be called once for each message accepted by rd_kafka_produce() (et.al) with err set to indicate the result of the produce request.

    +

The callback is called when a message is successfully produced or if librdkafka encountered a permanent failure. Delivery errors occur when the retry count is exceeded, when the message.timeout.ms timeout is exceeded, or when there is a permanent error such as RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART.

    +

    An application must call rd_kafka_poll() at regular intervals to serve queued delivery report callbacks.

    +

    The broker-assigned offset can be retrieved with rkmessage->offset and the timestamp can be retrieved using rd_kafka_message_timestamp().

    +

    The dr_msg_cb opaque argument is the opaque set with rd_kafka_conf_set_opaque(). The per-message msg_opaque value is available in rd_kafka_message_t._private.

    +
    Remarks
The Idempotent Producer may return an invalid timestamp (RD_KAFKA_TIMESTAMP_NOT_AVAILABLE) and an invalid offset (RD_KAFKA_OFFSET_INVALID) for retried messages that were previously successfully delivered but not properly acknowledged.
    + +
    +
    Parameters
    +
    conf \FFI\CData|null rd_kafka_conf_t*
    +
dr_msg_cb \FFI\CData|\Closure void(*)(rd_kafka_t*, const rd_kafka_message_t*, void*)
    +
    + +
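A minimal C sketch of a delivery report callback and its registration; remember that rd_kafka_poll() must be called regularly for the callback to be served:

static void dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage,
                      void *opaque) {
    if (rkmessage->err)
        fprintf(stderr, "delivery failed: %s\n",
                rd_kafka_err2str(rkmessage->err));
    else
        printf("delivered to partition %d at offset %lld\n",
               (int)rkmessage->partition, (long long)rkmessage->offset);
}

rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);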

    rd_kafka_conf_set_engine_callback_data()

    +
    public static rd_kafka_conf_set_engine_callback_data ( 
    +    \FFI\CData|null $conf, 
    +    \FFI\CData|object|string|null $callback_data
    + ): void
    +
    +

    Set callback_data for OpenSSL engine.

    + +
    Remarks
The ssl.engine.location configuration must be set for this to have effect.
    +
    +The memory pointed to by value must remain valid for the lifetime of the configuration object and any Kafka clients that use it.
    + +
    +
    Parameters
    +
    conf \FFI\CData|null rd_kafka_conf_t* - Configuration object.
    +
    callback_data \FFI\CData|object|string|null void* - passed to engine callbacks, e.g. ENGINE_load_ssl_client_cert.
    +
    + +

    rd_kafka_conf_set_error_cb()

    +
    public static rd_kafka_conf_set_error_cb ( 
    +    \FFI\CData|null $conf, 
    +    \FFI\CData|\Closure $error_cb
    + ): void
    +
    +

    Set error callback in provided conf object.

    +

    The error callback is used by librdkafka to signal warnings and errors back to the application.

    +

These errors should generally be considered informational and non-permanent; the client will try to recover automatically from all types of errors. Provided the client and cluster configuration is correct, the application should treat these as temporary errors.

    +

    error_cb will be triggered with err set to RD_KAFKA_RESP_ERR__FATAL if a fatal error has been raised; in this case use rd_kafka_fatal_error() to retrieve the fatal error code and error string, and then begin terminating the client instance.

    +

    If no error_cb is registered, or RD_KAFKA_EVENT_ERROR has not been set with rd_kafka_conf_set_events, then the errors will be logged instead.

    +

    The callback's opaque argument is the opaque set with rd_kafka_conf_set_opaque().

    + +
    +
    Parameters
    +
    conf \FFI\CData|null rd_kafka_conf_t*
    +
error_cb \FFI\CData|\Closure void(*)(rd_kafka_t*, int, const char*, void*)
    +
    + +
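A C sketch of an error callback that distinguishes fatal from transient errors, as described above:

static void error_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque) {
    if (err == RD_KAFKA_RESP_ERR__FATAL) {
        char fatalstr[512];
        rd_kafka_resp_err_t ferr = rd_kafka_fatal_error(rk, fatalstr, sizeof(fatalstr));
        fprintf(stderr, "FATAL %s: %s\n", rd_kafka_err2name(ferr), fatalstr);
        /* begin orderly termination of the client instance */
    } else {
        fprintf(stderr, "transient error: %s: %s\n",
                rd_kafka_err2str((rd_kafka_resp_err_t)err), reason);
    }
}

rd_kafka_conf_set_error_cb(conf, error_cb);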

    rd_kafka_conf_set_events()

    +
    public static rd_kafka_conf_set_events ( 
    +    \FFI\CData|null $conf, 
    +    int|null $events
    + ): void
    +
    +
    +
    Parameters
    +
    conf \FFI\CData|null rd_kafka_conf_t*
    +
    events int|null int
    +
    +

    rd_kafka_conf_set_log_cb()

    +
    public static rd_kafka_conf_set_log_cb ( 
    +    \FFI\CData|null $conf, 
    +    \FFI\CData|\Closure $log_cb
    + ): void
    +
    +

    Set logger callback.

    +

    The default is to print to stderr, but a syslog logger is also available, see rd_kafka_log_print and rd_kafka_log_syslog for the builtin alternatives. Alternatively the application may provide its own logger callback. Or pass func as NULL to disable logging.

    +

    This is the configuration alternative to the deprecated rd_kafka_set_logger()

    +
    Remarks
    The log_cb will be called spontaneously from librdkafka's internal threads unless logs have been forwarded to a poll queue through rd_kafka_set_log_queue(). An application MUST NOT call any librdkafka APIs or do any prolonged work in a non-forwarded log_cb.
    + +
    +
    Parameters
    +
    conf \FFI\CData|null rd_kafka_conf_t*
    +
log_cb \FFI\CData|\Closure void(*)(const rd_kafka_t*, int, const char*, const char*)
    +
    + +

    rd_kafka_conf_set_oauthbearer_token_refresh_cb()

    +
    public static rd_kafka_conf_set_oauthbearer_token_refresh_cb ( 
    +    \FFI\CData|null $conf, 
    +    \FFI\CData|\Closure $oauthbearer_token_refresh_cb
    + ): void
    +
    +

    Set SASL/OAUTHBEARER token refresh callback in provided conf object.

    + +

    The SASL/OAUTHBEARER token refresh callback is triggered via rd_kafka_poll() whenever OAUTHBEARER is the SASL mechanism and a token needs to be retrieved, typically based on the configuration defined in sasl.oauthbearer.config.

    +

    The callback should invoke rd_kafka_oauthbearer_set_token() or rd_kafka_oauthbearer_set_token_failure() to indicate success or failure, respectively.

    +

    The refresh operation is eventable and may be received via rd_kafka_queue_poll() with an event type of RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH.

    +

    Note that before any SASL/OAUTHBEARER broker connection can succeed the application must call rd_kafka_oauthbearer_set_token() once – either directly or, more typically, by invoking either rd_kafka_poll(), rd_kafka_consumer_poll(), rd_kafka_queue_poll(), etc, in order to cause retrieval of an initial token to occur.

    +

    Alternatively, the application can enable the SASL queue by calling rd_kafka_conf_enable_sasl_queue() on the configuration object prior to creating the client instance, get the SASL queue with rd_kafka_queue_get_sasl(), and either serve the queue manually by calling rd_kafka_queue_poll(), or redirecting the queue to the background thread to have the queue served automatically. For the latter case the SASL queue must be forwarded to the background queue with rd_kafka_queue_forward(). A convenience function is available to automatically forward the SASL queue to librdkafka's background thread, see rd_kafka_sasl_background_callbacks_enable().

    +

    An unsecured JWT refresh handler is provided by librdkafka for development and testing purposes, it is enabled by setting the enable.sasl.oauthbearer.unsecure.jwt property to true and is mutually exclusive to using a refresh callback.

    +
    See also
    rd_kafka_sasl_background_callbacks_enable()
    +
    +rd_kafka_queue_get_sasl()
    + +
    +
    Parameters
    +
    conf \FFI\CData|null rd_kafka_conf_t* - the configuration to mutate.
    +
oauthbearer_token_refresh_cb \FFI\CData|\Closure void(*)(rd_kafka_t*, const char*, void*) - the callback to set; callback function arguments:
    rk - Kafka handle
    oauthbearer_config - Value of configuration property sasl.oauthbearer.config. opaque - Application-provided opaque set via rd_kafka_conf_set_opaque()
    +
    + +

    rd_kafka_conf_set_offset_commit_cb()

    +
    public static rd_kafka_conf_set_offset_commit_cb ( 
    +    \FFI\CData|null $conf, 
    +    \FFI\CData|\Closure $offset_commit_cb
    + ): void
    +
    +

    Consumer: Set offset commit callback for use with consumer groups.

    +

The results of automatic or manual offset commits will be scheduled for this callback and are served by rd_kafka_consumer_poll().

    +

    If no partitions had valid offsets to commit this callback will be called with err == RD_KAFKA_RESP_ERR__NO_OFFSET which is not to be considered an error.

    +

    The offsets list contains per-partition information:

    +
      +
    • offset: committed offset (attempted)
    • +
    • err: commit error
    • +
    +

    The callback's opaque argument is the opaque set with rd_kafka_conf_set_opaque().

    + +
    +
    Parameters
    +
    conf \FFI\CData|null rd_kafka_conf_t*
    +
offset_commit_cb \FFI\CData|\Closure void(*)(rd_kafka_t*, rd_kafka_resp_err_t, rd_kafka_topic_partition_list_t*, void*)
    +
    + +

    rd_kafka_conf_set_opaque()

    +
    public static rd_kafka_conf_set_opaque ( 
    +    \FFI\CData|null $conf, 
    +    \FFI\CData|object|string|null $opaque
    + ): void
    +
    +

    Sets the application's opaque pointer that will be passed to callbacks.

    +
    See also
    rd_kafka_opaque()
    + +
    +
    Parameters
    +
    conf \FFI\CData|null rd_kafka_conf_t*
    +
    opaque \FFI\CData|object|string|null void*
    +
    + +

    rd_kafka_conf_set_rebalance_cb()

    +
    public static rd_kafka_conf_set_rebalance_cb ( 
    +    \FFI\CData|null $conf, 
    +    \FFI\CData|\Closure $rebalance_cb
    + ): void
    +
    +

    Consumer: Set rebalance callback for use with coordinated consumer group balancing.

    +

    The err field is set to either RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS or RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS and 'partitions' contains the full partition set that was either assigned or revoked.

    +

    Registering a rebalance_cb turns off librdkafka's automatic partition assignment/revocation and instead delegates that responsibility to the application's rebalance_cb.

    +

    The rebalance callback is responsible for updating librdkafka's assignment set based on the two events: RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS and RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS but should also be able to handle arbitrary rebalancing failures where err is neither of those.

    +
    Remarks
    In this latter case (arbitrary error), the application must call rd_kafka_assign(rk, NULL) to synchronize state.
    +

    For eager/non-cooperative partition.assignment.strategy assignors, such as range and roundrobin, the application must use rd_kafka_assign() to set or clear the entire assignment. For the cooperative assignors, such as cooperative-sticky, the application must use rd_kafka_incremental_assign() for RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS and rd_kafka_incremental_unassign() for RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS.

    +

    Without a rebalance callback this is done automatically by librdkafka but registering a rebalance callback gives the application flexibility in performing other operations along with the assigning/revocation, such as fetching offsets from an alternate location (on assign) or manually committing offsets (on revoke).

    +

    rebalance_cb is always triggered exactly once when a rebalance completes with a new assignment, even if that assignment is empty. If an eager/non-cooperative assignor is configured, there will eventually be exactly one corresponding call to rebalance_cb to revoke these partitions (even if empty), whether this is due to a group rebalance or lost partitions. In the cooperative case, rebalance_cb will never be called if the set of partitions being revoked is empty (whether or not lost).

    +

    The callback's opaque argument is the opaque set with rd_kafka_conf_set_opaque().

    +
    Remarks
The partitions list is destroyed by librdkafka on return from the rebalance_cb and must not be freed or saved by the application.
    +
+Be careful when modifying the partitions list. Changing this list should only be done to change the initial offsets for each partition. A function like rd_kafka_position() might have unexpected effects, for instance when a consumer gets assigned a partition it used to consume at an earlier rebalance; the list of partitions will then be updated with the old offset for that partition. In such cases it is generally better to pass a copy of the list (see rd_kafka_topic_partition_list_copy()). The result of rd_kafka_position() is typically outdated in RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS.
    +
    See also
    rd_kafka_assign()
    +
    +rd_kafka_incremental_assign()
    +
    +rd_kafka_incremental_unassign()
    +
    +rd_kafka_assignment_lost()
    +
    +rd_kafka_rebalance_protocol()
    +

    The following example shows the application's responsibilities:

    +
    static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err,
    +                          rd_kafka_topic_partition_list_t *partitions,
    +                          void *opaque) {
    +
    +switch (err)
    +    {
    +      case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
+         // application may load offsets from arbitrary external
+         // storage here and update the partitions list
    +         if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE"))
    +                 rd_kafka_incremental_assign(rk, partitions);
    +         else // EAGER
    +                 rd_kafka_assign(rk, partitions);
    +         break;
    +
    +      case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
    +         if (manual_commits) // Optional explicit manual commit
    +             rd_kafka_commit(rk, partitions, 0); // sync commit
    +
    +         if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE"))
    +                 rd_kafka_incremental_unassign(rk, partitions);
    +         else // EAGER
    +                 rd_kafka_assign(rk, NULL);
    +         break;
    +
    +      default:
    +         handle_unlikely_error(err);
    +         rd_kafka_assign(rk, NULL); // sync state
    +         break;
    +     }
    +}
    +
    Remarks
    The above example lacks error handling for assign calls, see the examples/ directory.
    + +
    +
    Parameters
    +
    conf \FFI\CData|null rd_kafka_conf_t*
    +
rebalance_cb \FFI\CData|\Closure void(*)(rd_kafka_t*, rd_kafka_resp_err_t, rd_kafka_topic_partition_list_t*, void*)
    +
    + +

    rd_kafka_conf_set_resolve_cb()

    +
    public static rd_kafka_conf_set_resolve_cb ( 
    +    \FFI\CData|null $conf, 
    +    \FFI\CData|\Closure $resolve_cb
    + ): void
    +
    +

    Set address resolution callback.

    +

    The callback is responsible for resolving the hostname node and the service service into a list of socket addresses as getaddrinfo(3) would. The hints and res parameters function as they do for getaddrinfo(3). The callback's opaque argument is the opaque set with rd_kafka_conf_set_opaque().

    +

    If the callback is invoked with a NULL node, service, and hints, the callback should instead free the addrinfo struct specified in res. In this case the callback must succeed; the return value will not be checked by the caller.

    +

    The callback's return value is interpreted as the return value of getaddrinfo(3).

    +
    Remarks
    The callback will be called from an internal librdkafka thread.
    + +
    +
    Parameters
    +
    conf \FFI\CData|null rd_kafka_conf_t*
    +
resolve_cb \FFI\CData|\Closure int(*)(const char*, const char*, const struct addrinfo*, struct addrinfo**, void*)
    +
    + +

    rd_kafka_conf_set_socket_cb()

    +
    public static rd_kafka_conf_set_socket_cb ( 
    +    \FFI\CData|null $conf, 
    +    \FFI\CData|\Closure $socket_cb
    + ): void
    +
    +

    Set socket callback.

    +

    The socket callback is responsible for opening a socket according to the supplied domain, type and protocol. The socket shall be created with CLOEXEC set in a racefree fashion, if possible.

    +

    The callback's opaque argument is the opaque set with rd_kafka_conf_set_opaque().

    +

    Default:

    +
      +
    • on linux: racefree CLOEXEC
    • +
    • others : non-racefree CLOEXEC
    • +
    +
    Remarks
    The callback will be called from an internal librdkafka thread.
    + +
    +
    Parameters
    +
    conf \FFI\CData|null rd_kafka_conf_t*
    +
socket_cb \FFI\CData|\Closure int(*)(int, int, int, void*)
    +
    + +

    rd_kafka_conf_set_ssl_cert()

    +
    public static rd_kafka_conf_set_ssl_cert ( 
    +    \FFI\CData|null $conf, 
    +    int $cert_type, 
    +    int $cert_enc, 
    +    \FFI\CData|object|string|null $buffer, 
    +    int|null $size, 
    +    \FFI\CData|null $errstr, 
    +    int|null $errstr_size
    + ): int
    +
    +

    Set certificate/key cert_type from the cert_enc encoded memory at buffer of size bytes.

    + +
    Remarks
    Calling this method multiple times with the same cert_type will replace the previous value.
    +
    +Calling this method with buffer set to NULL will clear the configuration for cert_type.
    +
    +The private key may require a password, which must be specified with the ssl.key.password configuration property prior to calling this function.
    +
    +Private and public keys in PEM format may also be set with the ssl.key.pem and ssl.certificate.pem configuration properties.
    +
    +CA certificate in PEM format may also be set with the ssl.ca.pem configuration property.
    +
    +When librdkafka is linked to OpenSSL 3.0 and the certificate is encoded using an obsolete cipher, it might be necessary to set up an OpenSSL configuration file to load the "legacy" provider and set the OPENSSL_CONF environment variable. See https://github.com/openssl/openssl/blob/master/README-PROVIDERS.md for more information.
    + +
    +
    Parameters
    +
    conf \FFI\CData|null rd_kafka_conf_t* - Configuration object.
    +
    cert_type int rd_kafka_cert_type_t - Certificate or key type to configure.
    +
    cert_enc int rd_kafka_cert_enc_t - Buffer encoding type.
    +
    buffer \FFI\CData|object|string|null const void* - Memory pointer to encoded certificate or key. The memory is not referenced after this function returns.
    +
    size int|null size_t - Size of memory at buffer.
    +
errstr \FFI\CData|null char* - Memory where a human-readable error string will be written on failure.
    +
    errstr_size int|null size_t - Size of errstr, including space for nul-terminator.
    +
    Returns
    +
    int rd_kafka_conf_res_t - RD_KAFKA_CONF_OK on success or RD_KAFKA_CONF_INVALID if the memory in buffer is of incorrect encoding, or if librdkafka was not built with SSL support.
    +
    + +

    rd_kafka_conf_set_ssl_cert_verify_cb()

    +
    public static rd_kafka_conf_set_ssl_cert_verify_cb ( 
    +    \FFI\CData|null $conf, 
    +    \FFI\CData|\Closure $ssl_cert_verify_cb
    + ): int
    +
    +

    Sets the verification callback of the broker certificate.

    +

The verification callback is triggered from internal librdkafka threads upon connecting to a broker. On each connection attempt the callback will be called for each certificate in the broker's certificate chain, starting at the root certificate, as long as the application callback returns 1 (valid certificate). broker_name and broker_id correspond to the broker the connection is being made to. The x509_error argument indicates if OpenSSL's verification of the certificate succeeded (0) or failed (an OpenSSL error code). The application may set the SSL context error code by returning 0 from the verify callback and providing a non-zero SSL context error code in x509_error. If the verify callback sets x509_error to 0, returns 1, and the original x509_error was non-zero, the error on the SSL context will be cleared. x509_error is always a valid pointer to an int.

    +

    depth is the depth of the current certificate in the chain, starting at the root certificate.

    +

    The certificate itself is passed in binary DER format in buf of size size.

    +

    The callback must return 1 if verification succeeds, or 0 if verification fails and then write a human-readable error message to errstr (limited to errstr_size bytes, including nul-term).

    +

    The callback's opaque argument is the opaque set with rd_kafka_conf_set_opaque().

    + +
    Warning
    This callback will be called from internal librdkafka threads.
    +
    Remarks
    See <openssl/x509_vfy.h> in the OpenSSL source distribution for a list of x509_error codes.
    + +
    +
    Parameters
    +
    conf \FFI\CData|null rd_kafka_conf_t*
    +
ssl_cert_verify_cb \FFI\CData|\Closure int(*)(rd_kafka_t*, const char*, int32_t, int*, int, const char*, size_t, char*, size_t, void*)
    +
    Returns
    +
    int rd_kafka_conf_res_t - RD_KAFKA_CONF_OK if SSL is supported in this build, else RD_KAFKA_CONF_INVALID.
    +
    + +

    rd_kafka_conf_set_stats_cb()

    +
    public static rd_kafka_conf_set_stats_cb ( 
    +    \FFI\CData|null $conf, 
    +    \FFI\CData|\Closure $stats_cb
    + ): void
    +
    +

    Set statistics callback in provided conf object.

    +

    The statistics callback is triggered from rd_kafka_poll() every statistics.interval.ms (needs to be configured separately). Function arguments:

    +
      +
    • rk - Kafka handle
    • +
    • json - String containing the statistics data in JSON format
    • +
    • json_len - Length of json string.
    • +
    • opaque - Application-provided opaque as set by rd_kafka_conf_set_opaque().
    • +
    +

    For more information on the format of json, see https://github.com/confluentinc/librdkafka/wiki/Statistics

    +

    If the application wishes to hold on to the json pointer and free it at a later time it must return 1 from the stats_cb. If the application returns 0 from the stats_cb then librdkafka will immediately free the json pointer.

    +

    See STATISTICS.md for a full definition of the JSON object.

    + +
    +
    Parameters
    +
    conf \FFI\CData|null rd_kafka_conf_t*
    +
stats_cb \FFI\CData|\Closure int(*)(rd_kafka_t*, char*, size_t, void*)
    +
    + +
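A minimal C sketch: emit the statistics JSON and let librdkafka free it by returning 0. The 60000 ms interval is only an example value.

static int stats_cb(rd_kafka_t *rk, char *json, size_t json_len, void *opaque) {
    fwrite(json, 1, json_len, stdout);
    fputc('\n', stdout);
    return 0;   /* 0: librdkafka frees json; 1: application takes ownership */
}

rd_kafka_conf_set_stats_cb(conf, stats_cb);
rd_kafka_conf_set(conf, "statistics.interval.ms", "60000", errstr, sizeof(errstr));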

    rd_kafka_conf_set_throttle_cb()

    +
    public static rd_kafka_conf_set_throttle_cb ( 
    +    \FFI\CData|null $conf, 
    +    \FFI\CData|\Closure $throttle_cb
    + ): void
    +
    +

    Set throttle callback.

    +

    The throttle callback is used to forward broker throttle times to the application for Produce and Fetch (consume) requests.

    +

    Callbacks are triggered whenever a non-zero throttle time is returned by the broker, or when the throttle time drops back to zero.

    +

    An application must call rd_kafka_poll() or rd_kafka_consumer_poll() at regular intervals to serve queued callbacks.

    +

    The callback's opaque argument is the opaque set with rd_kafka_conf_set_opaque().

    +
    Remarks
    Requires broker version 0.9.0 or later.
    + +
    +
    Parameters
    +
    conf \FFI\CData|null rd_kafka_conf_t*
    +
throttle_cb \FFI\CData|\Closure void(*)(rd_kafka_t*, const char*, int32_t, int, void*)
    +
    + +

    rd_kafka_consume()

    +
    public static rd_kafka_consume ( 
    +    \FFI\CData|null $rkt, 
    +    int|null $partition, 
    +    int|null $timeout_ms
    + ): \FFI\CData|null
    +
    +

    Consume a single message from topic rkt and partition.

    +

timeout_ms is the maximum amount of time to wait for a message to be received. The consumer must have been previously started with rd_kafka_consume_start().

    + +

    Errors (when returning NULL):

    +
      +
    • ETIMEDOUT - timeout_ms was reached with no new messages fetched.
    • +
    • ENOENT - rkt + partition is unknown. (no prior rd_kafka_consume_start() call)
    • +
    +

    NOTE: The returned message's ..->err must be checked for errors. NOTE: ..->err == RD_KAFKA_RESP_ERR__PARTITION_EOF signals that the end of the partition has been reached, which should typically not be considered an error. The application should handle this case (e.g., ignore).

    +
    Remarks
    on_consume() interceptors may be called from this function prior to passing message to application.
    + +
    +
    Parameters
    +
    rkt \FFI\CData|null rd_kafka_topic_t*
    +
    partition int|null int32_t
    +
    timeout_ms int|null int
    +
    Returns
    +
    \FFI\CData|null rd_kafka_message_t* - a message object on success or NULL on error. The message object must be destroyed with rd_kafka_message_destroy() when the application is done with it.
    +
    + +
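A minimal C sketch of the legacy (simple) consumer flow around this call, combining rd_kafka_consume_start(), rd_kafka_consume() and rd_kafka_consume_stop(). The topic name and partition are only examples, and an existing rk handle is assumed.

rd_kafka_topic_t *rkt = rd_kafka_topic_new(rk, "mytopic", NULL);
if (rd_kafka_consume_start(rkt, 0, RD_KAFKA_OFFSET_BEGINNING) == -1) {
    fprintf(stderr, "consume_start failed: %s\n",
            rd_kafka_err2str(rd_kafka_last_error()));
} else {
    rd_kafka_message_t *msg = rd_kafka_consume(rkt, 0, 1000 /* timeout_ms */);
    if (msg) {
        if (!msg->err)
            printf("got %zu bytes\n", msg->len);
        rd_kafka_message_destroy(msg);
    }
    rd_kafka_consume_stop(rkt, 0);
}
rd_kafka_topic_destroy(rkt);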

    rd_kafka_consume_batch()

    +
    public static rd_kafka_consume_batch ( 
    +    \FFI\CData|null $rkt, 
    +    int|null $partition, 
    +    int|null $timeout_ms, 
    +    \FFI\CData|null $rkmessages, 
    +    int|null $rkmessages_size
    + ): int|null
    +
    +

Consume up to rkmessages_size messages from topic rkt and partition, putting a pointer to each message in the application-provided array rkmessages (of size rkmessages_size entries).

    +

    rd_kafka_consume_batch() provides higher throughput performance than rd_kafka_consume().

    +

    timeout_ms is the maximum amount of time to wait for all of rkmessages_size messages to be put into rkmessages. If no messages were available within the timeout period this function returns 0 and rkmessages remains untouched. This differs somewhat from rd_kafka_consume().

    +

The message objects must be destroyed with rd_kafka_message_destroy() when the application is done with them.

    + +
    See also
    rd_kafka_consume()
    +
    Remarks
    on_consume() interceptors may be called from this function prior to passing message to application.
    + +
    +
    Parameters
    +
    rkt \FFI\CData|null rd_kafka_topic_t*
    +
    partition int|null int32_t
    +
    timeout_ms int|null int
    +
    rkmessages \FFI\CData|null rd_kafka_message_t**
    +
    rkmessages_size int|null size_t
    +
    Returns
    +
    int|null ssize_t - the number of rkmessages added in rkmessages, or -1 on error (same error codes as for rd_kafka_consume().
    +
    + +
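A C sketch of the batch variant, assuming consumption has already been started on the partition with rd_kafka_consume_start():

rd_kafka_message_t *msgs[100];
ssize_t n = rd_kafka_consume_batch(rkt, 0 /* partition */, 1000 /* timeout_ms */,
                                   msgs, 100);
if (n == -1) {
    fprintf(stderr, "consume_batch failed: %s\n",
            rd_kafka_err2str(rd_kafka_last_error()));
} else {
    for (ssize_t i = 0; i < n; i++) {
        /* check msgs[i]->err before using key/payload ... */
        rd_kafka_message_destroy(msgs[i]);
    }
}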

    rd_kafka_consume_batch_queue()

    +
    public static rd_kafka_consume_batch_queue ( 
    +    \FFI\CData|null $rkqu, 
    +    int|null $timeout_ms, 
    +    \FFI\CData|null $rkmessages, 
    +    int|null $rkmessages_size
    + ): int|null
    +
    +

    Consume batch of messages from queue.

    +
    See also
    rd_kafka_consume_batch()
    + +
    +
    Parameters
    +
    rkqu \FFI\CData|null rd_kafka_queue_t*
    +
    timeout_ms int|null int
    +
    rkmessages \FFI\CData|null rd_kafka_message_t**
    +
    rkmessages_size int|null size_t
    +
    Returns
    +
    int|null ssize_t
    +
    + +

    rd_kafka_consume_callback()

    +
    public static rd_kafka_consume_callback ( 
    +    \FFI\CData|null $rkt, 
    +    int|null $partition, 
    +    int|null $timeout_ms, 
    +    \FFI\CData|\Closure $consume_cb, 
    +    \FFI\CData|object|string|null $opaque
    + ): int|null
    +
    +

Consumes messages from topic rkt and partition, calling the provided callback for each consumed message.

    +

    rd_kafka_consume_callback() provides higher throughput performance than both rd_kafka_consume() and rd_kafka_consume_batch().

    +

    timeout_ms is the maximum amount of time to wait for one or more messages to arrive.

    +

The provided consume_cb function is called for each message; the application MUST NOT call rd_kafka_message_destroy() on the provided rkmessage.

    +

    The commit_opaque argument is passed to the consume_cb as commit_opaque.

    + +
    See also
    rd_kafka_consume()
    +
    Remarks
    on_consume() interceptors may be called from this function prior to passing message to application.
    +
+This function will return early if a transaction control message is received; these messages are not exposed to the application but are still enqueued on the consumer queue to make sure their offsets are stored.
    +
    Deprecated:
    This API is deprecated and subject for future removal. There is no new callback-based consume interface, use the poll/queue based alternatives.
    + +
    +
    Parameters
    +
    rkt \FFI\CData|null rd_kafka_topic_t*
    +
    partition int|null int32_t
    +
    timeout_ms int|null int
    +
consume_cb \FFI\CData|\Closure void(*)(rd_kafka_message_t*, void*)
    +
    opaque \FFI\CData|object|string|null void*
    +
    Returns
    +
    int|null int - the number of messages processed or -1 on error.
    +
    + +

    rd_kafka_consume_callback_queue()

    +
    public static rd_kafka_consume_callback_queue ( 
    +    \FFI\CData|null $rkqu, 
    +    int|null $timeout_ms, 
    +    \FFI\CData|\Closure $consume_cb, 
    +    \FFI\CData|object|string|null $opaque
    + ): int|null
    +
    +

    Consume multiple messages from queue with callback.

    +
    See also
    rd_kafka_consume_callback()
    +
    Deprecated:
    This API is deprecated and subject for future removal. There is no new callback-based consume interface, use the poll/queue based alternatives.
    + +
    +
    Parameters
    +
    rkqu \FFI\CData|null rd_kafka_queue_t*
    +
    timeout_ms int|null int
    +
consume_cb \FFI\CData|\Closure void(*)(rd_kafka_message_t*, void*)
    +
    opaque \FFI\CData|object|string|null void*
    +
    Returns
    +
    int|null int
    +
    + +

    rd_kafka_consume_queue()

    +
    public static rd_kafka_consume_queue ( 
    +    \FFI\CData|null $rkqu, 
    +    int|null $timeout_ms
    + ): \FFI\CData|null
    +
    +

    Consume from queue.

    +
    See also
    rd_kafka_consume()
    + +
    +
    Parameters
    +
    rkqu \FFI\CData|null rd_kafka_queue_t*
    +
    timeout_ms int|null int
    +
    Returns
    +
    \FFI\CData|null rd_kafka_message_t*
    +
    + +

    rd_kafka_consume_start()

    +
    public static rd_kafka_consume_start ( 
    +    \FFI\CData|null $rkt, 
    +    int|null $partition, 
    +    int|null $offset
    + ): int|null
    +
    +

    Start consuming messages for topic rkt and partition at offset offset which may either be an absolute (0..N) or one of the logical offsets:

    +
      +
    • RD_KAFKA_OFFSET_BEGINNING
    • +
    • RD_KAFKA_OFFSET_END
    • +
    • RD_KAFKA_OFFSET_STORED
    • +
    • RD_KAFKA_OFFSET_TAIL
    • +
    +

    rdkafka will attempt to keep queued.min.messages (config property) messages in the local queue by repeatedly fetching batches of messages from the broker until the threshold is reached.

    +

    The application shall use one of the rd_kafka_consume*() functions to consume messages from the local queue, each kafka message being represented as a rd_kafka_message_t * object.

    +

    rd_kafka_consume_start() must not be called multiple times for the same topic and partition without stopping consumption first with rd_kafka_consume_stop().

    + +

Use rd_kafka_errno2err() to convert system errno to rd_kafka_resp_err_t

    + +
    +
    Parameters
    +
    rkt \FFI\CData|null rd_kafka_topic_t*
    +
    partition int|null int32_t
    +
    offset int|null int64_t
    +
    Returns
    +
    int|null int - 0 on success or -1 on error in which case errno is set accordingly:
+
• EBUSY - Conflicts with an existing or previous subscription (RD_KAFKA_RESP_ERR__CONFLICT)
• EINVAL - Invalid offset, or incomplete configuration (lacking group.id) (RD_KAFKA_RESP_ERR__INVALID_ARG)
• ESRCH - requested partition is invalid (RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
• ENOENT - topic is unknown in the Kafka cluster (RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
+
+ +

      rd_kafka_consume_start_queue()

      +
      public static rd_kafka_consume_start_queue ( 
      +    \FFI\CData|null $rkt, 
      +    int|null $partition, 
      +    int|null $offset, 
      +    \FFI\CData|null $rkqu
      + ): int|null
      +
      +

Same as rd_kafka_consume_start() but re-routes incoming messages to the provided queue rkqu (which must have been previously allocated with rd_kafka_queue_new()).

      +

      The application must use one of the rd_kafka_consume_*_queue() functions to receive fetched messages.

      +

      rd_kafka_consume_start_queue() must not be called multiple times for the same topic and partition without stopping consumption first with rd_kafka_consume_stop(). rd_kafka_consume_start() and rd_kafka_consume_start_queue() must not be combined for the same topic and partition.

      + +
      +
      Parameters
      +
      rkt \FFI\CData|null rd_kafka_topic_t*
      +
      partition int|null int32_t
      +
      offset int|null int64_t
      +
      rkqu \FFI\CData|null rd_kafka_queue_t*
      +
      Returns
      +
      int|null int
      +
      + +

      rd_kafka_consume_stop()

      +
      public static rd_kafka_consume_stop ( 
      +    \FFI\CData|null $rkt, 
      +    int|null $partition
      + ): int|null
      +
      +

      Stop consuming messages for topic rkt and partition, purging all messages currently in the local queue.

      +

NOTE: To enforce synchronisation this call will block until the internal fetcher has terminated and offsets are committed to the configured storage method.

      +

The application must stop all consumers before calling rd_kafka_destroy() on the main object handle.

      + +
      +
      Parameters
      +
      rkt \FFI\CData|null rd_kafka_topic_t*
      +
      partition int|null int32_t
      +
      Returns
      +
      int|null int - 0 on success or -1 on error (see errno).
      +
      + +

      rd_kafka_consumer_close()

      +
      public static rd_kafka_consumer_close ( 
      +    \FFI\CData|null $rk
      + ): int
      +
      +

      Close the consumer.

      +

This call will block until the consumer has revoked its assignment (calling the rebalance_cb if it is configured), committed offsets to the broker, and left the consumer group (if applicable). The maximum blocking time is roughly limited to session.timeout.ms.

      + +
      Remarks
      The application still needs to call rd_kafka_destroy() after this call finishes to clean up the underlying handle resources.
      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t* - )
      +
      Returns
      +
int rd_kafka_resp_err_t - An error code indicating if the consumer close was successful or not. RD_KAFKA_RESP_ERR__FATAL is returned if the consumer has raised a fatal error.
      +
      + +

      rd_kafka_consumer_close_queue()

      +
      public static rd_kafka_consumer_close_queue ( 
      +    \FFI\CData|null $rk, 
      +    \FFI\CData|null $rkqu
      + ): \FFI\CData|null
      +
      +

      Asynchronously close the consumer.

      +

      Performs the same actions as rd_kafka_consumer_close() but in a background thread.

      +

      Rebalance events/callbacks (etc) will be forwarded to the application-provided rkqu. The application must poll/serve this queue until rd_kafka_consumer_closed() returns true.

      +
      Remarks
      Depending on consumer group join state there may or may not be rebalance events emitted on rkqu.
      + +
      See also
      rd_kafka_consumer_closed()
      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t*
      +
      rkqu \FFI\CData|null rd_kafka_queue_t*
      +
      Returns
      +
      \FFI\CData|null rd_kafka_error_t* - an error object if the consumer close failed, else NULL.
      +
      + +

      rd_kafka_consumer_closed()

      +
      public static rd_kafka_consumer_closed ( 
      +    \FFI\CData|null $rk
      + ): int|null
      +
      +

      Should be used in conjunction with rd_kafka_consumer_close_queue() to know when the consumer has been closed.

      +
      See also
      rd_kafka_consumer_close_queue()
      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t* - )
      +
      Returns
      +
      int|null int - 1 if the consumer is closed, else 0.
      +
      + +
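A C sketch of the asynchronous close pattern combining rd_kafka_consumer_close_queue() and rd_kafka_consumer_closed(); note that a real application must also handle any RD_KAFKA_EVENT_REBALANCE events it receives while draining the queue:

rd_kafka_queue_t *q = rd_kafka_queue_new(rk);
rd_kafka_error_t *error = rd_kafka_consumer_close_queue(rk, q);
if (error) {
    fprintf(stderr, "close failed: %s\n", rd_kafka_error_string(error));
    rd_kafka_error_destroy(error);
} else {
    while (!rd_kafka_consumer_closed(rk)) {
        rd_kafka_event_t *ev = rd_kafka_queue_poll(q, 100 /* timeout_ms */);
        if (ev)
            rd_kafka_event_destroy(ev);   /* handle rebalance events here */
    }
}
rd_kafka_queue_destroy(q);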

      rd_kafka_consumer_group_metadata()

      +
      public static rd_kafka_consumer_group_metadata ( 
      +    \FFI\CData|null $rk
      + ): \FFI\CData|null
      +
      +
      Remarks
      The returned pointer must be freed by the application using rd_kafka_consumer_group_metadata_destroy().
      +
      See also
      rd_kafka_send_offsets_to_transaction()
      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t* - )
      +
      Returns
      +
      \FFI\CData|null rd_kafka_consumer_group_metadata_t* - the current consumer group metadata associated with this consumer, or NULL if rk is not a consumer configured with a group.id. This metadata object should be passed to the transactional producer’s rd_kafka_send_offsets_to_transaction() API.
      +
      + +

      rd_kafka_consumer_group_metadata_destroy()

      +
      public static rd_kafka_consumer_group_metadata_destroy ( 
      +    \FFI\CData|null $arg0
      + ): void
      +
      +
      +
      Parameters
      +
      arg0 \FFI\CData|null rd_kafka_consumer_group_metadata_t*
      +
      +

      rd_kafka_consumer_group_metadata_new()

      +
      public static rd_kafka_consumer_group_metadata_new ( 
      +    string|null $group_id
      + ): \FFI\CData|null
      +
      +

      Create a new consumer group metadata object. This is typically only used for writing tests.

      + +
      Remarks
      The returned pointer must be freed by the application using rd_kafka_consumer_group_metadata_destroy().
      + +
      +
      Parameters
      +
      group_id string|null const char* - ) - The group id.
      +
      Returns
      +
      \FFI\CData|null rd_kafka_consumer_group_metadata_t*
      +
      + +

      rd_kafka_consumer_group_metadata_new_with_genid()

      +
      public static rd_kafka_consumer_group_metadata_new_with_genid ( 
      +    string|null $group_id, 
      +    int|null $generation_id, 
      +    string|null $member_id, 
      +    string|null $group_instance_id
      + ): \FFI\CData|null
      +
      +

      Create a new consumer group metadata object. This is typically only used for writing tests.

      + +
      Remarks
      The returned pointer must be freed by the application using rd_kafka_consumer_group_metadata_destroy().
      + +
      +
      Parameters
      +
      group_id string|null const char* - The group id.
      +
      generation_id int|null int32_t - The group generation id.
      +
      member_id string|null const char* - The group member id.
      +
      group_instance_id string|null const char* - The group instance id (may be NULL).
      +
      Returns
      +
      \FFI\CData|null rd_kafka_consumer_group_metadata_t*
      +
      + +

      rd_kafka_consumer_group_metadata_read()

      +
      public static rd_kafka_consumer_group_metadata_read ( 
      +    \FFI\CData|null $cgmdp, 
      +    \FFI\CData|object|string|null $buffer, 
      +    int|null $size
      + ): \FFI\CData|null
      +
      +

      Reads serialized consumer group metadata and returns a consumer group metadata object. This is mainly for client binding use and not for application use.

      +
      Remarks
      The serialized metadata format is private and is not compatible across different versions or even builds of librdkafka. It should only be used in the same process runtime and must only be passed to rd_kafka_consumer_group_metadata_read().
      + +
      See also
      rd_kafka_consumer_group_metadata_write()
      + +
      +
      Parameters
      +
      cgmdp \FFI\CData|null rd_kafka_consumer_group_metadata_t** - On success this pointer will be updated to point to a new consumer group metadata object which must be freed with rd_kafka_consumer_group_metadata_destroy().
      +
      buffer \FFI\CData|object|string|null const void* - Pointer to the serialized data.
      +
      size int|null size_t - Size of the serialized data.
      +
      Returns
      +
      \FFI\CData|null rd_kafka_error_t* - NULL on success or an error object on failure.
      +
      + +

      rd_kafka_consumer_group_metadata_write()

      +
      public static rd_kafka_consumer_group_metadata_write ( 
      +    \FFI\CData|null $cgmd, 
      +    \FFI\CData|object|string|null $bufferp, 
      +    \FFI\CData|null $sizep
      + ): \FFI\CData|null
      +
      +

      Serialize the consumer group metadata to a binary format. This is mainly for client binding use and not for application use.

      +
      Remarks
      The serialized metadata format is private and is not compatible across different versions or even builds of librdkafka. It should only be used in the same process runtime and must only be passed to rd_kafka_consumer_group_metadata_read().
      + +
      See also
      rd_kafka_consumer_group_metadata_read()
      + +
      +
      Parameters
      +
      cgmd \FFI\CData|null const rd_kafka_consumer_group_metadata_t* - Metadata to be serialized.
      +
bufferp \FFI\CData|object|string|null void** - On success this pointer will be updated to point to an allocated buffer containing the serialized metadata. The buffer must be freed with rd_kafka_mem_free().
      +
      sizep \FFI\CData|null size_t* - The pointed to size will be updated with the size of the serialized buffer.
      +
      Returns
      +
      \FFI\CData|null rd_kafka_error_t* - NULL on success or an error object on failure.
      +
      + +
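A sketch of how the two out-parameters map to PHP FFI scalars, assuming $cgmd is an existing rd_kafka_consumer_group_metadata_t* (e.g. from rd_kafka_consumer_group_metadata_new() above) and that rd_kafka_mem_free() and the error helpers are exposed on the same assumed \RdKafka\FFI\Library class.

```php
<?php
use RdKafka\FFI\Library;

$bufferp = \FFI::new('void*');   // receives the allocated buffer
$sizep   = \FFI::new('size_t');  // receives the buffer size

$error = Library::rd_kafka_consumer_group_metadata_write($cgmd, \FFI::addr($bufferp), \FFI::addr($sizep));
if ($error === null) {
    // Copy the serialized bytes into a PHP string before releasing the buffer.
    $serialized = \FFI::string($bufferp, $sizep->cdata);
    // The buffer must be freed with rd_kafka_mem_free(); passing null for rk is an assumption here.
    Library::rd_kafka_mem_free(null, $bufferp);
} else {
    error_log(Library::rd_kafka_error_string($error));
    Library::rd_kafka_error_destroy($error);
}
```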

      rd_kafka_consumer_group_state_code()

      +
      public static rd_kafka_consumer_group_state_code ( 
      +    string|null $name
      + ): int
      +
      +

      Returns a code for a state name.

      + +
      +
      Parameters
      +
      name string|null const char* - ) - The state name.
      +
      Returns
      +
      int rd_kafka_consumer_group_state_t - The group state value corresponding to the provided group state name.
      +
      + +

      rd_kafka_consumer_group_state_name()

      +
      public static rd_kafka_consumer_group_state_name ( 
      +    int $state
      + ): string|null
      +
      +

      Returns a name for a state code.

      + +
      +
      Parameters
      +
      state int rd_kafka_consumer_group_state_t - ) - The state value.
      +
      Returns
      +
      string|null const char* - The group state name corresponding to the provided group state value.
      +
      + +

      rd_kafka_consumer_poll()

      +
      public static rd_kafka_consumer_poll ( 
      +    \FFI\CData|null $rk, 
      +    int|null $timeout_ms
      + ): \FFI\CData|null
      +
      +

      Poll the consumer for messages or events.

      +

      Will block for at most timeout_ms milliseconds.

      +
      Remarks
      An application should make sure to call consumer_poll() at regular intervals, even if no messages are expected, to serve any queued callbacks waiting to be called. This is especially important when a rebalance_cb has been registered as it needs to be called and handled properly to synchronize internal consumer state.
      + +
      Remarks
      on_consume() interceptors may be called from this function prior to passing message to application.
      +
      +When subscribing to topics the application must call poll at least every max.poll.interval.ms to remain a member of the consumer group.
      +

      Noteworthy errors returned in ->err:

      +
        +
      • RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED - application failed to call poll within max.poll.interval.ms.
      • +
      +
      See also
      rd_kafka_message_t
      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t*
      +
      timeout_ms int|null int
      +
      Returns
      +
      \FFI\CData|null rd_kafka_message_t* - A message object which is a proper message if ->err is RD_KAFKA_RESP_ERR_NO_ERROR, or an event or error for any other value.
      +
      + +
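A sketch of a poll loop. Assumptions: $rk is a consumer rd_kafka_t* handle that was created and subscribed elsewhere, the usual RD_KAFKA_RESP_ERR_* constants are defined by the binding, and rd_kafka_message_destroy() is exposed on the same assumed \RdKafka\FFI\Library class.

```php
<?php
use RdKafka\FFI\Library;

while (true) {
    $message = Library::rd_kafka_consumer_poll($rk, 1000); // block for at most 1 s
    if ($message === null) {
        continue; // timeout: no message, but queued callbacks were served
    }
    if ($message->err === RD_KAFKA_RESP_ERR_NO_ERROR) {
        // A proper message: payload, key, offset etc. are fields of rd_kafka_message_t.
    } elseif ($message->err === RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED) {
        // The application failed to poll within max.poll.interval.ms.
    }
    Library::rd_kafka_message_destroy($message);
}
```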

      rd_kafka_controllerid()

      +
      public static rd_kafka_controllerid ( 
      +    \FFI\CData|null $rk, 
      +    int|null $timeout_ms
      + ): int|null
      +
      +

      Returns the current ControllerId as reported in broker metadata.

      + +
      Remarks
      Requires broker version >=0.10.0 and api.version.request=true.
      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t* - Client instance.
      +
      timeout_ms int|null int - If there is no cached value from metadata retrieval then this specifies the maximum amount of time (in milliseconds) the call will block waiting for metadata to be retrieved. Use 0 for non-blocking calls.
      +
      Returns
      +
      int|null int32_t - the controller broker id (>= 0), or -1 if no ControllerId could be retrieved in the allotted timespan.
      +
      + +

      rd_kafka_default_topic_conf_dup()

      +
      public static rd_kafka_default_topic_conf_dup ( 
      +    \FFI\CData|null $rk
      + ): \FFI\CData|null
      +
      +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t*
      +
      Returns
      +
      \FFI\CData|null rd_kafka_topic_conf_t*
      +
      +

      rd_kafka_destroy()

      +
      public static rd_kafka_destroy ( 
      +    \FFI\CData|null $rk
      + ): void
      +
      +

      Destroy Kafka handle.

      +
      Remarks
      This is a blocking operation.
      +
      +rd_kafka_consumer_close() will be called from this function if the instance type is RD_KAFKA_CONSUMER, a group.id was configured, and the rd_kafka_consumer_close() was not explicitly called by the application. This in turn may trigger consumer callbacks, such as rebalance_cb. Use rd_kafka_destroy_flags() with RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE to avoid this behaviour.
      +
      See also
      rd_kafka_destroy_flags()
      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t* - )
      +
      + +

      rd_kafka_destroy_flags()

      +
      public static rd_kafka_destroy_flags ( 
      +    \FFI\CData|null $rk, 
      +    int|null $flags
      + ): void
      +
      +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t*
      +
      flags int|null int
      +
      +

      rd_kafka_dump()

      +
      public static rd_kafka_dump ( 
      +    \FFI\CData|null $fp, 
      +    \FFI\CData|null $rk
      + ): void
      +
      +

      Dumps rdkafka's internal state for handle rk to stream fp.

      +

      This is only useful for debugging rdkafka, showing state and statistics for brokers, topics, partitions, etc.

      + +
      +
      Parameters
      +
      fp \FFI\CData|null FILE*
      +
      rk \FFI\CData|null rd_kafka_t*
      +
      + +

      rd_kafka_err2name()

      +
      public static rd_kafka_err2name ( 
      +    int $err
      + ): string|null
      +
      +

      Returns the error code name (enum name).

      + +
      +
      Parameters
      +
      err int rd_kafka_resp_err_t - ) - Error code to translate
      +
      Returns
      +
      string|null const char*
      +
      + +

      rd_kafka_err2str()

      +
      public static rd_kafka_err2str ( 
      +    int $err
      + ): string|null
      +
      +

      Returns a human readable representation of a kafka error.

      + +
      +
      Parameters
      +
      err int rd_kafka_resp_err_t - ) - Error code to translate
      +
      Returns
      +
      string|null const char*
      +
      + +
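A minimal sketch combining the two translations above, assuming the binding class name \RdKafka\FFI\Library and that the RD_KAFKA_RESP_ERR_* constants are defined.

```php
<?php
use RdKafka\FFI\Library;

$err = RD_KAFKA_RESP_ERR__TIMED_OUT; // any rd_kafka_resp_err_t value
printf(
    "%s: %s\n",
    Library::rd_kafka_err2name($err), // enum-style code name
    Library::rd_kafka_err2str($err)   // human readable description
);
```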

      rd_kafka_errno()

      +
      public static rd_kafka_errno (  ): int|null
      +
      +

      Returns the thread-local system errno.

      +

      On most platforms this is the same as errno but in case of different runtimes between library and application (e.g., Windows static DLLs) this provides a means for exposing the errno librdkafka uses.

      +
      Remarks
      The value is local to the current calling thread.
      +
      Deprecated:
      Use rd_kafka_last_error() to retrieve the last error code set by the legacy librdkafka APIs.
      + +
      +
      Returns
      +
      int|null int - )
      +
      + +

      rd_kafka_errno2err()

      +
      public static rd_kafka_errno2err ( 
      +    int|null $errnox
      + ): int
      +
      +

      Converts the system errno value errnox to a rd_kafka_resp_err_t error code upon failure from the following functions:

      +
        +
      • rd_kafka_topic_new()
      • +
      • rd_kafka_consume_start()
      • +
      • rd_kafka_consume_stop()
      • +
      • rd_kafka_consume()
      • +
      • rd_kafka_consume_batch()
      • +
      • rd_kafka_consume_callback()
      • +
      • rd_kafka_consume_queue()
      • +
      • rd_kafka_produce()
      • +
      + +
      Remarks
      A better alternative is to call rd_kafka_last_error() immediately after any of the above functions return -1 or NULL.
      +
      Deprecated:
      Use rd_kafka_last_error() to retrieve the last error code set by the legacy librdkafka APIs.
      +
      See also
      rd_kafka_last_error()
      + +
      +
      Parameters
      +
      errnox int|null int - ) - System errno value to convert
      +
      Returns
      +
      int rd_kafka_resp_err_t - Appropriate error code for errnox
      +
      + +

      rd_kafka_error_code()

      +
      public static rd_kafka_error_code ( 
      +    \FFI\CData|null $error
      + ): int
      +
      +
      +
      Parameters
      +
      error \FFI\CData|null const rd_kafka_error_t* - )
      +
      Returns
      +
      int rd_kafka_resp_err_t - the error code for error or RD_KAFKA_RESP_ERR_NO_ERROR if error is NULL.
      +
      + +

      rd_kafka_error_destroy()

      +
      public static rd_kafka_error_destroy ( 
      +    \FFI\CData|null $error
      + ): void
      +
      +

      Free and destroy an error object.

      +
      Remarks
As a convenience it is permitted to pass a NULL error.
      + +
      +
      Parameters
      +
      error \FFI\CData|null rd_kafka_error_t* - )
      +
      + +

      rd_kafka_error_is_fatal()

      +
      public static rd_kafka_error_is_fatal ( 
      +    \FFI\CData|null $error
      + ): int|null
      +
      +
      +
      Parameters
      +
      error \FFI\CData|null const rd_kafka_error_t* - )
      +
      Returns
      +
      int|null int - 1 if the error is a fatal error, indicating that the client instance is no longer usable, else 0 (also if error is NULL).
      +
      + +

      rd_kafka_error_is_retriable()

      +
      public static rd_kafka_error_is_retriable ( 
      +    \FFI\CData|null $error
      + ): int|null
      +
      +
      +
      Parameters
      +
      error \FFI\CData|null const rd_kafka_error_t* - )
      +
      Returns
      +
      int|null int - 1 if the operation may be retried, else 0 (also if error is NULL).
      +
      + +

      rd_kafka_error_name()

      +
      public static rd_kafka_error_name ( 
      +    \FFI\CData|null $error
      + ): string|null
      +
      +
      Remarks
      The lifetime of the returned pointer is the same as the error object.
      +
      See also
      rd_kafka_err2name()
      + +
      +
      Parameters
      +
      error \FFI\CData|null const rd_kafka_error_t* - )
      +
      Returns
      +
string|null const char* - the error code name for error, e.g. “ERR_UNKNOWN_MEMBER_ID”, or an empty string if error is NULL.
      +
      + +

      rd_kafka_error_new()

      +
      public static rd_kafka_error_new ( 
      +    int $code, 
      +    string|null $fmt, 
      +    mixed $args
      + ): \FFI\CData|null
      +
      +

      Create a new error object with error code and optional human readable error string in fmt.

      +

      This method is mainly to be used for mocking errors in application test code.

      +

      The returned object must be destroyed with rd_kafka_error_destroy().

      + +
      +
      Parameters
      +
      code int rd_kafka_resp_err_t
      +
      fmt string|null const char*
      +
      args mixed
      +
      Returns
      +
      \FFI\CData|null rd_kafka_error_t*
      +
      + +

      rd_kafka_error_string()

      +
      public static rd_kafka_error_string ( 
      +    \FFI\CData|null $error
      + ): string|null
      +
      +
      Remarks
      The lifetime of the returned pointer is the same as the error object.
      + +
      +
      Parameters
      +
      error \FFI\CData|null const rd_kafka_error_t* - )
      +
      Returns
      +
      string|null const char* - a human readable error string for error, or an empty string if error is NULL.
      +
      + +

      rd_kafka_error_txn_requires_abort()

      +
      public static rd_kafka_error_txn_requires_abort ( 
      +    \FFI\CData|null $error
      + ): int|null
      +
      +
      Remarks
      The return value of this method is only valid for errors returned by the transactional API.
      + +
      +
      Parameters
      +
      error \FFI\CData|null const rd_kafka_error_t* - )
      +
      Returns
      +
      int|null int - 1 if the error is an abortable transaction error in which case the application must call rd_kafka_abort_transaction() and start a new transaction with rd_kafka_begin_transaction() if it wishes to proceed with transactions. Else returns 0 (also if error is NULL).
      +
      + +
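A sketch of generic handling for an rd_kafka_error_t* such as those returned by the transactional and incremental assignment APIs, assuming $error holds such a pointer (or null) and the binding class name \RdKafka\FFI\Library.

```php
<?php
use RdKafka\FFI\Library;

if ($error !== null) {
    $summary = sprintf(
        '%s: %s (retriable=%d, fatal=%d, txn_requires_abort=%d)',
        Library::rd_kafka_error_name($error),
        Library::rd_kafka_error_string($error),
        Library::rd_kafka_error_is_retriable($error),
        Library::rd_kafka_error_is_fatal($error),
        Library::rd_kafka_error_txn_requires_abort($error)
    );
    Library::rd_kafka_error_destroy($error); // the caller owns the error object
    error_log($summary);
}
```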

      rd_kafka_event_AlterConfigs_result()

      +
      public static rd_kafka_event_AlterConfigs_result ( 
      +    \FFI\CData|null $rkev
      + ): \FFI\CData|null
      +
      +

      Get AlterConfigs result.

      + +

      Event types: RD_KAFKA_EVENT_ALTERCONFIGS_RESULT

      + +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t* - )
      +
      Returns
      +
      \FFI\CData|null const rd_kafka_AlterConfigs_result_t* - the result of a AlterConfigs request, or NULL if event is of different type.
      +
      + +

      rd_kafka_event_AlterConsumerGroupOffsets_result()

      +
      public static rd_kafka_event_AlterConsumerGroupOffsets_result ( 
      +    \FFI\CData|null $rkev
      + ): \FFI\CData|null
      +
      +

      Get AlterConsumerGroupOffsets result.

      + +
      Remarks
      The lifetime of the returned memory is the same as the lifetime of the rkev object.
      +

      Event types: RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT

      + +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t* - )
      +
      Returns
      +
      \FFI\CData|null const rd_kafka_AlterConsumerGroupOffsets_result_t* - the result of a AlterConsumerGroupOffsets request, or NULL if event is of different type.
      +
      + +

      rd_kafka_event_AlterUserScramCredentials_result()

      +
      public static rd_kafka_event_AlterUserScramCredentials_result ( 
      +    \FFI\CData|null $rkev
      + ): \FFI\CData|null
      +
      +

      Get AlterUserScramCredentials result.

      + +
      Remarks
      The lifetime of the returned memory is the same as the lifetime of the rkev object.
      +

      Event types: RD_KAFKA_EVENT_ALTERUSERSCRAMCREDENTIALS_RESULT

      + +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t* - )
      +
      Returns
      +
      \FFI\CData|null const rd_kafka_AlterUserScramCredentials_result_t* - the result of a AlterUserScramCredentials request, or NULL if event is of different type.
      +
      + +

      rd_kafka_event_CreateAcls_result()

      +
      public static rd_kafka_event_CreateAcls_result ( 
      +    \FFI\CData|null $rkev
      + ): \FFI\CData|null
      +
      +

      Event types: RD_KAFKA_EVENT_CREATEACLS_RESULT

      + +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t* - )
      +
      Returns
      +
      \FFI\CData|null const rd_kafka_CreateAcls_result_t* - the result of a CreateAcls request, or NULL if event is of different type.
      +
      + +

      rd_kafka_event_CreatePartitions_result()

      +
      public static rd_kafka_event_CreatePartitions_result ( 
      +    \FFI\CData|null $rkev
      + ): \FFI\CData|null
      +
      +

      Get CreatePartitions result.

      + +

      Event types: RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT

      + +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t* - )
      +
      Returns
      +
      \FFI\CData|null const rd_kafka_CreatePartitions_result_t* - the result of a CreatePartitions request, or NULL if event is of different type.
      +
      + +

      rd_kafka_event_CreateTopics_result()

      +
      public static rd_kafka_event_CreateTopics_result ( 
      +    \FFI\CData|null $rkev
      + ): \FFI\CData|null
      +
      +

      Get CreateTopics result.

      + +

      Event types: RD_KAFKA_EVENT_CREATETOPICS_RESULT

      + +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t* - )
      +
      Returns
      +
      \FFI\CData|null const rd_kafka_CreateTopics_result_t* - the result of a CreateTopics request, or NULL if event is of different type.
      +
      + +

      rd_kafka_event_DeleteAcls_result()

      +
      public static rd_kafka_event_DeleteAcls_result ( 
      +    \FFI\CData|null $rkev
      + ): \FFI\CData|null
      +
      +

      Event types: RD_KAFKA_EVENT_DELETEACLS_RESULT

      + +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t* - )
      +
      Returns
      +
      \FFI\CData|null const rd_kafka_DeleteAcls_result_t* - the result of a DeleteAcls request, or NULL if event is of different type.
      +
      + +

      rd_kafka_event_DeleteConsumerGroupOffsets_result()

      +
      public static rd_kafka_event_DeleteConsumerGroupOffsets_result ( 
      +    \FFI\CData|null $rkev
      + ): \FFI\CData|null
      +
      +

      Get DeleteConsumerGroupOffsets result.

      + +

      Event types: RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT

      + +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t* - )
      +
      Returns
      +
      \FFI\CData|null const rd_kafka_DeleteConsumerGroupOffsets_result_t* - the result of a DeleteConsumerGroupOffsets request, or NULL if event is of different type.
      +
      + +

      rd_kafka_event_DeleteGroups_result()

      +
      public static rd_kafka_event_DeleteGroups_result ( 
      +    \FFI\CData|null $rkev
      + ): \FFI\CData|null
      +
      +

      Get DeleteGroups result.

      + +

      Event types: RD_KAFKA_EVENT_DELETEGROUPS_RESULT

      + +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t* - )
      +
      Returns
      +
      \FFI\CData|null const rd_kafka_DeleteGroups_result_t* - the result of a DeleteGroups request, or NULL if event is of different type.
      +
      + +

      rd_kafka_event_DeleteRecords_result()

      +
      public static rd_kafka_event_DeleteRecords_result ( 
      +    \FFI\CData|null $rkev
      + ): \FFI\CData|null
      +
      +

      Event types: RD_KAFKA_EVENT_DELETERECORDS_RESULT

      + +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t* - )
      +
      Returns
      +
      \FFI\CData|null const rd_kafka_DeleteRecords_result_t* - the result of a DeleteRecords request, or NULL if event is of different type.
      +
      + +

      rd_kafka_event_DeleteTopics_result()

      +
      public static rd_kafka_event_DeleteTopics_result ( 
      +    \FFI\CData|null $rkev
      + ): \FFI\CData|null
      +
      +

      Get DeleteTopics result.

      + +

      Event types: RD_KAFKA_EVENT_DELETETOPICS_RESULT

      + +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t* - )
      +
      Returns
      +
      \FFI\CData|null const rd_kafka_DeleteTopics_result_t* - the result of a DeleteTopics request, or NULL if event is of different type.
      +
      + +

      rd_kafka_event_DescribeAcls_result()

      +
      public static rd_kafka_event_DescribeAcls_result ( 
      +    \FFI\CData|null $rkev
      + ): \FFI\CData|null
      +
      +

      Event types: RD_KAFKA_EVENT_DESCRIBEACLS_RESULT

      + +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t* - )
      +
      Returns
      +
      \FFI\CData|null const rd_kafka_DescribeAcls_result_t* - the result of a DescribeAcls request, or NULL if event is of different type.
      +
      + +

      rd_kafka_event_DescribeCluster_result()

      +
      public static rd_kafka_event_DescribeCluster_result ( 
      +    \FFI\CData|null $rkev
      + ): \FFI\CData|null
      +
      +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t*
      +
      Returns
      +
      \FFI\CData|null const rd_kafka_DescribeCluster_result_t*
      +
      +

      rd_kafka_event_DescribeConfigs_result()

      +
      public static rd_kafka_event_DescribeConfigs_result ( 
      +    \FFI\CData|null $rkev
      + ): \FFI\CData|null
      +
      +

      Get DescribeConfigs result.

      + +

      Event types: RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT

      + +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t* - )
      +
      Returns
      +
      \FFI\CData|null const rd_kafka_DescribeConfigs_result_t* - the result of a DescribeConfigs request, or NULL if event is of different type.
      +
      + +

      rd_kafka_event_DescribeConsumerGroups_result()

      +
      public static rd_kafka_event_DescribeConsumerGroups_result ( 
      +    \FFI\CData|null $rkev
      + ): \FFI\CData|null
      +
      +

      Get DescribeConsumerGroups result.

      + +
      Remarks
      The lifetime of the returned memory is the same as the lifetime of the rkev object.
      +

      Event types: RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT

      + +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t* - )
      +
      Returns
      +
      \FFI\CData|null const rd_kafka_DescribeConsumerGroups_result_t* - the result of a DescribeConsumerGroups request, or NULL if event is of different type.
      +
      + +

      rd_kafka_event_DescribeTopics_result()

      +
      public static rd_kafka_event_DescribeTopics_result ( 
      +    \FFI\CData|null $rkev
      + ): \FFI\CData|null
      +
      +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t*
      +
      Returns
      +
      \FFI\CData|null const rd_kafka_DescribeTopics_result_t*
      +
      +

      rd_kafka_event_DescribeUserScramCredentials_result()

      +
      public static rd_kafka_event_DescribeUserScramCredentials_result ( 
      +    \FFI\CData|null $rkev
      + ): \FFI\CData|null
      +
      +

      Get DescribeUserScramCredentials result.

      + +
      Remarks
      The lifetime of the returned memory is the same as the lifetime of the rkev object.
      +

      Event types: RD_KAFKA_EVENT_DESCRIBEUSERSCRAMCREDENTIALS_RESULT

      + +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t* - )
      +
      Returns
      +
      \FFI\CData|null const rd_kafka_DescribeUserScramCredentials_result_t* - the result of a DescribeUserScramCredentials request, or NULL if event is of different type.
      +
      + +

      rd_kafka_event_IncrementalAlterConfigs_result()

      +
      public static rd_kafka_event_IncrementalAlterConfigs_result ( 
      +    \FFI\CData|null $rkev
      + ): \FFI\CData|null
      +
      +

      Get IncrementalAlterConfigs result.

      + +

      Event types: RD_KAFKA_EVENT_INCREMENTALALTERCONFIGS_RESULT

      + +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t* - )
      +
      Returns
      +
      \FFI\CData|null const rd_kafka_IncrementalAlterConfigs_result_t* - the result of a IncrementalAlterConfigs request, or NULL if event is of different type.
      +
      + +

      rd_kafka_event_ListConsumerGroupOffsets_result()

      +
      public static rd_kafka_event_ListConsumerGroupOffsets_result ( 
      +    \FFI\CData|null $rkev
      + ): \FFI\CData|null
      +
      +

      Get ListConsumerGroupOffsets result.

      + +
      Remarks
      The lifetime of the returned memory is the same as the lifetime of the rkev object.
      +

      Event types: RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT

      + +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t* - )
      +
      Returns
      +
      \FFI\CData|null const rd_kafka_ListConsumerGroupOffsets_result_t* - the result of a ListConsumerGroupOffsets request, or NULL if event is of different type.
      +
      + +

      rd_kafka_event_ListConsumerGroups_result()

      +
      public static rd_kafka_event_ListConsumerGroups_result ( 
      +    \FFI\CData|null $rkev
      + ): \FFI\CData|null
      +
      +

      Get ListConsumerGroups result.

      + +
      Remarks
      The lifetime of the returned memory is the same as the lifetime of the rkev object.
      +

      Event types: RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT

      + +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t* - )
      +
      Returns
      +
      \FFI\CData|null const rd_kafka_ListConsumerGroups_result_t* - the result of a ListConsumerGroups request, or NULL if event is of different type.
      +
      + +

      rd_kafka_event_ListOffsets_result()

      +
      public static rd_kafka_event_ListOffsets_result ( 
      +    \FFI\CData|null $rkev
      + ): \FFI\CData|null
      +
      +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t*
      +
      Returns
      +
      \FFI\CData|null const rd_kafka_ListOffsets_result_t*
      +
      +

      rd_kafka_event_config_string()

      +
      public static rd_kafka_event_config_string ( 
      +    \FFI\CData|null $rkev
      + ): string|null
      +
      +

      The returned memory is read-only and its lifetime is the same as the event object.

      +

      Event types:

      +
        +
      • RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH: value of sasl.oauthbearer.config
      • +
      + +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t* - )
      +
      Returns
      +
      string|null const char* - the associated configuration string for the event, or NULL if the configuration property is not set or if not applicable for the given event type.
      +
      + +

      rd_kafka_event_debug_contexts()

      +
      public static rd_kafka_event_debug_contexts ( 
      +    \FFI\CData|null $rkev, 
      +    \FFI\CData|null $dst, 
      +    int|null $dstsize
      + ): int|null
      +
      +

      Extract log debug context from event.

      +

      Event types:

      +
        +
      • RD_KAFKA_EVENT_LOG
      • +
      + +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t* - the event to extract data from.
      +
      dst \FFI\CData|null char* - destination string for comma separated list.
      +
      dstsize int|null size_t - size of provided dst buffer.
      +
      Returns
      +
      int|null int - 0 on success or -1 if unsupported event type.
      +
      + +

      rd_kafka_event_destroy()

      +
      public static rd_kafka_event_destroy ( 
      +    \FFI\CData|null $rkev
      + ): void
      +
      +

      Destroy an event.

      +
      Remarks
      Any references to this event, such as extracted messages, will not be usable after this call.
      +
      +As a convenience it is okay to pass rkev as NULL in which case no action is performed.
      + +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t* - )
      +
      + +

      rd_kafka_event_error()

      +
      public static rd_kafka_event_error ( 
      +    \FFI\CData|null $rkev
      + ): int
      +
      +

      Use rd_kafka_event_error_is_fatal() to detect if this is a fatal error.

      +

      Event types:

      +
        +
      • all
      • +
      + +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t* - )
      +
      Returns
      +
      int rd_kafka_resp_err_t - the error code for the event.
      +
      + +

      rd_kafka_event_error_is_fatal()

      +
      public static rd_kafka_event_error_is_fatal ( 
      +    \FFI\CData|null $rkev
      + ): int|null
      +
      +

      Event types:

      +
        +
      • RD_KAFKA_EVENT_ERROR
      • +
      +
      See also
      rd_kafka_fatal_error()
      + +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t* - )
      +
      Returns
      +
      int|null int - 1 if the error is a fatal error, else 0.
      +
      + +

      rd_kafka_event_error_string()

      +
      public static rd_kafka_event_error_string ( 
      +    \FFI\CData|null $rkev
      + ): string|null
      +
      +

      Event types:

      +
        +
      • all
      • +
      + +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t* - )
      +
      Returns
      +
      string|null const char* - the error string (if any). An application should check that rd_kafka_event_error() returns non-zero before calling this function.
      +
      + +

      rd_kafka_event_log()

      +
      public static rd_kafka_event_log ( 
      +    \FFI\CData|null $rkev, 
      +    \FFI\CData|null $fac, 
      +    \FFI\CData|null $str, 
      +    \FFI\CData|null $level
      + ): int|null
      +
      +

      Extract log message from the event.

      +

      Event types:

      +
        +
      • RD_KAFKA_EVENT_LOG
      • +
      + +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t*
      +
      fac \FFI\CData|null const char**
      +
      str \FFI\CData|null const char**
      +
      level \FFI\CData|null int*
      +
      Returns
      +
      int|null int - 0 on success or -1 if unsupported event type.
      +
      + +

      rd_kafka_event_message_array()

      +
      public static rd_kafka_event_message_array ( 
      +    \FFI\CData|null $rkev, 
      +    \FFI\CData|null $rkmessages, 
      +    int|null $size
      + ): int|null
      +
      +

Extracts size message(s) from the event into the pre-allocated array rkmessages.
Extracts size message(s) from the event into the pre-allocated array rkmessages.

      +

      Event types:

      +
        +
      • RD_KAFKA_EVENT_FETCH (1 message)
      • +
      • RD_KAFKA_EVENT_DR (>=1 message(s))
      • +
      + +
      Remarks
      on_consume() interceptor may be called from this function prior to passing message to application.
      + +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t*
      +
      rkmessages \FFI\CData|null const rd_kafka_message_t**
      +
      size int|null size_t
      +
      Returns
      +
      int|null size_t - the number of messages extracted.
      +
      + +

      rd_kafka_event_message_count()

      +
      public static rd_kafka_event_message_count ( 
      +    \FFI\CData|null $rkev
      + ): int|null
      +
      +

      Event types:

      +
        +
      • RD_KAFKA_EVENT_FETCH (1 message)
      • +
      • RD_KAFKA_EVENT_DR (>=1 message(s))
      • +
      + +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t* - )
      +
      Returns
      +
      int|null size_t - the number of remaining messages in the event.
      +
      + +

      rd_kafka_event_message_next()

      +
      public static rd_kafka_event_message_next ( 
      +    \FFI\CData|null $rkev
      + ): \FFI\CData|null
      +
      +

      Call repeatedly until it returns NULL.

      +

      Event types:

      +
        +
      • RD_KAFKA_EVENT_FETCH (1 message)
      • +
      • RD_KAFKA_EVENT_DR (>=1 message(s))
      • +
      +
      Remarks
      The returned message(s) MUST NOT be freed with rd_kafka_message_destroy().
      +
      +on_consume() interceptor may be called from this function prior to passing message to application.
      + +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t* - )
      +
      Returns
      +
      \FFI\CData|null const rd_kafka_message_t* - the next message from an event.
      +
      + +
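A sketch of draining all messages from a fetch or delivery report event, assuming $rkev is an rd_kafka_event_t* of type RD_KAFKA_EVENT_FETCH or RD_KAFKA_EVENT_DR and the binding class name \RdKafka\FFI\Library.

```php
<?php
use RdKafka\FFI\Library;

while (($message = Library::rd_kafka_event_message_next($rkev)) !== null) {
    // Inspect $message->err, $message->partition, $message->offset, ...
    // Do NOT call rd_kafka_message_destroy() on messages obtained from an event.
}
Library::rd_kafka_event_destroy($rkev); // also invalidates the extracted messages
```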

      rd_kafka_event_name()

      +
      public static rd_kafka_event_name ( 
      +    \FFI\CData|null $rkev
      + ): string|null
      +
      +
      Remarks
      As a convenience it is okay to pass rkev as NULL in which case the name for RD_KAFKA_EVENT_NONE is returned.
      + +
      +
      Parameters
      +
      rkev \FFI\CData|null const rd_kafka_event_t* - )
      +
      Returns
      +
      string|null const char* - the event type’s name for the given event.
      +
      + +

      rd_kafka_event_opaque()

      +
      public static rd_kafka_event_opaque ( 
      +    \FFI\CData|null $rkev
      + ): \FFI\CData|object|string|null
      +
      +

      Event types:

      +
        +
      • RD_KAFKA_EVENT_OFFSET_COMMIT
      • +
      • RD_KAFKA_EVENT_CREATETOPICS_RESULT
      • +
      • RD_KAFKA_EVENT_DELETETOPICS_RESULT
      • +
      • RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT
      • +
      • RD_KAFKA_EVENT_CREATEACLS_RESULT
      • +
      • RD_KAFKA_EVENT_DESCRIBEACLS_RESULT
      • +
      • RD_KAFKA_EVENT_DELETEACLS_RESULT
      • +
      • RD_KAFKA_EVENT_ALTERCONFIGS_RESULT
      • +
      • RD_KAFKA_EVENT_INCREMENTAL_ALTERCONFIGS_RESULT
      • +
      • RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT
      • +
      • RD_KAFKA_EVENT_DELETEGROUPS_RESULT
      • +
      • RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT
      • +
      • RD_KAFKA_EVENT_DELETERECORDS_RESULT
      • +
      • RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT
      • +
      • RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT
      • +
      • RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT
      • +
      • RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT
      • +
      + +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t* - )
      +
      Returns
      +
      \FFI\CData|object|string|null void* - the event opaque (if any) as passed to rd_kafka_commit() (et.al) or rd_kafka_AdminOptions_set_opaque(), depending on event type.
      +
      + +

      rd_kafka_event_stats()

      +
      public static rd_kafka_event_stats ( 
      +    \FFI\CData|null $rkev
      + ): string|null
      +
      +

      Extract stats from the event.

      +

      Event types:

      +
        +
      • RD_KAFKA_EVENT_STATS
      • +
      + +
      Remarks
      the returned string will be freed automatically along with the event object
      + +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t* - )
      +
      Returns
      +
string|null const char* - the stats JSON string.
      +
      + +

      rd_kafka_event_topic_partition()

      +
      public static rd_kafka_event_topic_partition ( 
      +    \FFI\CData|null $rkev
      + ): \FFI\CData|null
      +
      +
      Remarks
      The returned pointer MUST be freed with rd_kafka_topic_partition_destroy().
      +

      Event types: RD_KAFKA_EVENT_ERROR (for partition level errors)

      + +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t* - )
      +
      Returns
      +
      \FFI\CData|null rd_kafka_topic_partition_t* - a newly allocated topic_partition container, if applicable for the event type, else NULL.
      +
      + +

      rd_kafka_event_topic_partition_list()

      +
      public static rd_kafka_event_topic_partition_list ( 
      +    \FFI\CData|null $rkev
      + ): \FFI\CData|null
      +
      +
      Remarks
      The list MUST NOT be freed with rd_kafka_topic_partition_list_destroy()
      +

      Event types:

      +
        +
      • RD_KAFKA_EVENT_REBALANCE
      • +
      • RD_KAFKA_EVENT_OFFSET_COMMIT
      • +
      + +
      +
      Parameters
      +
      rkev \FFI\CData|null rd_kafka_event_t* - )
      +
      Returns
      +
      \FFI\CData|null rd_kafka_topic_partition_list_t* - the topic partition list from the event.
      +
      + +

      rd_kafka_event_type()

      +
      public static rd_kafka_event_type ( 
      +    \FFI\CData|null $rkev
      + ): int|null
      +
      +
      Remarks
      As a convenience it is okay to pass rkev as NULL in which case RD_KAFKA_EVENT_NONE is returned.
      + +
      +
      Parameters
      +
      rkev \FFI\CData|null const rd_kafka_event_t* - )
      +
      Returns
      +
      int|null rd_kafka_event_type_t - the event type for the given event.
      +
      + +
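A sketch of a generic event loop dispatching on the event type. Assumptions: $queue is an rd_kafka_queue_t* obtained elsewhere, rd_kafka_queue_poll() is exposed on the same assumed \RdKafka\FFI\Library class, and the RD_KAFKA_EVENT_* constants are defined by the binding.

```php
<?php
use RdKafka\FFI\Library;

$rkev = Library::rd_kafka_queue_poll($queue, 100); // wait up to 100 ms for an event
if ($rkev !== null) {
    switch (Library::rd_kafka_event_type($rkev)) {
        case RD_KAFKA_EVENT_ERROR:
            error_log(Library::rd_kafka_event_error_string($rkev));
            break;
        case RD_KAFKA_EVENT_STATS:
            $statsJson = Library::rd_kafka_event_stats($rkev); // freed together with the event
            break;
    }
    Library::rd_kafka_event_destroy($rkev);
}
```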

      rd_kafka_fatal_error()

      +
      public static rd_kafka_fatal_error ( 
      +    \FFI\CData|null $rk, 
      +    \FFI\CData|null $errstr, 
      +    int|null $errstr_size
      + ): int
      +
      +

      Returns the first fatal error set on this client instance, or RD_KAFKA_RESP_ERR_NO_ERROR if no fatal error has occurred.

      +

      This function is to be used with the Idempotent Producer and error_cb to detect fatal errors.

      +

Generally, all errors raised by error_cb are to be considered informational and temporary; the client will try to recover from them gracefully (by retrying, etc.).

      +

However, some errors should logically be considered fatal to retain consistency; in particular, a set of errors that may occur when using the Idempotent Producer, where the in-order or exactly-once producer guarantees cannot be satisfied.

      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t* - Client instance.
      +
      errstr \FFI\CData|null char* - A human readable error string (nul-terminated) is written to this location that must be of at least errstr_size bytes. The errstr is only written to if there is a fatal error. - Writable size in errstr.
      +
      errstr_size int|null size_t
      +
      Returns
      +
      int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR if no fatal error has been raised, else any other error code.
      +
      + +
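A sketch of querying the first fatal error, e.g. after error_cb reported RD_KAFKA_RESP_ERR__FATAL. Assumptions: $rk is the client handle and the binding class name is \RdKafka\FFI\Library.

```php
<?php
use RdKafka\FFI\Library;

$errstr = \FFI::new('char[512]');                     // buffer for the human readable error string
$err = Library::rd_kafka_fatal_error($rk, $errstr, 512);
if ($err !== RD_KAFKA_RESP_ERR_NO_ERROR) {
    error_log('Fatal error: ' . \FFI::string($errstr)); // nul-terminated, only written on fatal error
}
```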

      rd_kafka_flush()

      +
      public static rd_kafka_flush ( 
      +    \FFI\CData|null $rk, 
      +    int|null $timeout_ms
      + ): int
      +
      +

Wait until all outstanding produce requests and other pending operations are completed. This should typically be done prior to destroying a producer instance to make sure all queued and in-flight produce requests are completed before terminating.

      +
      Remarks
      This function will call rd_kafka_poll() and thus trigger callbacks.
      +
      +The linger.ms time will be ignored for the duration of the call, queued messages will be sent to the broker as soon as possible.
      +
      +If RD_KAFKA_EVENT_DR has been enabled (through rd_kafka_conf_set_events()) this function will not call rd_kafka_poll() but instead wait for the librdkafka-handled message count to reach zero. This requires the application to serve the event queue in a separate thread. In this mode only messages are counted, not other types of queued events.
      + +
      See also
      rd_kafka_outq_len()
      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t*
      +
      timeout_ms int|null int
      +
      Returns
      +
      int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR__TIMED_OUT if timeout_ms was reached before all outstanding requests were completed, else RD_KAFKA_RESP_ERR_NO_ERROR
      +
      + +
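A sketch of draining a producer before shutting it down, assuming $rk is a producer handle, the RD_KAFKA_RESP_ERR_* constants are defined, and the binding class name is \RdKafka\FFI\Library.

```php
<?php
use RdKafka\FFI\Library;

$err = Library::rd_kafka_flush($rk, 10000); // wait up to 10 s
if ($err === RD_KAFKA_RESP_ERR__TIMED_OUT) {
    // Some requests are still queued or in flight; rd_kafka_outq_len($rk) reports how many.
}
Library::rd_kafka_destroy($rk);
```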

      rd_kafka_get_debug_contexts()

      +
      public static rd_kafka_get_debug_contexts (  ): string|null
      +
      +

      Retrieve supported debug contexts for use with the "debug" configuration property. (runtime)

      + +
      +
      Returns
      +
      string|null const char* - ) - Comma-separated list of available debugging contexts.
      +
      + +

      rd_kafka_get_err_descs()

      +
      public static rd_kafka_get_err_descs ( 
      +    \FFI\CData|null $errdescs, 
      +    \FFI\CData|null $cntp
      + ): void
      +
      +
      +
      Parameters
      +
      errdescs \FFI\CData|null const struct rd_kafka_err_desc**
      +
      cntp \FFI\CData|null size_t*
      +
      +

      rd_kafka_get_watermark_offsets()

      +
      public static rd_kafka_get_watermark_offsets ( 
      +    \FFI\CData|null $rk, 
      +    string|null $topic, 
      +    int|null $partition, 
      +    \FFI\CData|null $low, 
      +    \FFI\CData|null $high
      + ): int
      +
      +

      Get last known low (oldest/beginning) and high (newest/end) offsets for partition.

      +

      The low offset is updated periodically (if statistics.interval.ms is set) while the high offset is updated on each fetched message set from the broker.

      +

      If there is no cached offset (either low or high, or both) then RD_KAFKA_OFFSET_INVALID will be returned for the respective offset.

      +

      Offsets are returned in *low and *high respectively.

      + +
      Remarks
      Shall only be used with an active consumer instance.
      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t*
      +
      topic string|null const char*
      +
      partition int|null int32_t
      +
      low \FFI\CData|null int64_t*
      +
      high \FFI\CData|null int64_t*
      +
      Returns
      +
      int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on failure.
      +
      + +
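A sketch of reading the cached watermarks for partition 0 of a hypothetical topic "my-topic", assuming $rk is an active consumer handle and the binding class name is \RdKafka\FFI\Library.

```php
<?php
use RdKafka\FFI\Library;

$low  = \FFI::new('int64_t');
$high = \FFI::new('int64_t');

$err = Library::rd_kafka_get_watermark_offsets($rk, 'my-topic', 0, \FFI::addr($low), \FFI::addr($high));
if ($err === RD_KAFKA_RESP_ERR_NO_ERROR) {
    // Either value may be RD_KAFKA_OFFSET_INVALID if no offset is cached yet.
    printf("low=%d high=%d\n", $low->cdata, $high->cdata);
}
```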

      rd_kafka_group_list_destroy()

      +
      public static rd_kafka_group_list_destroy ( 
      +    \FFI\CData|null $grplist
      + ): void
      +
      +
      +
      Parameters
      +
      grplist \FFI\CData|null const struct rd_kafka_group_list*
      +
      +

      rd_kafka_group_result_error()

      +
      public static rd_kafka_group_result_error ( 
      +    \FFI\CData|null $groupres
      + ): \FFI\CData|null
      +
      +

      Group result provides per-group operation result information.

      + +
      Remarks
      lifetime of the returned error is the same as the groupres.
      + +
      +
      Parameters
      +
      groupres \FFI\CData|null const rd_kafka_group_result_t* - )
      +
      Returns
      +
      \FFI\CData|null const rd_kafka_error_t* - the error for the given group result, or NULL on success.
      +
      + +

      rd_kafka_group_result_name()

      +
      public static rd_kafka_group_result_name ( 
      +    \FFI\CData|null $groupres
      + ): string|null
      +
      +
      Remarks
      lifetime of the returned string is the same as the groupres.
      + +
      +
      Parameters
      +
      groupres \FFI\CData|null const rd_kafka_group_result_t* - )
      +
      Returns
      +
      string|null const char* - the name of the group for the given group result.
      +
      + +

      rd_kafka_group_result_partitions()

      +
      public static rd_kafka_group_result_partitions ( 
      +    \FFI\CData|null $groupres
      + ): \FFI\CData|null
      +
      +
      Remarks
      lifetime of the returned list is the same as the groupres.
      + +
      +
      Parameters
      +
      groupres \FFI\CData|null const rd_kafka_group_result_t* - )
      +
      Returns
      +
      \FFI\CData|null const rd_kafka_topic_partition_list_t* - the partitions/offsets for the given group result, if applicable to the request type, else NULL.
      +
      + +

      rd_kafka_handle_mock_cluster()

      +
      public static rd_kafka_handle_mock_cluster ( 
      +    \FFI\CData|null $rk
      + ): \FFI\CData|null
      +
      +
      +
      Parameters
      +
      rk \FFI\CData|null const rd_kafka_t*
      +
      Returns
      +
      \FFI\CData|null rd_kafka_mock_cluster_t*
      +
      +

      rd_kafka_header_add()

      +
      public static rd_kafka_header_add ( 
      +    \FFI\CData|null $hdrs, 
      +    string|null $name, 
      +    int|null $name_size, 
      +    \FFI\CData|object|string|null $value, 
      +    int|null $value_size
      + ): int
      +
      +

Add a header with name name and value value (copied) of size value_size (not including the null-terminator).

      + +
      +
      Parameters
      +
      hdrs \FFI\CData|null rd_kafka_headers_t* - Headers list.
      +
      name string|null const char* - Header name.
      +
      name_size int|null ssize_t - Header name size (not including the null-terminator). If -1 the name length is automatically acquired using strlen().
      +
      value \FFI\CData|object|string|null const void* - Pointer to header value, or NULL (set size to 0 or -1).
      +
      value_size int|null ssize_t - Size of header value. If -1 the value is assumed to be a null-terminated string and the length is automatically acquired using strlen().
      +
      Returns
      +
      int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR__READ_ONLY if the headers are read-only, else RD_KAFKA_RESP_ERR_NO_ERROR.
      +
      + +

      rd_kafka_header_cnt()

      +
      public static rd_kafka_header_cnt ( 
      +    \FFI\CData|null $hdrs
      + ): int|null
      +
      +

      Returns the number of header key/value pairs.

      + +
      +
      Parameters
      +
      hdrs \FFI\CData|null const rd_kafka_headers_t* - ) - Headers to count
      +
      Returns
      +
      int|null size_t
      +
      + +

      rd_kafka_header_get()

      +
      public static rd_kafka_header_get ( 
      +    \FFI\CData|null $hdrs, 
      +    int|null $idx, 
      +    string|null $name, 
      +    \FFI\CData|object|string|null $valuep, 
      +    \FFI\CData|null $sizep
      + ): int
      +
      +

      Iterator for headers matching name.

      +
         Same semantics as rd_kafka_header_get_last()
      +
      + +
      +
      Parameters
      +
      hdrs \FFI\CData|null const rd_kafka_headers_t* - Headers to iterate.
      +
      idx int|null size_t - Iterator index, start at 0 and increment by one for each call as long as RD_KAFKA_RESP_ERR_NO_ERROR is returned.
      +
      name string|null const char* - Header name to match.
      +
      valuep \FFI\CData|object|string|null const void** - (out) Set to a (null-terminated) const pointer to the value (may be NULL).
      +
      sizep \FFI\CData|null size_t* - (out) Set to the value’s size (not including null-terminator).
      +
      Returns
      +
      int rd_kafka_resp_err_t
      +
      + +

      rd_kafka_header_get_all()

      +
      public static rd_kafka_header_get_all ( 
      +    \FFI\CData|null $hdrs, 
      +    int|null $idx, 
      +    \FFI\CData|null $namep, 
      +    \FFI\CData|object|string|null $valuep, 
      +    \FFI\CData|null $sizep
      + ): int
      +
      +

      Iterator for all headers.

      +
         Same semantics as rd_kafka_header_get()
      +
      +
      See also
      rd_kafka_header_get()
      + +
      +
      Parameters
      +
      hdrs \FFI\CData|null const rd_kafka_headers_t*
      +
      idx int|null size_t
      +
      namep \FFI\CData|null const char**
      +
      valuep \FFI\CData|object|string|null const void**
      +
      sizep \FFI\CData|null size_t*
      +
      Returns
      +
      int rd_kafka_resp_err_t
      +
      + +

      rd_kafka_header_get_last()

      +
      public static rd_kafka_header_get_last ( 
      +    \FFI\CData|null $hdrs, 
      +    string|null $name, 
      +    \FFI\CData|object|string|null $valuep, 
      +    \FFI\CData|null $sizep
      + ): int
      +
      +

      Find last header in list hdrs matching name.

      + +
      Remarks
      The returned pointer in valuep includes a trailing null-terminator that is not accounted for in sizep.
      +
      +The returned pointer is only valid as long as the headers list and the header item is valid.
      + +
      +
      Parameters
      +
      hdrs \FFI\CData|null const rd_kafka_headers_t* - Headers list.
      +
      name string|null const char* - Header to find (last match).
      +
      valuep \FFI\CData|object|string|null const void** - (out) Set to a (null-terminated) const pointer to the value (may be NULL).
      +
      sizep \FFI\CData|null size_t* - (out) Set to the value’s size (not including null-terminator).
      +
      Returns
      +
      int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR if an entry was found, else RD_KAFKA_RESP_ERR__NOENT.
      +
      + +

      rd_kafka_header_remove()

      +
      public static rd_kafka_header_remove ( 
      +    \FFI\CData|null $hdrs, 
      +    string|null $name
      + ): int
      +
      +

      Remove all headers for the given key (if any).

      + +
      +
      Parameters
      +
      hdrs \FFI\CData|null rd_kafka_headers_t*
      +
      name string|null const char*
      +
      Returns
      +
      int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR__READ_ONLY if the headers are read-only, RD_KAFKA_RESP_ERR__NOENT if no matching headers were found, else RD_KAFKA_RESP_ERR_NO_ERROR if headers were removed.
      +
      + +

      rd_kafka_headers_copy()

      +
      public static rd_kafka_headers_copy ( 
      +    \FFI\CData|null $src
      + ): \FFI\CData|null
      +
      +
      +
      Parameters
      +
      src \FFI\CData|null const rd_kafka_headers_t*
      +
      Returns
      +
      \FFI\CData|null rd_kafka_headers_t*
      +
      +

      rd_kafka_headers_destroy()

      +
      public static rd_kafka_headers_destroy ( 
      +    \FFI\CData|null $hdrs
      + ): void
      +
      +
      +
      Parameters
      +
      hdrs \FFI\CData|null rd_kafka_headers_t*
      +
      +

      rd_kafka_headers_new()

      +
      public static rd_kafka_headers_new ( 
      +    int|null $initial_count
      + ): \FFI\CData|null
      +
      +

      Create a new headers list.

      + +
      +
      Parameters
      +
      initial_count int|null size_t - ) - Preallocate space for this number of headers. Any number of headers may be added, updated and removed regardless of the initial count.
      +
      Returns
      +
      \FFI\CData|null rd_kafka_headers_t*
      +
      + +
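A sketch tying the header functions above together: create a list, add a header and read it back. Assumptions: the RD_KAFKA_RESP_ERR_* constants are defined, "trace-id"/"abc-123" are just example values, and the binding class name is \RdKafka\FFI\Library.

```php
<?php
use RdKafka\FFI\Library;

$hdrs = Library::rd_kafka_headers_new(4);                           // preallocate space for 4 headers
Library::rd_kafka_header_add($hdrs, 'trace-id', -1, 'abc-123', -1); // -1: sizes taken via strlen()

$valuep = \FFI::new('void*');
$sizep  = \FFI::new('size_t');
$err = Library::rd_kafka_header_get_last($hdrs, 'trace-id', \FFI::addr($valuep), \FFI::addr($sizep));
if ($err === RD_KAFKA_RESP_ERR_NO_ERROR) {
    echo \FFI::string($valuep, $sizep->cdata), PHP_EOL; // prints "abc-123"
}

Library::rd_kafka_headers_destroy($hdrs);
```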

      rd_kafka_incremental_assign()

      +
      public static rd_kafka_incremental_assign ( 
      +    \FFI\CData|null $rk, 
      +    \FFI\CData|null $partitions
      + ): \FFI\CData|null
      +
      +

      Incrementally add partitions to the current assignment.

      +

      If a COOPERATIVE assignor (i.e. incremental rebalancing) is being used, this method should be used in a rebalance callback to adjust the current assignment appropriately in the case where the rebalance type is RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS. The application must pass the partition list passed to the callback (or a copy of it), even if the list is empty. partitions must not be NULL. This method may also be used outside the context of a rebalance callback.

      + +
      Remarks
      The returned error object (if not NULL) must be destroyed with rd_kafka_error_destroy().
      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t*
      +
      partitions \FFI\CData|null const rd_kafka_topic_partition_list_t*
      +
      Returns
      +
      \FFI\CData|null rd_kafka_error_t* - NULL on success, or an error object if the operation was unsuccessful.
      +
      + +

      rd_kafka_incremental_unassign()

      +
      public static rd_kafka_incremental_unassign ( 
      +    \FFI\CData|null $rk, 
      +    \FFI\CData|null $partitions
      + ): \FFI\CData|null
      +
      +

      Incrementally remove partitions from the current assignment.

      +

      If a COOPERATIVE assignor (i.e. incremental rebalancing) is being used, this method should be used in a rebalance callback to adjust the current assignment appropriately in the case where the rebalance type is RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS. The application must pass the partition list passed to the callback (or a copy of it), even if the list is empty. partitions must not be NULL. This method may also be used outside the context of a rebalance callback.

      + +
      Remarks
      The returned error object (if not NULL) must be destroyed with rd_kafka_error_destroy().
      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t*
      +
      partitions \FFI\CData|null const rd_kafka_topic_partition_list_t*
      +
      Returns
      +
      \FFI\CData|null rd_kafka_error_t* - NULL on success, or an error object if the operation was unsuccessful.
      +
      + +
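A sketch of the branch inside a rebalance callback when a COOPERATIVE assignor is used. Assumptions: $rk, $err and $partitions are the handle, rebalance type and partition list handed to the callback, and the binding class name is \RdKafka\FFI\Library.

```php
<?php
use RdKafka\FFI\Library;

if ($err === RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
    $error = Library::rd_kafka_incremental_assign($rk, $partitions);
} else { // RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS
    $error = Library::rd_kafka_incremental_unassign($rk, $partitions);
}

if ($error !== null) {
    error_log(Library::rd_kafka_error_string($error));
    Library::rd_kafka_error_destroy($error); // returned error object must be destroyed
}
```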

      rd_kafka_init_transactions()

      +
      public static rd_kafka_init_transactions ( 
      +    \FFI\CData|null $rk, 
      +    int|null $timeout_ms
      + ): \FFI\CData|null
      +
      +

      Initialize transactions for the producer instance.

      +

      This function ensures any transactions initiated by previous instances of the producer with the same transactional.id are completed. If the previous instance failed with a transaction in progress the previous transaction will be aborted. This function needs to be called before any other transactional or produce functions are called when the transactional.id is configured.

      +

      If the last transaction had begun completion (following transaction commit) but not yet finished, this function will await the previous transaction's completion.

      +

      When any previous transactions have been fenced this function will acquire the internal producer id and epoch, used in all future transactional messages issued by this producer instance.

      + +
      Remarks
      This function may block up to timeout_ms milliseconds.
      +
      +This call is resumable when a retriable timeout error is returned. Calling the function again will resume the operation that is progressing in the background.
      + +
      Remarks
      The returned error object (if not NULL) must be destroyed with rd_kafka_error_destroy().
      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t* - Producer instance.
      +
      timeout_ms int|null int - The maximum time to block. On timeout the operation may continue in the background, depending on state, and it is okay to call init_transactions() again. If an infinite timeout (-1) is passed, the timeout will be adjusted to 2 * transaction.timeout.ms.
      +
      Returns
      +
\FFI\CData|null rd_kafka_error_t* - NULL on success or an error object on failure. Check whether the returned error object permits retrying by calling rd_kafka_error_is_retriable(), or whether a fatal error has been raised by calling rd_kafka_error_is_fatal(). Error codes: RD_KAFKA_RESP_ERR__TIMED_OUT if the transaction coordinator could not be contacted within timeout_ms (retriable), RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE if the transaction coordinator is not available (retriable), RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS if a previous transaction would not complete within timeout_ms (retriable), RD_KAFKA_RESP_ERR__STATE if transactions have already been started or upon fatal error, RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE if the broker(s) do not support transactions (<Apache Kafka 0.11), this also raises a fatal error, RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT if the configured transaction.timeout.ms is outside the broker-configured range, this also raises a fatal error, RD_KAFKA_RESP_ERR__NOT_CONFIGURED if transactions have not been configured for the producer instance, RD_KAFKA_RESP_ERR__INVALID_ARG if rk is not a producer instance, or timeout_ms is out of range. Other error codes not listed here may be returned, depending on broker version.
      +
      + +
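A sketch of initializing transactions with a retry on retriable timeouts, assuming $rk is a producer configured with transactional.id and the binding class name is \RdKafka\FFI\Library.

```php
<?php
use RdKafka\FFI\Library;

do {
    $error = Library::rd_kafka_init_transactions($rk, 30000); // block up to 30 s
    $retry = $error !== null && Library::rd_kafka_error_is_retriable($error) === 1;
    if ($error !== null) {
        if (Library::rd_kafka_error_is_fatal($error) === 1) {
            // Fatal: this producer instance can no longer be used for transactions.
        }
        Library::rd_kafka_error_destroy($error);
    }
} while ($retry);
```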

      rd_kafka_interceptor_add_on_acknowledgement()

      +
      public static rd_kafka_interceptor_add_on_acknowledgement ( 
      +    \FFI\CData|null $rk, 
      +    string|null $ic_name, 
      +    \FFI\CData|\Closure $on_acknowledgement, 
      +    \FFI\CData|object|string|null $ic_opaque
      + ): int
      +
      +

      Append an on_acknowledgement() interceptor.

      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t* - Client instance.
      +
      ic_name string|null const char* - Interceptor name, used in logging.
      +
      on_acknowledgement \FFI\CData|\Closure rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_acknowledgement_t*)(rd_kafka_t*, rd_kafka_message_t*, void*) - Function pointer.
      +
      ic_opaque \FFI\CData|object|string|null void* - Opaque value that will be passed to the function.
      +
      Returns
      +
      int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing interceptor with the same ic_name and function has already been added to conf.
      +
      + +

      rd_kafka_interceptor_add_on_broker_state_change()

      +
      public static rd_kafka_interceptor_add_on_broker_state_change ( 
      +    \FFI\CData|null $rk, 
      +    string|null $ic_name, 
      +    \FFI\CData|\Closure $on_broker_state_change, 
      +    \FFI\CData|object|string|null $ic_opaque
      + ): int
      +
      +

      Append an on_broker_state_change() interceptor.

      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t* - Client instance.
      +
      ic_name string|null const char* - Interceptor name, used in logging.
      +
      on_broker_state_change \FFI\CData|\Closure rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_broker_state_change_t*)(rd_kafka_t*, int32_t, const char*, const char*, int, const char*, void*)
      +
      ic_opaque \FFI\CData|object|string|null void* - Opaque value that will be passed to the function.
      +
      Returns
      +
      int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing interceptor with the same ic_name and function has already been added to conf.
      +
      + +

      rd_kafka_interceptor_add_on_commit()

      +
      public static rd_kafka_interceptor_add_on_commit ( 
      +    \FFI\CData|null $rk, 
      +    string|null $ic_name, 
      +    \FFI\CData|\Closure $on_commit, 
      +    \FFI\CData|object|string|null $ic_opaque
      + ): int
      +
      +

      Append an on_commit() interceptor.

      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t* - Client instance.
      +
      ic_name string|null const char* - Interceptor name, used in logging.
      +
      on_commit \FFI\CData|\Closure rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_commit_t*)(rd_kafka_t*, const rd_kafka_topic_partition_list_t*, rd_kafka_resp_err_t, void*)
      +
      ic_opaque \FFI\CData|object|string|null void* - Opaque value that will be passed to the function.
      +
      Returns
      +
      int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing interceptor with the same ic_name and function has already been added to conf.
      +
      + +

      rd_kafka_interceptor_add_on_consume()

      +
      public static rd_kafka_interceptor_add_on_consume ( 
      +    \FFI\CData|null $rk, 
      +    string|null $ic_name, 
      +    \FFI\CData|\Closure $on_consume, 
      +    \FFI\CData|object|string|null $ic_opaque
      + ): int
      +
      +

      Append an on_consume() interceptor.

      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t* - Client instance.
      +
      ic_name string|null const char* - Interceptor name, used in logging.
      +
      on_consume \FFI\CData|\Closure rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_consume_t*)(rd_kafka_t*, rd_kafka_message_t*, void*) - Function pointer.
      +
      ic_opaque \FFI\CData|object|string|null void* - Opaque value that will be passed to the function.
      +
      Returns
      +
      int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing interceptor with the same ic_name and function has already been added to conf.
      +
      + +

      rd_kafka_interceptor_add_on_destroy()

      +
      public static rd_kafka_interceptor_add_on_destroy ( 
      +    \FFI\CData|null $rk, 
      +    string|null $ic_name, 
      +    \FFI\CData|\Closure $on_destroy, 
      +    \FFI\CData|object|string|null $ic_opaque
      + ): int
      +
      +

      Append an on_destroy() interceptor.

      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t* - Client instance.
      +
      ic_name string|null const char* - Interceptor name, used in logging.
      +
      on_destroy \FFI\CData|\Closure rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_destroy_t*)(rd_kafka_t*, void*) - Function pointer.
      +
      ic_opaque \FFI\CData|object|string|null void* - Opaque value that will be passed to the function.
      +
      Returns
      +
      int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing interceptor with the same ic_name and function has already been added to conf.
      +
      + +

      rd_kafka_interceptor_add_on_request_sent()

      +
      public static rd_kafka_interceptor_add_on_request_sent ( 
      +    \FFI\CData|null $rk, 
      +    string|null $ic_name, 
      +    \FFI\CData|\Closure $on_request_sent, 
      +    \FFI\CData|object|string|null $ic_opaque
      + ): int
      +
      +

      Append an on_request_sent() interceptor.

      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t* - Client instance.
      +
      ic_name string|null const char* - Interceptor name, used in logging.
      +
      on_request_sent \FFI\CData|\Closure rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_request_sent_t*)(rd_kafka_t*, int, const char*, int32_t, int16_t, int16_t, int32_t, size_t, void*)
      +
      ic_opaque \FFI\CData|object|string|null void* - Opaque value that will be passed to the function.
      +
      Returns
      +
      int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing interceptor with the same ic_name and function has already been added to conf.
      +
      + +

      rd_kafka_interceptor_add_on_response_received()

      +
      public static rd_kafka_interceptor_add_on_response_received ( 
      +    \FFI\CData|null $rk, 
      +    string|null $ic_name, 
      +    \FFI\CData|\Closure $on_response_received, 
      +    \FFI\CData|object|string|null $ic_opaque
      + ): int
      +
      +

      Append an on_response_received() interceptor.

      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t* - Client instance.
      +
      ic_name string|null const char* - Interceptor name, used in logging.
      +
      on_response_received \FFI\CData|\Closure rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_response_received_t*)(rd_kafka_t*, int, const char*, int32_t, int16_t, int16_t, int32_t, size_t, int64_t, rd_kafka_resp_err_t, void*)
      +
      ic_opaque \FFI\CData|object|string|null void* - Opaque value that will be passed to the function.
      +
      Returns
      +
      int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing interceptor with the same ic_name and function has already been added to conf.
      +
      + +

      rd_kafka_interceptor_add_on_send()

      +
      public static rd_kafka_interceptor_add_on_send ( 
      +    \FFI\CData|null $rk, 
      +    string|null $ic_name, 
      +    \FFI\CData|\Closure $on_send, 
      +    \FFI\CData|object|string|null $ic_opaque
      + ): int
      +
      +

      Append an on_send() interceptor.

      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t* - Client instance.
      +
      ic_name string|null const char* - Interceptor name, used in logging.
      +
      on_send \FFI\CData|\Closure rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_send_t*)(rd_kafka_t*, rd_kafka_message_t*, void*) - Function pointer.
      +
      ic_opaque \FFI\CData|object|string|null void* - Opaque value that will be passed to the function.
      +
      Returns
      +
int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing interceptor with the same ic_name and function has already been added to conf.
      +
      + +

      rd_kafka_interceptor_add_on_thread_exit()

      +
      public static rd_kafka_interceptor_add_on_thread_exit ( 
      +    \FFI\CData|null $rk, 
      +    string|null $ic_name, 
      +    \FFI\CData|\Closure $on_thread_exit, 
      +    \FFI\CData|object|string|null $ic_opaque
      + ): int
      +
      +

      Append an on_thread_exit() interceptor.

      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t* - Client instance.
      +
      ic_name string|null const char* - Interceptor name, used in logging.
      +
      on_thread_exit \FFI\CData|\Closure rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_thread_exit_t*)(rd_kafka_t*, rd_kafka_thread_type_t, const char*, void*)
      +
      ic_opaque \FFI\CData|object|string|null void* - Opaque value that will be passed to the function.
      +
      Returns
      +
      int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing interceptor with the same ic_name and function has already been added to conf.
      +
      + +

      rd_kafka_interceptor_add_on_thread_start()

      +
      public static rd_kafka_interceptor_add_on_thread_start ( 
      +    \FFI\CData|null $rk, 
      +    string|null $ic_name, 
      +    \FFI\CData|\Closure $on_thread_start, 
      +    \FFI\CData|object|string|null $ic_opaque
      + ): int
      +
      +

      Append an on_thread_start() interceptor.

      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t* - Client instance.
      +
      ic_name string|null const char* - Interceptor name, used in logging.
      +
      on_thread_start \FFI\CData|\Closure rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_thread_start_t*)(rd_kafka_t*, rd_kafka_thread_type_t, const char*, void*)
      +
      ic_opaque \FFI\CData|object|string|null void* - Opaque value that will be passed to the function.
      +
      Returns
      +
      int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing interceptor with the same ic_name and function has already been added to conf.
      +
      + +

      rd_kafka_interceptor_f_on_acknowledgement_t()

      +
      public static rd_kafka_interceptor_f_on_acknowledgement_t ( 
      +    \FFI\CData|null $rk, 
      +    \FFI\CData|null $rkmessage, 
      +    \FFI\CData|object|string|null $ic_opaque
      + ): int
      +
      +

on_acknowledgement() is called to inform interceptors that a message was successfully delivered or permanently failed delivery. The interceptor chain is called from internal librdkafka background threads, or rd_kafka_produce*() if the partitioner failed.

      + +
      Remarks
      This interceptor is only used by producer instances.
      +
      +The rkmessage object is NOT mutable and MUST NOT be modified by the interceptor.
      +
      Warning
      The on_acknowledgement() method may be called from internal librdkafka threads. An on_acknowledgement() interceptor MUST NOT call any librdkafka API's associated with the rk, or perform any blocking or prolonged work.
      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t* - The client instance.
      +
      rkmessage \FFI\CData|null rd_kafka_message_t* - The message being produced. Immutable.
      +
      ic_opaque \FFI\CData|object|string|null void* - The interceptor’s opaque pointer specified in ..add..().
      +
      Returns
      +
      int rd_kafka_resp_err_t - an error code on failure, the error is logged but otherwise ignored.
      +
      + +

      rd_kafka_interceptor_f_on_broker_state_change_t()

      +
      public static rd_kafka_interceptor_f_on_broker_state_change_t ( 
      +    \FFI\CData|null $rk, 
      +    int|null $broker_id, 
      +    string|null $secproto, 
      +    string|null $name, 
      +    int|null $port, 
      +    string|null $state, 
      +    \FFI\CData|object|string|null $ic_opaque
      + ): int
      +
      +

      on_broker_state_change() is called just after a broker has been created or its state has been changed.

      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t* - The client instance.
      +
      broker_id int|null int32_t - The broker id (-1 is used for bootstrap brokers).
      +
      secproto string|null const char* - The security protocol.
      +
      name string|null const char* - The original name of the broker.
      +
      port int|null int - The port of the broker.
      +
      state string|null const char* - Broker state name.
      +
      ic_opaque \FFI\CData|object|string|null void* - The interceptor’s opaque pointer specified in ..add..().
      +
      Returns
      +
      int rd_kafka_resp_err_t - an error code on failure, the error is logged but otherwise ignored.
      +
      + +

      rd_kafka_interceptor_f_on_commit_t()

      +
      public static rd_kafka_interceptor_f_on_commit_t ( 
      +    \FFI\CData|null $rk, 
      +    \FFI\CData|null $offsets, 
      +    int $err, 
      +    \FFI\CData|object|string|null $ic_opaque
      + ): int
      +
      +

      on_commit() is called on completed or failed offset commit. It is called from internal librdkafka threads.

      + +
      Remarks
      This interceptor is only used by consumer instances.
      +
      Warning
      The on_commit() interceptor is called from internal librdkafka threads. An on_commit() interceptor MUST NOT call any librdkafka API's associated with the rk, or perform any blocking or prolonged work.
      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t* - The client instance.
      +
      offsets \FFI\CData|null const rd_kafka_topic_partition_list_t* - List of topic+partition+offset+error that were committed. The error message of each partition should be checked for error.
      +
      err int rd_kafka_resp_err_t - The commit error, if any.
      +
      ic_opaque \FFI\CData|object|string|null void* - The interceptor’s opaque pointer specified in ..add..().
      +
      Returns
      +
      int rd_kafka_resp_err_t - an error code on failure, the error is logged but otherwise ignored.
      +
      + +

      rd_kafka_interceptor_f_on_conf_destroy_t()

      +
      public static rd_kafka_interceptor_f_on_conf_destroy_t ( 
      +    \FFI\CData|object|string|null $ic_opaque
      + ): int
      +
      +

      on_conf_destroy() is called from rd_kafka_*_conf_destroy() in the order the interceptors were added.

      + +
      +
      Parameters
      +
      ic_opaque \FFI\CData|object|string|null void* - The interceptor’s opaque pointer specified in ..add..().
      +
      Returns
      +
      int rd_kafka_resp_err_t
      +
      + +

      rd_kafka_interceptor_f_on_conf_dup_t()

      +
      public static rd_kafka_interceptor_f_on_conf_dup_t ( 
      +    \FFI\CData|null $new_conf, 
      +    \FFI\CData|null $old_conf, 
      +    int|null $filter_cnt, 
      +    \FFI\CData|null $filter, 
      +    \FFI\CData|object|string|null $ic_opaque
      + ): int
      +
      +

on_conf_dup() is called from rd_kafka_conf_dup() in the order the interceptors were added and is used to let an interceptor re-register its conf interceptors with a new opaque value. The on_conf_dup() method is called prior to the configuration from old_conf being copied to new_conf.

      + +
      Remarks
      No on_conf_* interceptors are copied to the new configuration object on rd_kafka_conf_dup().
      + +
      +
      Parameters
      +
      new_conf \FFI\CData|null rd_kafka_conf_t* - New configuration object.
      +
      old_conf \FFI\CData|null const rd_kafka_conf_t* - Old configuration object to copy properties from.
      +
filter_cnt int|null size_t - Number of property names to filter in filter.
+
filter \FFI\CData|null const char** - Property names to filter out (ignore) when setting up new_conf.
      +
      ic_opaque \FFI\CData|object|string|null void* - The interceptor’s opaque pointer specified in ..add..().
      +
      Returns
      +
      int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on failure (which is logged but otherwise ignored).
      +
      + +

      rd_kafka_interceptor_f_on_conf_set_t()

      +
      public static rd_kafka_interceptor_f_on_conf_set_t ( 
      +    \FFI\CData|null $conf, 
      +    string|null $name, 
      +    string|null $val, 
      +    \FFI\CData|null $errstr, 
      +    int|null $errstr_size, 
      +    \FFI\CData|object|string|null $ic_opaque
      + ): int
      +
      +

      on_conf_set() is called from rd_kafka_*_conf_set() in the order the interceptors were added.

      + +
      +
      Parameters
      +
      conf \FFI\CData|null rd_kafka_conf_t* - Configuration object.
      +
      name string|null const char* - The configuration property to set.
      +
      val string|null const char* - The configuration value to set, or NULL for reverting to default in which case the previous value should be freed.
      +
      errstr \FFI\CData|null char* - A human readable error string in case the interceptor fails.
      +
      errstr_size int|null size_t - Maximum space (including \0) in errstr.
      +
      ic_opaque \FFI\CData|object|string|null void* - The interceptor’s opaque pointer specified in ..add..().
      +
      Returns
      +
int rd_kafka_conf_res_t - RD_KAFKA_CONF_OK if the property was known and successfully handled by the interceptor, RD_KAFKA_CONF_INVALID if the property was handled by the interceptor but the value was invalid, or RD_KAFKA_CONF_UNKNOWN if the interceptor did not handle this property, in which case the property is passed on to the next interceptor in the chain, finally ending up at the built-in configuration handler.
      +
      + +
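The closure below sketches the shape such an on_conf_set() interceptor is expected to have; the property name and return values are illustrative only:

    $onConfSet = function ($conf, $name, $val, $errstr, $errstrSize, $icOpaque): int {
        if ($name === 'my.plugin.option') {    // hypothetical property handled by this interceptor
            return 0;                          // RD_KAFKA_CONF_OK
        }
        return -2;                             // RD_KAFKA_CONF_UNKNOWN: pass on to the next handler
    };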

      rd_kafka_interceptor_f_on_consume_t()

      +
      public static rd_kafka_interceptor_f_on_consume_t ( 
      +    \FFI\CData|null $rk, 
      +    \FFI\CData|null $rkmessage, 
      +    \FFI\CData|object|string|null $ic_opaque
      + ): int
      +
      +

      on_consume() is called just prior to passing the message to the application in rd_kafka_consumer_poll(), rd_kafka_consume*(), the event interface, etc.

      + +
      Remarks
      This interceptor is only used by consumer instances.
      +
      +The rkmessage object is NOT mutable and MUST NOT be modified by the interceptor.
      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t* - The client instance.
      +
      rkmessage \FFI\CData|null rd_kafka_message_t* - The message being consumed. Immutable.
      +
      ic_opaque \FFI\CData|object|string|null void* - The interceptor’s opaque pointer specified in ..add..().
      +
      Returns
      +
      int rd_kafka_resp_err_t - an error code on failure, the error is logged but otherwise ignored.
      +
      + +

      rd_kafka_interceptor_f_on_destroy_t()

      +
      public static rd_kafka_interceptor_f_on_destroy_t ( 
      +    \FFI\CData|null $rk, 
      +    \FFI\CData|object|string|null $ic_opaque
      + ): int
      +
      +

on_destroy() is called from rd_kafka_destroy(), or from rd_kafka_new() if rd_kafka_new() fails during initialization.

      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t* - The client instance.
      +
      ic_opaque \FFI\CData|object|string|null void* - The interceptor’s opaque pointer specified in ..add..().
      +
      Returns
      +
      int rd_kafka_resp_err_t
      +
      + +

      rd_kafka_interceptor_f_on_new_t()

      +
      public static rd_kafka_interceptor_f_on_new_t ( 
      +    \FFI\CData|null $rk, 
      +    \FFI\CData|null $conf, 
      +    \FFI\CData|object|string|null $ic_opaque, 
      +    \FFI\CData|null $errstr, 
      +    int|null $errstr_size
      + ): int
      +
      +

on_new() is called from rd_kafka_new() prior to returning the newly created client instance to the application.

      + +
      Warning
      The rk client instance will not be fully set up when this interceptor is called and the interceptor MUST NOT call any other rk-specific APIs than rd_kafka_interceptor_add..().
      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t* - The client instance.
      +
      conf \FFI\CData|null const rd_kafka_conf_t* - The client instance’s final configuration.
      +
      ic_opaque \FFI\CData|object|string|null void* - The interceptor’s opaque pointer specified in ..add..().
      +
      errstr \FFI\CData|null char* - A human readable error string in case the interceptor fails.
      +
      errstr_size int|null size_t - Maximum space (including \0) in errstr.
      +
      Returns
      +
      int rd_kafka_resp_err_t - an error code on failure, the error is logged but otherwise ignored.
      +
      + +

      rd_kafka_interceptor_f_on_request_sent_t()

      +
      public static rd_kafka_interceptor_f_on_request_sent_t ( 
      +    \FFI\CData|null $rk, 
      +    int|null $sockfd, 
      +    string|null $brokername, 
      +    int|null $brokerid, 
      +    int|null $ApiKey, 
      +    int|null $ApiVersion, 
      +    int|null $CorrId, 
      +    int|null $size, 
      +    \FFI\CData|object|string|null $ic_opaque
      + ): int
      +
      +

on_request_sent() is called when a request has been fully written to a broker TCP connection's socket.

      + +
      Warning
      The on_request_sent() interceptor is called from internal librdkafka broker threads. An on_request_sent() interceptor MUST NOT call any librdkafka API's associated with the rk, or perform any blocking or prolonged work.
      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t* - The client instance.
      +
      sockfd int|null int - Socket file descriptor.
      +
      brokername string|null const char* - Broker request is being sent to.
      +
      brokerid int|null int32_t - Broker request is being sent to.
      +
      ApiKey int|null int16_t - Kafka protocol request type.
      +
      ApiVersion int|null int16_t - Kafka protocol request type version.
      +
      CorrId int|null int32_t - Kafka protocol request correlation id.
      +
      size int|null size_t - Size of request.
      +
      ic_opaque \FFI\CData|object|string|null void* - The interceptor’s opaque pointer specified in ..add..().
      +
      Returns
      +
      int rd_kafka_resp_err_t - an error code on failure, the error is logged but otherwise ignored.
      +
      + +

      rd_kafka_interceptor_f_on_response_received_t()

      +
      public static rd_kafka_interceptor_f_on_response_received_t ( 
      +    \FFI\CData|null $rk, 
      +    int|null $sockfd, 
      +    string|null $brokername, 
      +    int|null $brokerid, 
      +    int|null $ApiKey, 
      +    int|null $ApiVersion, 
      +    int|null $CorrId, 
      +    int|null $size, 
      +    int|null $rtt, 
      +    int $err, 
      +    \FFI\CData|object|string|null $ic_opaque
      + ): int
      +
      +

      on_response_received() is called when a protocol response has been fully received from a broker TCP connection socket but before the response payload is parsed.

      + +
      Warning
      The on_response_received() interceptor is called from internal librdkafka broker threads. An on_response_received() interceptor MUST NOT call any librdkafka API's associated with the rk, or perform any blocking or prolonged work.
      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t* - The client instance.
      +
      sockfd int|null int - Socket file descriptor (always -1).
      +
      brokername string|null const char* - Broker response was received from, possibly empty string on error.
      +
      brokerid int|null int32_t - Broker response was received from.
      +
      ApiKey int|null int16_t - Kafka protocol request type or -1 on error.
      +
      ApiVersion int|null int16_t - Kafka protocol request type version or -1 on error.
      +
      CorrId int|null int32_t - Kafka protocol request correlation id, possibly -1 on error.
      +
      size int|null size_t - Size of response, possibly 0 on error.
      +
      rtt int|null int64_t - Request round-trip-time in microseconds, possibly -1 on error.
      +
      err int rd_kafka_resp_err_t - Receive error.
      +
      ic_opaque \FFI\CData|object|string|null void* - The interceptor’s opaque pointer specified in ..add..().
      +
      Returns
      +
      int rd_kafka_resp_err_t - an error code on failure, the error is logged but otherwise ignored.
      +
      + +

      rd_kafka_interceptor_f_on_send_t()

      +
      public static rd_kafka_interceptor_f_on_send_t ( 
      +    \FFI\CData|null $rk, 
      +    \FFI\CData|null $rkmessage, 
      +    \FFI\CData|object|string|null $ic_opaque
      + ): int
      +
      +

      on_send() is called from rd_kafka_produce*() (et.al) prior to the partitioner being called.

      + +
      Remarks
      This interceptor is only used by producer instances.
      +
      +The rkmessage object is NOT mutable and MUST NOT be modified by the interceptor.
      +
      +If the partitioner fails or an unknown partition was specified, the on_acknowledgement() interceptor chain will be called from within the rd_kafka_produce*() call to maintain send-acknowledgement symmetry.
      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t* - The client instance.
      +
      rkmessage \FFI\CData|null rd_kafka_message_t* - The message being produced. Immutable.
      +
      ic_opaque \FFI\CData|object|string|null void* - The interceptor’s opaque pointer specified in ..add..().
      +
      Returns
      +
      int rd_kafka_resp_err_t - an error code on failure, the error is logged but otherwise ignored.
      +
      + +

      rd_kafka_interceptor_f_on_thread_exit_t()

      +
      public static rd_kafka_interceptor_f_on_thread_exit_t ( 
      +    \FFI\CData|null $rk, 
      +    int $thread_type, 
      +    string|null $thread_name, 
      +    \FFI\CData|object|string|null $ic_opaque
      + ): int
      +
      +

      on_thread_exit() is called just prior to a librdkafka-managed thread exiting from the exiting thread itself.

      + +
      Remarks
      Depending on the thread type, librdkafka may execute additional code on the thread after on_thread_exit() returns.
      +
      Warning
      The on_thread_exit() interceptor is called from internal librdkafka threads. An on_thread_exit() interceptor MUST NOT call any librdkafka API's associated with the rk, or perform any blocking or prolonged work.
      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t* - The client instance.
      +
thread_type int rd_kafka_thread_type_t - Thread type.
      +
      thread_name string|null const char* - Human-readable thread name, may not be unique.
      +
      ic_opaque \FFI\CData|object|string|null void* - The interceptor’s opaque pointer specified in ..add..().
      +
      Returns
      +
      int rd_kafka_resp_err_t - an error code on failure, the error is logged but otherwise ignored.
      +
      + +

      rd_kafka_interceptor_f_on_thread_start_t()

      +
      public static rd_kafka_interceptor_f_on_thread_start_t ( 
      +    \FFI\CData|null $rk, 
      +    int $thread_type, 
      +    string|null $thread_name, 
      +    \FFI\CData|object|string|null $ic_opaque
      + ): int
      +
      +

      on_thread_start() is called from a newly created librdkafka-managed thread.

      + +
      Warning
      The on_thread_start() interceptor is called from internal librdkafka threads. An on_thread_start() interceptor MUST NOT call any librdkafka API's associated with the rk, or perform any blocking or prolonged work.
      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t* - The client instance.
      +
      thread_type int rd_kafka_thread_type_t - Thread type.
      +
      thread_name string|null const char* - Human-readable thread name, may not be unique.
      +
      ic_opaque \FFI\CData|object|string|null void* - The interceptor’s opaque pointer specified in ..add..().
      +
      Returns
      +
      int rd_kafka_resp_err_t - an error code on failure, the error is logged but otherwise ignored.
      +
      + +

      rd_kafka_last_error()

      +
      public static rd_kafka_last_error (  ): int
      +
      +

      Returns the last error code generated by a legacy API call in the current thread.

      +

      The legacy APIs are the ones using errno to propagate error value, namely:

      +
        +
      • rd_kafka_topic_new()
      • +
      • rd_kafka_consume_start()
      • +
      • rd_kafka_consume_stop()
      • +
      • rd_kafka_consume()
      • +
      • rd_kafka_consume_batch()
      • +
      • rd_kafka_consume_callback()
      • +
      • rd_kafka_consume_queue()
      • +
      • rd_kafka_produce()
      • +
      +

      The main use for this function is to avoid converting system errno values to rd_kafka_resp_err_t codes for legacy APIs.

      +
      Remarks
      The last error is stored per-thread, if multiple rd_kafka_t handles are used in the same application thread the developer needs to make sure rd_kafka_last_error() is called immediately after a failed API call.
      +
      +errno propagation from librdkafka is not safe on Windows and should not be used, use rd_kafka_last_error() instead.
      + +
      +
      Returns
      +
int rd_kafka_resp_err_t - the last error code set for the current thread.
      +
      + +
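A short sketch of the intended usage, assuming a Library facade (placeholder name) exposes these methods and a legacy call such as rd_kafka_produce() has just signalled failure via its return value:

    $err = Library::rd_kafka_last_error();       // thread-local, read immediately after the failure
    $reason = Library::rd_kafka_err2str($err);   // human-readable description of the error code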

      rd_kafka_list_groups()

      +
      public static rd_kafka_list_groups ( 
      +    \FFI\CData|null $rk, 
      +    string|null $group, 
      +    \FFI\CData|null $grplistp, 
      +    int|null $timeout_ms
      + ): int
      +
      +

      List and describe client groups in cluster.

      +

      group is an optional group name to describe, otherwise (NULL) all groups are returned.

      +

      timeout_ms is the (approximate) maximum time to wait for response from brokers and must be a positive value.

      + +

The grplistp remains untouched if any error code is returned, with the exception of RD_KAFKA_RESP_ERR__PARTIAL which behaves as RD_KAFKA_RESP_ERR_NO_ERROR (success) but with an incomplete group list.

      +
      See also
      Use rd_kafka_group_list_destroy() to release list memory.
      +
      Deprecated:
      Use rd_kafka_ListConsumerGroups() and rd_kafka_DescribeConsumerGroups() instead.
      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t*
      +
      group string|null const char*
      +
      grplistp \FFI\CData|null const struct rd_kafka_group_list**
      +
      timeout_ms int|null int
      +
      Returns
      +
int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success and grplistp is updated to point to a newly allocated list of groups. RD_KAFKA_RESP_ERR__PARTIAL if not all brokers responded in time but at least one group is returned in grplistp. RD_KAFKA_RESP_ERR__TIMED_OUT if no groups were returned in the given timeframe but not all brokers have yet responded, or if the list of brokers in the cluster could not be obtained within the given timeframe. RD_KAFKA_RESP_ERR__TRANSPORT if no brokers were found. Other error codes may also be returned from the request layer.
      +
      + +
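A rough sketch of the call sequence, assuming $ffi is the \FFI instance loaded with the librdkafka definitions, $rk is an rd_kafka_t* CData, and the static methods live on a Library facade (placeholder name):

    $grplist = $ffi->new('const struct rd_kafka_group_list*');
    $err = Library::rd_kafka_list_groups($rk, null, \FFI::addr($grplist), 10000);
    if ($err === 0) {                               // RD_KAFKA_RESP_ERR_NO_ERROR (also handle __PARTIAL)
        $cnt = $grplist[0]->group_cnt;              // iterate $grplist[0]->groups[$i] as needed
        Library::rd_kafka_group_list_destroy($grplist);
    }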

      rd_kafka_log_print()

      +
      public static rd_kafka_log_print ( 
      +    \FFI\CData|null $rk, 
      +    int|null $level, 
      +    string|null $fac, 
      +    string|null $buf
      + ): void
      +
      +
      +
      Parameters
      +
      rk \FFI\CData|null const rd_kafka_t*
      +
      level int|null int
      +
      fac string|null const char*
      +
      buf string|null const char*
      +
      +

      rd_kafka_log_syslog()

      +
      public static rd_kafka_log_syslog ( 
      +    \FFI\CData|null $rk, 
      +    int|null $level, 
      +    string|null $fac, 
      +    string|null $buf
      + ): void
      +
      +

      Builtin log sink: print to syslog.

      +
      Remarks
      This logger is only available if librdkafka was built with syslog support.
      + +
      +
      Parameters
      +
      rk \FFI\CData|null const rd_kafka_t*
      +
      level int|null int
      +
      fac string|null const char*
      +
      buf string|null const char*
      +
      + +

      rd_kafka_mem_calloc()

      +
      public static rd_kafka_mem_calloc ( 
      +    \FFI\CData|null $rk, 
      +    int|null $num, 
      +    int|null $size
      + ): \FFI\CData|object|string|null
      +
      +

      Allocate and zero memory using the same allocator librdkafka uses.

      +

      This is typically an abstraction for the calloc(3) call and makes sure the application can use the same memory allocator as librdkafka for allocating pointers that are used by librdkafka.

      +

      rk can be set to return memory allocated by a specific rk instance otherwise pass NULL for rk.

      +
      Remarks
      Memory allocated by rd_kafka_mem_calloc() must be freed using rd_kafka_mem_free()
      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t*
      +
      num int|null size_t
      +
      size int|null size_t
      +
      Returns
      +
      \FFI\CData|object|string|null void*
      +
      + +

      rd_kafka_mem_free()

      +
      public static rd_kafka_mem_free ( 
      +    \FFI\CData|null $rk, 
      +    \FFI\CData|object|string|null $ptr
      + ): void
      +
      +

      Free pointer returned by librdkafka.

      +

      This is typically an abstraction for the free(3) call and makes sure the application can use the same memory allocator as librdkafka for freeing pointers returned by librdkafka.

      +

In standard setups it is usually not necessary to use this interface rather than the free(3) function.

      +

      rk must be set for memory returned by APIs that take an rk argument, for other APIs pass NULL for rk.

      +
      Remarks
      rd_kafka_mem_free() must only be used for pointers returned by APIs that explicitly mention using this function for freeing.
      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t*
      +
      ptr \FFI\CData|object|string|null void*
      +
      + +

      rd_kafka_mem_malloc()

      +
      public static rd_kafka_mem_malloc ( 
      +    \FFI\CData|null $rk, 
      +    int|null $size
      + ): \FFI\CData|object|string|null
      +
      +

      Allocate memory using the same allocator librdkafka uses.

      +

      This is typically an abstraction for the malloc(3) call and makes sure the application can use the same memory allocator as librdkafka for allocating pointers that are used by librdkafka.

      +

      rk can be set to return memory allocated by a specific rk instance otherwise pass NULL for rk.

      +
      Remarks
      Memory allocated by rd_kafka_mem_malloc() must be freed using rd_kafka_mem_free()
      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t*
      +
      size int|null size_t
      +
      Returns
      +
      \FFI\CData|object|string|null void*
      +
      + +
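A minimal sketch pairing rd_kafka_mem_malloc() with rd_kafka_mem_free(), passing null for rk since the memory is not tied to a particular handle (Library is a placeholder facade name):

    $buf = Library::rd_kafka_mem_malloc(null, 64);   // 64 bytes from librdkafka's allocator
    // ... hand $buf to an API that expects caller-allocated memory ...
    Library::rd_kafka_mem_free(null, $buf);          // memory from rd_kafka_mem_malloc() must be freed this way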

      rd_kafka_memberid()

      +
      public static rd_kafka_memberid ( 
      +    \FFI\CData|null $rk
      + ): \FFI\CData|null
      +
      +

      Returns this client's broker-assigned group member id.

      +
      Remarks
      This currently requires the high-level KafkaConsumer
      + +
      +
      Parameters
      +
rk \FFI\CData|null const rd_kafka_t*
      +
      Returns
      +
      \FFI\CData|null char* - An allocated string containing the current broker-assigned group member id, or NULL if not available. The application must free the string with free() or rd_kafka_mem_free()
      +
      + +
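Sketch of reading and releasing the member id, assuming $rk is a high-level consumer handle and Library is a placeholder for the facade exposing these methods:

    $memberId = Library::rd_kafka_memberid($rk);
    if ($memberId !== null) {
        $asString = \FFI::string($memberId);         // copy into a PHP string
        Library::rd_kafka_mem_free($rk, $memberId);  // release the allocated char*
    }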

      rd_kafka_message_broker_id()

      +
      public static rd_kafka_message_broker_id ( 
      +    \FFI\CData|null $rkmessage
      + ): int|null
      +
      +

      Returns the broker id of the broker the message was produced to or fetched from.

      + +
      +
      Parameters
      +
rkmessage \FFI\CData|null const rd_kafka_message_t*
      +
      Returns
      +
      int|null int32_t - a broker id if known, else -1.
      +
      + +

      rd_kafka_message_destroy()

      +
      public static rd_kafka_message_destroy ( 
      +    \FFI\CData|null $rkmessage
      + ): void
      +
      +
      +
      Parameters
      +
      rkmessage \FFI\CData|null rd_kafka_message_t*
      +
      +

      rd_kafka_message_detach_headers()

      +
      public static rd_kafka_message_detach_headers ( 
      +    \FFI\CData|null $rkmessage, 
      +    \FFI\CData|null $hdrsp
      + ): int
      +
      +

      Get the message header list and detach the list from the message making the application the owner of the headers. The application must eventually destroy the headers using rd_kafka_headers_destroy(). The message's headers will be set to NULL.

      +

      Otherwise same semantics as rd_kafka_message_headers()

      +
      See also
      rd_kafka_message_headers
      + +
      +
      Parameters
      +
      rkmessage \FFI\CData|null rd_kafka_message_t*
      +
      hdrsp \FFI\CData|null rd_kafka_headers_t**
      +
      Returns
      +
      int rd_kafka_resp_err_t
      +
      + +

      rd_kafka_message_errstr()

      +
      public static rd_kafka_message_errstr ( 
      +    \FFI\CData|null $rkmessage
      + ): string|null
      +
      +

      Returns the error string for an errored rd_kafka_message_t or NULL if there was no error.

      +
      Remarks
      This function MUST NOT be used with the producer.
      + +
      +
      Parameters
      +
rkmessage \FFI\CData|null const rd_kafka_message_t*
      +
      Returns
      +
      string|null const char*
      +
      + +

      rd_kafka_message_headers()

      +
      public static rd_kafka_message_headers ( 
      +    \FFI\CData|null $rkmessage, 
      +    \FFI\CData|null $hdrsp
      + ): int
      +
      +

      Get the message header list.

      +

      The returned pointer in *hdrsp is associated with the rkmessage and must not be used after destruction of the message object or the header list is replaced with rd_kafka_message_set_headers().

      + +
      Remarks
      Headers require broker version 0.11.0.0 or later.
      +
      +As an optimization the raw protocol headers are parsed on the first call to this function.
      + +
      +
      Parameters
      +
      rkmessage \FFI\CData|null const rd_kafka_message_t*
      +
      hdrsp \FFI\CData|null rd_kafka_headers_t**
      +
      Returns
      +
      int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR if headers were returned, RD_KAFKA_RESP_ERR__NOENT if the message has no headers, or another error code if the headers could not be parsed.
      +
      + +
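A hedged sketch of reading headers from a consumed message, assuming $ffi is the \FFI instance with the librdkafka definitions, $rkmessage is a consumed rd_kafka_message_t* CData, and Library is a placeholder facade name:

    $hdrs = $ffi->new('rd_kafka_headers_t*');
    $err = Library::rd_kafka_message_headers($rkmessage, \FFI::addr($hdrs));
    if ($err === 0) {                                // RD_KAFKA_RESP_ERR_NO_ERROR
        // read entries e.g. via rd_kafka_header_get_all(); $hdrs stays owned by $rkmessage
    }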

      rd_kafka_message_latency()

      +
      public static rd_kafka_message_latency ( 
      +    \FFI\CData|null $rkmessage
      + ): int|null
      +
      +

      Returns the latency for a produced message measured from the produce() call.

      + +
      +
      Parameters
      +
rkmessage \FFI\CData|null const rd_kafka_message_t*
      +
      Returns
      +
      int|null int64_t - the latency in microseconds, or -1 if not available.
      +
      + +

      rd_kafka_message_leader_epoch()

      +
      public static rd_kafka_message_leader_epoch ( 
      +    \FFI\CData|null $rkmessage
      + ): int|null
      +
      +
      Remarks
      This API must only be used on consumed messages without error.
      +
+Requires broker version >= 2.1.0 (KIP-320).
      + +
      +
      Parameters
      +
rkmessage \FFI\CData|null const rd_kafka_message_t*
      +
      Returns
      +
int|null int32_t - the message’s partition leader epoch at the time the message was fetched, if known, else -1.
      +
      + +

      rd_kafka_message_set_headers()

      +
      public static rd_kafka_message_set_headers ( 
      +    \FFI\CData|null $rkmessage, 
      +    \FFI\CData|null $hdrs
      + ): void
      +
      +

      Replace the message's current headers with a new list.

      + +
      Remarks
      The existing headers object, if any, will be destroyed.
      + +
      +
      Parameters
      +
      rkmessage \FFI\CData|null rd_kafka_message_t* - The message to set headers.
      +
      hdrs \FFI\CData|null rd_kafka_headers_t* - New header list. The message object assumes ownership of the list, the list will be destroyed automatically with the message object. The new headers list may be updated until the message object is passed or returned to librdkafka.
      +
      + +

      rd_kafka_message_status()

      +
      public static rd_kafka_message_status ( 
      +    \FFI\CData|null $rkmessage
      + ): int
      +
      +

      Returns the message's persistence status in the topic log.

      +
      Remarks
      The message status is not available in on_acknowledgement interceptors.
      + +
      +
      Parameters
      +
rkmessage \FFI\CData|null const rd_kafka_message_t*
      +
      Returns
      +
      int rd_kafka_msg_status_t
      +
      + +

      rd_kafka_message_timestamp()

      +
      public static rd_kafka_message_timestamp ( 
      +    \FFI\CData|null $rkmessage, 
      +    \FFI\CData|null $tstype
      + ): int|null
      +
      +

      Returns the message timestamp for a consumed message.

      +

      The timestamp is the number of milliseconds since the epoch (UTC).

      +

      tstype (if not NULL) is updated to indicate the type of timestamp.

      + +
      Remarks
      Message timestamps require broker version 0.10.0 or later.
      + +
      +
      Parameters
      +
      rkmessage \FFI\CData|null const rd_kafka_message_t*
      +
      tstype \FFI\CData|null rd_kafka_timestamp_type_t*
      +
      Returns
      +
      int|null int64_t - message timestamp, or -1 if not available.
      +
      + +
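Sketch of reading the timestamp and its type, under the same assumptions ($ffi holds the librdkafka definitions, Library is a placeholder facade, $rkmessage is a consumed message):

    $tstype = $ffi->new('rd_kafka_timestamp_type_t');
    $ts = Library::rd_kafka_message_timestamp($rkmessage, \FFI::addr($tstype));
    if ($ts !== -1) {
        // $ts is milliseconds since the epoch; $tstype->cdata distinguishes create time from log-append time
    }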

      rd_kafka_metadata()

      +
      public static rd_kafka_metadata ( 
      +    \FFI\CData|null $rk, 
      +    int|null $all_topics, 
      +    \FFI\CData|null $only_rkt, 
      +    \FFI\CData|null $metadatap, 
      +    int|null $timeout_ms
      + ): int
      +
      +

      Request Metadata from broker.

      +

      Parameters:

      +
        +
      • all_topics if non-zero: request info about all topics in cluster, if zero: only request info about locally known topics.
      • +
      • only_rkt only request info about this topic
      • +
      • metadatap pointer to hold metadata result. The *metadatap pointer must be released with rd_kafka_metadata_destroy().
      • +
      • timeout_ms maximum response time before failing.
      • +
      +
      Remarks
      Consumer: If all_topics is non-zero the Metadata response information may trigger a re-join if any subscribed topics have changed partition count or existence state.
      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t*
      +
      all_topics int|null int
      +
      only_rkt \FFI\CData|null rd_kafka_topic_t*
      +
      metadatap \FFI\CData|null const struct rd_kafka_metadata**
      +
      timeout_ms int|null int
      +
      Returns
      +
int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success (in which case *metadatap will be set), else RD_KAFKA_RESP_ERR__TIMED_OUT on timeout or other error code on error.
      +
      + +
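A rough sketch of a metadata round trip, again assuming $ffi carries the librdkafka definitions, $rk is an rd_kafka_t* CData, and Library is a placeholder facade:

    $metadata = $ffi->new('const struct rd_kafka_metadata*');
    $err = Library::rd_kafka_metadata($rk, 1, null, \FFI::addr($metadata), 5000);
    if ($err === 0) {                                  // RD_KAFKA_RESP_ERR_NO_ERROR
        $brokerCnt = $metadata[0]->broker_cnt;         // topics are in $metadata[0]->topics
        Library::rd_kafka_metadata_destroy($metadata); // always release the result
    }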

      rd_kafka_metadata_destroy()

      +
      public static rd_kafka_metadata_destroy ( 
      +    \FFI\CData|\Closure $metadata
      + ): void
      +
      +
      +
      Parameters
      +
metadata \FFI\CData|null const struct rd_kafka_metadata*
      +
      +

      rd_kafka_mock_broker_error_stack_cnt()

      +
      public static rd_kafka_mock_broker_error_stack_cnt ( 
      +    \FFI\CData|null $mcluster, 
      +    int|null $broker_id, 
      +    int|null $ApiKey, 
      +    \FFI\CData|null $cntp
      + ): int
      +
      +
      +
      Parameters
      +
      mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
      +
      broker_id int|null int32_t
      +
      ApiKey int|null int16_t
      +
      cntp \FFI\CData|null size_t*
      +
      Returns
      +
      int rd_kafka_resp_err_t
      +
      +

      rd_kafka_mock_broker_push_request_error_rtts()

      +
      public static rd_kafka_mock_broker_push_request_error_rtts ( 
      +    \FFI\CData|null $mcluster, 
      +    int|null $broker_id, 
      +    int|null $ApiKey, 
      +    int|null $cnt, 
      +    mixed $args
      + ): int
      +
      +
      +
      Parameters
      +
      mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
      +
      broker_id int|null int32_t
      +
      ApiKey int|null int16_t
      +
      cnt int|null size_t
      +
      args mixed
      +
      Returns
      +
      int rd_kafka_resp_err_t
      +
      +

      rd_kafka_mock_broker_push_request_errors()

      +
      public static rd_kafka_mock_broker_push_request_errors ( 
      +    \FFI\CData|null $mcluster, 
      +    int|null $broker_id, 
      +    int|null $ApiKey, 
      +    int|null $cnt, 
      +    mixed $args
      + ): int
      +
      +
      +
      Parameters
      +
      mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
      +
      broker_id int|null int32_t
      +
      ApiKey int|null int16_t
      +
      cnt int|null size_t
      +
      args mixed
      +
      Returns
      +
      int rd_kafka_resp_err_t
      +
      +

      rd_kafka_mock_broker_set_down()

      +
      public static rd_kafka_mock_broker_set_down ( 
      +    \FFI\CData|null $mcluster, 
      +    int|null $broker_id
      + ): int
      +
      +
      +
      Parameters
      +
      mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
      +
      broker_id int|null int32_t
      +
      Returns
      +
      int rd_kafka_resp_err_t
      +
      +

      rd_kafka_mock_broker_set_rack()

      +
      public static rd_kafka_mock_broker_set_rack ( 
      +    \FFI\CData|null $mcluster, 
      +    int|null $broker_id, 
      +    string|null $rack
      + ): int
      +
      +
      +
      Parameters
      +
      mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
      +
      broker_id int|null int32_t
      +
      rack string|null const char*
      +
      Returns
      +
      int rd_kafka_resp_err_t
      +
      +

      rd_kafka_mock_broker_set_rtt()

      +
      public static rd_kafka_mock_broker_set_rtt ( 
      +    \FFI\CData|null $mcluster, 
      +    int|null $broker_id, 
      +    int|null $rtt_ms
      + ): int
      +
      +
      +
      Parameters
      +
      mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
      +
      broker_id int|null int32_t
      +
      rtt_ms int|null int
      +
      Returns
      +
      int rd_kafka_resp_err_t
      +
      +

      rd_kafka_mock_broker_set_up()

      +
      public static rd_kafka_mock_broker_set_up ( 
      +    \FFI\CData|null $mcluster, 
      +    int|null $broker_id
      + ): int
      +
      +
      +
      Parameters
      +
      mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
      +
      broker_id int|null int32_t
      +
      Returns
      +
      int rd_kafka_resp_err_t
      +
      +

      rd_kafka_mock_clear_request_errors()

      +
      public static rd_kafka_mock_clear_request_errors ( 
      +    \FFI\CData|null $mcluster, 
      +    int|null $ApiKey
      + ): void
      +
      +
      +
      Parameters
      +
      mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
      +
      ApiKey int|null int16_t
      +
      +

      rd_kafka_mock_clear_requests()

      +
      public static rd_kafka_mock_clear_requests ( 
      +    \FFI\CData|null $mcluster
      + ): void
      +
      +
      +
      Parameters
      +
      mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
      +
      +

      rd_kafka_mock_cluster_bootstraps()

      +
      public static rd_kafka_mock_cluster_bootstraps ( 
      +    \FFI\CData|null $mcluster
      + ): string|null
      +
      +
      +
      Parameters
      +
      mcluster \FFI\CData|null const rd_kafka_mock_cluster_t*
      +
      Returns
      +
      string|null const char*
      +
      +

      rd_kafka_mock_cluster_destroy()

      +
      public static rd_kafka_mock_cluster_destroy ( 
      +    \FFI\CData|null $mcluster
      + ): void
      +
      +
      +
      Parameters
      +
      mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
      +
      +

      rd_kafka_mock_cluster_handle()

      +
      public static rd_kafka_mock_cluster_handle ( 
      +    \FFI\CData|null $mcluster
      + ): \FFI\CData|null
      +
      +
      +
      Parameters
      +
      mcluster \FFI\CData|null const rd_kafka_mock_cluster_t*
      +
      Returns
      +
      \FFI\CData|null rd_kafka_t*
      +
      +

      rd_kafka_mock_cluster_new()

      +
      public static rd_kafka_mock_cluster_new ( 
      +    \FFI\CData|null $rk, 
      +    int|null $broker_cnt
      + ): \FFI\CData|null
      +
      +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t*
      +
      broker_cnt int|null int
      +
      Returns
      +
      \FFI\CData|null rd_kafka_mock_cluster_t*
      +
      +

      rd_kafka_mock_coordinator_set()

      +
      public static rd_kafka_mock_coordinator_set ( 
      +    \FFI\CData|null $mcluster, 
      +    string|null $key_type, 
      +    string|null $key, 
      +    int|null $broker_id
      + ): int
      +
      +
      +
      Parameters
      +
      mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
      +
      key_type string|null const char*
      +
      key string|null const char*
      +
      broker_id int|null int32_t
      +
      Returns
      +
      int rd_kafka_resp_err_t
      +
      +

      rd_kafka_mock_get_requests()

      +
      public static rd_kafka_mock_get_requests ( 
      +    \FFI\CData|null $mcluster, 
      +    \FFI\CData|null $cntp
      + ): \FFI\CData|null
      +
      +
      +
      Parameters
      +
      mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
      +
      cntp \FFI\CData|null size_t*
      +
      Returns
      +
      \FFI\CData|null rd_kafka_mock_request_t**
      +
      +

      rd_kafka_mock_partition_set_follower()

      +
      public static rd_kafka_mock_partition_set_follower ( 
      +    \FFI\CData|null $mcluster, 
      +    string|null $topic, 
      +    int|null $partition, 
      +    int|null $broker_id
      + ): int
      +
      +
      +
      Parameters
      +
      mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
      +
      topic string|null const char*
      +
      partition int|null int32_t
      +
      broker_id int|null int32_t
      +
      Returns
      +
      int rd_kafka_resp_err_t
      +
      +

      rd_kafka_mock_partition_set_follower_wmarks()

      +
      public static rd_kafka_mock_partition_set_follower_wmarks ( 
      +    \FFI\CData|null $mcluster, 
      +    string|null $topic, 
      +    int|null $partition, 
      +    int|null $lo, 
      +    int|null $hi
      + ): int
      +
      +
      +
      Parameters
      +
      mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
      +
      topic string|null const char*
      +
      partition int|null int32_t
      +
      lo int|null int64_t
      +
      hi int|null int64_t
      +
      Returns
      +
      int rd_kafka_resp_err_t
      +
      +

      rd_kafka_mock_partition_set_leader()

      +
      public static rd_kafka_mock_partition_set_leader ( 
      +    \FFI\CData|null $mcluster, 
      +    string|null $topic, 
      +    int|null $partition, 
      +    int|null $broker_id
      + ): int
      +
      +
      +
      Parameters
      +
      mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
      +
      topic string|null const char*
      +
      partition int|null int32_t
      +
      broker_id int|null int32_t
      +
      Returns
      +
      int rd_kafka_resp_err_t
      +
      +

      rd_kafka_mock_push_request_errors()

      +
      public static rd_kafka_mock_push_request_errors ( 
      +    \FFI\CData|null $mcluster, 
      +    int|null $ApiKey, 
      +    int|null $cnt, 
      +    mixed $args
      + ): void
      +
      +
      +
      Parameters
      +
      mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
      +
      ApiKey int|null int16_t
      +
      cnt int|null size_t
      +
      args mixed
      +
      +

      rd_kafka_mock_push_request_errors_array()

      +
      public static rd_kafka_mock_push_request_errors_array ( 
      +    \FFI\CData|null $mcluster, 
      +    int|null $ApiKey, 
      +    int|null $cnt, 
      +    \FFI\CData|null $errors
      + ): void
      +
      +
      +
      Parameters
      +
      mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
      +
      ApiKey int|null int16_t
      +
      cnt int|null size_t
      +
      errors \FFI\CData|null const rd_kafka_resp_err_t*
      +
      +

      rd_kafka_mock_request_api_key()

      +
      public static rd_kafka_mock_request_api_key ( 
      +    \FFI\CData|null $mreq
      + ): int|null
      +
      +
      +
      Parameters
      +
      mreq \FFI\CData|null rd_kafka_mock_request_t*
      +
      Returns
      +
      int|null int16_t
      +
      +

      rd_kafka_mock_request_destroy()

      +
      public static rd_kafka_mock_request_destroy ( 
      +    \FFI\CData|null $mreq
      + ): void
      +
      +
      +
      Parameters
      +
      mreq \FFI\CData|null rd_kafka_mock_request_t*
      +
      +

      rd_kafka_mock_request_id()

      +
      public static rd_kafka_mock_request_id ( 
      +    \FFI\CData|null $mreq
      + ): int|null
      +
      +
      +
      Parameters
      +
      mreq \FFI\CData|null rd_kafka_mock_request_t*
      +
      Returns
      +
      int|null int32_t
      +
      +

      rd_kafka_mock_request_timestamp()

      +
      public static rd_kafka_mock_request_timestamp ( 
      +    \FFI\CData|null $mreq
      + ): int|null
      +
      +
      +
      Parameters
      +
      mreq \FFI\CData|null rd_kafka_mock_request_t*
      +
      Returns
      +
      int|null int64_t
      +
      +

      rd_kafka_mock_set_apiversion()

      +
      public static rd_kafka_mock_set_apiversion ( 
      +    \FFI\CData|null $mcluster, 
      +    int|null $ApiKey, 
      +    int|null $MinVersion, 
      +    int|null $MaxVersion
      + ): int
      +
      +
      +
      Parameters
      +
      mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
      +
      ApiKey int|null int16_t
      +
      MinVersion int|null int16_t
      +
      MaxVersion int|null int16_t
      +
      Returns
      +
      int rd_kafka_resp_err_t
      +
      +

      rd_kafka_mock_start_request_tracking()

      +
      public static rd_kafka_mock_start_request_tracking ( 
      +    \FFI\CData|null $mcluster
      + ): void
      +
      +
      +
      Parameters
      +
      mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
      +
      +

      rd_kafka_mock_stop_request_tracking()

      +
      public static rd_kafka_mock_stop_request_tracking ( 
      +    \FFI\CData|null $mcluster
      + ): void
      +
      +
      +
      Parameters
      +
      mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
      +
      +

      rd_kafka_mock_topic_create()

      +
      public static rd_kafka_mock_topic_create ( 
      +    \FFI\CData|null $mcluster, 
      +    string|null $topic, 
      +    int|null $partition_cnt, 
      +    int|null $replication_factor
      + ): int
      +
      +
      +
      Parameters
      +
      mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
      +
      topic string|null const char*
      +
      partition_cnt int|null int
      +
      replication_factor int|null int
      +
      Returns
      +
      int rd_kafka_resp_err_t
      +
      +
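Taken together, the mock cluster methods above allow an integration test to run without a real broker. A hedged sketch (Library is a placeholder facade name; $rk is an existing rd_kafka_t* handle the mock cluster is attached to):

    $mcluster = Library::rd_kafka_mock_cluster_new($rk, 3);              // 3 mock brokers
    Library::rd_kafka_mock_topic_create($mcluster, 'playground', 12, 3); // 12 partitions, replication factor 3
    $bootstrap = Library::rd_kafka_mock_cluster_bootstraps($mcluster);   // host:port list for test clients
    // ... point a producer/consumer at $bootstrap and run the test ...
    Library::rd_kafka_mock_cluster_destroy($mcluster);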

      rd_kafka_mock_topic_set_error()

      +
      public static rd_kafka_mock_topic_set_error ( 
      +    \FFI\CData|null $mcluster, 
      +    string|null $topic, 
      +    int $err
      + ): void
      +
      +
      +
      Parameters
      +
      mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
      +
      topic string|null const char*
      +
      err int rd_kafka_resp_err_t
      +
      +

      rd_kafka_msg_partitioner_consistent()

      +
      public static rd_kafka_msg_partitioner_consistent ( 
      +    \FFI\CData|null $rkt, 
      +    \FFI\CData|object|string|null $key, 
      +    int|null $keylen, 
      +    int|null $partition_cnt, 
      +    \FFI\CData|object|string|null $opaque, 
      +    \FFI\CData|object|string|null $msg_opaque
      + ): int|null
      +
      +

      Consistent partitioner.

      +

      Uses consistent hashing to map identical keys onto identical partitions.

      +

      The rkt_opaque argument is the opaque set by rd_kafka_topic_conf_set_opaque(). The msg_opaque argument is the per-message opaque passed to produce().

      + +
      +
      Parameters
      +
      rkt \FFI\CData|null const rd_kafka_topic_t*
      +
      key \FFI\CData|object|string|null const void*
      +
      keylen int|null size_t
      +
      partition_cnt int|null int32_t
      +
      opaque \FFI\CData|object|string|null void*
      +
      msg_opaque \FFI\CData|object|string|null void*
      +
      Returns
      +
      int|null int32_t - a “random” partition between 0 and partition_cnt - 1 based on the CRC value of the key
      +
      + +

      rd_kafka_msg_partitioner_consistent_random()

      +
      public static rd_kafka_msg_partitioner_consistent_random ( 
      +    \FFI\CData|null $rkt, 
      +    \FFI\CData|object|string|null $key, 
      +    int|null $keylen, 
      +    int|null $partition_cnt, 
      +    \FFI\CData|object|string|null $opaque, 
      +    \FFI\CData|object|string|null $msg_opaque
      + ): int|null
      +
      +

      Consistent-Random partitioner.

      +

      This is the default partitioner. Uses consistent hashing to map identical keys onto identical partitions, and messages without keys will be assigned via the random partitioner.

      +

      The rkt_opaque argument is the opaque set by rd_kafka_topic_conf_set_opaque(). The msg_opaque argument is the per-message opaque passed to produce().

      + +
      +
      Parameters
      +
      rkt \FFI\CData|null const rd_kafka_topic_t*
      +
      key \FFI\CData|object|string|null const void*
      +
      keylen int|null size_t
      +
      partition_cnt int|null int32_t
      +
      opaque \FFI\CData|object|string|null void*
      +
      msg_opaque \FFI\CData|object|string|null void*
      +
      Returns
      +
      int|null int32_t - a “random” partition between 0 and partition_cnt - 1 based on the CRC value of the key (if provided)
      +
      + +

      rd_kafka_msg_partitioner_fnv1a()

      +
      public static rd_kafka_msg_partitioner_fnv1a ( 
      +    \FFI\CData|null $rkt, 
      +    \FFI\CData|object|string|null $key, 
      +    int|null $keylen, 
      +    int|null $partition_cnt, 
      +    \FFI\CData|object|string|null $rkt_opaque, 
      +    \FFI\CData|object|string|null $msg_opaque
      + ): int|null
      +
      +

      FNV-1a partitioner.

      +

      Uses consistent hashing to map identical keys onto identical partitions using FNV-1a hashing.

      +

      The rkt_opaque argument is the opaque set by rd_kafka_topic_conf_set_opaque(). The msg_opaque argument is the per-message opaque passed to produce().

      + +
      +
      Parameters
      +
      rkt \FFI\CData|null const rd_kafka_topic_t*
      +
      key \FFI\CData|object|string|null const void*
      +
      keylen int|null size_t
      +
      partition_cnt int|null int32_t
      +
      rkt_opaque \FFI\CData|object|string|null void*
      +
      msg_opaque \FFI\CData|object|string|null void*
      +
      Returns
      +
      int|null int32_t - a partition between 0 and partition_cnt - 1.
      +
      + +

      rd_kafka_msg_partitioner_fnv1a_random()

      +
      public static rd_kafka_msg_partitioner_fnv1a_random ( 
      +    \FFI\CData|null $rkt, 
      +    \FFI\CData|object|string|null $key, 
      +    int|null $keylen, 
      +    int|null $partition_cnt, 
      +    \FFI\CData|object|string|null $rkt_opaque, 
      +    \FFI\CData|object|string|null $msg_opaque
      + ): int|null
      +
      +

      Consistent-Random FNV-1a partitioner.

      +

      Uses consistent hashing to map identical keys onto identical partitions using FNV-1a hashing. Messages without keys will be assigned via the random partitioner.

      +

      The rkt_opaque argument is the opaque set by rd_kafka_topic_conf_set_opaque(). The msg_opaque argument is the per-message opaque passed to produce().

      + +
      +
      Parameters
      +
      rkt \FFI\CData|null const rd_kafka_topic_t*
      +
      key \FFI\CData|object|string|null const void*
      +
      keylen int|null size_t
      +
      partition_cnt int|null int32_t
      +
      rkt_opaque \FFI\CData|object|string|null void*
      +
      msg_opaque \FFI\CData|object|string|null void*
      +
      Returns
      +
      int|null int32_t - a partition between 0 and partition_cnt - 1.
      +
      + +

      rd_kafka_msg_partitioner_murmur2()

      +
      public static rd_kafka_msg_partitioner_murmur2 ( 
      +    \FFI\CData|null $rkt, 
      +    \FFI\CData|object|string|null $key, 
      +    int|null $keylen, 
      +    int|null $partition_cnt, 
      +    \FFI\CData|object|string|null $rkt_opaque, 
      +    \FFI\CData|object|string|null $msg_opaque
      + ): int|null
      +
      +

      Murmur2 partitioner (Java compatible).

      +

      Uses consistent hashing to map identical keys onto identical partitions using Java-compatible Murmur2 hashing.

      +

      The rkt_opaque argument is the opaque set by rd_kafka_topic_conf_set_opaque(). The msg_opaque argument is the per-message opaque passed to produce().

      + +
      +
      Parameters
      +
      rkt \FFI\CData|null const rd_kafka_topic_t*
      +
      key \FFI\CData|object|string|null const void*
      +
      keylen int|null size_t
      +
      partition_cnt int|null int32_t
      +
      rkt_opaque \FFI\CData|object|string|null void*
      +
      msg_opaque \FFI\CData|object|string|null void*
      +
      Returns
      +
      int|null int32_t - a partition between 0 and partition_cnt - 1.
      +
      + +

      rd_kafka_msg_partitioner_murmur2_random()

      +
      public static rd_kafka_msg_partitioner_murmur2_random ( 
      +    \FFI\CData|null $rkt, 
      +    \FFI\CData|object|string|null $key, 
      +    int|null $keylen, 
      +    int|null $partition_cnt, 
      +    \FFI\CData|object|string|null $rkt_opaque, 
      +    \FFI\CData|object|string|null $msg_opaque
      + ): int|null
      +
      +

      Consistent-Random Murmur2 partitioner (Java compatible).

      +

      Uses consistent hashing to map identical keys onto identical partitions using Java-compatible Murmur2 hashing. Messages without keys will be assigned via the random partitioner.

      +

      The rkt_opaque argument is the opaque set by rd_kafka_topic_conf_set_opaque(). The msg_opaque argument is the per-message opaque passed to produce().

      + +
      +
      Parameters
      +
      rkt \FFI\CData|null const rd_kafka_topic_t*
      +
      key \FFI\CData|object|string|null const void*
      +
      keylen int|null size_t
      +
      partition_cnt int|null int32_t
      +
      rkt_opaque \FFI\CData|object|string|null void*
      +
      msg_opaque \FFI\CData|object|string|null void*
      +
      Returns
      +
      int|null int32_t - a partition between 0 and partition_cnt - 1.
      +
      + +

      rd_kafka_msg_partitioner_random()

      +
      public static rd_kafka_msg_partitioner_random ( 
      +    \FFI\CData|null $rkt, 
      +    \FFI\CData|object|string|null $key, 
      +    int|null $keylen, 
      +    int|null $partition_cnt, 
      +    \FFI\CData|object|string|null $opaque, 
      +    \FFI\CData|object|string|null $msg_opaque
      + ): int|null
      +
      +

      Random partitioner.

      +

      Will try not to return unavailable partitions.

      +

      The rkt_opaque argument is the opaque set by rd_kafka_topic_conf_set_opaque(). The msg_opaque argument is the per-message opaque passed to produce().

      + +
      +
      Parameters
      +
      rkt \FFI\CData|null const rd_kafka_topic_t*
      +
      key \FFI\CData|object|string|null const void*
      +
      keylen int|null size_t
      +
      partition_cnt int|null int32_t
      +
      opaque \FFI\CData|object|string|null void*
      +
      msg_opaque \FFI\CData|object|string|null void*
      +
      Returns
      +
      int|null int32_t - a random partition between 0 and partition_cnt - 1.
      +
      + +
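For illustration, the built-in partitioners can be called directly to preview which partition a given key would map to. A minimal sketch, assuming the documented static wrappers live on a binding class (\RdKafka\FFI\Library is used below as an assumption) and that $rkt is an existing topic handle created with rd_kafka_topic_new():

use RdKafka\FFI\Library;

$key = 'order-4711';

// Preview the mapping of the Java-compatible murmur2_random partitioner
// for a topic with 12 partitions ($rkt: existing rd_kafka_topic_t* handle).
$partition = Library::rd_kafka_msg_partitioner_murmur2_random(
    $rkt,
    $key,
    strlen($key),
    12,    // partition_cnt of the topic
    null,  // rkt_opaque
    null   // msg_opaque
);

echo "key {$key} maps to partition {$partition}\n";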

      rd_kafka_name()

      +
      public static rd_kafka_name ( 
      +    \FFI\CData|null $rk
      + ): string|null
      +
      +
      +
      Parameters
      +
      rk \FFI\CData|null const rd_kafka_t*
      +
      Returns
      +
      string|null const char*
      +
      +

      rd_kafka_new()

      +
      public static rd_kafka_new ( 
      +    int $type, 
      +    \FFI\CData|null $conf, 
      +    \FFI\CData|null $errstr, 
      +    int|null $errstr_size
      + ): \FFI\CData|null
      +
      +

      Creates a new Kafka handle and starts its operation according to the specified type (RD_KAFKA_CONSUMER or RD_KAFKA_PRODUCER).

      +

      conf is an optional struct created with rd_kafka_conf_new() that will be used instead of the default configuration. The conf object is freed by this function on success and must not be used or destroyed by the application subsequently. See rd_kafka_conf_set() et.al for more information.

      +

errstr must be a pointer to memory of at least size errstr_size where rd_kafka_new() may write a human readable error message in case the creation of a new handle fails, in which case the function returns NULL.

      +
      Remarks
      RD_KAFKA_CONSUMER: When a new RD_KAFKA_CONSUMER rd_kafka_t handle is created it may either operate in the legacy simple consumer mode using the rd_kafka_consume_start() interface, or the High-level KafkaConsumer API.
      +
      +An application must only use one of these groups of APIs on a given rd_kafka_t RD_KAFKA_CONSUMER handle.
      + +
      See also
      To destroy the Kafka handle, use rd_kafka_destroy().
      + +
      +
      Parameters
      +
      type int rd_kafka_type_t
      +
      conf \FFI\CData|null rd_kafka_conf_t*
      +
      errstr \FFI\CData|null char*
      +
      errstr_size int|null size_t
      +
      Returns
      +
      \FFI\CData|null rd_kafka_t* - The Kafka handle on success or NULL on error (see errstr)
      +
      + +
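A minimal sketch of creating and destroying a producer handle. The binding class name \RdKafka\FFI\Library, the RD_KAFKA_PRODUCER constant, and the exposure of rd_kafka_conf_new()/rd_kafka_conf_set() as static wrappers are assumptions here; only calls documented or referenced above are used:

use RdKafka\FFI\Library;

$errstr = \FFI::new('char[512]');

// Build a configuration; on success its ownership passes to rd_kafka_new().
$conf = Library::rd_kafka_conf_new();
Library::rd_kafka_conf_set($conf, 'bootstrap.servers', 'localhost:9092', $errstr, 512);

// RD_KAFKA_PRODUCER: producer member of librdkafka's rd_kafka_type_t enum
// (assumed to be exposed as a constant by the binding).
$rk = Library::rd_kafka_new(RD_KAFKA_PRODUCER, $conf, $errstr, 512);
if ($rk === null) {
    throw new \RuntimeException('rd_kafka_new failed: ' . \FFI::string($errstr));
}

// ... produce and poll ...

Library::rd_kafka_destroy($rk); // see the "See also" note above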

      rd_kafka_oauthbearer_set_token()

      +
      public static rd_kafka_oauthbearer_set_token ( 
      +    \FFI\CData|null $rk, 
      +    string|null $token_value, 
      +    int|null $md_lifetime_ms, 
      +    string|null $md_principal_name, 
      +    \FFI\CData|null $extensions, 
      +    int|null $extension_size, 
      +    \FFI\CData|null $errstr, 
      +    int|null $errstr_size
      + ): int
      +
      +

      Set SASL/OAUTHBEARER token and metadata.

      + +

      The SASL/OAUTHBEARER token refresh callback or event handler should invoke this method upon success. The extension keys must not include the reserved key "`auth`", and all extension keys and values must conform to the required format as per https://tools.ietf.org/html/rfc7628#section-3.1:

      +
      key            = 1*(ALPHA)
      +value          = *(VCHAR / SP / HTAB / CR / LF )
      +
      +
      See also
      rd_kafka_oauthbearer_set_token_failure
      +
      +rd_kafka_conf_set_oauthbearer_token_refresh_cb
      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t* - Client instance.
      +
      token_value string|null const char* - the mandatory token value to set, often (but not necessarily) a JWS compact serialization as per https://tools.ietf.org/html/rfc7515#section-3.1.
      +
      md_lifetime_ms int|null int64_t - when the token expires, in terms of the number of milliseconds since the epoch.
      +
      md_principal_name string|null const char* - the mandatory Kafka principal name associated with the token.
      +
      extensions \FFI\CData|null const char** - optional SASL extensions key-value array with extensions_size elements (number of keys * 2), where [i] is the key and [i+1] is the key’s value, to be communicated to the broker as additional key-value pairs during the initial client response as per https://tools.ietf.org/html/rfc7628#section-3.1. The key-value pairs are copied.
      +
      extension_size int|null size_t - the number of SASL extension keys plus values, which must be a non-negative multiple of 2.
      +
errstr \FFI\CData|null char* - A human readable error string (nul-terminated) is written to this location; it must be of at least errstr_size bytes. The errstr is only written in case of error.
+
errstr_size int|null size_t - Writable size in errstr.
      +
      Returns
      +
      int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success, otherwise errstr set and:
      +RD_KAFKA_RESP_ERR__INVALID_ARG if any of the arguments are invalid;
      +RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED if SASL/OAUTHBEARER is not supported by this build;
      +RD_KAFKA_RESP_ERR__STATE if SASL/OAUTHBEARER is supported but is not configured as the client’s authentication mechanism.
      +
      + +
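A minimal sketch of the success path inside a token refresh handler; the binding class name and the surrounding $rk/$jwsToken/$expiresAtMs variables are placeholders, and the token retrieval itself is not shown:

use RdKafka\FFI\Library;

$errstr = \FFI::new('char[256]');

$err = Library::rd_kafka_oauthbearer_set_token(
    $rk,                      // rd_kafka_t* client instance
    $jwsToken,                // token value, e.g. a JWS compact serialization
    $expiresAtMs,             // expiry in milliseconds since the epoch
    'kafka-client-principal', // principal name associated with the token
    null,                     // no SASL extensions in this sketch
    0,                        // extension_size
    $errstr,
    256
);

if ($err !== 0 /* RD_KAFKA_RESP_ERR_NO_ERROR */) {
    // Report the failure so librdkafka can surface it and retry later.
    Library::rd_kafka_oauthbearer_set_token_failure($rk, \FFI::string($errstr));
}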

      rd_kafka_oauthbearer_set_token_failure()

      +
      public static rd_kafka_oauthbearer_set_token_failure ( 
      +    \FFI\CData|null $rk, 
      +    string|null $errstr
      + ): int
      +
      +

      SASL/OAUTHBEARER token refresh failure indicator.

      + +

      The SASL/OAUTHBEARER token refresh callback or event handler should invoke this method upon failure.

      + +
      See also
      rd_kafka_oauthbearer_set_token
      +
      +rd_kafka_conf_set_oauthbearer_token_refresh_cb
      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t* - Client instance.
      +
      errstr string|null const char* - mandatory human readable error reason for failing to acquire a token.
      +
      Returns
      +
      int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success, otherwise:
      +RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED if SASL/OAUTHBEARER is not supported by this build;
      +RD_KAFKA_RESP_ERR__STATE if SASL/OAUTHBEARER is supported but is not configured as the client’s authentication mechanism,
      +RD_KAFKA_RESP_ERR__INVALID_ARG if no error string is supplied.
      +
      + +

      rd_kafka_offset_store()

      +
      public static rd_kafka_offset_store ( 
      +    \FFI\CData|null $rkt, 
      +    int|null $partition, 
      +    int|null $offset
      + ): int
      +
      +

      Store offset offset + 1 for topic rkt partition partition.

      +

      The offset + 1 will be committed (written) to broker (or file) according to auto.commit.interval.ms or manual offset-less commit()

      +
      Deprecated:
      This API lacks support for partition leader epochs, which makes it at risk for unclean leader election log truncation issues. Use rd_kafka_offsets_store() and rd_kafka_offset_store_message() instead.
      +
      Warning
      This method may only be called for partitions that are currently assigned. Non-assigned partitions will fail with RD_KAFKA_RESP_ERR__STATE. Since v1.9.0.
      +
      +Avoid storing offsets after calling rd_kafka_seek() (et.al) as this may later interfere with resuming a paused partition, instead store offsets prior to calling seek.
      +
      Remarks
      enable.auto.offset.store must be set to "false" when using this API.
      + +
      +
      Parameters
      +
      rkt \FFI\CData|null rd_kafka_topic_t*
      +
      partition int|null int32_t
      +
      offset int|null int64_t
      +
      Returns
      +
      int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error.
      +
      + +

      rd_kafka_offset_store_message()

      +
      public static rd_kafka_offset_store_message ( 
      +    \FFI\CData|null $rkmessage
      + ): \FFI\CData|null
      +
      +

      Store offset +1 for the consumed message.

      +

      The message offset + 1 will be committed to broker according to auto.commit.interval.ms or manual offset-less commit()

      +
      Warning
      This method may only be called for partitions that are currently assigned. Non-assigned partitions will fail with RD_KAFKA_RESP_ERR__STATE. Since v1.9.0.
      +
      +Avoid storing offsets after calling rd_kafka_seek() (et.al) as this may later interfere with resuming a paused partition, instead store offsets prior to calling seek.
      +
      Remarks
      enable.auto.offset.store must be set to "false" when using this API.
      + +
      +
      Parameters
      +
rkmessage \FFI\CData|null rd_kafka_message_t*
      +
      Returns
      +
      \FFI\CData|null rd_kafka_error_t* - NULL on success or an error object on failure.
      +
      + +
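A minimal sketch of manual offset storage after processing a consumed message (enable.auto.offset.store set to false as noted above). The binding class name and the processRecord()/handleStoreError() helpers are placeholders:

use RdKafka\FFI\Library;

// $msg is an rd_kafka_message_t* previously returned by rd_kafka_consumer_poll().
processRecord($msg); // hypothetical application-side processing

$error = Library::rd_kafka_offset_store_message($msg);
if ($error !== null) {
    // rd_kafka_error_t* explaining why the offset could not be stored,
    // e.g. the partition is no longer assigned.
    handleStoreError($error); // hypothetical error handling
}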

      rd_kafka_offsets_for_times()

      +
      public static rd_kafka_offsets_for_times ( 
      +    \FFI\CData|null $rk, 
      +    \FFI\CData|null $offsets, 
      +    int|null $timeout_ms
      + ): int
      +
      +

      Look up the offsets for the given partitions by timestamp.

      +

      The returned offset for each partition is the earliest offset whose timestamp is greater than or equal to the given timestamp in the corresponding partition.

      +

On input, the timestamp to query is passed in the offset field of each partition in offsets; on output, the offset field contains the resolved offset.

      +

      The function will block for at most timeout_ms milliseconds.

      +
      Remarks
      Duplicate Topic+Partitions are not supported.
      +
      +Per-partition errors may be returned in rd_kafka_topic_partition_t.err
      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t*
      +
      offsets \FFI\CData|null rd_kafka_topic_partition_list_t*
      +
      timeout_ms int|null int
      +
      Returns
      +
int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR if offsets could be queried (do note that per-partition errors might be set), RD_KAFKA_RESP_ERR__TIMED_OUT if not all offsets could be fetched within timeout_ms, RD_KAFKA_RESP_ERR__INVALID_ARG if the offsets list is empty, RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION if all partitions are unknown, RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE if unable to query leaders for the given partitions.
      +
      + +
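A minimal sketch of resolving the offset for a timestamp on a single partition. Besides the binding class name, it assumes librdkafka's topic partition list helpers (rd_kafka_topic_partition_list_new/_add/_set_offset/_destroy) are exposed as static wrappers as well:

use RdKafka\FFI\Library;

// Look up the earliest offset at or after "one hour ago" on orders/0.
$tsMs = (time() - 3600) * 1000;

$parts = Library::rd_kafka_topic_partition_list_new(1);
Library::rd_kafka_topic_partition_list_add($parts, 'orders', 0);
// On input the offset field carries the timestamp to look up.
Library::rd_kafka_topic_partition_list_set_offset($parts, 'orders', 0, $tsMs);

$err = Library::rd_kafka_offsets_for_times($rk, $parts, 5000 /* timeout_ms */);
if ($err === 0 /* RD_KAFKA_RESP_ERR_NO_ERROR */) {
    // On output the offset field holds the resolved offset; check ->err per partition.
    $resolved = $parts->elems[0]->offset;
}
Library::rd_kafka_topic_partition_list_destroy($parts);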

      rd_kafka_offsets_store()

      +
      public static rd_kafka_offsets_store ( 
      +    \FFI\CData|null $rk, 
      +    \FFI\CData|null $offsets
      + ): int
      +
      +

      Store offsets for next auto-commit for one or more partitions.

      +

      The offset will be committed (written) to the offset store according to auto.commit.interval.ms or manual offset-less commit().

      +

Per-partition success/error status is propagated through each partition's .err field for all return values (even NO_ERROR) except INVALID_ARG.

      +
      Warning
      This method may only be called for partitions that are currently assigned. Non-assigned partitions will fail with RD_KAFKA_RESP_ERR__STATE. Since v1.9.0.
      +
      +Avoid storing offsets after calling rd_kafka_seek() (et.al) as this may later interfere with resuming a paused partition, instead store offsets prior to calling seek.
      +
      Remarks
      The .offset field is stored as is, it will NOT be + 1.
      +
      +enable.auto.offset.store must be set to "false" when using this API.
      +
      +The leader epoch, if set, will be used to fence outdated partition leaders. See rd_kafka_topic_partition_set_leader_epoch().
      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t*
      +
      offsets \FFI\CData|null rd_kafka_topic_partition_list_t*
      +
      Returns
      +
      int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on (partial) success, or RD_KAFKA_RESP_ERR__INVALID_ARG if enable.auto.offset.store is true, or RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION or RD_KAFKA_RESP_ERR__STATE if none of the offsets could be stored.
      +
      + +

      rd_kafka_opaque()

      +
      public static rd_kafka_opaque ( 
      +    \FFI\CData|null $rk
      + ): \FFI\CData|object|string|null
      +
      +
      +
      Parameters
      +
      rk \FFI\CData|null const rd_kafka_t*
      +
      Returns
      +
      \FFI\CData|object|string|null void*
      +
      +

      rd_kafka_outq_len()

      +
      public static rd_kafka_outq_len ( 
      +    \FFI\CData|null $rk
      + ): int|null
      +
      +

      Returns the current out queue length.

      +

      The out queue length is the sum of:

      +
        +
      • number of messages waiting to be sent to, or acknowledged by, the broker.
      • +
      • number of delivery reports (e.g., dr_msg_cb) waiting to be served by rd_kafka_poll() or rd_kafka_flush().
      • +
      • number of callbacks (e.g., error_cb, stats_cb, etc) waiting to be served by rd_kafka_poll(), rd_kafka_consumer_poll() or rd_kafka_flush().
      • +
      • number of events waiting to be served by background_event_cb() in the background queue (see rd_kafka_conf_set_background_event_cb).
      • +
      +

      An application should wait for the return value of this function to reach zero before terminating to make sure outstanding messages, requests (such as offset commits), callbacks and events are fully processed. See rd_kafka_flush().

      + +
      See also
      rd_kafka_flush()
      + +
      +
      Parameters
      +
rk \FFI\CData|null rd_kafka_t*
      +
      Returns
      +
      int|null int - number of messages and events waiting in queues.
      +
      + +
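A minimal sketch of a producer shutdown sequence built on this function (binding class name assumed as above):

use RdKafka\FFI\Library;

// Give queued messages a chance to be delivered ...
Library::rd_kafka_flush($rk, 10000);

// ... then keep serving delivery reports, callbacks and events until the
// out queue is empty before destroying the handle.
while (Library::rd_kafka_outq_len($rk) > 0) {
    Library::rd_kafka_poll($rk, 100);
}
Library::rd_kafka_destroy($rk);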

      rd_kafka_pause_partitions()

      +
      public static rd_kafka_pause_partitions ( 
      +    \FFI\CData|null $rk, 
      +    \FFI\CData|null $partitions
      + ): int
      +
      +

      Pause producing or consumption for the provided list of partitions.

      +

      Success or error is returned per-partition err in the partitions list.

      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t*
      +
      partitions \FFI\CData|null rd_kafka_topic_partition_list_t*
      +
      Returns
      +
      int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR
      +
      + +

      rd_kafka_plugin_f_conf_init_t()

      +
      public static rd_kafka_plugin_f_conf_init_t ( 
      +    \FFI\CData|null $conf, 
      +    \FFI\CData|object|string|null $plug_opaquep, 
      +    \FFI\CData|null $errstr, 
      +    int|null $errstr_size
      + ): int
      +
      +

      Plugin's configuration initializer method called each time the library is referenced from configuration (even if previously loaded by another client instance).

      +
      Remarks
      This method MUST be implemented by plugins and have the symbol name conf_init
      + +
      Remarks
      A plugin may add an on_conf_destroy() interceptor to clean up plugin-specific resources created in the plugin's conf_init() method.
      + +
      +
      Parameters
      +
      conf \FFI\CData|null rd_kafka_conf_t* - Configuration set up to this point.
      +
      plug_opaquep \FFI\CData|object|string|null void** - Plugin can set this pointer to a per-configuration opaque pointer.
      +
errstr \FFI\CData|null char* - String buffer of size errstr_size where the plugin must write a human readable error string in case the initializer fails (returns non-zero).
+
errstr_size int|null size_t - Maximum space (including \0) in errstr.
      +
      Returns
      +
      int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error.
      +
      + +

      rd_kafka_poll()

      +
      public static rd_kafka_poll ( 
      +    \FFI\CData|null $rk, 
      +    int|null $timeout_ms
      + ): int|null
      +
      +

      Polls the provided kafka handle for events.

      +

      Events will cause application-provided callbacks to be called.

      +

      The timeout_ms argument specifies the maximum amount of time (in milliseconds) that the call will block waiting for events. For non-blocking calls, provide 0 as timeout_ms. To wait indefinitely for an event, provide -1.

      +
      Remarks
      An application should make sure to call poll() at regular intervals to serve any queued callbacks waiting to be called.
      +
      +If your producer doesn't have any callback set (in particular via rd_kafka_conf_set_dr_msg_cb or rd_kafka_conf_set_error_cb) you might choose not to call poll(), though this is not recommended.
      +

      Events:

      +
        +
      • delivery report callbacks (if dr_cb/dr_msg_cb is configured) [producer]
      • +
      • error callbacks (rd_kafka_conf_set_error_cb()) [all]
      • +
      • stats callbacks (rd_kafka_conf_set_stats_cb()) [all]
      • +
      • throttle callbacks (rd_kafka_conf_set_throttle_cb()) [all]
      • +
      • OAUTHBEARER token refresh callbacks (rd_kafka_conf_set_oauthbearer_token_refresh_cb()) [all]
      • +
      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t*
      +
      timeout_ms int|null int
      +
      Returns
      +
      int|null int - the number of events served.
      +
      + +

      rd_kafka_poll_set_consumer()

      +
      public static rd_kafka_poll_set_consumer ( 
      +    \FFI\CData|null $rk
      + ): int
      +
      +

      Redirect the main (rd_kafka_poll()) queue to the KafkaConsumer's queue (rd_kafka_consumer_poll()).

      +
      Warning
      It is not permitted to call rd_kafka_poll() after directing the main queue with rd_kafka_poll_set_consumer().
      + +
      +
      Parameters
      +
rk \FFI\CData|null rd_kafka_t*
      +
      Returns
      +
      int rd_kafka_resp_err_t
      +
      + +
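A minimal sketch of a consumer loop relying on the redirected main queue, so a single rd_kafka_consumer_poll() call serves messages and callbacks alike. The binding class name and the $running flag are placeholders, and rd_kafka_consumer_poll()/rd_kafka_message_destroy() are standard librdkafka calls assumed to be exposed the same way:

use RdKafka\FFI\Library;

// Redirect the main queue once, right after creating the consumer;
// rd_kafka_poll() must not be called afterwards (see the warning above).
Library::rd_kafka_poll_set_consumer($rk);

while ($running) {
    $msg = Library::rd_kafka_consumer_poll($rk, 1000);
    if ($msg !== null) {
        // handle the message or error, then release it
        Library::rd_kafka_message_destroy($msg);
    }
}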

      rd_kafka_position()

      +
      public static rd_kafka_position ( 
      +    \FFI\CData|null $rk, 
      +    \FFI\CData|null $partitions
      + ): int
      +
      +

      Retrieve current positions (offsets) for topics+partitions.

      +

      The offset field of each requested partition will be set to the offset of the last consumed message + 1, or RD_KAFKA_OFFSET_INVALID in case there was no previous message.

      +
      Remarks
      In this context the last consumed message is the offset consumed by the current librdkafka instance and, in case of rebalancing, not necessarily the last message fetched from the partition.
      + +
      +
      Parameters
      +
      rk \FFI\CData|null rd_kafka_t*
      +
      partitions \FFI\CData|null rd_kafka_topic_partition_list_t*
      +
      Returns
      +
      int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success in which case the offset or err field of each partitions’ element is filled in with the stored offset, or a partition specific error. Else returns an error code.
      +
      + +

      rd_kafka_produce()

      +
      public static rd_kafka_produce ( 
      +    \FFI\CData|null $rkt, 
      +    int|null $partition, 
      +    int|null $msgflags, 
      +    \FFI\CData|object|string|null $payload, 
      +    int|null $len, 
      +    \FFI\CData|object|string|null $key, 
      +    int|null $keylen, 
      +    \FFI\CData|object|string|null $msg_opaque
      + ): int|null
      +
      +

      Produce and send a single message to broker.

      +

      rkt is the target topic which must have been previously created with rd_kafka_topic_new().

      +

rd_kafka_produce() is an asynchronous non-blocking API. See rd_kafka_conf_set_dr_msg_cb on how to set up a callback to be called once the delivery status (success or failure) is known. The delivery report is triggered by the application calling rd_kafka_poll() (at regular intervals) or rd_kafka_flush() (at termination).

      +

      Since producing is asynchronous, you should call rd_kafka_flush() before you destroy the producer. Otherwise, any outstanding messages will be silently discarded.

      +

      When temporary errors occur, librdkafka automatically retries to produce the messages. Retries are triggered after retry.backoff.ms and when the leader broker for the given partition is available. Otherwise, librdkafka falls back to polling the topic metadata to monitor when a new leader is elected (see the topic.metadata.refresh.fast.interval.ms and topic.metadata.refresh.interval.ms configurations) and then performs a retry. A delivery error will occur if the message could not be produced within message.timeout.ms.

      +

      See the "Message reliability" chapter in INTRODUCTION.md for more information.

      +

      partition is the target partition, either:

      +
        +
      • RD_KAFKA_PARTITION_UA (unassigned) for automatic partitioning using the topic's partitioner function, or
      • +
      • a fixed partition (0..N)
      • +
      +

      msgflags is zero or more of the following flags OR:ed together: RD_KAFKA_MSG_F_BLOCK - block produce*() call if queue.buffering.max.messages or queue.buffering.max.kbytes are exceeded. Messages are considered in-queue from the point they are accepted by produce() until their corresponding delivery report callback/event returns. It is thus a requirement to call rd_kafka_poll() (or equiv.) from a separate thread when F_BLOCK is used. See WARNING on RD_KAFKA_MSG_F_BLOCK above.

      +

      RD_KAFKA_MSG_F_FREE - rdkafka will free(3) payload when it is done with it. RD_KAFKA_MSG_F_COPY - the payload data will be copied and the payload pointer will not be used by rdkafka after the call returns. RD_KAFKA_MSG_F_PARTITION - produce_batch() will honour per-message partition, either set manually or by the configured partitioner.

      +

      .._F_FREE and .._F_COPY are mutually exclusive. If neither of these are set, the caller must ensure that the memory backing payload remains valid and is not modified or reused until the delivery callback is invoked. Other buffers passed to rd_kafka_produce() don't have this restriction on reuse, i.e. the memory backing the key or the topic name may be reused as soon as rd_kafka_produce() returns.

      +

      If the function returns -1 and RD_KAFKA_MSG_F_FREE was specified, then the memory associated with the payload is still the caller's responsibility.

      +

      payload is the message payload of size len bytes.

      +

      key is an optional message key of size keylen bytes, if non-NULL it will be passed to the topic partitioner as well as be sent with the message to the broker and passed on to the consumer.

      +

msg_opaque is an optional application-provided per-message opaque pointer that will be provided in the message's delivery report callback (dr_msg_cb or dr_cb) and the rd_kafka_message_t _private field.

      +
      Remarks
      on_send() and on_acknowledgement() interceptors may be called from this function. on_acknowledgement() will only be called if the message fails partitioning.
      +
      +If the producer is transactional (transactional.id is configured) producing is only allowed during an on-going transaction, namely after rd_kafka_begin_transaction() has been called.
      + +
      See also
      Use rd_kafka_errno2err() to convert errno to rdkafka error code.
      + +
      +
      Parameters
      +
      rkt \FFI\CData|null rd_kafka_topic_t*
      +
      partition int|null int32_t
      +
      msgflags int|null int
      +
      payload \FFI\CData|object|string|null void*
      +
      len int|null size_t
      +
      key \FFI\CData|object|string|null const void*
      +
      keylen int|null size_t
      +
      msg_opaque \FFI\CData|object|string|null void*
      +
      Returns
      +
      int|null int - 0 on success or -1 on error in which case errno is set accordingly:
        +
        +
      • ENOBUFS - maximum number of outstanding messages has been reached: "queue.buffering.max.messages" (RD_KAFKA_RESP_ERR__QUEUE_FULL)
      • +
• EMSGSIZE - message is larger than configured max size: "message.max.bytes". (RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE)
      • +
      • ESRCH - requested partition is unknown in the Kafka cluster. (RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
      • +
      • ENOENT - topic is unknown in the Kafka cluster. (RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
      • +
      • ECANCELED - fatal error has been raised on producer, see rd_kafka_fatal_error(), (RD_KAFKA_RESP_ERR__FATAL).
      • +
      • ENOEXEC - transactional state forbids producing (RD_KAFKA_RESP_ERR__STATE)
      • + + +
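A minimal sketch of producing a single keyed message with the copy flag and then serving delivery reports; the binding class name is an assumption and $rkt is a topic handle created with rd_kafka_topic_new():

use RdKafka\FFI\Library;

$payload = json_encode(['id' => 4711]);
$key     = 'order-4711';

// RD_KAFKA_PARTITION_UA (-1) lets the topic's partitioner pick the partition;
// RD_KAFKA_MSG_F_COPY (0x2) makes librdkafka copy the payload immediately.
$rc = Library::rd_kafka_produce(
    $rkt,
    -1,       // RD_KAFKA_PARTITION_UA
    0x2,      // RD_KAFKA_MSG_F_COPY
    $payload, strlen($payload),
    $key, strlen($key),
    null      // msg_opaque
);
if ($rc === -1) {
    // inspect errno / rd_kafka_errno2err() as described above
}

// Serve delivery report callbacks at regular intervals ...
Library::rd_kafka_poll($rk, 0);
// ... and drain them before destroying the producer.
Library::rd_kafka_flush($rk, 10000);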

        rd_kafka_produce_batch()

        +
        public static rd_kafka_produce_batch ( 
        +    \FFI\CData|null $rkt, 
        +    int|null $partition, 
        +    int|null $msgflags, 
        +    \FFI\CData|null $rkmessages, 
        +    int|null $message_cnt
        + ): int|null
        +
        +

        Produce multiple messages.

        +

        If partition is RD_KAFKA_PARTITION_UA the configured partitioner will be run for each message (slower), otherwise the messages will be enqueued to the specified partition directly (faster).

        +

        The messages are provided in the array rkmessages of count message_cnt elements. The partition and msgflags are used for all provided messages.

        +

        Honoured rkmessages[] fields are:

        +
          +
        • payload,len Message payload and length
        • +
        • key,key_len Optional message key
        • +
        • _private Message opaque pointer (msg_opaque)
        • +
        • err Will be set according to success or failure, see rd_kafka_produce() for possible error codes. Application only needs to check for errors if return value != message_cnt.
        • +
        +
        Remarks
        If RD_KAFKA_MSG_F_PARTITION is set in msgflags, the .partition field of the rkmessages is used instead of partition.
        + +
        Remarks
        This interface does NOT support setting message headers on the provided rkmessages.
        + +
        +
        Parameters
        +
        rkt \FFI\CData|null rd_kafka_topic_t*
        +
        partition int|null int32_t
        +
        msgflags int|null int
        +
        rkmessages \FFI\CData|null rd_kafka_message_t*
        +
        message_cnt int|null int
        +
        Returns
        +
int|null int - the number of messages successfully enqueued for producing.
        +
        + +

        rd_kafka_producev()

        +
        public static rd_kafka_producev ( 
        +    \FFI\CData|null $rk, 
        +    mixed $args
        + ): int
        +
        +

        Produce and send a single message to broker.

        +

        The message is defined by a va-arg list using rd_kafka_vtype_t tag tuples which must be terminated with a single RD_KAFKA_V_END.

        + +
        See also
        rd_kafka_produce, rd_kafka_produceva, RD_KAFKA_V_END
        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t*
        +
        args mixed
        +
        Returns
        +
        int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success, else an error code as described in rd_kafka_produce(). RD_KAFKA_RESP_ERR__CONFLICT is returned if _V_HEADER and _V_HEADERS are mixed.
        +
        + +

        rd_kafka_produceva()

        +
        public static rd_kafka_produceva ( 
        +    \FFI\CData|null $rk, 
        +    \FFI\CData|null $vus, 
        +    int|null $cnt
        + ): \FFI\CData|null
        +
        +

        Produce and send a single message to broker.

        +

        The message is defined by an array of rd_kafka_vu_t of count cnt.

        + +
        See also
        rd_kafka_produce, rd_kafka_producev, RD_KAFKA_V_END
        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t*
        +
        vus \FFI\CData|null const rd_kafka_vu_t*
        +
        cnt int|null size_t
        +
        Returns
        +
        \FFI\CData|null rd_kafka_error_t* - an error object on failure or NULL on success. See rd_kafka_producev() for specific error codes.
        +
        + +

        rd_kafka_purge()

        +
        public static rd_kafka_purge ( 
        +    \FFI\CData|null $rk, 
        +    int|null $purge_flags
        + ): int
        +
        +

        Purge messages currently handled by the producer instance.

        + +

        The application will need to call rd_kafka_poll() or rd_kafka_flush() afterwards to serve the delivery report callbacks of the purged messages.

        +

        Messages purged from internal queues fail with the delivery report error code set to RD_KAFKA_RESP_ERR__PURGE_QUEUE, while purged messages that are in-flight to or from the broker will fail with the error code set to RD_KAFKA_RESP_ERR__PURGE_INFLIGHT.

        +
        Warning
        Purging messages that are in-flight to or from the broker will ignore any subsequent acknowledgement for these messages received from the broker, effectively making it impossible for the application to know if the messages were successfully produced or not. This may result in duplicate messages if the application retries these messages at a later time.
        +
        Remarks
        This call may block for a short time while background thread queues are purged.
        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t* - Client instance.
        +
        purge_flags int|null int - Tells which messages to purge and how.
        +
        Returns
        +
        int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success, RD_KAFKA_RESP_ERR__INVALID_ARG if the purge flags are invalid or unknown, RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED if called on a non-producer client instance.
        +
        + +
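A minimal sketch of purging and then serving the resulting delivery reports; the flag values are librdkafka's RD_KAFKA_PURGE_F_* constants and the binding class name is assumed:

use RdKafka\FFI\Library;

// Purge both queued (0x1, RD_KAFKA_PURGE_F_QUEUE) and in-flight
// (0x2, RD_KAFKA_PURGE_F_INFLIGHT) messages.
$err = Library::rd_kafka_purge($rk, 0x1 | 0x2);

// The purged messages now fail with _PURGE_QUEUE / _PURGE_INFLIGHT;
// serve their delivery report callbacks.
Library::rd_kafka_poll($rk, 0);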

        rd_kafka_query_watermark_offsets()

        +
        public static rd_kafka_query_watermark_offsets ( 
        +    \FFI\CData|null $rk, 
        +    string|null $topic, 
        +    int|null $partition, 
        +    \FFI\CData|null $low, 
        +    \FFI\CData|null $high, 
        +    int|null $timeout_ms
        + ): int
        +
        +

        Query broker for low (oldest/beginning) and high (newest/end) offsets for partition.

        +

        Offsets are returned in *low and *high respectively.

        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t*
        +
        topic string|null const char*
        +
        partition int|null int32_t
        +
        low \FFI\CData|null int64_t*
        +
        high \FFI\CData|null int64_t*
        +
        timeout_ms int|null int
        +
        Returns
        +
        int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on failure.
        +
        + +
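A minimal sketch using PHP FFI scalars as the int64_t* output parameters (binding class name assumed; topic and partition are placeholders):

use RdKafka\FFI\Library;

$low  = \FFI::new('int64_t');
$high = \FFI::new('int64_t');

$err = Library::rd_kafka_query_watermark_offsets(
    $rk,
    'orders', 0,
    \FFI::addr($low), \FFI::addr($high),
    5000 // timeout_ms
);

if ($err === 0 /* RD_KAFKA_RESP_ERR_NO_ERROR */) {
    // Number of messages currently retained in orders/0.
    $retained = $high->cdata - $low->cdata;
}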

        rd_kafka_queue_cb_event_enable()

        +
        public static rd_kafka_queue_cb_event_enable ( 
        +    \FFI\CData|null $rkqu, 
        +    \FFI\CData|\Closure $event_cb, 
        +    \FFI\CData|object|string|null $opaque
        + ): void
        +
        +

        Enable callback event triggering for queue.

        +

        The callback will be called from an internal librdkafka thread when a new element is enqueued on a previously empty queue.

        +

        To remove event triggering call with event_cb = NULL.

        +

        The qev_opaque is passed to the callback's qev_opaque argument.

        +
        Remarks
        IO and callback event triggering are mutually exclusive.
        +
+Since the callback may be triggered from internal librdkafka threads, the application must not perform any prolonged work in the callback, or call any librdkafka APIs (for the same rd_kafka_t handle).
        + +
        +
        Parameters
        +
        rkqu \FFI\CData|null rd_kafka_queue_t*
        +
event_cb \FFI\CData|\Closure void(*)(rd_kafka_t* rk, void* qev_opaque)
        +
        opaque \FFI\CData|object|string|null void*
        +
        + +

        rd_kafka_queue_destroy()

        +
        public static rd_kafka_queue_destroy ( 
        +    \FFI\CData|null $rkqu
        + ): void
        +
        +

        Destroy a queue, purging all of its enqueued messages.

        + +
        +
        Parameters
        +
rkqu \FFI\CData|null rd_kafka_queue_t*
        +
        + +

        rd_kafka_queue_forward()

        +
        public static rd_kafka_queue_forward ( 
        +    \FFI\CData|null $src, 
        +    \FFI\CData|null $dst
        + ): void
        +
        +

        Forward/re-route queue src to dst. If dst is NULL the forwarding is removed.

        +

        The internal refcounts for both queues are increased.

        +
        Remarks
Regardless of whether dst is NULL or not, after calling this function, src will not forward its fetch queue to the consumer queue.
        + +
        +
        Parameters
        +
        src \FFI\CData|null rd_kafka_queue_t*
        +
        dst \FFI\CData|null rd_kafka_queue_t*
        +
        + +

        rd_kafka_queue_get_background()

        +
        public static rd_kafka_queue_get_background ( 
        +    \FFI\CData|null $rk
        + ): \FFI\CData|null
        +
        +

The background thread queue provides the application with an automatically polled queue that triggers the event callback in a background thread; this background thread is completely managed by librdkafka.

        +

        The background thread queue is automatically created if a generic event handler callback is configured with rd_kafka_conf_set_background_event_cb() or if rd_kafka_queue_get_background() is called.

        +

The background queue is polled and served by librdkafka and MUST NOT be polled, forwarded, or otherwise managed by the application; it may only be used as the destination queue passed to queue-enabled APIs, such as the Admin API.

        +

Use rd_kafka_queue_destroy() to lose the reference.

        +
        Warning
        The background queue MUST NOT be read from (polled, consumed, etc), or forwarded from.
        + +
        +
        Parameters
        +
rk \FFI\CData|null rd_kafka_t*
        +
        Returns
        +
        \FFI\CData|null rd_kafka_queue_t* - a reference to the background thread queue, or NULL if the background queue is not enabled.
        +
        + +

        rd_kafka_queue_get_consumer()

        +
        public static rd_kafka_queue_get_consumer ( 
        +    \FFI\CData|null $rk
        + ): \FFI\CData|null
        +
        +

Use rd_kafka_queue_destroy() to lose the reference.

        +
        Remarks
        rd_kafka_queue_destroy() MUST be called on this queue prior to calling rd_kafka_consumer_close().
        +
        +Polling the returned queue counts as a consumer poll, and will reset the timer for max.poll.interval.ms. If this queue is forwarded to a "destq", polling destq also counts as a consumer poll (this works for any number of forwards). However, even if this queue is unforwarded or forwarded elsewhere, polling destq will continue to count as a consumer poll.
        + +
        +
        Parameters
        +
rk \FFI\CData|null rd_kafka_t*
        +
        Returns
        +
        \FFI\CData|null rd_kafka_queue_t* - a reference to the librdkafka consumer queue. This is the queue served by rd_kafka_consumer_poll().
        +
        + +

        rd_kafka_queue_get_main()

        +
        public static rd_kafka_queue_get_main ( 
        +    \FFI\CData|null $rk
        + ): \FFI\CData|null
        +
        +

Use rd_kafka_queue_destroy() to lose the reference.

        + +
        +
        Parameters
        +
rk \FFI\CData|null rd_kafka_t*
        +
        Returns
        +
        \FFI\CData|null rd_kafka_queue_t* - a reference to the main librdkafka event queue. This is the queue served by rd_kafka_poll().
        +
        + +

        rd_kafka_queue_get_partition()

        +
        public static rd_kafka_queue_get_partition ( 
        +    \FFI\CData|null $rk, 
        +    string|null $topic, 
        +    int|null $partition
        + ): \FFI\CData|null
        +
        +

Use rd_kafka_queue_destroy() to lose the reference.

        +
        Remarks
        rd_kafka_queue_destroy() MUST be called on this queue
        +
        +This function only works on consumers.
        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t*
        +
        topic string|null const char*
        +
        partition int|null int32_t
        +
        Returns
        +
        \FFI\CData|null rd_kafka_queue_t* - a reference to the partition’s queue, or NULL if partition is invalid.
        +
        + +

        rd_kafka_queue_get_sasl()

        +
        public static rd_kafka_queue_get_sasl ( 
        +    \FFI\CData|null $rk
        + ): \FFI\CData|null
        +
        +

Use rd_kafka_queue_destroy() to lose the reference.

        +
        See also
        rd_kafka_sasl_background_callbacks_enable()
        + +
        +
        Parameters
        +
rk \FFI\CData|null rd_kafka_t*
        +
        Returns
        +
        \FFI\CData|null rd_kafka_queue_t* - a reference to the SASL callback queue, if a SASL mechanism with callbacks is configured (currently only OAUTHBEARER), else returns NULL.
        +
        + +

        rd_kafka_queue_io_event_enable()

        +
        public static rd_kafka_queue_io_event_enable ( 
        +    \FFI\CData|null $rkqu, 
        +    int|null $fd, 
        +    \FFI\CData|object|string|null $payload, 
        +    int|null $size
        + ): void
        +
        +

        Enable IO event triggering for queue.

        +

        To ease integration with IO based polling loops this API allows an application to create a separate file-descriptor that librdkafka will write payload (of size size) to whenever a new element is enqueued on a previously empty queue.

        +

        To remove event triggering call with fd = -1.

        +

        librdkafka will maintain a copy of the payload.

        +
        Remarks
        IO and callback event triggering are mutually exclusive.
        +
        +When using forwarded queues the IO event must only be enabled on the final forwarded-to (destination) queue.
        +
        +The file-descriptor/socket must be set to non-blocking.
        + +
        +
        Parameters
        +
        rkqu \FFI\CData|null rd_kafka_queue_t*
        +
        fd int|null int
        +
        payload \FFI\CData|object|string|null const void*
        +
        size int|null size_t
        +
        + +

        rd_kafka_queue_length()

        +
        public static rd_kafka_queue_length ( 
        +    \FFI\CData|null $rkqu
        + ): int|null
        +
        +
        +
        Parameters
        +
rkqu \FFI\CData|null rd_kafka_queue_t*
        +
        Returns
        +
        int|null size_t - the current number of elements in queue.
        +
        + +

        rd_kafka_queue_new()

        +
        public static rd_kafka_queue_new ( 
        +    \FFI\CData|null $rk
        + ): \FFI\CData|null
        +
        +

        Create a new message queue.

        +

        See rd_kafka_consume_start_queue(), rd_kafka_consume_queue(), et.al.

        + +
        +
        Parameters
        +
rk \FFI\CData|null rd_kafka_t*
        +
        Returns
        +
        \FFI\CData|null rd_kafka_queue_t*
        +
        + +

        rd_kafka_queue_poll()

        +
        public static rd_kafka_queue_poll ( 
        +    \FFI\CData|null $rkqu, 
        +    int|null $timeout_ms
        + ): \FFI\CData|null
        +
        +

        Poll a queue for an event for max timeout_ms.

        + +
        Remarks
        Use rd_kafka_event_destroy() to free the event.
        +
        See also
        rd_kafka_conf_set_background_event_cb()
        + +
        +
        Parameters
        +
        rkqu \FFI\CData|null rd_kafka_queue_t*
        +
        timeout_ms int|null int
        +
        Returns
        +
        \FFI\CData|null rd_kafka_event_t* - an event, or NULL.
        +
        + +
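A minimal sketch of polling the main event queue directly instead of calling rd_kafka_poll() (binding class name assumed; event-type-specific accessors are not shown):

use RdKafka\FFI\Library;

$queue = Library::rd_kafka_queue_get_main($rk);

$event = Library::rd_kafka_queue_poll($queue, 1000);
if ($event !== null) {
    // inspect the event, then free it as noted in the Remarks above
    Library::rd_kafka_event_destroy($event);
}

Library::rd_kafka_queue_destroy($queue); // lose the queue reference when done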

        rd_kafka_queue_poll_callback()

        +
        public static rd_kafka_queue_poll_callback ( 
        +    \FFI\CData|null $rkqu, 
        +    int|null $timeout_ms
        + ): int|null
        +
        +

        Poll a queue for events served through callbacks for max timeout_ms.

        + +
        Remarks
        This API must only be used for queues with callbacks registered for all expected event types. E.g., not a message queue.
        +
        +Also see rd_kafka_conf_set_background_event_cb() for triggering event callbacks from a librdkafka-managed background thread.
        +
        See also
        rd_kafka_conf_set_background_event_cb()
        + +
        +
        Parameters
        +
        rkqu \FFI\CData|null rd_kafka_queue_t*
        +
        timeout_ms int|null int
        +
        Returns
        +
        int|null int - the number of events served.
        +
        + +

        rd_kafka_queue_yield()

        +
        public static rd_kafka_queue_yield ( 
        +    \FFI\CData|null $rkqu
        + ): void
        +
        +

        Cancels the current rd_kafka_queue_poll() on rkqu.

        +

        An application may use this from another thread to force an immediate return to the calling code (caller of rd_kafka_queue_poll()). Must not be used from signal handlers since that may cause deadlocks.

        + +
        +
        Parameters
        +
rkqu \FFI\CData|null rd_kafka_queue_t*
        +
        + +

        rd_kafka_rebalance_protocol()

        +
        public static rd_kafka_rebalance_protocol ( 
        +    \FFI\CData|null $rk
        + ): string|null
        +
        +

The rebalance protocol currently in use. This will be "NONE" if the consumer has not (yet) joined a group, else it will match the rebalance protocol ("EAGER", "COOPERATIVE") of the configured and selected assignor(s). All configured assignors must have the same protocol type, meaning online migration of a consumer group from using one protocol to another (in particular upgrading from EAGER to COOPERATIVE) without a restart is not currently supported.

        + +
        +
        Parameters
        +
rk \FFI\CData|null rd_kafka_t*
        +
        Returns
        +
        string|null const char* - NULL on error, or one of “NONE”, “EAGER”, “COOPERATIVE” on success.
        +
        + +

        rd_kafka_resume_partitions()

        +
        public static rd_kafka_resume_partitions ( 
        +    \FFI\CData|null $rk, 
        +    \FFI\CData|null $partitions
        + ): int
        +
        +

Resume producing or consumption for the provided list of partitions.

        +

        Success or error is returned per-partition err in the partitions list.

        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t*
        +
        partitions \FFI\CData|null rd_kafka_topic_partition_list_t*
        +
        Returns
        +
        int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR
        +
        + +

        rd_kafka_sasl_background_callbacks_enable()

        +
        public static rd_kafka_sasl_background_callbacks_enable ( 
        +    \FFI\CData|null $rk
        + ): \FFI\CData|null
        +
        +

        Enable SASL OAUTHBEARER refresh callbacks on the librdkafka background thread.

        +

This serves as an alternative for applications that do not call rd_kafka_poll() (et.al.) at regular intervals (or not at all), as a means of automatically triggering the refresh callbacks, which are needed to initiate connections to the brokers in case a custom OAUTHBEARER refresh callback is configured.

        + +
        See also
        rd_kafka_queue_get_sasl()
        +
        +rd_kafka_conf_set_oauthbearer_token_refresh_cb()
        + +
        +
        Parameters
        +
rk \FFI\CData|null rd_kafka_t*
        +
        Returns
        +
        \FFI\CData|null rd_kafka_error_t* - NULL on success or an error object on error.
        +
        + +

        rd_kafka_sasl_set_credentials()

        +
        public static rd_kafka_sasl_set_credentials ( 
        +    \FFI\CData|null $rk, 
        +    string|null $username, 
        +    string|null $password
        + ): \FFI\CData|null
        +
        +

        Sets SASL credentials used for SASL PLAIN and SCRAM mechanisms by this Kafka client.

        +

        This function sets or resets the SASL username and password credentials used by this Kafka client. The new credentials will be used the next time this client needs to authenticate to a broker. This function will not disconnect existing connections that might have been made using the old credentials.

        +
        Remarks
        This function only applies to the SASL PLAIN and SCRAM mechanisms.
        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t*
        +
        username string|null const char*
        +
        password string|null const char*
        +
        Returns
        +
        \FFI\CData|null rd_kafka_error_t* - NULL on success or an error object on error.
        +
        + +
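A minimal sketch of rotating SASL PLAIN/SCRAM credentials at runtime (binding class name and credential values are placeholders):

use RdKafka\FFI\Library;

// The new credentials apply to the next authentication attempt;
// existing connections are not disconnected.
$error = Library::rd_kafka_sasl_set_credentials($rk, 'svc-orders', $newPassword);
if ($error !== null) {
    // rd_kafka_error_t* describing why the credentials were rejected
}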

        rd_kafka_seek()

        +
        public static rd_kafka_seek ( 
        +    \FFI\CData|null $rkt, 
        +    int|null $partition, 
        +    int|null $offset, 
        +    int|null $timeout_ms
        + ): int
        +
        +

        Seek consumer for topic+partition to offset which is either an absolute or logical offset.

        +

        If timeout_ms is specified (not 0) the seek call will wait this long for the consumer to update its fetcher state for the given partition with the new offset. This guarantees that no previously fetched messages for the old offset (or fetch position) will be passed to the application.

        +

        If the timeout is reached the internal state will be unknown to the caller and this function returns RD_KAFKA_RESP_ERR__TIMED_OUT.

        +

        If timeout_ms is 0 it will initiate the seek but return immediately without any error reporting (e.g., async).

        +

        This call will purge all pre-fetched messages for the given partition, which may be up to queued.max.message.kbytes in size. Repeated use of seek may thus lead to increased network usage as messages are re-fetched from the broker.

        +
        Remarks
Seek must only be performed for already assigned/consumed partitions; use rd_kafka_assign() (et.al) to set the initial starting offset for a new assignment.
        + +
        Deprecated:
        Use rd_kafka_seek_partitions().
        + +
        +
        Parameters
        +
        rkt \FFI\CData|null rd_kafka_topic_t*
        +
        partition int|null int32_t
        +
        offset int|null int64_t
        +
        timeout_ms int|null int
        +
        Returns
        +
        int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR__NO_ERROR on success else an error code.
        +
        + +

        rd_kafka_seek_partitions()

        +
        public static rd_kafka_seek_partitions ( 
        +    \FFI\CData|null $rk, 
        +    \FFI\CData|null $partitions, 
        +    int|null $timeout_ms
        + ): \FFI\CData|null
        +
        +

        Seek consumer for partitions in partitions to the per-partition offset in the .offset field of partitions.

        +

        The offset may be either absolute (>= 0) or a logical offset.

        +

        If timeout_ms is specified (not 0) the seek call will wait this long for the consumer to update its fetcher state for the given partition with the new offset. This guarantees that no previously fetched messages for the old offset (or fetch position) will be passed to the application.

        +

        If the timeout is reached the internal state will be unknown to the caller and this function returns RD_KAFKA_RESP_ERR__TIMED_OUT.

        +

        If timeout_ms is 0 it will initiate the seek but return immediately without any error reporting (e.g., async).

        +

        This call will purge all pre-fetched messages for the given partition, which may be up to queued.max.message.kbytes in size. Repeated use of seek may thus lead to increased network usage as messages are re-fetched from the broker.

        +

        Individual partition errors are reported in the per-partition .err field of partitions.

        +
        Remarks
Seek must only be performed for already assigned/consumed partitions; use rd_kafka_assign() (et.al) to set the initial starting offset for a new assignment.
        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t*
        +
        partitions \FFI\CData|null rd_kafka_topic_partition_list_t*
        +
        timeout_ms int|null int
        +
        Returns
        +
        \FFI\CData|null rd_kafka_error_t* - NULL on success or an error object on failure.
        +
        + +

        rd_kafka_send_offsets_to_transaction()

        +
        public static rd_kafka_send_offsets_to_transaction ( 
        +    \FFI\CData|null $rk, 
        +    \FFI\CData|null $offsets, 
        +    \FFI\CData|null $cgmetadata, 
        +    int|null $timeout_ms
        + ): \FFI\CData|null
        +
        +

Sends a list of topic partition offsets to the consumer group coordinator for cgmetadata, and marks the offsets as part of the current transaction. These offsets will be considered committed only if the transaction is committed successfully.

        +

        The offsets should be the next message your application will consume, i.e., the last processed message's offset + 1 for each partition. Either track the offsets manually during processing or use rd_kafka_position() (on the consumer) to get the current offsets for the partitions assigned to the consumer.

        +

        Use this method at the end of a consume-transform-produce loop prior to committing the transaction with rd_kafka_commit_transaction().

        + +
        Remarks
        This function must be called on the transactional producer instance, not the consumer.
        +
        +The consumer must disable auto commits (set enable.auto.commit to false on the consumer).
        +
+Logical and invalid offsets (such as RD_KAFKA_OFFSET_INVALID) in offsets will be ignored; if there are no valid offsets in offsets, the function will return NULL and no action will be taken.
        +
        +This call is retriable but not resumable, which means a new request with a new set of provided offsets and group metadata will be sent to the transaction coordinator if the call is retried.
        +
        +It is highly recommended to retry the call (upon retriable error) with identical offsets and cgmetadata parameters. Failure to do so risks inconsistent state between what is actually included in the transaction and what the application thinks is included in the transaction.
        + +
        Remarks
        The returned error object (if not NULL) must be destroyed with rd_kafka_error_destroy().
        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t* - Producer instance.
        +
        offsets \FFI\CData|null const rd_kafka_topic_partition_list_t* - List of offsets to commit to the consumer group upon successful commit of the transaction. Offsets should be the next message to consume, e.g., last processed message + 1.
        +
        cgmetadata \FFI\CData|null const rd_kafka_consumer_group_metadata_t* - The current consumer group metadata as returned by rd_kafka_consumer_group_metadata() on the consumer instance the provided offsets were consumed from.
        +
        timeout_ms int|null int - Maximum time allowed to register the offsets on the broker.
        +
        Returns
        +
        \FFI\CData|null rd_kafka_error_t* - NULL on success or an error object on failure. Check whether the returned error object permits retrying by calling rd_kafka_error_is_retriable(), or whether an abortable or fatal error has been raised by calling rd_kafka_error_txn_requires_abort() or rd_kafka_error_is_fatal() respectively. Error codes: RD_KAFKA_RESP_ERR__STATE if not currently in a transaction, RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH if the current producer transaction has been fenced by a newer producer instance, RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED if the producer is no longer authorized to perform transactional operations, RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED if the producer is not authorized to write the consumer offsets to the group coordinator, RD_KAFKA_RESP_ERR__NOT_CONFIGURED if transactions have not been configured for the producer instance, RD_KAFKA_RESP_ERR__INVALID_ARG if rk is not a producer instance, or if the consumer_group_id or offsets are empty. Other error codes not listed here may be returned, depending on broker version.
        +
        + +
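A hedged sketch of the end of a consume-transform-produce cycle, assuming the binding class \RdKafka\FFI\Library; $producer_rk and $consumer_rk are hypothetical rd_kafka_t* handles and $lastProcessedOffset is the offset of the last message the application processed:

```php
<?php
// Sketch only: variable names and the 10000 ms timeouts are illustrative assumptions.
use RdKafka\FFI\Library;

// Offsets sent to the transaction must be the *next* message to consume (last processed + 1).
$offsets = Library::rd_kafka_topic_partition_list_new(1);
Library::rd_kafka_topic_partition_list_add($offsets, 'my-topic', 0);
Library::rd_kafka_topic_partition_list_set_offset($offsets, 'my-topic', 0, $lastProcessedOffset + 1);

// Group metadata must come from the consumer instance the offsets were consumed from.
$cgmetadata = Library::rd_kafka_consumer_group_metadata($consumer_rk);

$error = Library::rd_kafka_send_offsets_to_transaction($producer_rk, $offsets, $cgmetadata, 10000);
if ($error !== null) {
    // Decide between retrying, aborting or treating the error as fatal.
    if (Library::rd_kafka_error_txn_requires_abort($error)) {
        Library::rd_kafka_abort_transaction($producer_rk, 10000);
    }
    Library::rd_kafka_error_destroy($error);
} else {
    Library::rd_kafka_commit_transaction($producer_rk, 10000);
}

Library::rd_kafka_consumer_group_metadata_destroy($cgmetadata);
Library::rd_kafka_topic_partition_list_destroy($offsets);
```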

        rd_kafka_set_log_level()

        +
        public static rd_kafka_set_log_level ( 
        +    \FFI\CData|null $rk, 
        +    int|null $level
        + ): void
        +
        +

        Specifies the maximum logging level emitted by internal kafka logging and debugging.

        +
        Deprecated:
        Set the "log_level" configuration property instead.
        +
        Remarks
        If the "debug" configuration property is set the log level is automatically adjusted to LOG_DEBUG (7).
        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t*
        +
        level int|null int
        +
        + +

        rd_kafka_set_log_queue()

        +
        public static rd_kafka_set_log_queue ( 
        +    \FFI\CData|null $rk, 
        +    \FFI\CData|null $rkqu
        + ): int
        +
        +

        Forward librdkafka logs (and debug) to the specified queue for serving with one of the ..poll() calls.

        +

        This allows an application to serve log callbacks (log_cb) in its thread of choice.

        + +
        Remarks
        The configuration property log.queue MUST also be set to true.
        +
        +librdkafka maintains its own reference to the provided queue.
        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t* - Client instance.
        +
        rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to forward logs to. If the value is NULL the logs are forwarded to the main queue.
        +
        Returns
        +
        int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error, eg RD_KAFKA_RESP_ERR__NOT_CONFIGURED when log.queue is not set to true.
        +
        + +
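A minimal sketch of forwarding logs to a dedicated queue, assuming \RdKafka\FFI\Library as the binding class, an existing rd_kafka_t* handle $rk created with log.queue=true, and the RD_KAFKA_RESP_ERR_NO_ERROR constant being provided by the binding:

```php
<?php
// Sketch only: $rk is assumed to be a client instance configured with log.queue=true.
use RdKafka\FFI\Library;

$logQueue = Library::rd_kafka_queue_new($rk);

if (Library::rd_kafka_set_log_queue($rk, $logQueue) === RD_KAFKA_RESP_ERR_NO_ERROR) {
    // Serve the forwarded log events from the application's thread of choice.
    $event = Library::rd_kafka_queue_poll($logQueue, 100);
    if ($event !== null) {
        Library::rd_kafka_event_destroy($event);
    }
}

Library::rd_kafka_queue_destroy($logQueue);
```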

        rd_kafka_set_logger()

        +
        public static rd_kafka_set_logger ( 
        +    \FFI\CData|null $rk, 
        +    \FFI\CData|\Closure $func
        + ): void
        +
        +

        Set logger function.

        +

        The default is to print to stderr, but a syslog logger is also available, see rd_kafka_log_(print|syslog) for the builtin alternatives. Alternatively the application may provide its own logger callback. Or pass 'func' as NULL to disable logging.

        +
        Deprecated:
        Use rd_kafka_conf_set_log_cb()
        +
        Remarks
        rk may be passed as NULL in the callback.
        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t*
        +
func \FFI\CData|\Closure void(*)(const rd_kafka_t*, int, const char*, const char*)
        +
        + +

        rd_kafka_subscribe()

        +
        public static rd_kafka_subscribe ( 
        +    \FFI\CData|null $rk, 
        +    \FFI\CData|null $topics
        + ): int
        +
        +

        Subscribe to topic set using balanced consumer groups.

        +

        Wildcard (regex) topics are supported: any topic name in the topics list that is prefixed with "^" will be regex-matched to the full list of topics in the cluster and matching topics will be added to the subscription list.

        +

The full topic list is retrieved every topic.metadata.refresh.interval.ms to pick up newly created or deleted topics that match the subscription. If there is any change to the matched topics the consumer will immediately rejoin the group with the updated set of subscribed topics.

        +

        Regex and full topic names can be mixed in topics.

        +
        Remarks
        Only the .topic field is used in the supplied topics list, all other fields are ignored.
        +
        +subscribe() is an asynchronous method which returns immediately: background threads will (re)join the group, wait for group rebalance, issue any registered rebalance_cb, assign() the assigned partitions, and then start fetching messages. This cycle may take up to session.timeout.ms * 2 or more to complete.
        +
        +After this call returns a consumer error will be returned by rd_kafka_consumer_poll (et.al) for each unavailable topic in the topics. The error will be RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART for non-existent topics, and RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED for unauthorized topics. The consumer error will be raised through rd_kafka_consumer_poll() (et.al.) with the rd_kafka_message_t.err field set to one of the error codes mentioned above. The subscribe function itself is asynchronous and will not return an error on unavailable topics.
        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t*
        +
        topics \FFI\CData|null const rd_kafka_topic_partition_list_t*
        +
        Returns
        +
        int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__INVALID_ARG if list is empty, contains invalid topics or regexes or duplicate entries, RD_KAFKA_RESP_ERR__FATAL if the consumer has raised a fatal error.
        +
        + +
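A short sketch of a subscription mixing a literal topic and a regex, assuming \RdKafka\FFI\Library, an rd_kafka_t* consumer handle $rk, and the RD_KAFKA_PARTITION_UA and RD_KAFKA_RESP_ERR_NO_ERROR constants provided by the binding:

```php
<?php
// Sketch only: topic names are illustrative.
use RdKafka\FFI\Library;

$topics = Library::rd_kafka_topic_partition_list_new(2);
// Only the .topic field is used for subscriptions; the partition value is ignored.
Library::rd_kafka_topic_partition_list_add($topics, 'orders', RD_KAFKA_PARTITION_UA);
Library::rd_kafka_topic_partition_list_add($topics, '^metrics\..*', RD_KAFKA_PARTITION_UA);

$err = Library::rd_kafka_subscribe($rk, $topics);
Library::rd_kafka_topic_partition_list_destroy($topics);

if ($err !== RD_KAFKA_RESP_ERR_NO_ERROR) {
    echo Library::rd_kafka_err2str($err), PHP_EOL;
}
```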

        rd_kafka_subscription()

        +
        public static rd_kafka_subscription ( 
        +    \FFI\CData|null $rk, 
        +    \FFI\CData|null $topics
        + ): int
        +
        +

        Returns the current topic subscription.

        + +
        Remarks
        The application is responsible for calling rd_kafka_topic_partition_list_destroy on the returned list.
        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t*
        +
        topics \FFI\CData|null rd_kafka_topic_partition_list_t**
        +
        Returns
        +
int rd_kafka_resp_err_t - An error code on failure, otherwise topics is updated to point to a newly allocated topic list (possibly empty).
        +
        + +

        rd_kafka_test_fatal_error()

        +
        public static rd_kafka_test_fatal_error ( 
        +    \FFI\CData|null $rk, 
        +    int $err, 
        +    string|null $reason
        + ): int
        +
        +

        Trigger a fatal error for testing purposes.

        +

        Since there is no practical way to trigger real fatal errors in the idempotent producer, this method allows an application to trigger fabricated fatal errors in tests to check its error handling code.

        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t* - Client instance.
        +
        err int rd_kafka_resp_err_t - The underlying error code.
        +
        reason string|null const char* - A human readable error reason. Will be prefixed with “test_fatal_error: ” to differentiate from real fatal errors.
        +
        Returns
        +
        int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR if a fatal error was triggered, or RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS if a previous fatal error has already been triggered.
        +
        + +

        rd_kafka_thread_cnt()

        +
        public static rd_kafka_thread_cnt (  ): int|null
        +
        +

        Retrieve the current number of threads in use by librdkafka.

        +

        Used by regression tests.

        + +
        +
        Returns
        +
        int|null int - )
        +
        + +

        rd_kafka_topic_conf_destroy()

        +
        public static rd_kafka_topic_conf_destroy ( 
        +    \FFI\CData|null $topic_conf
        + ): void
        +
        +
        +
        Parameters
        +
        topic_conf \FFI\CData|null rd_kafka_topic_conf_t*
        +
        +

        rd_kafka_topic_conf_dump()

        +
        public static rd_kafka_topic_conf_dump ( 
        +    \FFI\CData|null $conf, 
        +    \FFI\CData|null $cntp
        + ): \FFI\CData|null
        +
        +

        Dump the topic configuration properties and values of conf to an array with "key", "value" pairs.

        +

        The number of entries in the array is returned in *cntp.

        +

        The dump must be freed with rd_kafka_conf_dump_free().

        + +
        +
        Parameters
        +
        conf \FFI\CData|null rd_kafka_topic_conf_t*
        +
        cntp \FFI\CData|null size_t*
        +
        Returns
        +
        \FFI\CData|null const char**
        +
        + +

        rd_kafka_topic_conf_dup()

        +
        public static rd_kafka_topic_conf_dup ( 
        +    \FFI\CData|null $conf
        + ): \FFI\CData|null
        +
        +
        +
        Parameters
        +
        conf \FFI\CData|null const rd_kafka_topic_conf_t*
        +
        Returns
        +
        \FFI\CData|null rd_kafka_topic_conf_t*
        +
        +

        rd_kafka_topic_conf_get()

        +
        public static rd_kafka_topic_conf_get ( 
        +    \FFI\CData|null $conf, 
        +    string|null $name, 
        +    \FFI\CData|null $dest, 
        +    \FFI\CData|null $dest_size
        + ): int
        +
        +

        Retrieve topic configuration value for property name.

        +
        See also
        rd_kafka_conf_get()
        + +
        +
        Parameters
        +
        conf \FFI\CData|null const rd_kafka_topic_conf_t*
        +
        name string|null const char*
        +
        dest \FFI\CData|null char*
        +
        dest_size \FFI\CData|null size_t*
        +
        Returns
        +
        int rd_kafka_conf_res_t
        +
        + +

        rd_kafka_topic_conf_new()

        +
        public static rd_kafka_topic_conf_new (  ): \FFI\CData|null
        +
        +

        Create topic configuration object.

        +
        See also
        Same semantics as for rd_kafka_conf_new().
        + +
        +
        Returns
        +
        \FFI\CData|null rd_kafka_topic_conf_t* - )
        +
        + +

        rd_kafka_topic_conf_set()

        +
        public static rd_kafka_topic_conf_set ( 
        +    \FFI\CData|null $conf, 
        +    string|null $name, 
        +    string|null $value, 
        +    \FFI\CData|null $errstr, 
        +    int|null $errstr_size
        + ): int
        +
        +

        Sets a single rd_kafka_topic_conf_t value by property name.

        +

        topic_conf should have been previously set up with rd_kafka_topic_conf_new().

        + +
        +
        Parameters
        +
        conf \FFI\CData|null rd_kafka_topic_conf_t*
        +
        name string|null const char*
        +
        value string|null const char*
        +
        errstr \FFI\CData|null char*
        +
        errstr_size int|null size_t
        +
        Returns
        +
        int rd_kafka_conf_res_t - rd_kafka_conf_res_t to indicate success or failure.
        +
        + +
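A minimal sketch of setting a topic configuration property, assuming \RdKafka\FFI\Library as the binding class and the RD_KAFKA_CONF_OK constant (mirroring the rd_kafka_conf_res_t enum) being provided by the binding:

```php
<?php
// Sketch only: the property name/value are illustrative.
use RdKafka\FFI\Library;

$topicConf = Library::rd_kafka_topic_conf_new();

// Error buffer written to by librdkafka on failure.
$errstr = \FFI::new('char[512]');
$result = Library::rd_kafka_topic_conf_set($topicConf, 'message.timeout.ms', '60000', $errstr, 512);

if ($result !== RD_KAFKA_CONF_OK) {
    // A human readable reason has been written into the error buffer.
    echo \FFI::string($errstr), PHP_EOL;
}
```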

        rd_kafka_topic_conf_set_msg_order_cmp()

        +
        public static rd_kafka_topic_conf_set_msg_order_cmp ( 
        +    \FFI\CData|null $topic_conf, 
        +    \FFI\CData|\Closure $msg_order_cmp
        + ): void
        +
        +

        Producer: Set message queueing order comparator callback.

        +

        The callback may be called in any thread at any time, it may be called multiple times for the same message.

        +

        Ordering comparator function constraints:

        +
          +
        • MUST be stable sort (same input gives same output).
        • +
        • MUST NOT call any rd_kafka_*() functions.
        • +
        • MUST NOT block or execute for prolonged periods of time.
        • +
        +

        The comparator shall compare the two messages and return:

        +
          +
        • < 0 if message a should be inserted before message b.
        • +
        • >=0 if message a should be inserted after message b.
        • +
        +
        Remarks
        Insert sorting will be used to enqueue the message in the correct queue position, this comes at a cost of O(n).
        +
        +If queuing.strategy=fifo new messages are enqueued to the tail of the queue regardless of msg_order_cmp, but retried messages are still affected by msg_order_cmp.
        +
        Warning
        THIS IS AN EXPERIMENTAL API, SUBJECT TO CHANGE OR REMOVAL, DO NOT USE IN PRODUCTION.
        + +
        +
        Parameters
        +
        topic_conf \FFI\CData|null rd_kafka_topic_conf_t*
        +
msg_order_cmp \FFI\CData|\Closure int(*)(const rd_kafka_message_t*, const rd_kafka_message_t*)
        +
        + +

        rd_kafka_topic_conf_set_opaque()

        +
        public static rd_kafka_topic_conf_set_opaque ( 
        +    \FFI\CData|null $conf, 
        +    \FFI\CData|object|string|null $opaque
        + ): void
        +
        +

        Sets the application's opaque pointer that will be passed to all topic callbacks as the rkt_opaque argument.

        +
        See also
        rd_kafka_topic_opaque()
        + +
        +
        Parameters
        +
        conf \FFI\CData|null rd_kafka_topic_conf_t*
        +
        opaque \FFI\CData|object|string|null void*
        +
        + +

        rd_kafka_topic_conf_set_partitioner_cb()

        +
        public static rd_kafka_topic_conf_set_partitioner_cb ( 
        +    \FFI\CData|null $topic_conf, 
        +    \FFI\CData|\Closure $partitioner
        + ): void
        +
        +

        Producer: Set partitioner callback in provided topic conf object.

        +

        The partitioner may be called in any thread at any time, it may be called multiple times for the same message/key.

        +

        The callback's rkt_opaque argument is the opaque set by rd_kafka_topic_conf_set_opaque(). The callback's msg_opaque argument is the per-message opaque passed to produce().

        +

        Partitioner function constraints:

        +
          +
        • MUST NOT call any rd_kafka_*() functions except: rd_kafka_topic_partition_available()
        • +
        • MUST NOT block or execute for prolonged periods of time.
        • +
        • MUST return a value between 0 and partition_cnt-1, or the special RD_KAFKA_PARTITION_UA value if partitioning could not be performed.
        • +
        + +
        +
        Parameters
        +
        topic_conf \FFI\CData|null rd_kafka_topic_conf_t*
        +
partitioner \FFI\CData|\Closure int32_t(*)(const rd_kafka_topic_t*, const void*, size_t, int32_t, void*, void*)
        +
        + +
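A deliberately simple (and hypothetical) partitioner callback as a sketch, assuming \RdKafka\FFI\Library, an existing topic conf object $topicConf, and the RD_KAFKA_PARTITION_UA constant from the binding:

```php
<?php
// Sketch only: a real partitioner would hash the key bytes; here the key length stands in.
use RdKafka\FFI\Library;

Library::rd_kafka_topic_conf_set_partitioner_cb(
    $topicConf,
    function ($rkt, $keydata, $keylen, $partition_cnt, $rkt_opaque, $msg_opaque): int {
        $partition = $partition_cnt > 0 ? $keylen % $partition_cnt : 0;

        // rd_kafka_topic_partition_available() is the only rd_kafka_*() call allowed here.
        return Library::rd_kafka_topic_partition_available($rkt, $partition) === 1
            ? $partition
            : RD_KAFKA_PARTITION_UA;
    }
);
```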

        rd_kafka_topic_destroy()

        +
        public static rd_kafka_topic_destroy ( 
        +    \FFI\CData|null $rkt
        + ): void
        +
        +

Release the application's topic handle refcount as previously created with rd_kafka_topic_new().

        +
        Remarks
        Since topic objects are refcounted (both internally and for the app) the topic object might not actually be destroyed by this call, but the application must consider the object destroyed.
        + +
        +
        Parameters
        +
        rkt \FFI\CData|null rd_kafka_topic_t* - )
        +
        + +

        rd_kafka_topic_name()

        +
        public static rd_kafka_topic_name ( 
        +    \FFI\CData|null $rkt
        + ): string|null
        +
        +
        +
        Parameters
        +
        rkt \FFI\CData|null const rd_kafka_topic_t*
        +
        Returns
        +
        string|null const char*
        +
        +

        rd_kafka_topic_new()

        +
        public static rd_kafka_topic_new ( 
        +    \FFI\CData|null $rk, 
        +    string|null $topic, 
        +    \FFI\CData|null $conf
        + ): \FFI\CData|null
        +
        +

        Creates a new topic handle for topic named topic.

        +

        conf is an optional configuration for the topic created with rd_kafka_topic_conf_new() that will be used instead of the default topic configuration. The conf object is freed by this function and must not be used or destroyed by the application subsequently. See rd_kafka_topic_conf_set() et.al for more information.

        +

Topic handles are refcounted internally and calling rd_kafka_topic_new() again with the same topic name will return the previous topic handle without updating the original handle's configuration. Applications must eventually call rd_kafka_topic_destroy() for each successful call to rd_kafka_topic_new() to clear up resources.

        + +
        See also
        rd_kafka_topic_destroy()
        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t*
        +
        topic string|null const char*
        +
        conf \FFI\CData|null rd_kafka_topic_conf_t*
        +
        Returns
        +
\FFI\CData|null rd_kafka_topic_t* - the new topic handle or NULL on error (use rd_kafka_errno2err() to convert system errno to an rd_kafka_resp_err_t error code).
        +
        + +
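A minimal sketch of creating and releasing a topic handle, assuming \RdKafka\FFI\Library and an existing rd_kafka_t* handle $rk:

```php
<?php
// Sketch only: the topic name is illustrative.
use RdKafka\FFI\Library;

// Passing null for the conf uses the default topic configuration; a conf created with
// rd_kafka_topic_conf_new() would be owned (and freed) by this call.
$rkt = Library::rd_kafka_topic_new($rk, 'orders', null);

if ($rkt === null) {
    echo Library::rd_kafka_err2str(Library::rd_kafka_last_error()), PHP_EOL;
} else {
    echo Library::rd_kafka_topic_name($rkt), PHP_EOL;
    Library::rd_kafka_topic_destroy($rkt); // release the application's refcount
}
```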

        rd_kafka_topic_opaque()

        +
        public static rd_kafka_topic_opaque ( 
        +    \FFI\CData|null $rkt
        + ): \FFI\CData|object|string|null
        +
        +
        +
        Parameters
        +
        rkt \FFI\CData|null const rd_kafka_topic_t*
        +
        Returns
        +
        \FFI\CData|object|string|null void*
        +
        +

        rd_kafka_topic_partition_available()

        +
        public static rd_kafka_topic_partition_available ( 
        +    \FFI\CData|null $rkt, 
        +    int|null $partition
        + ): int|null
        +
        +

        Check if partition is available (has a leader broker).

        + +
        Warning
        This function must only be called from inside a partitioner function
        + +
        +
        Parameters
        +
        rkt \FFI\CData|null const rd_kafka_topic_t*
        +
        partition int|null int32_t
        +
        Returns
        +
        int|null int - 1 if the partition is available, else 0.
        +
        + +

        rd_kafka_topic_partition_destroy()

        +
        public static rd_kafka_topic_partition_destroy ( 
        +    \FFI\CData|null $rktpar
        + ): void
        +
        +

        Destroy a rd_kafka_topic_partition_t.

        +
        Remarks
        This must not be called for elements in a topic partition list.
        + +
        +
        Parameters
        +
        rktpar \FFI\CData|null rd_kafka_topic_partition_t* - )
        +
        + +

        rd_kafka_topic_partition_get_leader_epoch()

        +
        public static rd_kafka_topic_partition_get_leader_epoch ( 
        +    \FFI\CData|null $rktpar
        + ): int|null
        +
        +
        Remarks
        See KIP-320 for more information.
        + +
        +
        Parameters
        +
        rktpar \FFI\CData|null const rd_kafka_topic_partition_t* - ) - Partition object.
        +
        Returns
        +
        int|null int32_t - the offset leader epoch, if relevant and known, else -1.
        +
        + +

        rd_kafka_topic_partition_list_add()

        +
        public static rd_kafka_topic_partition_list_add ( 
        +    \FFI\CData|null $rktparlist, 
        +    string|null $topic, 
        +    int|null $partition
        + ): \FFI\CData|null
        +
        +

        Add topic+partition to list.

        + +
        +
        Parameters
        +
        rktparlist \FFI\CData|null rd_kafka_topic_partition_list_t* - List to extend
        +
        topic string|null const char* - Topic name (copied)
        +
        partition int|null int32_t - Partition id
        +
        Returns
        +
\FFI\CData|null rd_kafka_topic_partition_t* - The object which can be used to fill in additional fields.
        +
        + +

        rd_kafka_topic_partition_list_add_range()

        +
        public static rd_kafka_topic_partition_list_add_range ( 
        +    \FFI\CData|null $rktparlist, 
        +    string|null $topic, 
        +    int|null $start, 
        +    int|null $stop
        + ): void
        +
        +

        Add range of partitions from start to stop inclusive.

        + +
        +
        Parameters
        +
        rktparlist \FFI\CData|null rd_kafka_topic_partition_list_t* - List to extend
        +
        topic string|null const char* - Topic name (copied)
        +
        start int|null int32_t - Start partition of range
        +
        stop int|null int32_t - Last partition of range (inclusive)
        +
        + +

        rd_kafka_topic_partition_list_copy()

        +
        public static rd_kafka_topic_partition_list_copy ( 
        +    \FFI\CData|null $src
        + ): \FFI\CData|null
        +
        +

        Make a copy of an existing list.

        + +
        +
        Parameters
        +
        src \FFI\CData|null const rd_kafka_topic_partition_list_t* - ) - The existing list to copy.
        +
        Returns
        +
        \FFI\CData|null rd_kafka_topic_partition_list_t* - A new list fully populated to be identical to src
        +
        + +

        rd_kafka_topic_partition_list_del()

        +
        public static rd_kafka_topic_partition_list_del ( 
        +    \FFI\CData|null $rktparlist, 
        +    string|null $topic, 
        +    int|null $partition
        + ): int|null
        +
        +

        Delete partition from list.

        + +
        Remarks
        Any held indices to elems[] are unusable after this call returns 1.
        + +
        +
        Parameters
        +
        rktparlist \FFI\CData|null rd_kafka_topic_partition_list_t* - List to modify
        +
        topic string|null const char* - Topic name to match
        +
        partition int|null int32_t - Partition to match
        +
        Returns
        +
        int|null int - 1 if partition was found (and removed), else 0.
        +
        + +

        rd_kafka_topic_partition_list_del_by_idx()

        +
        public static rd_kafka_topic_partition_list_del_by_idx ( 
        +    \FFI\CData|null $rktparlist, 
        +    int|null $idx
        + ): int|null
        +
        +

        Delete partition from list by elems[] index.

        + +
        See also
        rd_kafka_topic_partition_list_del()
        + +
        +
        Parameters
        +
        rktparlist \FFI\CData|null rd_kafka_topic_partition_list_t*
        +
        idx int|null int
        +
        Returns
        +
        int|null int - 1 if partition was found (and removed), else 0.
        +
        + +

        rd_kafka_topic_partition_list_destroy()

        +
        public static rd_kafka_topic_partition_list_destroy ( 
        +    \FFI\CData|null $rkparlist
        + ): void
        +
        +
        +
        Parameters
        +
        rkparlist \FFI\CData|null rd_kafka_topic_partition_list_t*
        +
        +

        rd_kafka_topic_partition_list_find()

        +
        public static rd_kafka_topic_partition_list_find ( 
        +    \FFI\CData|null $rktparlist, 
        +    string|null $topic, 
        +    int|null $partition
        + ): \FFI\CData|null
        +
        +

        Find element by topic and partition.

        + +
        +
        Parameters
        +
        rktparlist \FFI\CData|null rd_kafka_topic_partition_list_t*
        +
        topic string|null const char*
        +
        partition int|null int32_t
        +
        Returns
        +
        \FFI\CData|null rd_kafka_topic_partition_t* - a pointer to the first matching element, or NULL if not found.
        +
        + +

        rd_kafka_topic_partition_list_new()

        +
        public static rd_kafka_topic_partition_list_new ( 
        +    int|null $size
        + ): \FFI\CData|null
        +
        +

        Create a new list/vector Topic+Partition container.

        + +
        Remarks
        Use rd_kafka_topic_partition_list_destroy() to free all resources in use by a list and the list itself.
        +
        See also
        rd_kafka_topic_partition_list_add()
        + +
        +
        Parameters
        +
        size int|null int - ) - Initial allocated size used when the expected number of elements is known or can be estimated. Avoids reallocation and possibly relocation of the elems array.
        +
        Returns
        +
        \FFI\CData|null rd_kafka_topic_partition_list_t* - A newly allocated Topic+Partition list.
        +
        + +

        rd_kafka_topic_partition_list_set_offset()

        +
        public static rd_kafka_topic_partition_list_set_offset ( 
        +    \FFI\CData|null $rktparlist, 
        +    string|null $topic, 
        +    int|null $partition, 
        +    int|null $offset
        + ): int
        +
        +

        Set offset to offset for topic and partition.

        + +
        +
        Parameters
        +
        rktparlist \FFI\CData|null rd_kafka_topic_partition_list_t*
        +
        topic string|null const char*
        +
        partition int|null int32_t
        +
        offset int|null int64_t
        +
        Returns
        +
        int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION if partition was not found in the list.
        +
        + +
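A short sketch tying the topic partition list functions together, assuming \RdKafka\FFI\Library and the RD_KAFKA_RESP_ERR_NO_ERROR constant from the binding:

```php
<?php
// Sketch only: topic name, partitions and offset are illustrative.
use RdKafka\FFI\Library;

$list = Library::rd_kafka_topic_partition_list_new(4);
Library::rd_kafka_topic_partition_list_add_range($list, 'orders', 0, 3); // partitions 0..3 inclusive

$err = Library::rd_kafka_topic_partition_list_set_offset($list, 'orders', 0, 1000);

if ($err === RD_KAFKA_RESP_ERR_NO_ERROR) {
    $found = Library::rd_kafka_topic_partition_list_find($list, 'orders', 0);
    echo $found->offset, PHP_EOL; // 1000
}

Library::rd_kafka_topic_partition_list_destroy($list);
```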

        rd_kafka_topic_partition_list_sort()

        +
        public static rd_kafka_topic_partition_list_sort ( 
        +    \FFI\CData|null $rktparlist, 
        +    \FFI\CData|\Closure $cmp, 
        +    \FFI\CData|object|string|null $opaque
        + ): void
        +
        +

        Sort list using comparator cmp.

        +

        If cmp is NULL the default comparator will be used that sorts by ascending topic name and partition.

        +

        cmp_opaque is provided as the cmp_opaque argument to cmp.

        + +
        +
        Parameters
        +
        rktparlist \FFI\CData|null rd_kafka_topic_partition_list_t*
        +
cmp \FFI\CData|\Closure int(*)(const void*, const void*, void*)
        +
        opaque \FFI\CData|object|string|null void*
        +
        + +

        rd_kafka_topic_partition_set_leader_epoch()

        +
        public static rd_kafka_topic_partition_set_leader_epoch ( 
        +    \FFI\CData|null $rktpar, 
        +    int|null $leader_epoch
        + ): void
        +
        +

        Sets the offset leader epoch (use -1 to clear).

        + +
        Remarks
        See KIP-320 for more information.
        + +
        +
        Parameters
        +
        rktpar \FFI\CData|null rd_kafka_topic_partition_t* - Partition object.
        +
        leader_epoch int|null int32_t - Offset leader epoch, use -1 to reset.
        +
        + +

        rd_kafka_topic_result_error()

        +
        public static rd_kafka_topic_result_error ( 
        +    \FFI\CData|null $topicres
        + ): int
        +
        +

        Topic result provides per-topic operation result information.

        + +
        +
        Parameters
        +
        topicres \FFI\CData|null const rd_kafka_topic_result_t* - )
        +
        Returns
        +
        int rd_kafka_resp_err_t - the error code for the given topic result.
        +
        + +

        rd_kafka_topic_result_error_string()

        +
        public static rd_kafka_topic_result_error_string ( 
        +    \FFI\CData|null $topicres
        + ): string|null
        +
        +
        Remarks
        lifetime of the returned string is the same as the topicres.
        + +
        +
        Parameters
        +
        topicres \FFI\CData|null const rd_kafka_topic_result_t* - )
        +
        Returns
        +
        string|null const char* - the human readable error string for the given topic result, or NULL if there was no error.
        +
        + +

        rd_kafka_topic_result_name()

        +
        public static rd_kafka_topic_result_name ( 
        +    \FFI\CData|null $topicres
        + ): string|null
        +
        +
        Remarks
        lifetime of the returned string is the same as the topicres.
        + +
        +
        Parameters
        +
        topicres \FFI\CData|null const rd_kafka_topic_result_t* - )
        +
        Returns
        +
        string|null const char* - the name of the topic for the given topic result.
        +
        + +

        rd_kafka_type()

        +
        public static rd_kafka_type ( 
        +    \FFI\CData|null $rk
        + ): int
        +
        +
        +
        Parameters
        +
        rk \FFI\CData|null const rd_kafka_t*
        +
        Returns
        +
        int rd_kafka_type_t
        +
        +

        rd_kafka_unittest()

        +
        public static rd_kafka_unittest (  ): int|null
        +
        +

        Run librdkafka's built-in unit-tests.

        + +
        +
        Returns
        +
        int|null int - ) - the number of failures, or 0 if all tests passed.
        +
        + +

        rd_kafka_unsubscribe()

        +
        public static rd_kafka_unsubscribe ( 
        +    \FFI\CData|null $rk
        + ): int
        +
        +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t*
        +
        Returns
        +
        int rd_kafka_resp_err_t
        +
        +

        rd_kafka_version()

        +
        public static rd_kafka_version (  ): int|null
        +
        +

        Returns the librdkafka version as integer.

        + +
        See also
        See RD_KAFKA_VERSION for how to parse the integer format.
        +
+Use rd_kafka_version_str() to retrieve the version as a string.
        + +
        +
        Returns
        +
        int|null int - ) - Version integer.
        +
        + +

        rd_kafka_version_str()

        +
        public static rd_kafka_version_str (  ): string|null
        +
        +

        Returns the librdkafka version as string.

        + +
        +
        Returns
        +
        string|null const char* - ) - Version string
        +
        + +
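A minimal sketch printing both version representations, assuming \RdKafka\FFI\Library as the binding class:

```php
<?php
use RdKafka\FFI\Library;

// The integer encodes the version as hex MM.mm.rr.xx (major, minor, revision, pre-release id).
printf(
    "librdkafka %s (0x%08x)\n",
    Library::rd_kafka_version_str(),
    Library::rd_kafka_version()
);
```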

        rd_kafka_wait_destroyed()

        +
        public static rd_kafka_wait_destroyed ( 
        +    int|null $timeout_ms
        + ): int|null
        +
        +

        Wait for all rd_kafka_t objects to be destroyed.

        +

        Returns 0 if all kafka objects are now destroyed, or -1 if the timeout was reached.

        +
        Remarks
        This function is deprecated.
        + +
        +
        Parameters
        +
        timeout_ms int|null int - )
        +
        Returns
        +
        int|null int
        +
        + +

        rd_kafka_yield()

        +
        public static rd_kafka_yield ( 
        +    \FFI\CData|null $rk
        + ): void
        +
        +

        Cancels the current callback dispatcher (rd_kafka_poll(), rd_kafka_consume_callback(), etc).

        +

        A callback may use this to force an immediate return to the calling code (caller of e.g. rd_kafka_poll()) without processing any further events.

        +
        Remarks
        This function MUST ONLY be called from within a librdkafka callback.
        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t* - )
        +
        + +

        requireMethod()

        +
        public static requireMethod ( 
        +    string $name
        + ): void
        +
        +

The method must be supported by the current binding version, otherwise an exception is thrown.

        +
        +
        Parameters
        +
        name string
        +
        +

        requireVersion()

        +
        public static requireVersion ( 
        +    string $operator, 
        +    string $version
        + ): void
        +
        +

The version must match the current binding, otherwise an exception is thrown.

        +
        +
        Parameters
        +
        operator string
        +
        version string
        +
        +

        type()

        +
        public static type ( 
        +    string|\FFI\CType $type
        + ): \FFI\CType
        +
        +
        +
        Parameters
        +
        type string|\FFI\CType
        +
        Returns
        +
        \FFI\CType
        +
        +

        versionMatches()

        +
        public static versionMatches ( 
        +    string $operator, 
        +    string $version
        + ): bool
        +
        +

Whether the given version matches the current binding version.

        +
        +
        Parameters
        +
        operator string
        +
        version string
        +
        Returns
        +
        bool
        +
        +
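A hedged sketch of using the version helpers, assuming they are exposed on a binding class named \RdKafka\FFI\Library; the version constraints and method name below are purely illustrative:

```php
<?php
use RdKafka\FFI\Library;

// Guard optional features behind a version check ...
if (Library::versionMatches('>=', '1.6.0')) {
    // ... use newer librdkafka APIs here ...
}

// ... or fail fast: these throw when the constraint or method is not available.
Library::requireVersion('>=', '1.4.0');
Library::requireMethod('rd_kafka_seek_partitions');
```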

        Test Coverage 💛

        +
          +
        • 💛 + Lines: 81.82% (72 / 88)
        • +
        • 🧡 + Methods: 61.11% (11 / 18)
        • +
\ No newline at end of file
diff --git a/api/RdKafka/FFI/LogCallbackProxy/index.html b/api/RdKafka/FFI/LogCallbackProxy/index.html
new file mode 100644
index 00000000..73259c73
--- /dev/null
+++ b/api/RdKafka/FFI/LogCallbackProxy/index.html
@@ -0,0 +1,2896 @@
+ LogCallbackProxy - PHP Kafka Client

        Class LogCallbackProxy

        +

        Class \RdKafka\FFI\LogCallbackProxy

        + +

        Methods

        +

        __invoke()

        +
        public __invoke ( 
        +    \FFI\CData $rdkafka, 
        +    int $level, 
        +    string $facility, 
        +    string $message
        + ): void
        +
        +
        +
        Parameters
        +
        rdkafka \FFI\CData
        +
        level int
        +
        facility string
        +
        message string
        +
        +

        Test Coverage 💚

        +
          +
        • 💚 + Lines: 100% (6 / 6)
        • +
        • 💚 + Methods: 100% (1 / 1)
        • +
\ No newline at end of file
diff --git a/api/RdKafka/FFI/Methods/index.html b/api/RdKafka/FFI/Methods/index.html
new file mode 100644
index 00000000..825bf256
--- /dev/null
+++ b/api/RdKafka/FFI/Methods/index.html
@@ -0,0 +1,22023 @@
+ Methods - PHP Kafka Client

        Trait Methods

        +

        Trait \RdKafka\FFI\Methods

        +

        Description of librdkafka methods and constants is extracted from the official documentation.

        + +

        Methods

        +

        getFFI()

        +
        public static getFFI (  ): \FFI
        +
        +
        +
        Returns
        +
        \FFI
        +
        +

        rd_kafka_version()

        +
        public static rd_kafka_version (  ): int|null
        +
        +

        Returns the librdkafka version as integer.

        + +
        See also
        See RD_KAFKA_VERSION for how to parse the integer format.
        +
+Use rd_kafka_version_str() to retrieve the version as a string.
        + +
        +
        Returns
        +
        int|null int - ) - Version integer.
        +
        + +

        rd_kafka_version_str()

        +
        public static rd_kafka_version_str (  ): string|null
        +
        +

        Returns the librdkafka version as string.

        + +
        +
        Returns
        +
        string|null const char* - ) - Version string
        +
        + +

        rd_kafka_get_debug_contexts()

        +
        public static rd_kafka_get_debug_contexts (  ): string|null
        +
        +

        Retrieve supported debug contexts for use with the "debug" configuration property. (runtime)

        + +
        +
        Returns
        +
        string|null const char* - ) - Comma-separated list of available debugging contexts.
        +
        + +

        rd_kafka_get_err_descs()

        +
        public static rd_kafka_get_err_descs ( 
        +    \FFI\CData|null $errdescs, 
        +    \FFI\CData|null $cntp
        + ): void
        +
        +
        +
        Parameters
        +
        errdescs \FFI\CData|null const struct rd_kafka_err_desc**
        +
        cntp \FFI\CData|null size_t*
        +
        +

        rd_kafka_err2str()

        +
        public static rd_kafka_err2str ( 
        +    int $err
        + ): string|null
        +
        +

        Returns a human readable representation of a kafka error.

        + +
        +
        Parameters
        +
        err int rd_kafka_resp_err_t - ) - Error code to translate
        +
        Returns
        +
        string|null const char*
        +
        + +

        rd_kafka_err2name()

        +
        public static rd_kafka_err2name ( 
        +    int $err
        + ): string|null
        +
        +

        Returns the error code name (enum name).

        + +
        +
        Parameters
        +
        err int rd_kafka_resp_err_t - ) - Error code to translate
        +
        Returns
        +
        string|null const char*
        +
        + +
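A minimal sketch translating an error code into readable form, assuming \RdKafka\FFI\Library and using rd_kafka_last_error() (documented below) as the source of the code:

```php
<?php
use RdKafka\FFI\Library;

// Translate the last error raised by a legacy API call in this thread.
$err = Library::rd_kafka_last_error();
printf("%s: %s\n", Library::rd_kafka_err2name($err), Library::rd_kafka_err2str($err));
```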

        rd_kafka_last_error()

        +
        public static rd_kafka_last_error (  ): int
        +
        +

        Returns the last error code generated by a legacy API call in the current thread.

        +

        The legacy APIs are the ones using errno to propagate error value, namely:

        +
          +
        • rd_kafka_topic_new()
        • +
        • rd_kafka_consume_start()
        • +
        • rd_kafka_consume_stop()
        • +
        • rd_kafka_consume()
        • +
        • rd_kafka_consume_batch()
        • +
        • rd_kafka_consume_callback()
        • +
        • rd_kafka_consume_queue()
        • +
        • rd_kafka_produce()
        • +
        +

        The main use for this function is to avoid converting system errno values to rd_kafka_resp_err_t codes for legacy APIs.

        +
        Remarks
        The last error is stored per-thread, if multiple rd_kafka_t handles are used in the same application thread the developer needs to make sure rd_kafka_last_error() is called immediately after a failed API call.
        +
        +errno propagation from librdkafka is not safe on Windows and should not be used, use rd_kafka_last_error() instead.
        + +
        +
        Returns
        +
        int rd_kafka_resp_err_t - )
        +
        + +

        rd_kafka_errno2err()

        +
        public static rd_kafka_errno2err ( 
        +    int|null $errnox
        + ): int
        +
        +

        Converts the system errno value errnox to a rd_kafka_resp_err_t error code upon failure from the following functions:

        +
          +
        • rd_kafka_topic_new()
        • +
        • rd_kafka_consume_start()
        • +
        • rd_kafka_consume_stop()
        • +
        • rd_kafka_consume()
        • +
        • rd_kafka_consume_batch()
        • +
        • rd_kafka_consume_callback()
        • +
        • rd_kafka_consume_queue()
        • +
        • rd_kafka_produce()
        • +
        + +
        Remarks
        A better alternative is to call rd_kafka_last_error() immediately after any of the above functions return -1 or NULL.
        +
        Deprecated:
        Use rd_kafka_last_error() to retrieve the last error code set by the legacy librdkafka APIs.
        +
        See also
        rd_kafka_last_error()
        + +
        +
        Parameters
        +
        errnox int|null int - ) - System errno value to convert
        +
        Returns
        +
        int rd_kafka_resp_err_t - Appropriate error code for errnox
        +
        + +

        rd_kafka_errno()

        +
        public static rd_kafka_errno (  ): int|null
        +
        +

        Returns the thread-local system errno.

        +

        On most platforms this is the same as errno but in case of different runtimes between library and application (e.g., Windows static DLLs) this provides a means for exposing the errno librdkafka uses.

        +
        Remarks
        The value is local to the current calling thread.
        +
        Deprecated:
        Use rd_kafka_last_error() to retrieve the last error code set by the legacy librdkafka APIs.
        + +
        +
        Returns
        +
        int|null int - )
        +
        + +

        rd_kafka_fatal_error()

        +
        public static rd_kafka_fatal_error ( 
        +    \FFI\CData|null $rk, 
        +    \FFI\CData|null $errstr, 
        +    int|null $errstr_size
        + ): int
        +
        +

        Returns the first fatal error set on this client instance, or RD_KAFKA_RESP_ERR_NO_ERROR if no fatal error has occurred.

        +

        This function is to be used with the Idempotent Producer and error_cb to detect fatal errors.

        +

        Generally all errors raised by error_cb are to be considered informational and temporary, the client will try to recover from all errors in a graceful fashion (by retrying, etc).

        +

However, some errors should logically be considered fatal to retain consistency; in particular, a set of errors that may occur when using the Idempotent Producer, where the in-order or exactly-once producer guarantees can't be satisfied.

        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t* - Client instance.
        +
        errstr \FFI\CData|null char* - A human readable error string (nul-terminated) is written to this location that must be of at least errstr_size bytes. The errstr is only written to if there is a fatal error. - Writable size in errstr.
        +
        errstr_size int|null size_t
        +
        Returns
        +
        int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR if no fatal error has been raised, else any other error code.
        +
        + +

        rd_kafka_test_fatal_error()

        +
        public static rd_kafka_test_fatal_error ( 
        +    \FFI\CData|null $rk, 
        +    int $err, 
        +    string|null $reason
        + ): int
        +
        +

        Trigger a fatal error for testing purposes.

        +

        Since there is no practical way to trigger real fatal errors in the idempotent producer, this method allows an application to trigger fabricated fatal errors in tests to check its error handling code.

        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t* - Client instance.
        +
        err int rd_kafka_resp_err_t - The underlying error code.
        +
        reason string|null const char* - A human readable error reason. Will be prefixed with “test_fatal_error: ” to differentiate from real fatal errors.
        +
        Returns
        +
        int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR if a fatal error was triggered, or RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS if a previous fatal error has already been triggered.
        +
        + +

        rd_kafka_topic_partition_destroy()

        +
        public static rd_kafka_topic_partition_destroy ( 
        +    \FFI\CData|null $rktpar
        + ): void
        +
        +

        Destroy a rd_kafka_topic_partition_t.

        +
        Remarks
        This must not be called for elements in a topic partition list.
        + +
        +
        Parameters
        +
        rktpar \FFI\CData|null rd_kafka_topic_partition_t* - )
        +
        + +

        rd_kafka_topic_partition_list_new()

        +
        public static rd_kafka_topic_partition_list_new ( 
        +    int|null $size
        + ): \FFI\CData|null
        +
        +

        Create a new list/vector Topic+Partition container.

        + +
        Remarks
        Use rd_kafka_topic_partition_list_destroy() to free all resources in use by a list and the list itself.
        +
        See also
        rd_kafka_topic_partition_list_add()
        + +
        +
        Parameters
        +
        size int|null int - ) - Initial allocated size used when the expected number of elements is known or can be estimated. Avoids reallocation and possibly relocation of the elems array.
        +
        Returns
        +
        \FFI\CData|null rd_kafka_topic_partition_list_t* - A newly allocated Topic+Partition list.
        +
        + +

        rd_kafka_topic_partition_list_destroy()

        +
        public static rd_kafka_topic_partition_list_destroy ( 
        +    \FFI\CData|null $rkparlist
        + ): void
        +
        +
        +
        Parameters
        +
        rkparlist \FFI\CData|null rd_kafka_topic_partition_list_t*
        +
        +

        rd_kafka_topic_partition_list_add()

        +
        public static rd_kafka_topic_partition_list_add ( 
        +    \FFI\CData|null $rktparlist, 
        +    string|null $topic, 
        +    int|null $partition
        + ): \FFI\CData|null
        +
        +

        Add topic+partition to list.

        + +
        +
        Parameters
        +
        rktparlist \FFI\CData|null rd_kafka_topic_partition_list_t* - List to extend
        +
        topic string|null const char* - Topic name (copied)
        +
        partition int|null int32_t - Partition id
        +
        Returns
        +
\FFI\CData|null rd_kafka_topic_partition_t* - The object which can be used to fill in additional fields.
        +
        + +

        rd_kafka_topic_partition_list_add_range()

        +
        public static rd_kafka_topic_partition_list_add_range ( 
        +    \FFI\CData|null $rktparlist, 
        +    string|null $topic, 
        +    int|null $start, 
        +    int|null $stop
        + ): void
        +
        +

        Add range of partitions from start to stop inclusive.

        + +
        +
        Parameters
        +
        rktparlist \FFI\CData|null rd_kafka_topic_partition_list_t* - List to extend
        +
        topic string|null const char* - Topic name (copied)
        +
        start int|null int32_t - Start partition of range
        +
        stop int|null int32_t - Last partition of range (inclusive)
        +
        + +

        rd_kafka_topic_partition_list_del()

        +
        public static rd_kafka_topic_partition_list_del ( 
        +    \FFI\CData|null $rktparlist, 
        +    string|null $topic, 
        +    int|null $partition
        + ): int|null
        +
        +

        Delete partition from list.

        + +
        Remarks
        Any held indices to elems[] are unusable after this call returns 1.
        + +
        +
        Parameters
        +
        rktparlist \FFI\CData|null rd_kafka_topic_partition_list_t* - List to modify
        +
        topic string|null const char* - Topic name to match
        +
        partition int|null int32_t - Partition to match
        +
        Returns
        +
        int|null int - 1 if partition was found (and removed), else 0.
        +
        + +

        rd_kafka_topic_partition_list_del_by_idx()

        +
        public static rd_kafka_topic_partition_list_del_by_idx ( 
        +    \FFI\CData|null $rktparlist, 
        +    int|null $idx
        + ): int|null
        +
        +

        Delete partition from list by elems[] index.

        + +
        See also
        rd_kafka_topic_partition_list_del()
        + +
        +
        Parameters
        +
        rktparlist \FFI\CData|null rd_kafka_topic_partition_list_t*
        +
        idx int|null int
        +
        Returns
        +
        int|null int - 1 if partition was found (and removed), else 0.
        +
        + +

        rd_kafka_topic_partition_list_copy()

        +
        public static rd_kafka_topic_partition_list_copy ( 
        +    \FFI\CData|null $src
        + ): \FFI\CData|null
        +
        +

        Make a copy of an existing list.

        + +
        +
        Parameters
        +
        src \FFI\CData|null const rd_kafka_topic_partition_list_t* - ) - The existing list to copy.
        +
        Returns
        +
        \FFI\CData|null rd_kafka_topic_partition_list_t* - A new list fully populated to be identical to src
        +
        + +

        rd_kafka_topic_partition_list_set_offset()

        +
        public static rd_kafka_topic_partition_list_set_offset ( 
        +    \FFI\CData|null $rktparlist, 
        +    string|null $topic, 
        +    int|null $partition, 
        +    int|null $offset
        + ): int
        +
        +

        Set offset to offset for topic and partition.

        + +
        +
        Parameters
        +
        rktparlist \FFI\CData|null rd_kafka_topic_partition_list_t*
        +
        topic string|null const char*
        +
        partition int|null int32_t
        +
        offset int|null int64_t
        +
        Returns
        +
        int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION if partition was not found in the list.
        +
        + +

        rd_kafka_topic_partition_list_find()

        +
        public static rd_kafka_topic_partition_list_find ( 
        +    \FFI\CData|null $rktparlist, 
        +    string|null $topic, 
        +    int|null $partition
        + ): \FFI\CData|null
        +
        +

        Find element by topic and partition.

        + +
        +
        Parameters
        +
        rktparlist \FFI\CData|null rd_kafka_topic_partition_list_t*
        +
        topic string|null const char*
        +
        partition int|null int32_t
        +
        Returns
        +
        \FFI\CData|null rd_kafka_topic_partition_t* - a pointer to the first matching element, or NULL if not found.
        +
        + +

        rd_kafka_topic_partition_list_sort()

        +
        public static rd_kafka_topic_partition_list_sort ( 
        +    \FFI\CData|null $rktparlist, 
        +    \FFI\CData|\Closure $cmp, 
        +    \FFI\CData|object|string|null $opaque
        + ): void
        +
        +

        Sort list using comparator cmp.

        +

        If cmp is NULL the default comparator will be used that sorts by ascending topic name and partition.

        +

        cmp_opaque is provided as the cmp_opaque argument to cmp.

        + +
        +
        Parameters
        +
        rktparlist \FFI\CData|null rd_kafka_topic_partition_list_t*
        +
cmp \FFI\CData|\Closure int(*)(const void*, const void*, void*)
        +
        opaque \FFI\CData|object|string|null void*
        +
        + +

        rd_kafka_headers_new()

        +
        public static rd_kafka_headers_new ( 
        +    int|null $initial_count
        + ): \FFI\CData|null
        +
        +

        Create a new headers list.

        + +
        +
        Parameters
        +
        initial_count int|null size_t - ) - Preallocate space for this number of headers. Any number of headers may be added, updated and removed regardless of the initial count.
        +
        Returns
        +
        \FFI\CData|null rd_kafka_headers_t*
        +
        + +

        rd_kafka_headers_destroy()

        +
        public static rd_kafka_headers_destroy ( 
        +    \FFI\CData|null $hdrs
        + ): void
        +
        +
        +
        Parameters
        +
        hdrs \FFI\CData|null rd_kafka_headers_t*
        +
        +

        rd_kafka_headers_copy()

        +
        public static rd_kafka_headers_copy ( 
        +    \FFI\CData|null $src
        + ): \FFI\CData|null
        +
        +
        +
        Parameters
        +
        src \FFI\CData|null const rd_kafka_headers_t*
        +
        Returns
        +
        \FFI\CData|null rd_kafka_headers_t*
        +
        +

        rd_kafka_header_add()

        +
        public static rd_kafka_header_add ( 
        +    \FFI\CData|null $hdrs, 
        +    string|null $name, 
        +    int|null $name_size, 
        +    \FFI\CData|object|string|null $value, 
        +    int|null $value_size
        + ): int
        +
        +

        Add header with name name and value val (copied) of size size (not including null-terminator).

        + +
        +
        Parameters
        +
        hdrs \FFI\CData|null rd_kafka_headers_t* - Headers list.
        +
        name string|null const char* - Header name.
        +
        name_size int|null ssize_t - Header name size (not including the null-terminator). If -1 the name length is automatically acquired using strlen().
        +
        value \FFI\CData|object|string|null const void* - Pointer to header value, or NULL (set size to 0 or -1).
        +
        value_size int|null ssize_t - Size of header value. If -1 the value is assumed to be a null-terminated string and the length is automatically acquired using strlen().
        +
        Returns
        +
        int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR__READ_ONLY if the headers are read-only, else RD_KAFKA_RESP_ERR_NO_ERROR.
        +
        + +

        rd_kafka_header_remove()

        +
        public static rd_kafka_header_remove ( 
        +    \FFI\CData|null $hdrs, 
        +    string|null $name
        + ): int
        +
        +

        Remove all headers for the given key (if any).

        + +
        +
        Parameters
        +
        hdrs \FFI\CData|null rd_kafka_headers_t*
        +
        name string|null const char*
        +
        Returns
        +
        int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR__READ_ONLY if the headers are read-only, RD_KAFKA_RESP_ERR__NOENT if no matching headers were found, else RD_KAFKA_RESP_ERR_NO_ERROR if headers were removed.
        +
        + +

        rd_kafka_header_get_last()

        +
        public static rd_kafka_header_get_last ( 
        +    \FFI\CData|null $hdrs, 
        +    string|null $name, 
        +    \FFI\CData|object|string|null $valuep, 
        +    \FFI\CData|null $sizep
        + ): int
        +
        +

        Find last header in list hdrs matching name.

        + +
        Remarks
        The returned pointer in valuep includes a trailing null-terminator that is not accounted for in sizep.
        +
        +The returned pointer is only valid as long as the headers list and the header item is valid.
        + +
        +
        Parameters
        +
        hdrs \FFI\CData|null const rd_kafka_headers_t* - Headers list.
        +
        name string|null const char* - Header to find (last match).
        +
        valuep \FFI\CData|object|string|null const void** - (out) Set to a (null-terminated) const pointer to the value (may be NULL).
        +
        sizep \FFI\CData|null size_t* - (out) Set to the value’s size (not including null-terminator).
        +
        Returns
        +
        int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR if an entry was found, else RD_KAFKA_RESP_ERR__NOENT.
        +
        + +
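Reading a value back requires the two out-parameters. A hedged sketch, assuming valuep and sizep are passed as \FFI\CData pointers created with the standard \FFI helpers (same class-name assumption as above):

use RdKafka\FFI\Library;

$valuep = \FFI::new('void*');      // will receive a pointer to the value
$sizep  = \FFI::new('size_t');     // will receive the value size

$err = Library::rd_kafka_header_get_last($hdrs, 'trace-id', \FFI::addr($valuep), \FFI::addr($sizep));
if ($err === RD_KAFKA_RESP_ERR_NO_ERROR) {
    // Copy the reported number of bytes into a PHP string (the value is also NUL-terminated).
    $value = \FFI::string(\FFI::cast('char *', $valuep), (int) $sizep->cdata);
}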

        rd_kafka_header_get()

        +
        public static rd_kafka_header_get ( 
        +    \FFI\CData|null $hdrs, 
        +    int|null $idx, 
        +    string|null $name, 
        +    \FFI\CData|object|string|null $valuep, 
        +    \FFI\CData|null $sizep
        + ): int
        +
        +

        Iterator for headers matching name.

        +
           Same semantics as rd_kafka_header_get_last()
        +
        + +
        +
        Parameters
        +
        hdrs \FFI\CData|null const rd_kafka_headers_t* - Headers to iterate.
        +
        idx int|null size_t - Iterator index, start at 0 and increment by one for each call as long as RD_KAFKA_RESP_ERR_NO_ERROR is returned.
        +
        name string|null const char* - Header name to match.
        +
        valuep \FFI\CData|object|string|null const void** - (out) Set to a (null-terminated) const pointer to the value (may be NULL).
        +
        sizep \FFI\CData|null size_t* - (out) Set to the value’s size (not including null-terminator).
        +
        Returns
        +
        int rd_kafka_resp_err_t
        +
        + +

        rd_kafka_header_get_all()

        +
        public static rd_kafka_header_get_all ( 
        +    \FFI\CData|null $hdrs, 
        +    int|null $idx, 
        +    \FFI\CData|null $namep, 
        +    \FFI\CData|object|string|null $valuep, 
        +    \FFI\CData|null $sizep
        + ): int
        +
        +

        Iterator for all headers.

        +
           Same semantics as rd_kafka_header_get()
        +
        +
        See also
        rd_kafka_header_get()
        + +
        +
        Parameters
        +
        hdrs \FFI\CData|null const rd_kafka_headers_t*
        +
        idx int|null size_t
        +
        namep \FFI\CData|null const char**
        +
        valuep \FFI\CData|object|string|null const void**
        +
        sizep \FFI\CData|null size_t*
        +
        Returns
        +
        int rd_kafka_resp_err_t
        +
        + +

        rd_kafka_message_destroy()

        +
        public static rd_kafka_message_destroy ( 
        +    \FFI\CData|null $rkmessage
        + ): void
        +
        +
        +
        Parameters
        +
        rkmessage \FFI\CData|null rd_kafka_message_t*
        +
        +

        rd_kafka_message_timestamp()

        +
        public static rd_kafka_message_timestamp ( 
        +    \FFI\CData|null $rkmessage, 
        +    \FFI\CData|null $tstype
        + ): int|null
        +
        +

        Returns the message timestamp for a consumed message.

        +

        The timestamp is the number of milliseconds since the epoch (UTC).

        +

        tstype (if not NULL) is updated to indicate the type of timestamp.

        + +
        Remarks
        Message timestamps require broker version 0.10.0 or later.
        + +
        +
        Parameters
        +
        rkmessage \FFI\CData|null const rd_kafka_message_t*
        +
        tstype \FFI\CData|null rd_kafka_timestamp_type_t*
        +
        Returns
        +
        int|null int64_t - message timestamp, or -1 if not available.
        +
        + +
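A short usage sketch: since tstype may be NULL, the simplest call just reads the millisecond timestamp of a consumed message (assuming $message is an rd_kafka_message_t* CData and the binding class is \RdKafka\FFI\Library as above):

use RdKafka\FFI\Library;

$tsMs = Library::rd_kafka_message_timestamp($message, null);  // null: skip the timestamp-type out-parameter
if ($tsMs >= 0) {
    echo 'produced at ', date('c', intdiv($tsMs, 1000)), "\n";
} else {
    // -1: timestamp not available (e.g. broker < 0.10.0)
}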

        rd_kafka_message_latency()

        +
        public static rd_kafka_message_latency ( 
        +    \FFI\CData|null $rkmessage
        + ): int|null
        +
        +

        Returns the latency for a produced message measured from the produce() call.

        + +
        +
        Parameters
        +
rkmessage \FFI\CData|null const rd_kafka_message_t*
        +
        Returns
        +
        int|null int64_t - the latency in microseconds, or -1 if not available.
        +
        + +

        rd_kafka_message_headers()

        +
        public static rd_kafka_message_headers ( 
        +    \FFI\CData|null $rkmessage, 
        +    \FFI\CData|null $hdrsp
        + ): int
        +
        +

        Get the message header list.

        +

The returned pointer in *hdrsp is associated with the rkmessage and must not be used after the message object is destroyed or after the header list has been replaced with rd_kafka_message_set_headers().

        + +
        Remarks
        Headers require broker version 0.11.0.0 or later.
        +
        +As an optimization the raw protocol headers are parsed on the first call to this function.
        + +
        +
        Parameters
        +
        rkmessage \FFI\CData|null const rd_kafka_message_t*
        +
        hdrsp \FFI\CData|null rd_kafka_headers_t**
        +
        Returns
        +
        int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR if headers were returned, RD_KAFKA_RESP_ERR__NOENT if the message has no headers, or another error code if the headers could not be parsed.
        +
        + +

        rd_kafka_message_detach_headers()

        +
        public static rd_kafka_message_detach_headers ( 
        +    \FFI\CData|null $rkmessage, 
        +    \FFI\CData|null $hdrsp
        + ): int
        +
        +

        Get the message header list and detach the list from the message making the application the owner of the headers. The application must eventually destroy the headers using rd_kafka_headers_destroy(). The message's headers will be set to NULL.

        +

        Otherwise same semantics as rd_kafka_message_headers()

        +
        See also
        rd_kafka_message_headers
        + +
        +
        Parameters
        +
        rkmessage \FFI\CData|null rd_kafka_message_t*
        +
        hdrsp \FFI\CData|null rd_kafka_headers_t**
        +
        Returns
        +
        int rd_kafka_resp_err_t
        +
        + +

        rd_kafka_message_set_headers()

        +
        public static rd_kafka_message_set_headers ( 
        +    \FFI\CData|null $rkmessage, 
        +    \FFI\CData|null $hdrs
        + ): void
        +
        +

        Replace the message's current headers with a new list.

        + +
        Remarks
        The existing headers object, if any, will be destroyed.
        + +
        +
        Parameters
        +
        rkmessage \FFI\CData|null rd_kafka_message_t* - The message to set headers.
        +
        hdrs \FFI\CData|null rd_kafka_headers_t* - New header list. The message object assumes ownership of the list, the list will be destroyed automatically with the message object. The new headers list may be updated until the message object is passed or returned to librdkafka.
        +
        + +

        rd_kafka_header_cnt()

        +
        public static rd_kafka_header_cnt ( 
        +    \FFI\CData|null $hdrs
        + ): int|null
        +
        +

        Returns the number of header key/value pairs.

        + +
        +
        Parameters
        +
hdrs \FFI\CData|null const rd_kafka_headers_t* - Headers to count
        +
        Returns
        +
        int|null size_t
        +
        + +

        rd_kafka_message_status()

        +
        public static rd_kafka_message_status ( 
        +    \FFI\CData|null $rkmessage
        + ): int
        +
        +

        Returns the message's persistence status in the topic log.

        +
        Remarks
        The message status is not available in on_acknowledgement interceptors.
        + +
        +
        Parameters
        +
rkmessage \FFI\CData|null const rd_kafka_message_t*
        +
        Returns
        +
        int rd_kafka_msg_status_t
        +
        + +

        rd_kafka_conf_new()

        +
        public static rd_kafka_conf_new (  ): \FFI\CData|null
        +
        +

        Create configuration object.

        +

When providing your own configuration to the rd_kafka_*_new_*() calls, the rd_kafka_conf_t object needs to be created with this function, which will set up the defaults. I.e.:

        +
        rd_kafka_conf_t *myconf;
        +rd_kafka_conf_res_t res;
        +
        +myconf = rd_kafka_conf_new();
        +res = rd_kafka_conf_set(myconf, "socket.timeout.ms", "600",
        +                        errstr, sizeof(errstr));
        +if (res != RD_KAFKA_CONF_OK)
        +   die("%s\n", errstr);
        +
        +rk = rd_kafka_new(..., myconf);
        +

        Please see CONFIGURATION.md for the default settings or use rd_kafka_conf_properties_show() to provide the information at runtime.

        +

        The properties are identical to the Apache Kafka configuration properties whenever possible.

        +
        Remarks
        A successful call to rd_kafka_new() will assume ownership of the conf object and rd_kafka_conf_destroy() must not be called.
        + +
        See also
        rd_kafka_new(), rd_kafka_conf_set(), rd_kafka_conf_destroy()
        + +
        +
        Returns
        +
\FFI\CData|null rd_kafka_conf_t* - A new rd_kafka_conf_t object with defaults set.
        +
        + +

        rd_kafka_conf_destroy()

        +
        public static rd_kafka_conf_destroy ( 
        +    \FFI\CData|null $conf
        + ): void
        +
        +
        +
        Parameters
        +
        conf \FFI\CData|null rd_kafka_conf_t*
        +
        +

        rd_kafka_conf_dup()

        +
        public static rd_kafka_conf_dup ( 
        +    \FFI\CData|null $conf
        + ): \FFI\CData|null
        +
        +

        Creates a copy/duplicate of configuration object conf.

        +
        Remarks
        Interceptors are NOT copied to the new configuration object.
        +
        See also
        rd_kafka_interceptor_f_on_conf_dup
        + +
        +
        Parameters
        +
conf \FFI\CData|null const rd_kafka_conf_t*
        +
        Returns
        +
        \FFI\CData|null rd_kafka_conf_t*
        +
        + +

        rd_kafka_conf_dup_filter()

        +
        public static rd_kafka_conf_dup_filter ( 
        +    \FFI\CData|null $conf, 
        +    int|null $filter_cnt, 
        +    \FFI\CData|null $filter
        + ): \FFI\CData|null
        +
        +
        +
        Parameters
        +
        conf \FFI\CData|null const rd_kafka_conf_t*
        +
        filter_cnt int|null size_t
        +
        filter \FFI\CData|null const char**
        +
        Returns
        +
        \FFI\CData|null rd_kafka_conf_t*
        +
        +

        rd_kafka_conf_set()

        +
        public static rd_kafka_conf_set ( 
        +    \FFI\CData|null $conf, 
        +    string|null $name, 
        +    string|null $value, 
        +    \FFI\CData|null $errstr, 
        +    int|null $errstr_size
        + ): int
        +
        +

        Sets a configuration property.

        +

        conf must have been previously created with rd_kafka_conf_new().

        +

        Fallthrough: Topic-level configuration properties may be set using this interface in which case they are applied on the default_topic_conf. If no default_topic_conf has been set one will be created. Any subsequent rd_kafka_conf_set_default_topic_conf() calls will replace the current default topic configuration.

        + +
        Remarks
        Setting properties or values that were disabled at build time due to missing dependencies will return RD_KAFKA_CONF_INVALID.
        + +
        +
        Parameters
        +
        conf \FFI\CData|null rd_kafka_conf_t*
        +
        name string|null const char*
        +
        value string|null const char*
        +
        errstr \FFI\CData|null char*
        +
        errstr_size int|null size_t
        +
        Returns
        +
        int rd_kafka_conf_res_t - rd_kafka_conf_res_t to indicate success or failure. In case of failure errstr is updated to contain a human readable error string.
        +
        + +
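The C example under rd_kafka_conf_new() translates fairly directly to these bindings. A minimal sketch, assuming the \RdKafka\FFI\Library class name and the RD_KAFKA_CONF_OK constant are provided by the binding (both assumptions):

use RdKafka\FFI\Library;

$conf   = Library::rd_kafka_conf_new();
$errstr = \FFI::new('char[512]');

$res = Library::rd_kafka_conf_set($conf, 'socket.timeout.ms', '600', $errstr, \FFI::sizeof($errstr));
if ($res !== RD_KAFKA_CONF_OK) {
    throw new \RuntimeException(\FFI::string($errstr));
}
// Hand $conf to rd_kafka_new(); on success librdkafka takes ownership of it.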

        rd_kafka_conf_set_events()

        +
        public static rd_kafka_conf_set_events ( 
        +    \FFI\CData|null $conf, 
        +    int|null $events
        + ): void
        +
        +
        +
        Parameters
        +
        conf \FFI\CData|null rd_kafka_conf_t*
        +
        events int|null int
        +
        +

        rd_kafka_conf_set_background_event_cb()

        +
        public static rd_kafka_conf_set_background_event_cb ( 
        +    \FFI\CData|null $conf, 
        +    \FFI\CData|\Closure $event_cb
        + ): void
        +
        +

        Generic event callback to be used with the event API to trigger callbacks for rd_kafka_event_t objects from a background thread serving the background queue.

        +

        How to use:

        +
1. First set the event callback on the configuration object with this function, followed by creating an rd_kafka_t instance with rd_kafka_new().
2. Get the instance's background queue with rd_kafka_queue_get_background() and pass it as the reply/response queue to an API that takes an event queue, such as rd_kafka_CreateTopics().
3. As the response event is ready and enqueued on the background queue, the event callback will be triggered from the background thread.
4. Prior to destroying the client instance, lose your reference to the background queue by calling rd_kafka_queue_destroy().
        +

        The application must destroy the rkev passed to event cb using rd_kafka_event_destroy().

        +

        The event_cb opaque argument is the opaque set with rd_kafka_conf_set_opaque().

        +
        Remarks
        This callback is a specialized alternative to the poll-based event API described in the Event interface section.
        +
        +The event_cb will be called spontaneously from a background thread completely managed by librdkafka. Take care to perform proper locking of application objects.
        +
        Warning
        The application MUST NOT call rd_kafka_destroy() from the event callback.
        +
        See also
        rd_kafka_queue_get_background
        + +
        +
        Parameters
        +
        conf \FFI\CData|null rd_kafka_conf_t*
        +
event_cb \FFI\CData|\Closure void(*)(rd_kafka_t*, rd_kafka_event_t*, void*)
        +
        + +

        rd_kafka_conf_set_dr_cb()

        +
        public static rd_kafka_conf_set_dr_cb ( 
        +    \FFI\CData|null $conf, 
        +    \FFI\CData|\Closure $dr_cb
        + ): void
        +
        +
        Deprecated:
        See rd_kafka_conf_set_dr_msg_cb()
        + +
        +
        Parameters
        +
        conf \FFI\CData|null rd_kafka_conf_t*
        +
dr_cb \FFI\CData|\Closure void(*)(rd_kafka_t*, void*, size_t, rd_kafka_resp_err_t, void*, void*)
        +
        + +

        rd_kafka_conf_set_dr_msg_cb()

        +
        public static rd_kafka_conf_set_dr_msg_cb ( 
        +    \FFI\CData|null $conf, 
        +    \FFI\CData|\Closure $dr_msg_cb
        + ): void
        +
        +

        Producer: Set delivery report callback in provided conf object.

        +

The delivery report callback will be called once for each message accepted by rd_kafka_produce() (et al.) with err set to indicate the result of the produce request.

        +

The callback is called when a message is successfully produced or if librdkafka encountered a permanent failure. Delivery errors occur when the retry count is exceeded, when the message.timeout.ms timeout is exceeded, or there is a permanent error like RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART.

        +

        An application must call rd_kafka_poll() at regular intervals to serve queued delivery report callbacks.

        +

        The broker-assigned offset can be retrieved with rkmessage->offset and the timestamp can be retrieved using rd_kafka_message_timestamp().

        +

        The dr_msg_cb opaque argument is the opaque set with rd_kafka_conf_set_opaque(). The per-message msg_opaque value is available in rd_kafka_message_t._private.

        +
        Remarks
The Idempotent Producer may return an invalid timestamp (RD_KAFKA_TIMESTAMP_NOT_AVAILABLE) and an invalid offset (RD_KAFKA_OFFSET_INVALID) for retried messages that were previously successfully delivered but not properly acknowledged.
        + +
        +
        Parameters
        +
        conf \FFI\CData|null rd_kafka_conf_t*
        +
dr_msg_cb \FFI\CData|\Closure void(*)(rd_kafka_t*, const rd_kafka_message_t*, void*)
        +
        + +
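Since the parameter accepts a \Closure, a delivery report callback can be registered as a plain PHP function. A hedged sketch (assuming the \RdKafka\FFI\Library class name, the RD_KAFKA_RESP_ERR_NO_ERROR constant, and that pointer arguments arrive in the closure as \FFI\CData):

use RdKafka\FFI\Library;

Library::rd_kafka_conf_set_dr_msg_cb($conf, function ($rk, $rkmessage, $opaque): void {
    // $rkmessage is a const rd_kafka_message_t* CData
    if ($rkmessage->err !== RD_KAFKA_RESP_ERR_NO_ERROR) {
        error_log('delivery failed, err=' . $rkmessage->err);
    } else {
        error_log(sprintf('delivered to partition %d at offset %d', $rkmessage->partition, $rkmessage->offset));
    }
});
// rd_kafka_poll() must still be called regularly to serve the queued delivery reports.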

        rd_kafka_conf_set_consume_cb()

        +
        public static rd_kafka_conf_set_consume_cb ( 
        +    \FFI\CData|null $conf, 
        +    \FFI\CData|\Closure $consume_cb
        + ): void
        +
        +

        Consumer: Set consume callback for use with rd_kafka_consumer_poll()

        +

        The consume_cb opaque argument is the opaque set with rd_kafka_conf_set_opaque().

        + +
        +
        Parameters
        +
        conf \FFI\CData|null rd_kafka_conf_t*
        +
        consume_cb \FFI\CData|\Closure void()(rd_kafka_message_t, void*)
        +
        + +

        rd_kafka_conf_set_rebalance_cb()

        +
        public static rd_kafka_conf_set_rebalance_cb ( 
        +    \FFI\CData|null $conf, 
        +    \FFI\CData|\Closure $rebalance_cb
        + ): void
        +
        +

        Consumer: Set rebalance callback for use with coordinated consumer group balancing.

        +

        The err field is set to either RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS or RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS and 'partitions' contains the full partition set that was either assigned or revoked.

        +

        Registering a rebalance_cb turns off librdkafka's automatic partition assignment/revocation and instead delegates that responsibility to the application's rebalance_cb.

        +

        The rebalance callback is responsible for updating librdkafka's assignment set based on the two events: RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS and RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS but should also be able to handle arbitrary rebalancing failures where err is neither of those.

        +
        Remarks
        In this latter case (arbitrary error), the application must call rd_kafka_assign(rk, NULL) to synchronize state.
        +

        For eager/non-cooperative partition.assignment.strategy assignors, such as range and roundrobin, the application must use rd_kafka_assign() to set or clear the entire assignment. For the cooperative assignors, such as cooperative-sticky, the application must use rd_kafka_incremental_assign() for RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS and rd_kafka_incremental_unassign() for RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS.

        +

        Without a rebalance callback this is done automatically by librdkafka but registering a rebalance callback gives the application flexibility in performing other operations along with the assigning/revocation, such as fetching offsets from an alternate location (on assign) or manually committing offsets (on revoke).

        +

        rebalance_cb is always triggered exactly once when a rebalance completes with a new assignment, even if that assignment is empty. If an eager/non-cooperative assignor is configured, there will eventually be exactly one corresponding call to rebalance_cb to revoke these partitions (even if empty), whether this is due to a group rebalance or lost partitions. In the cooperative case, rebalance_cb will never be called if the set of partitions being revoked is empty (whether or not lost).

        +

        The callback's opaque argument is the opaque set with rd_kafka_conf_set_opaque().

        +
        Remarks
The partitions list is destroyed by librdkafka on return from the rebalance_cb and must not be freed or saved by the application.
        +
+Be careful when modifying the partitions list: it should only be changed to adjust the initial offsets for each partition. A function like rd_kafka_position() might have unexpected effects, for instance when a consumer gets assigned a partition it used to consume at an earlier rebalance; the list would then be updated with the old offset for that partition. In such cases it is generally better to pass a copy of the list (see rd_kafka_topic_partition_list_copy()). The result of rd_kafka_position() is typically outdated in RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS.
        +
        See also
        rd_kafka_assign()
        +
        +rd_kafka_incremental_assign()
        +
        +rd_kafka_incremental_unassign()
        +
        +rd_kafka_assignment_lost()
        +
        +rd_kafka_rebalance_protocol()
        +

        The following example shows the application's responsibilities:

        +
        static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err,
        +                          rd_kafka_topic_partition_list_t *partitions,
        +                          void *opaque) {
        +
        +switch (err)
        +    {
        +      case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
+         // application may load offsets from arbitrary external
        +         // storage here and update \p partitions
        +         if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE"))
        +                 rd_kafka_incremental_assign(rk, partitions);
        +         else // EAGER
        +                 rd_kafka_assign(rk, partitions);
        +         break;
        +
        +      case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
        +         if (manual_commits) // Optional explicit manual commit
        +             rd_kafka_commit(rk, partitions, 0); // sync commit
        +
        +         if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE"))
        +                 rd_kafka_incremental_unassign(rk, partitions);
        +         else // EAGER
        +                 rd_kafka_assign(rk, NULL);
        +         break;
        +
        +      default:
        +         handle_unlikely_error(err);
        +         rd_kafka_assign(rk, NULL); // sync state
        +         break;
        +     }
        +}
        +
        Remarks
        The above example lacks error handling for assign calls, see the examples/ directory.
        + +
        +
        Parameters
        +
        conf \FFI\CData|null rd_kafka_conf_t*
        +
rebalance_cb \FFI\CData|\Closure void(*)(rd_kafka_t*, rd_kafka_resp_err_t, rd_kafka_topic_partition_list_t*, void*)
        +
        + +
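The same responsibilities can be sketched as a PHP closure. This hedged example covers only the eager protocol (range/roundrobin); cooperative assignors would call rd_kafka_incremental_assign()/rd_kafka_incremental_unassign() instead, as in the C example above. The \RdKafka\FFI\Library class name and the RD_KAFKA_RESP_ERR__* constants are assumptions, and rd_kafka_assign() is the function referenced in the "See also" list above.

use RdKafka\FFI\Library;

Library::rd_kafka_conf_set_rebalance_cb($conf, function ($rk, int $err, $partitions, $opaque): void {
    switch ($err) {
        case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
            Library::rd_kafka_assign($rk, $partitions);   // accept the assignment as-is
            break;
        case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
            Library::rd_kafka_assign($rk, null);          // clear the assignment
            break;
        default:
            Library::rd_kafka_assign($rk, null);          // synchronize state on unexpected errors
            break;
    }
});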

        rd_kafka_conf_set_offset_commit_cb()

        +
        public static rd_kafka_conf_set_offset_commit_cb ( 
        +    \FFI\CData|null $conf, 
        +    \FFI\CData|\Closure $offset_commit_cb
        + ): void
        +
        +

        Consumer: Set offset commit callback for use with consumer groups.

        +

The results of automatic or manual offset commits will be scheduled for this callback and are served by rd_kafka_consumer_poll().

        +

        If no partitions had valid offsets to commit this callback will be called with err == RD_KAFKA_RESP_ERR__NO_OFFSET which is not to be considered an error.

        +

        The offsets list contains per-partition information:

        +
          +
        • offset: committed offset (attempted)
        • +
        • err: commit error
        • +
        +

        The callback's opaque argument is the opaque set with rd_kafka_conf_set_opaque().

        + +
        +
        Parameters
        +
        conf \FFI\CData|null rd_kafka_conf_t*
        +
offset_commit_cb \FFI\CData|\Closure void(*)(rd_kafka_t*, rd_kafka_resp_err_t, rd_kafka_topic_partition_list_t*, void*)
        +
        + +

        rd_kafka_conf_set_error_cb()

        +
        public static rd_kafka_conf_set_error_cb ( 
        +    \FFI\CData|null $conf, 
        +    \FFI\CData|\Closure $error_cb
        + ): void
        +
        +

        Set error callback in provided conf object.

        +

        The error callback is used by librdkafka to signal warnings and errors back to the application.

        +

These errors should generally be considered informational and non-permanent; the client will try to recover automatically from all types of errors. Provided that the client and cluster configuration is correct, the application should treat these as temporary errors.

        +

        error_cb will be triggered with err set to RD_KAFKA_RESP_ERR__FATAL if a fatal error has been raised; in this case use rd_kafka_fatal_error() to retrieve the fatal error code and error string, and then begin terminating the client instance.

        +

        If no error_cb is registered, or RD_KAFKA_EVENT_ERROR has not been set with rd_kafka_conf_set_events, then the errors will be logged instead.

        +

        The callback's opaque argument is the opaque set with rd_kafka_conf_set_opaque().

        + +
        +
        Parameters
        +
        conf \FFI\CData|null rd_kafka_conf_t*
        +
error_cb \FFI\CData|\Closure void(*)(rd_kafka_t*, int, const char*, void*)
        +
        + +
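A hedged closure-based error callback (same assumptions as the earlier sketches regarding the \RdKafka\FFI\Library class name, the RD_KAFKA_RESP_ERR__FATAL constant, and raw \FFI\CData arguments):

use RdKafka\FFI\Library;

Library::rd_kafka_conf_set_error_cb($conf, function ($rk, int $err, $reason, $opaque): void {
    // $reason is a const char*; convert defensively in case the binding already passes a string.
    $msg = $reason instanceof \FFI\CData ? \FFI::string($reason) : (string) $reason;
    error_log(sprintf('kafka error %d: %s', $err, $msg));
    if ($err === RD_KAFKA_RESP_ERR__FATAL) {
        // retrieve details with rd_kafka_fatal_error() and begin terminating the client
    }
});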

        rd_kafka_conf_set_throttle_cb()

        +
        public static rd_kafka_conf_set_throttle_cb ( 
        +    \FFI\CData|null $conf, 
        +    \FFI\CData|\Closure $throttle_cb
        + ): void
        +
        +

        Set throttle callback.

        +

        The throttle callback is used to forward broker throttle times to the application for Produce and Fetch (consume) requests.

        +

        Callbacks are triggered whenever a non-zero throttle time is returned by the broker, or when the throttle time drops back to zero.

        +

        An application must call rd_kafka_poll() or rd_kafka_consumer_poll() at regular intervals to serve queued callbacks.

        +

        The callback's opaque argument is the opaque set with rd_kafka_conf_set_opaque().

        +
        Remarks
        Requires broker version 0.9.0 or later.
        + +
        +
        Parameters
        +
        conf \FFI\CData|null rd_kafka_conf_t*
        +
throttle_cb \FFI\CData|\Closure void(*)(rd_kafka_t*, const char*, int32_t, int, void*)
        +
        + +

        rd_kafka_conf_set_log_cb()

        +
        public static rd_kafka_conf_set_log_cb ( 
        +    \FFI\CData|null $conf, 
        +    \FFI\CData|\Closure $log_cb
        + ): void
        +
        +

        Set logger callback.

        +

The default is to print to stderr, but a syslog logger is also available; see rd_kafka_log_print and rd_kafka_log_syslog for the built-in alternatives. Alternatively, the application may provide its own logger callback, or pass func as NULL to disable logging.

        +

        This is the configuration alternative to the deprecated rd_kafka_set_logger()

        +
        Remarks
        The log_cb will be called spontaneously from librdkafka's internal threads unless logs have been forwarded to a poll queue through rd_kafka_set_log_queue(). An application MUST NOT call any librdkafka APIs or do any prolonged work in a non-forwarded log_cb.
        + +
        +
        Parameters
        +
        conf \FFI\CData|null rd_kafka_conf_t*
        +
log_cb \FFI\CData|\Closure void(*)(const rd_kafka_t*, int, const char*, const char*)
        +
        + +

        rd_kafka_conf_set_stats_cb()

        +
        public static rd_kafka_conf_set_stats_cb ( 
        +    \FFI\CData|null $conf, 
        +    \FFI\CData|\Closure $stats_cb
        + ): void
        +
        +

        Set statistics callback in provided conf object.

        +

        The statistics callback is triggered from rd_kafka_poll() every statistics.interval.ms (needs to be configured separately). Function arguments:

        +
          +
        • rk - Kafka handle
        • +
        • json - String containing the statistics data in JSON format
        • +
        • json_len - Length of json string.
        • +
        • opaque - Application-provided opaque as set by rd_kafka_conf_set_opaque().
        • +
        +

        For more information on the format of json, see https://github.com/confluentinc/librdkafka/wiki/Statistics

        +

        If the application wishes to hold on to the json pointer and free it at a later time it must return 1 from the stats_cb. If the application returns 0 from the stats_cb then librdkafka will immediately free the json pointer.

        +

        See STATISTICS.md for a full definition of the JSON object.

        + +
        +
        Parameters
        +
        conf \FFI\CData|null rd_kafka_conf_t*
        +
stats_cb \FFI\CData|\Closure int(*)(rd_kafka_t*, char*, size_t, void*)
        +
        + +
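A hedged sketch of a statistics callback that decodes the JSON and lets librdkafka free the pointer (returning 0), assuming the json argument arrives as a char* \FFI\CData and json_len as an int, and the same \RdKafka\FFI\Library class-name assumption as before:

use RdKafka\FFI\Library;

Library::rd_kafka_conf_set_stats_cb($conf, function ($rk, $json, int $jsonLen, $opaque): int {
    $stats = json_decode(\FFI::string($json, $jsonLen), true);
    error_log('txmsgs: ' . ($stats['txmsgs'] ?? 'n/a'));
    return 0;   // 0 = librdkafka frees the json pointer immediately
});
// statistics.interval.ms must be set via rd_kafka_conf_set(), and rd_kafka_poll() serves the callbacks.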

        rd_kafka_conf_set_socket_cb()

        +
        public static rd_kafka_conf_set_socket_cb ( 
        +    \FFI\CData|null $conf, 
        +    \FFI\CData|\Closure $socket_cb
        + ): void
        +
        +

        Set socket callback.

        +

        The socket callback is responsible for opening a socket according to the supplied domain, type and protocol. The socket shall be created with CLOEXEC set in a racefree fashion, if possible.

        +

        The callback's opaque argument is the opaque set with rd_kafka_conf_set_opaque().

        +

        Default:

        +
          +
        • on linux: racefree CLOEXEC
        • +
        • others : non-racefree CLOEXEC
        • +
        +
        Remarks
        The callback will be called from an internal librdkafka thread.
        + +
        +
        Parameters
        +
        conf \FFI\CData|null rd_kafka_conf_t*
        +
socket_cb \FFI\CData|\Closure int(*)(int, int, int, void*)
        +
        + +

        rd_kafka_conf_set_connect_cb()

        +
        public static rd_kafka_conf_set_connect_cb ( 
        +    \FFI\CData|null $conf, 
        +    \FFI\CData|\Closure $connect_cb
        + ): void
        +
        +

        Set connect callback.

        +

        The connect callback is responsible for connecting socket sockfd to peer address addr. The id field contains the broker identifier.

        +

        connect_cb shall return 0 on success (socket connected) or an error number (errno) on error.

        +

        The callback's opaque argument is the opaque set with rd_kafka_conf_set_opaque().

        +
        Remarks
        The callback will be called from an internal librdkafka thread.
        + +
        +
        Parameters
        +
        conf \FFI\CData|null rd_kafka_conf_t*
        +
connect_cb \FFI\CData|\Closure int(*)(int, const struct sockaddr*, int, const char*, void*)
        +
        + +

        rd_kafka_conf_set_closesocket_cb()

        +
        public static rd_kafka_conf_set_closesocket_cb ( 
        +    \FFI\CData|null $conf, 
        +    \FFI\CData|\Closure $closesocket_cb
        + ): void
        +
        +

        Set close socket callback.

        +

        Close a socket (optionally opened with socket_cb()).

        +

        The callback's opaque argument is the opaque set with rd_kafka_conf_set_opaque().

        +
        Remarks
        The callback will be called from an internal librdkafka thread.
        + +
        +
        Parameters
        +
        conf \FFI\CData|null rd_kafka_conf_t*
        +
closesocket_cb \FFI\CData|\Closure int(*)(int, void*)
        +
        + +

        rd_kafka_conf_set_opaque()

        +
        public static rd_kafka_conf_set_opaque ( 
        +    \FFI\CData|null $conf, 
        +    \FFI\CData|object|string|null $opaque
        + ): void
        +
        +

        Sets the application's opaque pointer that will be passed to callbacks.

        +
        See also
        rd_kafka_opaque()
        + +
        +
        Parameters
        +
        conf \FFI\CData|null rd_kafka_conf_t*
        +
        opaque \FFI\CData|object|string|null void*
        +
        + +

        rd_kafka_opaque()

        +
        public static rd_kafka_opaque ( 
        +    \FFI\CData|null $rk
        + ): \FFI\CData|object|string|null
        +
        +
        +
        Parameters
        +
        rk \FFI\CData|null const rd_kafka_t*
        +
        Returns
        +
        \FFI\CData|object|string|null void*
        +
        +

        rd_kafka_conf_set_default_topic_conf()

        +
        public static rd_kafka_conf_set_default_topic_conf ( 
        +    \FFI\CData|null $conf, 
        +    \FFI\CData|null $tconf
        + ): void
        +
        +

        Sets the default topic configuration to use for automatically subscribed topics (e.g., through pattern-matched topics). The topic config object is not usable after this call.

        +
        Warning
        Any topic configuration settings that have been set on the global rd_kafka_conf_t object will be overwritten by this call since the implicitly created default topic config object is replaced by the user-supplied one.
        +
        Deprecated:
        Set default topic level configuration on the global rd_kafka_conf_t object instead.
        + +
        +
        Parameters
        +
        conf \FFI\CData|null rd_kafka_conf_t*
        +
        tconf \FFI\CData|null rd_kafka_topic_conf_t*
        +
        + +

        rd_kafka_conf_get()

        +
        public static rd_kafka_conf_get ( 
        +    \FFI\CData|null $conf, 
        +    string|null $name, 
        +    \FFI\CData|null $dest, 
        +    \FFI\CData|null $dest_size
        + ): int
        +
        +

        Retrieve configuration value for property name.

        +

        If dest is non-NULL the value will be written to dest with at most dest_size.

        +

        *dest_size is updated to the full length of the value, thus if *dest_size initially is smaller than the full length the application may reallocate dest to fit the returned *dest_size and try again.

        +

        If dest is NULL only the full length of the value is returned.

        +

        Fallthrough: Topic-level configuration properties from the default_topic_conf may be retrieved using this interface.

        + +
        +
        Parameters
        +
        conf \FFI\CData|null const rd_kafka_conf_t*
        +
        name string|null const char*
        +
        dest \FFI\CData|null char*
        +
        dest_size \FFI\CData|null size_t*
        +
        Returns
        +
        int rd_kafka_conf_res_t - RD_KAFKA_CONF_OK if the property name matched, else RD_KAFKA_CONF_UNKNOWN.
        +
        + +
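The two-pass pattern described above (query the required size, then read the value) looks like this in a hedged PHP sketch (assuming the \RdKafka\FFI\Library class name and the RD_KAFKA_CONF_OK constant):

use RdKafka\FFI\Library;

$size = \FFI::new('size_t');
Library::rd_kafka_conf_get($conf, 'bootstrap.servers', null, \FFI::addr($size));   // first pass: learn the required size
$dest = \FFI::new('char[' . (int) $size->cdata . ']');
$res  = Library::rd_kafka_conf_get($conf, 'bootstrap.servers', $dest, \FFI::addr($size));
if ($res === RD_KAFKA_CONF_OK) {
    echo \FFI::string($dest), "\n";
}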

        rd_kafka_topic_conf_get()

        +
        public static rd_kafka_topic_conf_get ( 
        +    \FFI\CData|null $conf, 
        +    string|null $name, 
        +    \FFI\CData|null $dest, 
        +    \FFI\CData|null $dest_size
        + ): int
        +
        +

        Retrieve topic configuration value for property name.

        +
        See also
        rd_kafka_conf_get()
        + +
        +
        Parameters
        +
        conf \FFI\CData|null const rd_kafka_topic_conf_t*
        +
        name string|null const char*
        +
        dest \FFI\CData|null char*
        +
        dest_size \FFI\CData|null size_t*
        +
        Returns
        +
        int rd_kafka_conf_res_t
        +
        + +

        rd_kafka_conf_dump()

        +
        public static rd_kafka_conf_dump ( 
        +    \FFI\CData|null $conf, 
        +    \FFI\CData|null $cntp
        + ): \FFI\CData|null
        +
        +

        Dump the configuration properties and values of conf to an array with "key", "value" pairs.

        +

        The number of entries in the array is returned in *cntp.

        +

        The dump must be freed with rd_kafka_conf_dump_free().

        + +
        +
        Parameters
        +
        conf \FFI\CData|null rd_kafka_conf_t*
        +
        cntp \FFI\CData|null size_t*
        +
        Returns
        +
        \FFI\CData|null const char**
        +
        + +
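A hedged sketch of dumping and freeing the configuration (same \RdKafka\FFI\Library class-name assumption); the returned array alternates key and value entries, so it is walked in steps of two:

use RdKafka\FFI\Library;

$cntp = \FFI::new('size_t');
$arr  = Library::rd_kafka_conf_dump($conf, \FFI::addr($cntp));
$cnt  = (int) $cntp->cdata;
for ($i = 0; $i < $cnt; $i += 2) {
    printf("%s = %s\n", \FFI::string($arr[$i]), \FFI::string($arr[$i + 1]));   // "key", "value" pairs
}
Library::rd_kafka_conf_dump_free($arr, $cnt);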

        rd_kafka_topic_conf_dump()

        +
        public static rd_kafka_topic_conf_dump ( 
        +    \FFI\CData|null $conf, 
        +    \FFI\CData|null $cntp
        + ): \FFI\CData|null
        +
        +

        Dump the topic configuration properties and values of conf to an array with "key", "value" pairs.

        +

        The number of entries in the array is returned in *cntp.

        +

        The dump must be freed with rd_kafka_conf_dump_free().

        + +
        +
        Parameters
        +
        conf \FFI\CData|null rd_kafka_topic_conf_t*
        +
        cntp \FFI\CData|null size_t*
        +
        Returns
        +
        \FFI\CData|null const char**
        +
        + +

        rd_kafka_conf_dump_free()

        +
        public static rd_kafka_conf_dump_free ( 
        +    \FFI\CData|null $arr, 
        +    int|null $cnt
        + ): void
        +
        +
        +
        Parameters
        +
        arr \FFI\CData|null const char**
        +
        cnt int|null size_t
        +
        +

        rd_kafka_conf_properties_show()

        +
        public static rd_kafka_conf_properties_show ( 
        +    \FFI\CData|null $fp
        + ): void
        +
        +

        Prints a table to fp of all supported configuration properties, their default values as well as a description.

        +
        Remarks
All properties and values are shown, even those that have been disabled at build time due to missing dependencies.
        + +
        +
        Parameters
        +
fp \FFI\CData|null FILE*
        +
        + +

        rd_kafka_topic_conf_new()

        +
        public static rd_kafka_topic_conf_new (  ): \FFI\CData|null
        +
        +

        Create topic configuration object.

        +
        See also
        Same semantics as for rd_kafka_conf_new().
        + +
        +
        Returns
        +
\FFI\CData|null rd_kafka_topic_conf_t*
        +
        + +

        rd_kafka_topic_conf_dup()

        +
        public static rd_kafka_topic_conf_dup ( 
        +    \FFI\CData|null $conf
        + ): \FFI\CData|null
        +
        +
        +
        Parameters
        +
        conf \FFI\CData|null const rd_kafka_topic_conf_t*
        +
        Returns
        +
        \FFI\CData|null rd_kafka_topic_conf_t*
        +
        +

        rd_kafka_default_topic_conf_dup()

        +
        public static rd_kafka_default_topic_conf_dup ( 
        +    \FFI\CData|null $rk
        + ): \FFI\CData|null
        +
        +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t*
        +
        Returns
        +
        \FFI\CData|null rd_kafka_topic_conf_t*
        +
        +

        rd_kafka_topic_conf_destroy()

        +
        public static rd_kafka_topic_conf_destroy ( 
        +    \FFI\CData|null $topic_conf
        + ): void
        +
        +
        +
        Parameters
        +
        topic_conf \FFI\CData|null rd_kafka_topic_conf_t*
        +
        +

        rd_kafka_topic_conf_set()

        +
        public static rd_kafka_topic_conf_set ( 
        +    \FFI\CData|null $conf, 
        +    string|null $name, 
        +    string|null $value, 
        +    \FFI\CData|null $errstr, 
        +    int|null $errstr_size
        + ): int
        +
        +

        Sets a single rd_kafka_topic_conf_t value by property name.

        +

        topic_conf should have been previously set up with rd_kafka_topic_conf_new().

        + +
        +
        Parameters
        +
        conf \FFI\CData|null rd_kafka_topic_conf_t*
        +
        name string|null const char*
        +
        value string|null const char*
        +
        errstr \FFI\CData|null char*
        +
        errstr_size int|null size_t
        +
        Returns
        +
        int rd_kafka_conf_res_t - rd_kafka_conf_res_t to indicate success or failure.
        +
        + +

        rd_kafka_topic_conf_set_opaque()

        +
        public static rd_kafka_topic_conf_set_opaque ( 
        +    \FFI\CData|null $conf, 
        +    \FFI\CData|object|string|null $opaque
        + ): void
        +
        +

        Sets the application's opaque pointer that will be passed to all topic callbacks as the rkt_opaque argument.

        +
        See also
        rd_kafka_topic_opaque()
        + +
        +
        Parameters
        +
        conf \FFI\CData|null rd_kafka_topic_conf_t*
        +
        opaque \FFI\CData|object|string|null void*
        +
        + +

        rd_kafka_topic_conf_set_partitioner_cb()

        +
        public static rd_kafka_topic_conf_set_partitioner_cb ( 
        +    \FFI\CData|null $topic_conf, 
        +    \FFI\CData|\Closure $partitioner
        + ): void
        +
        +

        Producer: Set partitioner callback in provided topic conf object.

        +

        The partitioner may be called in any thread at any time, it may be called multiple times for the same message/key.

        +

        The callback's rkt_opaque argument is the opaque set by rd_kafka_topic_conf_set_opaque(). The callback's msg_opaque argument is the per-message opaque passed to produce().

        +

        Partitioner function constraints:

        +
          +
        • MUST NOT call any rd_kafka_*() functions except: rd_kafka_topic_partition_available()
        • +
        • MUST NOT block or execute for prolonged periods of time.
        • +
        • MUST return a value between 0 and partition_cnt-1, or the special RD_KAFKA_PARTITION_UA value if partitioning could not be performed.
        • +
        + +
        +
        Parameters
        +
        topic_conf \FFI\CData|null rd_kafka_topic_conf_t*
        +
partitioner \FFI\CData|\Closure int32_t(*)(const rd_kafka_topic_t*, const void*, size_t, int32_t, void*, void*)
        +
        + +
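A hedged sketch of a custom partitioner closure that respects the constraints listed above: it only calls rd_kafka_topic_partition_available(), does not block, and returns RD_KAFKA_PARTITION_UA when it cannot decide. The \RdKafka\FFI\Library class name and the RD_KAFKA_PARTITION_UA constant are assumptions.

use RdKafka\FFI\Library;

Library::rd_kafka_topic_conf_set_partitioner_cb(
    $topicConf,
    function ($rkt, $key, int $keylen, int $partitionCnt, $rktOpaque, $msgOpaque): int {
        if ($keylen === 0) {
            return RD_KAFKA_PARTITION_UA;                              // no key: let librdkafka decide
        }
        $keyStr    = \FFI::string(\FFI::cast('char *', $key), $keylen);
        $partition = crc32($keyStr) % $partitionCnt;                   // simple, deterministic mapping
        return Library::rd_kafka_topic_partition_available($rkt, $partition)
            ? $partition
            : RD_KAFKA_PARTITION_UA;
    }
);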

        rd_kafka_topic_conf_set_msg_order_cmp()

        +
        public static rd_kafka_topic_conf_set_msg_order_cmp ( 
        +    \FFI\CData|null $topic_conf, 
        +    \FFI\CData|\Closure $msg_order_cmp
        + ): void
        +
        +

        Producer: Set message queueing order comparator callback.

        +

        The callback may be called in any thread at any time, it may be called multiple times for the same message.

        +

        Ordering comparator function constraints:

        +
          +
        • MUST be stable sort (same input gives same output).
        • +
        • MUST NOT call any rd_kafka_*() functions.
        • +
        • MUST NOT block or execute for prolonged periods of time.
        • +
        +

        The comparator shall compare the two messages and return:

        +
          +
        • < 0 if message a should be inserted before message b.
        • +
        • >=0 if message a should be inserted after message b.
        • +
        +
        Remarks
        Insert sorting will be used to enqueue the message in the correct queue position, this comes at a cost of O(n).
        +
        +If queuing.strategy=fifo new messages are enqueued to the tail of the queue regardless of msg_order_cmp, but retried messages are still affected by msg_order_cmp.
        +
        Warning
        THIS IS AN EXPERIMENTAL API, SUBJECT TO CHANGE OR REMOVAL, DO NOT USE IN PRODUCTION.
        + +
        +
        Parameters
        +
        topic_conf \FFI\CData|null rd_kafka_topic_conf_t*
        +
msg_order_cmp \FFI\CData|\Closure int(*)(const rd_kafka_message_t*, const rd_kafka_message_t*)
        +
        + +

        rd_kafka_topic_partition_available()

        +
        public static rd_kafka_topic_partition_available ( 
        +    \FFI\CData|null $rkt, 
        +    int|null $partition
        + ): int|null
        +
        +

        Check if partition is available (has a leader broker).

        + +
        Warning
        This function must only be called from inside a partitioner function
        + +
        +
        Parameters
        +
        rkt \FFI\CData|null const rd_kafka_topic_t*
        +
        partition int|null int32_t
        +
        Returns
        +
        int|null int - 1 if the partition is available, else 0.
        +
        + +

        rd_kafka_msg_partitioner_random()

        +
        public static rd_kafka_msg_partitioner_random ( 
        +    \FFI\CData|null $rkt, 
        +    \FFI\CData|object|string|null $key, 
        +    int|null $keylen, 
        +    int|null $partition_cnt, 
        +    \FFI\CData|object|string|null $opaque, 
        +    \FFI\CData|object|string|null $msg_opaque
        + ): int|null
        +
        +

        Random partitioner.

        +

        Will try not to return unavailable partitions.

        +

        The rkt_opaque argument is the opaque set by rd_kafka_topic_conf_set_opaque(). The msg_opaque argument is the per-message opaque passed to produce().

        + +
        +
        Parameters
        +
        rkt \FFI\CData|null const rd_kafka_topic_t*
        +
        key \FFI\CData|object|string|null const void*
        +
        keylen int|null size_t
        +
        partition_cnt int|null int32_t
        +
        opaque \FFI\CData|object|string|null void*
        +
        msg_opaque \FFI\CData|object|string|null void*
        +
        Returns
        +
        int|null int32_t - a random partition between 0 and partition_cnt - 1.
        +
        + +

        rd_kafka_msg_partitioner_consistent()

        +
        public static rd_kafka_msg_partitioner_consistent ( 
        +    \FFI\CData|null $rkt, 
        +    \FFI\CData|object|string|null $key, 
        +    int|null $keylen, 
        +    int|null $partition_cnt, 
        +    \FFI\CData|object|string|null $opaque, 
        +    \FFI\CData|object|string|null $msg_opaque
        + ): int|null
        +
        +

        Consistent partitioner.

        +

        Uses consistent hashing to map identical keys onto identical partitions.

        +

        The rkt_opaque argument is the opaque set by rd_kafka_topic_conf_set_opaque(). The msg_opaque argument is the per-message opaque passed to produce().

        + +
        +
        Parameters
        +
        rkt \FFI\CData|null const rd_kafka_topic_t*
        +
        key \FFI\CData|object|string|null const void*
        +
        keylen int|null size_t
        +
        partition_cnt int|null int32_t
        +
        opaque \FFI\CData|object|string|null void*
        +
        msg_opaque \FFI\CData|object|string|null void*
        +
        Returns
        +
        int|null int32_t - a “random” partition between 0 and partition_cnt - 1 based on the CRC value of the key
        +
        + +

        rd_kafka_msg_partitioner_consistent_random()

        +
        public static rd_kafka_msg_partitioner_consistent_random ( 
        +    \FFI\CData|null $rkt, 
        +    \FFI\CData|object|string|null $key, 
        +    int|null $keylen, 
        +    int|null $partition_cnt, 
        +    \FFI\CData|object|string|null $opaque, 
        +    \FFI\CData|object|string|null $msg_opaque
        + ): int|null
        +
        +

        Consistent-Random partitioner.

        +

        This is the default partitioner. Uses consistent hashing to map identical keys onto identical partitions, and messages without keys will be assigned via the random partitioner.

        +

        The rkt_opaque argument is the opaque set by rd_kafka_topic_conf_set_opaque(). The msg_opaque argument is the per-message opaque passed to produce().

        + +
        +
        Parameters
        +
        rkt \FFI\CData|null const rd_kafka_topic_t*
        +
        key \FFI\CData|object|string|null const void*
        +
        keylen int|null size_t
        +
        partition_cnt int|null int32_t
        +
        opaque \FFI\CData|object|string|null void*
        +
        msg_opaque \FFI\CData|object|string|null void*
        +
        Returns
        +
        int|null int32_t - a “random” partition between 0 and partition_cnt - 1 based on the CRC value of the key (if provided)
        +
        + +

        rd_kafka_msg_partitioner_murmur2()

        +
        public static rd_kafka_msg_partitioner_murmur2 ( 
        +    \FFI\CData|null $rkt, 
        +    \FFI\CData|object|string|null $key, 
        +    int|null $keylen, 
        +    int|null $partition_cnt, 
        +    \FFI\CData|object|string|null $rkt_opaque, 
        +    \FFI\CData|object|string|null $msg_opaque
        + ): int|null
        +
        +

        Murmur2 partitioner (Java compatible).

        +

        Uses consistent hashing to map identical keys onto identical partitions using Java-compatible Murmur2 hashing.

        +

        The rkt_opaque argument is the opaque set by rd_kafka_topic_conf_set_opaque(). The msg_opaque argument is the per-message opaque passed to produce().

        + +
        +
        Parameters
        +
        rkt \FFI\CData|null const rd_kafka_topic_t*
        +
        key \FFI\CData|object|string|null const void*
        +
        keylen int|null size_t
        +
        partition_cnt int|null int32_t
        +
        rkt_opaque \FFI\CData|object|string|null void*
        +
        msg_opaque \FFI\CData|object|string|null void*
        +
        Returns
        +
        int|null int32_t - a partition between 0 and partition_cnt - 1.
        +
        + +

        rd_kafka_msg_partitioner_murmur2_random()

        +
        public static rd_kafka_msg_partitioner_murmur2_random ( 
        +    \FFI\CData|null $rkt, 
        +    \FFI\CData|object|string|null $key, 
        +    int|null $keylen, 
        +    int|null $partition_cnt, 
        +    \FFI\CData|object|string|null $rkt_opaque, 
        +    \FFI\CData|object|string|null $msg_opaque
        + ): int|null
        +
        +

        Consistent-Random Murmur2 partitioner (Java compatible).

        +

        Uses consistent hashing to map identical keys onto identical partitions using Java-compatible Murmur2 hashing. Messages without keys will be assigned via the random partitioner.

        +

        The rkt_opaque argument is the opaque set by rd_kafka_topic_conf_set_opaque(). The msg_opaque argument is the per-message opaque passed to produce().

        + +
        +
        Parameters
        +
        rkt \FFI\CData|null const rd_kafka_topic_t*
        +
        key \FFI\CData|object|string|null const void*
        +
        keylen int|null size_t
        +
        partition_cnt int|null int32_t
        +
        rkt_opaque \FFI\CData|object|string|null void*
        +
        msg_opaque \FFI\CData|object|string|null void*
        +
        Returns
        +
        int|null int32_t - a partition between 0 and partition_cnt - 1.
        +
        + +

        rd_kafka_new()

        +
        public static rd_kafka_new ( 
        +    int $type, 
        +    \FFI\CData|null $conf, 
        +    \FFI\CData|null $errstr, 
        +    int|null $errstr_size
        + ): \FFI\CData|null
        +
        +

        Creates a new Kafka handle and starts its operation according to the specified type (RD_KAFKA_CONSUMER or RD_KAFKA_PRODUCER).

        +

conf is an optional struct created with rd_kafka_conf_new() that will be used instead of the default configuration. The conf object is freed by this function on success and must not be used or destroyed by the application subsequently. See rd_kafka_conf_set() et al. for more information.

        +

errstr must be a pointer to memory of at least size errstr_size where rd_kafka_new() may write a human-readable error message in case the creation of a new handle fails, in which case the function returns NULL.

        +
        Remarks
        RD_KAFKA_CONSUMER: When a new RD_KAFKA_CONSUMER rd_kafka_t handle is created it may either operate in the legacy simple consumer mode using the rd_kafka_consume_start() interface, or the High-level KafkaConsumer API.
        +
        +An application must only use one of these groups of APIs on a given rd_kafka_t RD_KAFKA_CONSUMER handle.
        + +
        See also
        To destroy the Kafka handle, use rd_kafka_destroy().
        + +
        +
        Parameters
        +
        type int rd_kafka_type_t
        +
        conf \FFI\CData|null rd_kafka_conf_t*
        +
        errstr \FFI\CData|null char*
        +
        errstr_size int|null size_t
        +
        Returns
        +
        \FFI\CData|null rd_kafka_t* - The Kafka handle on success or NULL on error (see errstr)
        +
        + +
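Putting the configuration and handle creation together, a hedged PHP sketch (assuming the \RdKafka\FFI\Library class name and the RD_KAFKA_PRODUCER constant are provided by the binding):

use RdKafka\FFI\Library;

$conf   = Library::rd_kafka_conf_new();
$errstr = \FFI::new('char[512]');
Library::rd_kafka_conf_set($conf, 'bootstrap.servers', 'localhost:9092', $errstr, \FFI::sizeof($errstr));

$rk = Library::rd_kafka_new(RD_KAFKA_PRODUCER, $conf, $errstr, \FFI::sizeof($errstr));
if ($rk === null) {
    throw new \RuntimeException('rd_kafka_new failed: ' . \FFI::string($errstr));
}
// On success $conf is owned by the handle and must not be destroyed by the application.
// ... produce / poll ...
Library::rd_kafka_destroy($rk);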

        rd_kafka_destroy()

        +
        public static rd_kafka_destroy ( 
        +    \FFI\CData|null $rk
        + ): void
        +
        +

        Destroy Kafka handle.

        +
        Remarks
        This is a blocking operation.
        +
        +rd_kafka_consumer_close() will be called from this function if the instance type is RD_KAFKA_CONSUMER, a group.id was configured, and the rd_kafka_consumer_close() was not explicitly called by the application. This in turn may trigger consumer callbacks, such as rebalance_cb. Use rd_kafka_destroy_flags() with RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE to avoid this behaviour.
        +
        See also
        rd_kafka_destroy_flags()
        + +
        +
        Parameters
        +
rk \FFI\CData|null rd_kafka_t*
        +
        + +

        rd_kafka_destroy_flags()

        +
        public static rd_kafka_destroy_flags ( 
        +    \FFI\CData|null $rk, 
        +    int|null $flags
        + ): void
        +
        +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t*
        +
        flags int|null int
        +
        +

        rd_kafka_name()

        +
        public static rd_kafka_name ( 
        +    \FFI\CData|null $rk
        + ): string|null
        +
        +
        +
        Parameters
        +
        rk \FFI\CData|null const rd_kafka_t*
        +
        Returns
        +
        string|null const char*
        +
        +

        rd_kafka_type()

        +
        public static rd_kafka_type ( 
        +    \FFI\CData|null $rk
        + ): int
        +
        +
        +
        Parameters
        +
        rk \FFI\CData|null const rd_kafka_t*
        +
        Returns
        +
        int rd_kafka_type_t
        +
        +

        rd_kafka_memberid()

        +
        public static rd_kafka_memberid ( 
        +    \FFI\CData|null $rk
        + ): \FFI\CData|null
        +
        +

        Returns this client's broker-assigned group member id.

        +
        Remarks
        This currently requires the high-level KafkaConsumer
        + +
        +
        Parameters
        +
rk \FFI\CData|null const rd_kafka_t*
        +
        Returns
        +
        \FFI\CData|null char* - An allocated string containing the current broker-assigned group member id, or NULL if not available. The application must free the string with free() or rd_kafka_mem_free()
        +
        + +

        rd_kafka_clusterid()

        +
        public static rd_kafka_clusterid ( 
        +    \FFI\CData|null $rk, 
        +    int|null $timeout_ms
        + ): \FFI\CData|null
        +
        +

        Returns the ClusterId as reported in broker metadata.

        + +
        Remarks
        Requires broker version >=0.10.0 and api.version.request=true.
        +
        +The application must free the returned pointer using rd_kafka_mem_free().
        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t* - Client instance.
        +
        timeout_ms int|null int - If there is no cached value from metadata retrieval then this specifies the maximum amount of time (in milliseconds) the call will block waiting for metadata to be retrieved. Use 0 for non-blocking calls.
        +
        Returns
        +
        \FFI\CData|null char* - a newly allocated string containing the ClusterId, or NULL if no ClusterId could be retrieved in the allotted timespan.
        +
        + +
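A small hedged sketch of the free-with-rd_kafka_mem_free() contract mentioned above, assuming an existing handle in $rk and the placeholder binding class Library:

$clusterIdPtr = Library::rd_kafka_clusterid($rk, 1000); // wait up to 1s for metadata
if ($clusterIdPtr !== null) {
    $clusterId = \FFI::string($clusterIdPtr);           // copy the NUL-terminated char* into a PHP string
    Library::rd_kafka_mem_free($rk, $clusterIdPtr);     // the returned pointer must be freed by the application
    echo "ClusterId: {$clusterId}\n";
}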

        rd_kafka_controllerid()

        +
        public static rd_kafka_controllerid ( 
        +    \FFI\CData|null $rk, 
        +    int|null $timeout_ms
        + ): int|null
        +
        +

        Returns the current ControllerId as reported in broker metadata.

        + +
        Remarks
        Requires broker version >=0.10.0 and api.version.request=true.
        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t* - Client instance.
        +
        timeout_ms int|null int - If there is no cached value from metadata retrieval then this specifies the maximum amount of time (in milliseconds) the call will block waiting for metadata to be retrieved. Use 0 for non-blocking calls.
        +
        Returns
        +
        int|null int32_t - the controller broker id (>= 0), or -1 if no ControllerId could be retrieved in the allotted timespan.
        +
        + +

        rd_kafka_topic_new()

        +
        public static rd_kafka_topic_new ( 
        +    \FFI\CData|null $rk, 
        +    string|null $topic, 
        +    \FFI\CData|null $conf
        + ): \FFI\CData|null
        +
        +

        Creates a new topic handle for topic named topic.

        +

        conf is an optional configuration for the topic created with rd_kafka_topic_conf_new() that will be used instead of the default topic configuration. The conf object is freed by this function and must not be used or destroyed by the application subsequently. See rd_kafka_topic_conf_set() et.al for more information.

        +

Topic handles are refcounted internally and calling rd_kafka_topic_new() again with the same topic name will return the previous topic handle without updating the original handle's configuration. Applications must eventually call rd_kafka_topic_destroy() for each successful call to rd_kafka_topic_new() to clear up resources.

        + +
        See also
        rd_kafka_topic_destroy()
        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t*
        +
        topic string|null const char*
        +
        conf \FFI\CData|null rd_kafka_topic_conf_t*
        +
        Returns
        +
\FFI\CData|null rd_kafka_topic_t* - the new topic handle or NULL on error (use rd_kafka_errno2err() to convert system errno to an rd_kafka_resp_err_t error code).
        +
        + +

        rd_kafka_topic_destroy()

        +
        public static rd_kafka_topic_destroy ( 
        +    \FFI\CData|null $rkt
        + ): void
        +
        +

Lose the application's topic handle refcount as previously created with rd_kafka_topic_new().

        +
        Remarks
        Since topic objects are refcounted (both internally and for the app) the topic object might not actually be destroyed by this call, but the application must consider the object destroyed.
        + +
        +
        Parameters
        +
        rkt \FFI\CData|null rd_kafka_topic_t* - )
        +
        + +

        rd_kafka_topic_name()

        +
        public static rd_kafka_topic_name ( 
        +    \FFI\CData|null $rkt
        + ): string|null
        +
        +
        +
        Parameters
        +
        rkt \FFI\CData|null const rd_kafka_topic_t*
        +
        Returns
        +
        string|null const char*
        +
        +

        rd_kafka_topic_opaque()

        +
        public static rd_kafka_topic_opaque ( 
        +    \FFI\CData|null $rkt
        + ): \FFI\CData|object|string|null
        +
        +
        +
        Parameters
        +
        rkt \FFI\CData|null const rd_kafka_topic_t*
        +
        Returns
        +
        \FFI\CData|object|string|null void*
        +
        +

        rd_kafka_poll()

        +
        public static rd_kafka_poll ( 
        +    \FFI\CData|null $rk, 
        +    int|null $timeout_ms
        + ): int|null
        +
        +

        Polls the provided kafka handle for events.

        +

        Events will cause application-provided callbacks to be called.

        +

        The timeout_ms argument specifies the maximum amount of time (in milliseconds) that the call will block waiting for events. For non-blocking calls, provide 0 as timeout_ms. To wait indefinitely for an event, provide -1.

        +
        Remarks
        An application should make sure to call poll() at regular intervals to serve any queued callbacks waiting to be called.
        +
        +If your producer doesn't have any callback set (in particular via rd_kafka_conf_set_dr_msg_cb or rd_kafka_conf_set_error_cb) you might choose not to call poll(), though this is not recommended.
        +

        Events:

        +
          +
        • delivery report callbacks (if dr_cb/dr_msg_cb is configured) [producer]
        • +
        • error callbacks (rd_kafka_conf_set_error_cb()) [all]
        • +
        • stats callbacks (rd_kafka_conf_set_stats_cb()) [all]
        • +
        • throttle callbacks (rd_kafka_conf_set_throttle_cb()) [all]
        • +
        • OAUTHBEARER token refresh callbacks (rd_kafka_conf_set_oauthbearer_token_refresh_cb()) [all]
        • +
        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t*
        +
        timeout_ms int|null int
        +
        Returns
        +
        int|null int - the number of events served.
        +
        + +
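A minimal hedged polling loop serving queued callbacks, assuming a handle in $rk, a loop flag in $running, and the placeholder binding class Library:

while ($running) {
    // Serve queued callbacks (delivery reports, error/stats/throttle callbacks, ...).
    // 0 would make the call non-blocking, -1 would block until an event arrives.
    $served = Library::rd_kafka_poll($rk, 100);
}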

        rd_kafka_yield()

        +
        public static rd_kafka_yield ( 
        +    \FFI\CData|null $rk
        + ): void
        +
        +

        Cancels the current callback dispatcher (rd_kafka_poll(), rd_kafka_consume_callback(), etc).

        +

        A callback may use this to force an immediate return to the calling code (caller of e.g. rd_kafka_poll()) without processing any further events.

        +
        Remarks
        This function MUST ONLY be called from within a librdkafka callback.
        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t* - )
        +
        + +

        rd_kafka_pause_partitions()

        +
        public static rd_kafka_pause_partitions ( 
        +    \FFI\CData|null $rk, 
        +    \FFI\CData|null $partitions
        + ): int
        +
        +

        Pause producing or consumption for the provided list of partitions.

        +

        Success or error is returned per-partition err in the partitions list.

        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t*
        +
        partitions \FFI\CData|null rd_kafka_topic_partition_list_t*
        +
        Returns
        +
        int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR
        +
        + +

        rd_kafka_resume_partitions()

        +
        public static rd_kafka_resume_partitions ( 
        +    \FFI\CData|null $rk, 
        +    \FFI\CData|null $partitions
        + ): int
        +
        +

Resume producing or consumption for the provided list of partitions.

        +

        Success or error is returned per-partition err in the partitions list.

        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t*
        +
        partitions \FFI\CData|null rd_kafka_topic_partition_list_t*
        +
        Returns
        +
        int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR
        +
        + +
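A hedged sketch of pausing and resuming a single partition; rd_kafka_topic_partition_list_new()/add()/destroy() are standard librdkafka list helpers assumed to be exposed by the same binding class (Library is a placeholder):

$parts = Library::rd_kafka_topic_partition_list_new(1);
Library::rd_kafka_topic_partition_list_add($parts, 'example-topic', 0);

Library::rd_kafka_pause_partitions($rk, $parts);    // check the return code and each element's ->err
// ... catch up, apply back-pressure, etc. ...
Library::rd_kafka_resume_partitions($rk, $parts);

Library::rd_kafka_topic_partition_list_destroy($parts);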

        rd_kafka_query_watermark_offsets()

        +
        public static rd_kafka_query_watermark_offsets ( 
        +    \FFI\CData|null $rk, 
        +    string|null $topic, 
        +    int|null $partition, 
        +    \FFI\CData|null $low, 
        +    \FFI\CData|null $high, 
        +    int|null $timeout_ms
        + ): int
        +
        +

        Query broker for low (oldest/beginning) and high (newest/end) offsets for partition.

        +

        Offsets are returned in *low and *high respectively.

        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t*
        +
        topic string|null const char*
        +
        partition int|null int32_t
        +
        low \FFI\CData|null int64_t*
        +
        high \FFI\CData|null int64_t*
        +
        timeout_ms int|null int
        +
        Returns
        +
        int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on failure.
        +
        + +
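A hedged sketch of querying watermarks; the int64_t output parameters are allocated with PHP's FFI API, and RD_KAFKA_RESP_ERR_NO_ERROR (value 0 in librdkafka) is assumed to be exposed by the binding:

$low  = \FFI::new('int64_t');
$high = \FFI::new('int64_t');

$err = Library::rd_kafka_query_watermark_offsets(
    $rk, 'example-topic', 0, \FFI::addr($low), \FFI::addr($high), 1000
);
if ($err === RD_KAFKA_RESP_ERR_NO_ERROR) {
    printf("low=%d high=%d\n", $low->cdata, $high->cdata);
}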

        rd_kafka_get_watermark_offsets()

        +
        public static rd_kafka_get_watermark_offsets ( 
        +    \FFI\CData|null $rk, 
        +    string|null $topic, 
        +    int|null $partition, 
        +    \FFI\CData|null $low, 
        +    \FFI\CData|null $high
        + ): int
        +
        +

        Get last known low (oldest/beginning) and high (newest/end) offsets for partition.

        +

        The low offset is updated periodically (if statistics.interval.ms is set) while the high offset is updated on each fetched message set from the broker.

        +

        If there is no cached offset (either low or high, or both) then RD_KAFKA_OFFSET_INVALID will be returned for the respective offset.

        +

        Offsets are returned in *low and *high respectively.

        + +
        Remarks
        Shall only be used with an active consumer instance.
        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t*
        +
        topic string|null const char*
        +
        partition int|null int32_t
        +
        low \FFI\CData|null int64_t*
        +
        high \FFI\CData|null int64_t*
        +
        Returns
        +
        int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on failure.
        +
        + +

        rd_kafka_offsets_for_times()

        +
        public static rd_kafka_offsets_for_times ( 
        +    \FFI\CData|null $rk, 
        +    \FFI\CData|null $offsets, 
        +    int|null $timeout_ms
        + ): int
        +
        +

        Look up the offsets for the given partitions by timestamp.

        +

        The returned offset for each partition is the earliest offset whose timestamp is greater than or equal to the given timestamp in the corresponding partition.

        +

        The timestamps to query are represented as offset in offsets on input, and offset will contain the offset on output.

        +

        The function will block for at most timeout_ms milliseconds.

        +
        Remarks
        Duplicate Topic+Partitions are not supported.
        +
        +Per-partition errors may be returned in rd_kafka_topic_partition_t.err
        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t*
        +
        offsets \FFI\CData|null rd_kafka_topic_partition_list_t*
        +
        timeout_ms int|null int
        +
        Returns
        +
int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR if offsets could be queried (do note that per-partition errors might be set), RD_KAFKA_RESP_ERR__TIMED_OUT if not all offsets could be fetched within timeout_ms, RD_KAFKA_RESP_ERR__INVALID_ARG if the offsets list is empty, RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION if all partitions are unknown, RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE if unable to query leaders for the given partitions.
        +
        + +
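A hedged sketch of a timestamp lookup for one partition; timestamps go in through ->offset (in milliseconds) and resolved offsets come back in the same field. Library, the topic name, and the field access on the returned element are assumptions:

$offsets = Library::rd_kafka_topic_partition_list_new(1);
$toppar  = Library::rd_kafka_topic_partition_list_add($offsets, 'example-topic', 0);
$toppar->offset = 1704067200000;                    // query: offsets at/after 2024-01-01T00:00:00Z

$err = Library::rd_kafka_offsets_for_times($rk, $offsets, 5000);
// Check $err and each element's ->err, then read the resolved offset back from ->offset.

Library::rd_kafka_topic_partition_list_destroy($offsets);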

        rd_kafka_mem_free()

        +
        public static rd_kafka_mem_free ( 
        +    \FFI\CData|null $rk, 
        +    \FFI\CData|object|string|null $ptr
        + ): void
        +
        +

        Free pointer returned by librdkafka.

        +

        This is typically an abstraction for the free(3) call and makes sure the application can use the same memory allocator as librdkafka for freeing pointers returned by librdkafka.

        +

In standard setups it is usually not necessary to use this interface rather than the free(3) function.

        +

        rk must be set for memory returned by APIs that take an rk argument, for other APIs pass NULL for rk.

        +
        Remarks
        rd_kafka_mem_free() must only be used for pointers returned by APIs that explicitly mention using this function for freeing.
        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t*
        +
        ptr \FFI\CData|object|string|null void*
        +
        + +

        rd_kafka_queue_new()

        +
        public static rd_kafka_queue_new ( 
        +    \FFI\CData|null $rk
        + ): \FFI\CData|null
        +
        +

        Create a new message queue.

        +

        See rd_kafka_consume_start_queue(), rd_kafka_consume_queue(), et.al.

        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t* - )
        +
        Returns
        +
        \FFI\CData|null rd_kafka_queue_t*
        +
        + +

        rd_kafka_queue_destroy()

        +
        public static rd_kafka_queue_destroy ( 
        +    \FFI\CData|null $rkqu
        + ): void
        +
        +

        Destroy a queue, purging all of its enqueued messages.

        + +
        +
        Parameters
        +
        rkqu \FFI\CData|null rd_kafka_queue_t* - )
        +
        + +

        rd_kafka_queue_get_main()

        +
        public static rd_kafka_queue_get_main ( 
        +    \FFI\CData|null $rk
        + ): \FFI\CData|null
        +
        +

Use rd_kafka_queue_destroy() to lose the reference.

        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t* - )
        +
        Returns
        +
        \FFI\CData|null rd_kafka_queue_t* - a reference to the main librdkafka event queue. This is the queue served by rd_kafka_poll().
        +
        + +

        rd_kafka_queue_get_consumer()

        +
        public static rd_kafka_queue_get_consumer ( 
        +    \FFI\CData|null $rk
        + ): \FFI\CData|null
        +
        +

Use rd_kafka_queue_destroy() to lose the reference.

        +
        Remarks
        rd_kafka_queue_destroy() MUST be called on this queue prior to calling rd_kafka_consumer_close().
        +
        +Polling the returned queue counts as a consumer poll, and will reset the timer for max.poll.interval.ms. If this queue is forwarded to a "destq", polling destq also counts as a consumer poll (this works for any number of forwards). However, even if this queue is unforwarded or forwarded elsewhere, polling destq will continue to count as a consumer poll.
        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t* - )
        +
        Returns
        +
        \FFI\CData|null rd_kafka_queue_t* - a reference to the librdkafka consumer queue. This is the queue served by rd_kafka_consumer_poll().
        +
        + +

        rd_kafka_queue_get_partition()

        +
        public static rd_kafka_queue_get_partition ( 
        +    \FFI\CData|null $rk, 
        +    string|null $topic, 
        +    int|null $partition
        + ): \FFI\CData|null
        +
        +

Use rd_kafka_queue_destroy() to lose the reference.

        +
        Remarks
        rd_kafka_queue_destroy() MUST be called on this queue
        +
        +This function only works on consumers.
        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t*
        +
        topic string|null const char*
        +
        partition int|null int32_t
        +
        Returns
        +
        \FFI\CData|null rd_kafka_queue_t* - a reference to the partition’s queue, or NULL if partition is invalid.
        +
        + +

        rd_kafka_queue_get_background()

        +
        public static rd_kafka_queue_get_background ( 
        +    \FFI\CData|null $rk
        + ): \FFI\CData|null
        +
        +

The background thread queue provides the application with an automatically polled queue that triggers the event callback in a background thread; this background thread is completely managed by librdkafka.

        +

        The background thread queue is automatically created if a generic event handler callback is configured with rd_kafka_conf_set_background_event_cb() or if rd_kafka_queue_get_background() is called.

        +

The background queue is polled and served by librdkafka and MUST NOT be polled, forwarded, or otherwise managed by the application; it may only be used as the destination queue passed to queue-enabled APIs, such as the Admin API.

        +

Use rd_kafka_queue_destroy() to lose the reference.

        +
        Warning
        The background queue MUST NOT be read from (polled, consumed, etc), or forwarded from.
        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t* - )
        +
        Returns
        +
        \FFI\CData|null rd_kafka_queue_t* - a reference to the background thread queue, or NULL if the background queue is not enabled.
        +
        + +

        rd_kafka_queue_forward()

        +
        public static rd_kafka_queue_forward ( 
        +    \FFI\CData|null $src, 
        +    \FFI\CData|null $dst
        + ): void
        +
        +

        Forward/re-route queue src to dst. If dst is NULL the forwarding is removed.

        +

        The internal refcounts for both queues are increased.

        +
        Remarks
Regardless of whether dst is NULL or not, after calling this function, src will not forward its fetch queue to the consumer queue.
        + +
        +
        Parameters
        +
        src \FFI\CData|null rd_kafka_queue_t*
        +
        dst \FFI\CData|null rd_kafka_queue_t*
        +
        + +

        rd_kafka_set_log_queue()

        +
        public static rd_kafka_set_log_queue ( 
        +    \FFI\CData|null $rk, 
        +    \FFI\CData|null $rkqu
        + ): int
        +
        +

        Forward librdkafka logs (and debug) to the specified queue for serving with one of the ..poll() calls.

        +

        This allows an application to serve log callbacks (log_cb) in its thread of choice.

        + +
        Remarks
        The configuration property log.queue MUST also be set to true.
        +
        +librdkafka maintains its own reference to the provided queue.
        + +
        +
        Parameters
        +
        rk \FFI\CData|null rd_kafka_t* - Client instance.
        +
        rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to forward logs to. If the value is NULL the logs are forwarded to the main queue.
        +
        Returns
        +
        int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error, eg RD_KAFKA_RESP_ERR__NOT_CONFIGURED when log.queue is not set to true.
        +
        + +
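A hedged sketch of routing logs to an application-served queue. It assumes log.queue was enabled on the configuration before rd_kafka_new(), and that rd_kafka_queue_poll()/rd_kafka_event_destroy() (standard librdkafka event APIs) are exposed by the same placeholder binding class:

// On the configuration, before creating $rk:
Library::rd_kafka_conf_set($conf, 'log.queue', 'true', $errstr, 512);

// After creating $rk:
$logQueue = Library::rd_kafka_queue_new($rk);
Library::rd_kafka_set_log_queue($rk, $logQueue);

// Serve log events from a thread/loop of the application's choice.
$event = Library::rd_kafka_queue_poll($logQueue, 100);
if ($event !== null) {
    // rd_kafka_event_log() can extract facility, message and level from the event here.
    Library::rd_kafka_event_destroy($event);
}

Library::rd_kafka_queue_destroy($logQueue);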

        rd_kafka_queue_length()

        +
        public static rd_kafka_queue_length ( 
        +    \FFI\CData|null $rkqu
        + ): int|null
        +
        +
        +
        Parameters
        +
        rkqu \FFI\CData|null rd_kafka_queue_t* - )
        +
        Returns
        +
        int|null size_t - the current number of elements in queue.
        +
        + +

        rd_kafka_queue_io_event_enable()

        +
        public static rd_kafka_queue_io_event_enable ( 
        +    \FFI\CData|null $rkqu, 
        +    int|null $fd, 
        +    \FFI\CData|object|string|null $payload, 
        +    int|null $size
        + ): void
        +
        +

        Enable IO event triggering for queue.

        +

        To ease integration with IO based polling loops this API allows an application to create a separate file-descriptor that librdkafka will write payload (of size size) to whenever a new element is enqueued on a previously empty queue.

        +

        To remove event triggering call with fd = -1.

        +

        librdkafka will maintain a copy of the payload.

        +
        Remarks
        IO and callback event triggering are mutually exclusive.
        +
        +When using forwarded queues the IO event must only be enabled on the final forwarded-to (destination) queue.
        +
        +The file-descriptor/socket must be set to non-blocking.
        + +
        +
        Parameters
        +
        rkqu \FFI\CData|null rd_kafka_queue_t*
        +
        fd int|null int
        +
        payload \FFI\CData|object|string|null const void*
        +
        size int|null size_t
        +
        + +

        rd_kafka_queue_cb_event_enable()

        +
        public static rd_kafka_queue_cb_event_enable ( 
        +    \FFI\CData|null $rkqu, 
        +    \FFI\CData|\Closure $event_cb, 
        +    \FFI\CData|object|string|null $opaque
        + ): void
        +
        +

        Enable callback event triggering for queue.

        +

        The callback will be called from an internal librdkafka thread when a new element is enqueued on a previously empty queue.

        +

        To remove event triggering call with event_cb = NULL.

        +

        The qev_opaque is passed to the callback's qev_opaque argument.

        +
        Remarks
        IO and callback event triggering are mutually exclusive.
        +
+Since the callback may be triggered from internal librdkafka threads, the application must not perform any prolonged work in the callback, or call any librdkafka APIs (for the same rd_kafka_t handle).
        + +
        +
        Parameters
        +
        rkqu \FFI\CData|null rd_kafka_queue_t*
        +
        event_cb \FFI\CData|\Closure void()(rd_kafka_t, void*)
        +
        opaque \FFI\CData|object|string|null void*
        +
        + +

        rd_kafka_consume_start()

        +
        public static rd_kafka_consume_start ( 
        +    \FFI\CData|null $rkt, 
        +    int|null $partition, 
        +    int|null $offset
        + ): int|null
        +
        +

        Start consuming messages for topic rkt and partition at offset offset which may either be an absolute (0..N) or one of the logical offsets:

        +
          +
        • RD_KAFKA_OFFSET_BEGINNING
        • +
        • RD_KAFKA_OFFSET_END
        • +
        • RD_KAFKA_OFFSET_STORED
        • +
        • RD_KAFKA_OFFSET_TAIL
        • +
        +

        rdkafka will attempt to keep queued.min.messages (config property) messages in the local queue by repeatedly fetching batches of messages from the broker until the threshold is reached.

        +

        The application shall use one of the rd_kafka_consume*() functions to consume messages from the local queue, each kafka message being represented as a rd_kafka_message_t * object.

        +

        rd_kafka_consume_start() must not be called multiple times for the same topic and partition without stopping consumption first with rd_kafka_consume_stop().

        + +

Use rd_kafka_errno2err() to convert system errno to rd_kafka_resp_err_t

        + +
        +
        Parameters
        +
        rkt \FFI\CData|null rd_kafka_topic_t*
        +
        partition int|null int32_t
        +
        offset int|null int64_t
        +
        Returns
        +
        int|null int - 0 on success or -1 on error in which case errno is set accordingly:
          +
          +
        • EBUSY - Conflicts with an existing or previous subscription (RD_KAFKA_RESP_ERR__CONFLICT)
        • +
        • EINVAL - Invalid offset, or incomplete configuration (lacking group.id) (RD_KAFKA_RESP_ERR__INVALID_ARG)
        • +
        • ESRCH - requested partition is invalid. (RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
        • +
        • ENOENT - topic is unknown in the Kafka cluster. (RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
        • + + +
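A hedged sketch of the whole legacy simple-consumer flow (start, consume, stop) for a single partition; Library, the RD_KAFKA_* constants, and the field access on the returned rd_kafka_message_t* are assumptions about the binding:

$rkt = Library::rd_kafka_topic_new($rk, 'example-topic', null);

Library::rd_kafka_consume_start($rkt, 0, RD_KAFKA_OFFSET_BEGINNING); // -2 in librdkafka

while ($running) {
    $message = Library::rd_kafka_consume($rkt, 0, 1000);
    if ($message === null) {
        continue;                                   // ETIMEDOUT: nothing within 1000 ms
    }
    if ($message->err === RD_KAFKA_RESP_ERR_NO_ERROR) {
        // process $message->payload / $message->len / $message->offset ...
    }                                               // RD_KAFKA_RESP_ERR__PARTITION_EOF is typically not an error
    Library::rd_kafka_message_destroy($message);
}

Library::rd_kafka_consume_stop($rkt, 0);
Library::rd_kafka_topic_destroy($rkt);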

          rd_kafka_consume_start_queue()

          +
          public static rd_kafka_consume_start_queue ( 
          +    \FFI\CData|null $rkt, 
          +    int|null $partition, 
          +    int|null $offset, 
          +    \FFI\CData|null $rkqu
          + ): int|null
          +
          +

Same as rd_kafka_consume_start() but re-routes incoming messages to the provided queue rkqu (which must have been previously allocated with rd_kafka_queue_new()).

          +

          The application must use one of the rd_kafka_consume_*_queue() functions to receive fetched messages.

          +

          rd_kafka_consume_start_queue() must not be called multiple times for the same topic and partition without stopping consumption first with rd_kafka_consume_stop(). rd_kafka_consume_start() and rd_kafka_consume_start_queue() must not be combined for the same topic and partition.

          + +
          +
          Parameters
          +
          rkt \FFI\CData|null rd_kafka_topic_t*
          +
          partition int|null int32_t
          +
          offset int|null int64_t
          +
          rkqu \FFI\CData|null rd_kafka_queue_t*
          +
          Returns
          +
          int|null int
          +
          + +

          rd_kafka_consume_stop()

          +
          public static rd_kafka_consume_stop ( 
          +    \FFI\CData|null $rkt, 
          +    int|null $partition
          + ): int|null
          +
          +

          Stop consuming messages for topic rkt and partition, purging all messages currently in the local queue.

          +

NOTE: To enforce synchronisation this call will block until the internal fetcher has terminated and offsets are committed to the configured storage method.

          +

The application needs to stop all consumers before calling rd_kafka_destroy() on the main object handle.

          + +
          +
          Parameters
          +
          rkt \FFI\CData|null rd_kafka_topic_t*
          +
          partition int|null int32_t
          +
          Returns
          +
          int|null int - 0 on success or -1 on error (see errno).
          +
          + +

          rd_kafka_seek()

          +
          public static rd_kafka_seek ( 
          +    \FFI\CData|null $rkt, 
          +    int|null $partition, 
          +    int|null $offset, 
          +    int|null $timeout_ms
          + ): int
          +
          +

          Seek consumer for topic+partition to offset which is either an absolute or logical offset.

          +

          If timeout_ms is specified (not 0) the seek call will wait this long for the consumer to update its fetcher state for the given partition with the new offset. This guarantees that no previously fetched messages for the old offset (or fetch position) will be passed to the application.

          +

          If the timeout is reached the internal state will be unknown to the caller and this function returns RD_KAFKA_RESP_ERR__TIMED_OUT.

          +

          If timeout_ms is 0 it will initiate the seek but return immediately without any error reporting (e.g., async).

          +

          This call will purge all pre-fetched messages for the given partition, which may be up to queued.max.message.kbytes in size. Repeated use of seek may thus lead to increased network usage as messages are re-fetched from the broker.

          +
          Remarks
Seek must only be performed for already assigned/consumed partitions, use rd_kafka_assign() (et.al) to set the initial starting offset for a new assignment.
          + +
          Deprecated:
          Use rd_kafka_seek_partitions().
          + +
          +
          Parameters
          +
          rkt \FFI\CData|null rd_kafka_topic_t*
          +
          partition int|null int32_t
          +
          offset int|null int64_t
          +
          timeout_ms int|null int
          +
          Returns
          +
          int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR__NO_ERROR on success else an error code.
          +
          + +

          rd_kafka_consume()

          +
          public static rd_kafka_consume ( 
          +    \FFI\CData|null $rkt, 
          +    int|null $partition, 
          +    int|null $timeout_ms
          + ): \FFI\CData|null
          +
          +

          Consume a single message from topic rkt and partition.

          +

timeout_ms is the maximum amount of time to wait for a message to be received. The consumer must have been previously started with rd_kafka_consume_start().

          + +

          Errors (when returning NULL):

          +
            +
          • ETIMEDOUT - timeout_ms was reached with no new messages fetched.
          • +
          • ENOENT - rkt + partition is unknown. (no prior rd_kafka_consume_start() call)
          • +
          +

          NOTE: The returned message's ..->err must be checked for errors. NOTE: ..->err == RD_KAFKA_RESP_ERR__PARTITION_EOF signals that the end of the partition has been reached, which should typically not be considered an error. The application should handle this case (e.g., ignore).

          +
          Remarks
          on_consume() interceptors may be called from this function prior to passing message to application.
          + +
          +
          Parameters
          +
          rkt \FFI\CData|null rd_kafka_topic_t*
          +
          partition int|null int32_t
          +
          timeout_ms int|null int
          +
          Returns
          +
          \FFI\CData|null rd_kafka_message_t* - a message object on success or NULL on error. The message object must be destroyed with rd_kafka_message_destroy() when the application is done with it.
          +
          + +

          rd_kafka_consume_batch()

          +
          public static rd_kafka_consume_batch ( 
          +    \FFI\CData|null $rkt, 
          +    int|null $partition, 
          +    int|null $timeout_ms, 
          +    \FFI\CData|null $rkmessages, 
          +    int|null $rkmessages_size
          + ): int|null
          +
          +

Consume up to rkmessages_size messages from topic rkt and partition, putting a pointer to each message in the application-provided array rkmessages (of size rkmessages_size entries).

          +

          rd_kafka_consume_batch() provides higher throughput performance than rd_kafka_consume().

          +

          timeout_ms is the maximum amount of time to wait for all of rkmessages_size messages to be put into rkmessages. If no messages were available within the timeout period this function returns 0 and rkmessages remains untouched. This differs somewhat from rd_kafka_consume().

          +

The message objects must be destroyed with rd_kafka_message_destroy() when the application is done with them.

          + +
          See also
          rd_kafka_consume()
          +
          Remarks
          on_consume() interceptors may be called from this function prior to passing message to application.
          + +
          +
          Parameters
          +
          rkt \FFI\CData|null rd_kafka_topic_t*
          +
          partition int|null int32_t
          +
          timeout_ms int|null int
          +
          rkmessages \FFI\CData|null rd_kafka_message_t**
          +
          rkmessages_size int|null size_t
          +
          Returns
          +
int|null ssize_t - the number of messages added to rkmessages, or -1 on error (same error codes as for rd_kafka_consume()).
          +
          + +

          rd_kafka_consume_callback()

          +
          public static rd_kafka_consume_callback ( 
          +    \FFI\CData|null $rkt, 
          +    int|null $partition, 
          +    int|null $timeout_ms, 
          +    \FFI\CData|\Closure $consume_cb, 
          +    \FFI\CData|object|string|null $opaque
          + ): int|null
          +
          +

Consumes messages from topic rkt and partition, calling the provided callback for each consumed message.

          +

          rd_kafka_consume_callback() provides higher throughput performance than both rd_kafka_consume() and rd_kafka_consume_batch().

          +

          timeout_ms is the maximum amount of time to wait for one or more messages to arrive.

          +

          The provided consume_cb function is called for each message, the application MUST NOT call rd_kafka_message_destroy() on the provided rkmessage.

          +

          The commit_opaque argument is passed to the consume_cb as commit_opaque.

          + +
          See also
          rd_kafka_consume()
          +
          Remarks
          on_consume() interceptors may be called from this function prior to passing message to application.
          +
          +This function will return early if a transaction control message is received, these messages are not exposed to the application but still enqueued on the consumer queue to make sure their offsets are stored.
          +
          Deprecated:
          This API is deprecated and subject for future removal. There is no new callback-based consume interface, use the poll/queue based alternatives.
          + +
          +
          Parameters
          +
          rkt \FFI\CData|null rd_kafka_topic_t*
          +
          partition int|null int32_t
          +
          timeout_ms int|null int
          +
          consume_cb \FFI\CData|\Closure void()(rd_kafka_message_t, void*)
          +
          opaque \FFI\CData|object|string|null void*
          +
          Returns
          +
          int|null int - the number of messages processed or -1 on error.
          +
          + +

          rd_kafka_consume_queue()

          +
          public static rd_kafka_consume_queue ( 
          +    \FFI\CData|null $rkqu, 
          +    int|null $timeout_ms
          + ): \FFI\CData|null
          +
          +

          Consume from queue.

          +
          See also
          rd_kafka_consume()
          + +
          +
          Parameters
          +
          rkqu \FFI\CData|null rd_kafka_queue_t*
          +
          timeout_ms int|null int
          +
          Returns
          +
          \FFI\CData|null rd_kafka_message_t*
          +
          + +

          rd_kafka_consume_batch_queue()

          +
          public static rd_kafka_consume_batch_queue ( 
          +    \FFI\CData|null $rkqu, 
          +    int|null $timeout_ms, 
          +    \FFI\CData|null $rkmessages, 
          +    int|null $rkmessages_size
          + ): int|null
          +
          +

          Consume batch of messages from queue.

          +
          See also
          rd_kafka_consume_batch()
          + +
          +
          Parameters
          +
          rkqu \FFI\CData|null rd_kafka_queue_t*
          +
          timeout_ms int|null int
          +
          rkmessages \FFI\CData|null rd_kafka_message_t**
          +
          rkmessages_size int|null size_t
          +
          Returns
          +
          int|null ssize_t
          +
          + +

          rd_kafka_consume_callback_queue()

          +
          public static rd_kafka_consume_callback_queue ( 
          +    \FFI\CData|null $rkqu, 
          +    int|null $timeout_ms, 
          +    \FFI\CData|\Closure $consume_cb, 
          +    \FFI\CData|object|string|null $opaque
          + ): int|null
          +
          +

          Consume multiple messages from queue with callback.

          +
          See also
          rd_kafka_consume_callback()
          +
          Deprecated:
          This API is deprecated and subject for future removal. There is no new callback-based consume interface, use the poll/queue based alternatives.
          + +
          +
          Parameters
          +
          rkqu \FFI\CData|null rd_kafka_queue_t*
          +
          timeout_ms int|null int
          +
          consume_cb \FFI\CData|\Closure void()(rd_kafka_message_t, void*)
          +
          opaque \FFI\CData|object|string|null void*
          +
          Returns
          +
          int|null int
          +
          + +

          rd_kafka_offset_store()

          +
          public static rd_kafka_offset_store ( 
          +    \FFI\CData|null $rkt, 
          +    int|null $partition, 
          +    int|null $offset
          + ): int
          +
          +

          Store offset offset + 1 for topic rkt partition partition.

          +

          The offset + 1 will be committed (written) to broker (or file) according to auto.commit.interval.ms or manual offset-less commit()

          +
          Deprecated:
          This API lacks support for partition leader epochs, which makes it at risk for unclean leader election log truncation issues. Use rd_kafka_offsets_store() and rd_kafka_offset_store_message() instead.
          +
          Warning
          This method may only be called for partitions that are currently assigned. Non-assigned partitions will fail with RD_KAFKA_RESP_ERR__STATE. Since v1.9.0.
          +
          +Avoid storing offsets after calling rd_kafka_seek() (et.al) as this may later interfere with resuming a paused partition, instead store offsets prior to calling seek.
          +
          Remarks
          enable.auto.offset.store must be set to "false" when using this API.
          + +
          +
          Parameters
          +
          rkt \FFI\CData|null rd_kafka_topic_t*
          +
          partition int|null int32_t
          +
          offset int|null int64_t
          +
          Returns
          +
          int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error.
          +
          + +

          rd_kafka_offsets_store()

          +
          public static rd_kafka_offsets_store ( 
          +    \FFI\CData|null $rk, 
          +    \FFI\CData|null $offsets
          + ): int
          +
          +

          Store offsets for next auto-commit for one or more partitions.

          +

          The offset will be committed (written) to the offset store according to auto.commit.interval.ms or manual offset-less commit().

          +

          Per-partition success/error status propagated through each partition's .err for all return values (even NO_ERROR) except INVALID_ARG.

          +
          Warning
          This method may only be called for partitions that are currently assigned. Non-assigned partitions will fail with RD_KAFKA_RESP_ERR__STATE. Since v1.9.0.
          +
          +Avoid storing offsets after calling rd_kafka_seek() (et.al) as this may later interfere with resuming a paused partition, instead store offsets prior to calling seek.
          +
          Remarks
          The .offset field is stored as is, it will NOT be + 1.
          +
          +enable.auto.offset.store must be set to "false" when using this API.
          +
          +The leader epoch, if set, will be used to fence outdated partition leaders. See rd_kafka_topic_partition_set_leader_epoch().
          + +
          +
          Parameters
          +
          rk \FFI\CData|null rd_kafka_t*
          +
          offsets \FFI\CData|null rd_kafka_topic_partition_list_t*
          +
          Returns
          +
          int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on (partial) success, or RD_KAFKA_RESP_ERR__INVALID_ARG if enable.auto.offset.store is true, or RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION or RD_KAFKA_RESP_ERR__STATE if none of the offsets could be stored.
          +
          + +
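A hedged sketch of manual offset storage for one partition, assuming enable.auto.offset.store=false was set on the configuration, a previously processed offset in $lastProcessedOffset, and the placeholder binding class Library:

$offsets = Library::rd_kafka_topic_partition_list_new(1);
$toppar  = Library::rd_kafka_topic_partition_list_add($offsets, 'example-topic', 0);

// Store the offset to resume from, i.e. last processed offset + 1;
// unlike rd_kafka_offset_store() the value is stored as-is.
$toppar->offset = $lastProcessedOffset + 1;

$err = Library::rd_kafka_offsets_store($rk, $offsets);
// Check $err and each element's ->err for per-partition results.

Library::rd_kafka_topic_partition_list_destroy($offsets);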

          rd_kafka_subscribe()

          +
          public static rd_kafka_subscribe ( 
          +    \FFI\CData|null $rk, 
          +    \FFI\CData|null $topics
          + ): int
          +
          +

          Subscribe to topic set using balanced consumer groups.

          +

          Wildcard (regex) topics are supported: any topic name in the topics list that is prefixed with "^" will be regex-matched to the full list of topics in the cluster and matching topics will be added to the subscription list.

          +

The full topic list is retrieved every topic.metadata.refresh.interval.ms to pick up new or deleted topics that match the subscription. If there is any change to the matched topics the consumer will immediately rejoin the group with the updated set of subscribed topics.

          +

          Regex and full topic names can be mixed in topics.

          +
          Remarks
          Only the .topic field is used in the supplied topics list, all other fields are ignored.
          +
          +subscribe() is an asynchronous method which returns immediately: background threads will (re)join the group, wait for group rebalance, issue any registered rebalance_cb, assign() the assigned partitions, and then start fetching messages. This cycle may take up to session.timeout.ms * 2 or more to complete.
          +
          +After this call returns a consumer error will be returned by rd_kafka_consumer_poll (et.al) for each unavailable topic in the topics. The error will be RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART for non-existent topics, and RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED for unauthorized topics. The consumer error will be raised through rd_kafka_consumer_poll() (et.al.) with the rd_kafka_message_t.err field set to one of the error codes mentioned above. The subscribe function itself is asynchronous and will not return an error on unavailable topics.
          + +
          +
          Parameters
          +
          rk \FFI\CData|null rd_kafka_t*
          +
          topics \FFI\CData|null const rd_kafka_topic_partition_list_t*
          +
          Returns
          +
          int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__INVALID_ARG if list is empty, contains invalid topics or regexes or duplicate entries, RD_KAFKA_RESP_ERR__FATAL if the consumer has raised a fatal error.
          +
          + +
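A hedged subscription sketch mixing a literal topic name and a regex pattern; RD_KAFKA_PARTITION_UA (-1 in librdkafka) is assumed to be exposed by the binding, and only the topic names in the list are used:

$topics = Library::rd_kafka_topic_partition_list_new(2);
Library::rd_kafka_topic_partition_list_add($topics, 'example-topic', RD_KAFKA_PARTITION_UA);
Library::rd_kafka_topic_partition_list_add($topics, '^example\..*', RD_KAFKA_PARTITION_UA);

$err = Library::rd_kafka_subscribe($rk, $topics);

// The call copies what it needs; the list can be destroyed right away.
Library::rd_kafka_topic_partition_list_destroy($topics);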

          rd_kafka_unsubscribe()

          +
          public static rd_kafka_unsubscribe ( 
          +    \FFI\CData|null $rk
          + ): int
          +
          +
          +
          Parameters
          +
          rk \FFI\CData|null rd_kafka_t*
          +
          Returns
          +
          int rd_kafka_resp_err_t
          +
          +

          rd_kafka_subscription()

          +
          public static rd_kafka_subscription ( 
          +    \FFI\CData|null $rk, 
          +    \FFI\CData|null $topics
          + ): int
          +
          +

          Returns the current topic subscription.

          + +
          Remarks
          The application is responsible for calling rd_kafka_topic_partition_list_destroy on the returned list.
          + +
          +
          Parameters
          +
          rk \FFI\CData|null rd_kafka_t*
          +
          topics \FFI\CData|null rd_kafka_topic_partition_list_t**
          +
          Returns
          +
int rd_kafka_resp_err_t - An error code on failure, otherwise topics is updated to point to a newly allocated topic list (possibly empty).
          +
          + +

          rd_kafka_consumer_poll()

          +
          public static rd_kafka_consumer_poll ( 
          +    \FFI\CData|null $rk, 
          +    int|null $timeout_ms
          + ): \FFI\CData|null
          +
          +

          Poll the consumer for messages or events.

          +

          Will block for at most timeout_ms milliseconds.

          +
          Remarks
          An application should make sure to call consumer_poll() at regular intervals, even if no messages are expected, to serve any queued callbacks waiting to be called. This is especially important when a rebalance_cb has been registered as it needs to be called and handled properly to synchronize internal consumer state.
          + +
          Remarks
          on_consume() interceptors may be called from this function prior to passing message to application.
          +
          +When subscribing to topics the application must call poll at least every max.poll.interval.ms to remain a member of the consumer group.
          +

          Noteworthy errors returned in ->err:

          +
            +
          • RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED - application failed to call poll within max.poll.interval.ms.
          • +
          +
          See also
          rd_kafka_message_t
          + +
          +
          Parameters
          +
          rk \FFI\CData|null rd_kafka_t*
          +
          timeout_ms int|null int
          +
          Returns
          +
          \FFI\CData|null rd_kafka_message_t* - A message object which is a proper message if ->err is RD_KAFKA_RESP_ERR_NO_ERROR, or an event or error for any other value.
          +
          + +

          rd_kafka_consumer_close()

          +
          public static rd_kafka_consumer_close ( 
          +    \FFI\CData|null $rk
          + ): int
          +
          +

          Close the consumer.

          +

          This call will block until the consumer has revoked its assignment, calling the rebalance_cb if it is configured, committed offsets to broker, and left the consumer group (if applicable). The maximum blocking time is roughly limited to session.timeout.ms.

          + +
          Remarks
          The application still needs to call rd_kafka_destroy() after this call finishes to clean up the underlying handle resources.
          + +
          +
          Parameters
          +
          rk \FFI\CData|null rd_kafka_t* - )
          +
          Returns
          +
int rd_kafka_resp_err_t - An error code indicating if the consumer close was successful or not. RD_KAFKA_RESP_ERR__FATAL is returned if the consumer has raised a fatal error.
          +
          + +
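A hedged high-level consume loop tying rd_kafka_consumer_poll() and rd_kafka_consumer_close() together; $rk, $running, the placeholder class Library, and the message field access are assumptions:

while ($running) {
    $message = Library::rd_kafka_consumer_poll($rk, 100);
    if ($message === null) {
        continue;                                   // nothing within the timeout; keep polling regularly
    }
    if ($message->err === RD_KAFKA_RESP_ERR_NO_ERROR) {
        // process $message->payload / $message->len / $message->offset ...
    } else {
        // event or error, e.g. RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED
    }
    Library::rd_kafka_message_destroy($message);
}

Library::rd_kafka_consumer_close($rk);              // may call the rebalance_cb and commit offsets
Library::rd_kafka_destroy($rk);                     // still required after the close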

          rd_kafka_assign()

          +
          public static rd_kafka_assign ( 
          +    \FFI\CData|null $rk, 
          +    \FFI\CData|null $partitions
          + ): int
          +
          +

          Atomic assignment of partitions to consume.

          +

          The new partitions will replace the existing assignment.

          +

          A zero-length partitions will treat the partitions as a valid, albeit empty assignment, and maintain internal state, while a NULL value for partitions will reset and clear the internal state.

          +

          When used from a rebalance callback, the application should pass the partition list passed to the callback (or a copy of it) even if the list is empty (i.e. should not pass NULL in this case) so as to maintain internal join state. This is not strictly required - the application may adjust the assignment provided by the group. However, this is rarely useful in practice.

          + +
          +
          Parameters
          +
          rk \FFI\CData|null rd_kafka_t*
          +
          partitions \FFI\CData|null const rd_kafka_topic_partition_list_t*
          +
          Returns
          +
          int rd_kafka_resp_err_t - An error code indicating if the new assignment was applied or not. RD_KAFKA_RESP_ERR__FATAL is returned if the consumer has raised a fatal error.
          +
          + +

          rd_kafka_assignment()

          +
          public static rd_kafka_assignment ( 
          +    \FFI\CData|null $rk, 
          +    \FFI\CData|null $partitions
          + ): int
          +
          +

          Returns the current partition assignment as set by rd_kafka_assign() or rd_kafka_incremental_assign().

          + +
          Remarks
          The application is responsible for calling rd_kafka_topic_partition_list_destroy on the returned list.
          +
          +This assignment represents the partitions assigned through the assign functions and not the partitions assigned to this consumer instance by the consumer group leader. They are usually the same following a rebalance but not necessarily since an application is free to assign any partitions.
          + +
          +
          Parameters
          +
          rk \FFI\CData|null rd_kafka_t*
          +
          partitions \FFI\CData|null rd_kafka_topic_partition_list_t**
          +
          Returns
          +
          int rd_kafka_resp_err_t - An error code on failure, otherwise partitions is updated to point to a newly allocated partition list (possibly empty).
          +
          + +

          rd_kafka_commit()

          +
          public static rd_kafka_commit ( 
          +    \FFI\CData|null $rk, 
          +    \FFI\CData|null $offsets, 
          +    int|null $async
          + ): int
          +
          +

          Commit offsets on broker for the provided list of partitions.

          +

          offsets should contain topic, partition, offset and possibly metadata. The offset should be the offset where consumption will resume, i.e., the last processed offset + 1. If offsets is NULL the current partition assignment will be used instead.

          +

          If async is false this operation will block until the broker offset commit is done, returning the resulting success or error code.

          +

          If a rd_kafka_conf_set_offset_commit_cb() offset commit callback has been configured the callback will be enqueued for a future call to rd_kafka_poll(), rd_kafka_consumer_poll() or similar.

          + +
          +
          Parameters
          +
          rk \FFI\CData|null rd_kafka_t*
          +
          offsets \FFI\CData|null const rd_kafka_topic_partition_list_t*
          +
          async int|null int
          +
          Returns
          +
int rd_kafka_resp_err_t - An error code indicating if the commit was successful, or successfully scheduled if asynchronous, or failed. RD_KAFKA_RESP_ERR__FATAL is returned if the consumer has raised a fatal error.
          +
          + +

          rd_kafka_commit_message()

          +
          public static rd_kafka_commit_message ( 
          +    \FFI\CData|null $rk, 
          +    \FFI\CData|null $rkmessage, 
          +    int|null $async
          + ): int
          +
          +

          Commit message's offset on broker for the message's partition. The committed offset is the message's offset + 1.

          +
          See also
          rd_kafka_commit
          + +
          +
          Parameters
          +
          rk \FFI\CData|null rd_kafka_t*
          +
          rkmessage \FFI\CData|null const rd_kafka_message_t*
          +
          async int|null int
          +
          Returns
          +
          int rd_kafka_resp_err_t
          +
          + +
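A hedged sketch of committing synchronously after a message has been fully processed; the committed offset will be the message's offset + 1 (Library and the constant are placeholders/assumptions):

$err = Library::rd_kafka_commit_message($rk, $message, 0 /* async = false: block until done */);
if ($err !== RD_KAFKA_RESP_ERR_NO_ERROR) {
    // handle the failed commit (log, retry, ...)
}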

          rd_kafka_commit_queue()

          +
          public static rd_kafka_commit_queue ( 
          +    \FFI\CData|null $rk, 
          +    \FFI\CData|null $offsets, 
          +    \FFI\CData|null $rkqu, 
          +    \FFI\CData|\Closure $cb, 
          +    \FFI\CData|object|string|null $opaque
          + ): int
          +
          +

          Commit offsets on broker for the provided list of partitions.

          +

          See rd_kafka_commit for offsets semantics.

          +

          The result of the offset commit will be posted on the provided rkqu queue.

          +

          If the application uses one of the poll APIs (rd_kafka_poll(), rd_kafka_consumer_poll(), rd_kafka_queue_poll(), ..) to serve the queue the cb callback is required.

          +

          The commit_opaque argument is passed to the callback as commit_opaque, or if using the event API the callback is ignored and the offset commit result will be returned as an RD_KAFKA_EVENT_COMMIT event and the commit_opaque value will be available with rd_kafka_event_opaque().

          +

          If rkqu is NULL a temporary queue will be created and the callback will be served by this call.

          +
          See also
          rd_kafka_commit()
          +
          +rd_kafka_conf_set_offset_commit_cb()
          + +
          +
          Parameters
          +
          rk \FFI\CData|null rd_kafka_t*
          +
          offsets \FFI\CData|null const rd_kafka_topic_partition_list_t*
          +
          rkqu \FFI\CData|null rd_kafka_queue_t*
          +
          cb \FFI\CData|\Closure void()(rd_kafka_t, rd_kafka_resp_err_t, rd_kafka_topic_partition_list_t*, void*)
          +
          opaque \FFI\CData|object|string|null void*
          +
          Returns
          +
          int rd_kafka_resp_err_t
          +
          + +

          rd_kafka_committed()

          +
          public static rd_kafka_committed ( 
          +    \FFI\CData|null $rk, 
          +    \FFI\CData|null $partitions, 
          +    int|null $timeout_ms
          + ): int
          +
          +

          Retrieve committed offsets for topics+partitions.

          +

The offset field of each requested partition will either be set to the stored offset or to RD_KAFKA_OFFSET_INVALID in case there was no stored offset for that partition.

          +

          Committed offsets will be returned according to the isolation.level configuration property, if set to read_committed (default) then only stable offsets for fully committed transactions will be returned, while read_uncommitted may return offsets for not yet committed transactions.

          + +
          +
          Parameters
          +
          rk \FFI\CData|null rd_kafka_t*
          +
          partitions \FFI\CData|null rd_kafka_topic_partition_list_t*
          +
          timeout_ms int|null int
          +
          Returns
          +
          int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success in which case the offset or err field of each partitions’ element is filled in with the stored offset, or a partition specific error. Else returns an error code.
          +
          + +

          rd_kafka_position()

          +
          public static rd_kafka_position ( 
          +    \FFI\CData|null $rk, 
          +    \FFI\CData|null $partitions
          + ): int
          +
          +

          Retrieve current positions (offsets) for topics+partitions.

          +

          The offset field of each requested partition will be set to the offset of the last consumed message + 1, or RD_KAFKA_OFFSET_INVALID in case there was no previous message.

          +
          Remarks
          In this context the last consumed message is the offset consumed by the current librdkafka instance and, in case of rebalancing, not necessarily the last message fetched from the partition.
          + +
          +
          Parameters
          +
          rk \FFI\CData|null rd_kafka_t*
          +
          partitions \FFI\CData|null rd_kafka_topic_partition_list_t*
          +
          Returns
          +
          int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success in which case the offset or err field of each partitions’ element is filled in with the stored offset, or a partition specific error. Else returns an error code.
          +
          + +

          rd_kafka_produce()

          +
          public static rd_kafka_produce ( 
          +    \FFI\CData|null $rkt, 
          +    int|null $partition, 
          +    int|null $msgflags, 
          +    \FFI\CData|object|string|null $payload, 
          +    int|null $len, 
          +    \FFI\CData|object|string|null $key, 
          +    int|null $keylen, 
          +    \FFI\CData|object|string|null $msg_opaque
          + ): int|null
          +
          +

          Produce and send a single message to broker.

          +

          rkt is the target topic which must have been previously created with rd_kafka_topic_new().

          +

rd_kafka_produce() is an asynchronous non-blocking API. See rd_kafka_conf_set_dr_msg_cb on how to set up a callback to be called once the delivery status (success or failure) is known. The delivery report is triggered by the application calling rd_kafka_poll() (at regular intervals) or rd_kafka_flush() (at termination).

          +

          Since producing is asynchronous, you should call rd_kafka_flush() before you destroy the producer. Otherwise, any outstanding messages will be silently discarded.

          +

          When temporary errors occur, librdkafka automatically retries to produce the messages. Retries are triggered after retry.backoff.ms and when the leader broker for the given partition is available. Otherwise, librdkafka falls back to polling the topic metadata to monitor when a new leader is elected (see the topic.metadata.refresh.fast.interval.ms and topic.metadata.refresh.interval.ms configurations) and then performs a retry. A delivery error will occur if the message could not be produced within message.timeout.ms.

          +

          See the "Message reliability" chapter in INTRODUCTION.md for more information.

          +

          partition is the target partition, either:

          +
            +
          • RD_KAFKA_PARTITION_UA (unassigned) for automatic partitioning using the topic's partitioner function, or
          • +
          • a fixed partition (0..N)
          • +
          +

          msgflags is zero or more of the following flags OR:ed together: RD_KAFKA_MSG_F_BLOCK - block produce*() call if queue.buffering.max.messages or queue.buffering.max.kbytes are exceeded. Messages are considered in-queue from the point they are accepted by produce() until their corresponding delivery report callback/event returns. It is thus a requirement to call rd_kafka_poll() (or equiv.) from a separate thread when F_BLOCK is used. See WARNING on RD_KAFKA_MSG_F_BLOCK above.

          +

          RD_KAFKA_MSG_F_FREE - rdkafka will free(3) payload when it is done with it. RD_KAFKA_MSG_F_COPY - the payload data will be copied and the payload pointer will not be used by rdkafka after the call returns. RD_KAFKA_MSG_F_PARTITION - produce_batch() will honour per-message partition, either set manually or by the configured partitioner.

          +

          .._F_FREE and .._F_COPY are mutually exclusive. If neither of these are set, the caller must ensure that the memory backing payload remains valid and is not modified or reused until the delivery callback is invoked. Other buffers passed to rd_kafka_produce() don't have this restriction on reuse, i.e. the memory backing the key or the topic name may be reused as soon as rd_kafka_produce() returns.

          +

          If the function returns -1 and RD_KAFKA_MSG_F_FREE was specified, then the memory associated with the payload is still the caller's responsibility.

          +

          payload is the message payload of size len bytes.

          +

          key is an optional message key of size keylen bytes, if non-NULL it will be passed to the topic partitioner as well as be sent with the message to the broker and passed on to the consumer.

          +

msg_opaque is an optional application-provided per-message opaque pointer that will be provided in the message's delivery report callback (dr_msg_cb or dr_cb) and the rd_kafka_message_t _private field.

          +
          Remarks
          on_send() and on_acknowledgement() interceptors may be called from this function. on_acknowledgement() will only be called if the message fails partitioning.
          +
          +If the producer is transactional (transactional.id is configured) producing is only allowed during an on-going transaction, namely after rd_kafka_begin_transaction() has been called.
          + +
          See also
          Use rd_kafka_errno2err() to convert errno to rdkafka error code.
          + +
          +
          Parameters
          +
          rkt \FFI\CData|null rd_kafka_topic_t*
          +
          partition int|null int32_t
          +
          msgflags int|null int
          +
          payload \FFI\CData|object|string|null void*
          +
          len int|null size_t
          +
          key \FFI\CData|object|string|null const void*
          +
          keylen int|null size_t
          +
          msg_opaque \FFI\CData|object|string|null void*
          +
          Returns
          +
          int|null int - 0 on success or -1 on error in which case errno is set accordingly:
            +
            +
          • ENOBUFS - maximum number of outstanding messages has been reached: "queue.buffering.max.messages" (RD_KAFKA_RESP_ERR__QUEUE_FULL)
          • +
          • EMSGSIZE - message is larger than configured max size: "messages.max.bytes". (RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE)
          • +
          • ESRCH - requested partition is unknown in the Kafka cluster. (RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
          • +
          • ENOENT - topic is unknown in the Kafka cluster. (RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
          • +
          • ECANCELED - fatal error has been raised on producer, see rd_kafka_fatal_error(), (RD_KAFKA_RESP_ERR__FATAL).
          • +
          • ENOEXEC - transactional state forbids producing (RD_KAFKA_RESP_ERR__STATE)
          • + + +
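
          Example: a minimal sketch of the produce flow described above. The binding class name RdKafka\FFI\Library and the pre-existing $rk/$rkt handles are assumptions; the constant values are taken from rdkafka.h.

```php
<?php
// Hedged sketch only: binding class name assumed; constant values from rdkafka.h.
use RdKafka\FFI\Library;

$partitionUa = -1;  // RD_KAFKA_PARTITION_UA: let the topic's partitioner choose
$msgFlagCopy = 0x2; // RD_KAFKA_MSG_F_COPY: librdkafka copies the payload before returning

$payload = 'hello kafka';

// $rk (rd_kafka_t*) and $rkt (rd_kafka_topic_t*) are assumed to exist already.
$rc = Library::rd_kafka_produce(
    $rkt,
    $partitionUa,
    $msgFlagCopy,
    $payload,
    strlen($payload),
    null,  // no key
    0,     // keylen
    null   // no per-message opaque
);

if ($rc === -1) {
    // errno-style failure, e.g. the local queue is full (ENOBUFS); see the list above.
    echo "produce failed\n";
}

// Delivery reports are only served while polling.
Library::rd_kafka_poll($rk, 0);
```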

            rd_kafka_producev()

            +
            public static rd_kafka_producev ( 
            +    \FFI\CData|null $rk, 
            +    mixed $args
            + ): int
            +
            +

            Produce and send a single message to broker.

            +

            The message is defined by a va-arg list using rd_kafka_vtype_t tag tuples which must be terminated with a single RD_KAFKA_V_END.

            + +
            See also
            rd_kafka_produce, rd_kafka_produceva, RD_KAFKA_V_END
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t*
            +
            args mixed
            +
            Returns
            +
            int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success, else an error code as described in rd_kafka_produce(). RD_KAFKA_RESP_ERR__CONFLICT is returned if _V_HEADER and _V_HEADERS are mixed.
            +
            + +

            rd_kafka_produce_batch()

            +
            public static rd_kafka_produce_batch ( 
            +    \FFI\CData|null $rkt, 
            +    int|null $partition, 
            +    int|null $msgflags, 
            +    \FFI\CData|null $rkmessages, 
            +    int|null $message_cnt
            + ): int|null
            +
            +

            Produce multiple messages.

            +

            If partition is RD_KAFKA_PARTITION_UA the configured partitioner will be run for each message (slower), otherwise the messages will be enqueued to the specified partition directly (faster).

            +

            The messages are provided in the array rkmessages of count message_cnt elements. The partition and msgflags are used for all provided messages.

            +

            Honoured rkmessages[] fields are:

            +
              +
            • payload,len Message payload and length
            • +
            • key,key_len Optional message key
            • +
            • _private Message opaque pointer (msg_opaque)
            • +
            • err Will be set according to success or failure, see rd_kafka_produce() for possible error codes. Application only needs to check for errors if return value != message_cnt.
            • +
            +
            Remarks
            If RD_KAFKA_MSG_F_PARTITION is set in msgflags, the .partition field of the rkmessages is used instead of partition.
            + +
            Remarks
            This interface does NOT support setting message headers on the provided rkmessages.
            + +
            +
            Parameters
            +
            rkt \FFI\CData|null rd_kafka_topic_t*
            +
            partition int|null int32_t
            +
            msgflags int|null int
            +
            rkmessages \FFI\CData|null rd_kafka_message_t*
            +
            message_cnt int|null int
            +
            Returns
            +
int|null int - the number of messages successfully enqueued for producing.
            +
            + +

            rd_kafka_flush()

            +
            public static rd_kafka_flush ( 
            +    \FFI\CData|null $rk, 
            +    int|null $timeout_ms
            + ): int
            +
            +

            Wait until all outstanding produce requests, et.al, are completed. This should typically be done prior to destroying a producer instance to make sure all queued and in-flight produce requests are completed before terminating.

            +
            Remarks
            This function will call rd_kafka_poll() and thus trigger callbacks.
            +
            +The linger.ms time will be ignored for the duration of the call, queued messages will be sent to the broker as soon as possible.
            +
            +If RD_KAFKA_EVENT_DR has been enabled (through rd_kafka_conf_set_events()) this function will not call rd_kafka_poll() but instead wait for the librdkafka-handled message count to reach zero. This requires the application to serve the event queue in a separate thread. In this mode only messages are counted, not other types of queued events.
            + +
            See also
            rd_kafka_outq_len()
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t*
            +
            timeout_ms int|null int
            +
            Returns
            +
            int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR__TIMED_OUT if timeout_ms was reached before all outstanding requests were completed, else RD_KAFKA_RESP_ERR_NO_ERROR
            +
            + +
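
          Example: a minimal termination sketch for the flush semantics above (the binding class name, the $rk producer handle and the error code value from rdkafka.h are assumptions).

```php
<?php
// Hedged sketch: binding class name assumed; error code value from rdkafka.h.
use RdKafka\FFI\Library;

$errTimedOut = -185; // RD_KAFKA_RESP_ERR__TIMED_OUT

// Before destroying the producer, wait up to 10 s for outstanding messages.
$err = Library::rd_kafka_flush($rk, 10 * 1000);

if ($err === $errTimedOut) {
    echo "some messages were not delivered within the timeout\n";
}
```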

            rd_kafka_purge()

            +
            public static rd_kafka_purge ( 
            +    \FFI\CData|null $rk, 
            +    int|null $purge_flags
            + ): int
            +
            +

            Purge messages currently handled by the producer instance.

            + +

            The application will need to call rd_kafka_poll() or rd_kafka_flush() afterwards to serve the delivery report callbacks of the purged messages.

            +

            Messages purged from internal queues fail with the delivery report error code set to RD_KAFKA_RESP_ERR__PURGE_QUEUE, while purged messages that are in-flight to or from the broker will fail with the error code set to RD_KAFKA_RESP_ERR__PURGE_INFLIGHT.

            +
            Warning
            Purging messages that are in-flight to or from the broker will ignore any subsequent acknowledgement for these messages received from the broker, effectively making it impossible for the application to know if the messages were successfully produced or not. This may result in duplicate messages if the application retries these messages at a later time.
            +
            Remarks
            This call may block for a short time while background thread queues are purged.
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - Client instance.
            +
            purge_flags int|null int - Tells which messages to purge and how.
            +
            Returns
            +
            int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success, RD_KAFKA_RESP_ERR__INVALID_ARG if the purge flags are invalid or unknown, RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED if called on a non-producer client instance.
            +
            + +
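
          Example: a hedged sketch that purges queued and in-flight messages and then serves the resulting delivery reports. The binding class name, the $rk handle and the purge flag values (from rdkafka.h) are assumptions.

```php
<?php
// Hedged sketch: binding class name assumed; purge flag values from rdkafka.h.
use RdKafka\FFI\Library;

$purgeQueue    = 0x1; // RD_KAFKA_PURGE_F_QUEUE: messages in local queues
$purgeInflight = 0x2; // RD_KAFKA_PURGE_F_INFLIGHT: messages in flight to/from the broker

$err = Library::rd_kafka_purge($rk, $purgeQueue | $purgeInflight);

if ($err === 0) { // RD_KAFKA_RESP_ERR_NO_ERROR
    // Serve the delivery report callbacks of the purged messages;
    // they fail with _PURGE_QUEUE / _PURGE_INFLIGHT respectively.
    Library::rd_kafka_poll($rk, 100);
}
```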

            rd_kafka_metadata()

            +
            public static rd_kafka_metadata ( 
            +    \FFI\CData|null $rk, 
            +    int|null $all_topics, 
            +    \FFI\CData|null $only_rkt, 
            +    \FFI\CData|null $metadatap, 
            +    int|null $timeout_ms
            + ): int
            +
            +

            Request Metadata from broker.

            +

            Parameters:

            +
              +
            • all_topics if non-zero: request info about all topics in cluster, if zero: only request info about locally known topics.
            • +
            • only_rkt only request info about this topic
            • +
            • metadatap pointer to hold metadata result. The *metadatap pointer must be released with rd_kafka_metadata_destroy().
            • +
            • timeout_ms maximum response time before failing.
            • +
            +
            Remarks
            Consumer: If all_topics is non-zero the Metadata response information may trigger a re-join if any subscribed topics have changed partition count or existence state.
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t*
            +
            all_topics int|null int
            +
            only_rkt \FFI\CData|null rd_kafka_topic_t*
            +
            metadatap \FFI\CData|null const struct rd_kafka_metadata**
            +
            timeout_ms int|null int
            +
            Returns
            +
int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success (in which case *metadatap will be set), else RD_KAFKA_RESP_ERR__TIMED_OUT on timeout or other error code on error.
            +
            + +

            rd_kafka_metadata_destroy()

            +
            public static rd_kafka_metadata_destroy ( 
            +    \FFI\CData|\Closure $metadata
            + ): void
            +
            +
            +
            Parameters
            +
metadata \FFI\CData|\Closure const struct rd_kafka_metadata*
            +
            +

            rd_kafka_list_groups()

            +
            public static rd_kafka_list_groups ( 
            +    \FFI\CData|null $rk, 
            +    string|null $group, 
            +    \FFI\CData|null $grplistp, 
            +    int|null $timeout_ms
            + ): int
            +
            +

            List and describe client groups in cluster.

            +

            group is an optional group name to describe, otherwise (NULL) all groups are returned.

            +

            timeout_ms is the (approximate) maximum time to wait for response from brokers and must be a positive value.

            + +

            The grplistp remains untouched if any error code is returned, with the exception of RD_KAFKA_RESP_ERR__PARTIAL which behaves as RD_KAFKA_RESP_ERR__NO_ERROR (success) but with an incomplete group list.

            +
            See also
            Use rd_kafka_group_list_destroy() to release list memory.
            +
            Deprecated:
            Use rd_kafka_ListConsumerGroups() and rd_kafka_DescribeConsumerGroups() instead.
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t*
            +
            group string|null const char*
            +
            grplistp \FFI\CData|null const struct rd_kafka_group_list**
            +
            timeout_ms int|null int
            +
            Returns
            +
int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success and grplistp is updated to point to a newly allocated list of groups. RD_KAFKA_RESP_ERR__PARTIAL if not all brokers responded in time but at least one group is returned in grplistp. RD_KAFKA_RESP_ERR__TIMED_OUT if no groups were returned in the given timeframe but not all brokers have yet responded, or if the list of brokers in the cluster could not be obtained within the given timeframe. RD_KAFKA_RESP_ERR__TRANSPORT if no brokers were found. Other error codes may also be returned from the request layer.
            +
            + +

            rd_kafka_group_list_destroy()

            +
            public static rd_kafka_group_list_destroy ( 
            +    \FFI\CData|null $grplist
            + ): void
            +
            +
            +
            Parameters
            +
            grplist \FFI\CData|null const struct rd_kafka_group_list*
            +
            +

            rd_kafka_brokers_add()

            +
            public static rd_kafka_brokers_add ( 
            +    \FFI\CData|null $rk, 
            +    string|null $brokerlist
            + ): int|null
            +
            +

            Adds one or more brokers to the kafka handle's list of initial bootstrap brokers.

            +

            Additional brokers will be discovered automatically as soon as rdkafka connects to a broker by querying the broker metadata.

            +

            If a broker name resolves to multiple addresses (and possibly address families) all will be used for connection attempts in round-robin fashion.

            +

brokerlist is a comma-separated list of brokers in the format <broker1>,<broker2>,... Each broker is given in either host or URL based format: <host>[:<port>] or <proto>://<host>[:<port>], where <proto> is one of PLAINTEXT, SSL, SASL, SASL_PLAINTEXT. The two formats can be mixed, but ultimately the value of the security.protocol config property decides which brokers are allowed.

            +

            Example: brokerlist = "broker1:10000,broker2" brokerlist = "SSL://broker3:9000,ssl://broker2"

            + +
            Remarks
            Brokers may also be defined with the metadata.broker.list or bootstrap.servers configuration property (preferred method).
            +
            Deprecated:
            Set bootstrap servers with the bootstrap.servers configuration property.
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t*
            +
            brokerlist string|null const char*
            +
            Returns
            +
            int|null int - the number of brokers successfully added.
            +
            + +
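
          Example: a short sketch of the (deprecated) bootstrap-broker call above; prefer the bootstrap.servers configuration property. The binding class name and the $rk handle are assumptions.

```php
<?php
// Hedged sketch: binding class name and the $rk handle are assumed.
use RdKafka\FFI\Library;

// Host and URL formats can be mixed, as described above.
$added = Library::rd_kafka_brokers_add($rk, 'broker1:10000,SSL://broker3:9000');

printf("%d broker(s) added\n", $added);
```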

            rd_kafka_set_logger()

            +
            public static rd_kafka_set_logger ( 
            +    \FFI\CData|null $rk, 
            +    \FFI\CData|\Closure $func
            + ): void
            +
            +

            Set logger function.

            +

The default is to print to stderr, but a syslog logger is also available; see rd_kafka_log_(print|syslog) for the builtin alternatives. Alternatively, the application may provide its own logger callback, or pass 'func' as NULL to disable logging.

            +
            Deprecated:
            Use rd_kafka_conf_set_log_cb()
            +
            Remarks
            rk may be passed as NULL in the callback.
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t*
            +
            func \FFI\CData|\Closure void()(const rd_kafka_t, int, const char*, const char*)
            +
            + +

            rd_kafka_set_log_level()

            +
            public static rd_kafka_set_log_level ( 
            +    \FFI\CData|null $rk, 
            +    int|null $level
            + ): void
            +
            +

            Specifies the maximum logging level emitted by internal kafka logging and debugging.

            +
            Deprecated:
            Set the "log_level" configuration property instead.
            +
            Remarks
            If the "debug" configuration property is set the log level is automatically adjusted to LOG_DEBUG (7).
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t*
            +
            level int|null int
            +
            + +

            rd_kafka_log_print()

            +
            public static rd_kafka_log_print ( 
            +    \FFI\CData|null $rk, 
            +    int|null $level, 
            +    string|null $fac, 
            +    string|null $buf
            + ): void
            +
            +
            +
            Parameters
            +
            rk \FFI\CData|null const rd_kafka_t*
            +
            level int|null int
            +
            fac string|null const char*
            +
            buf string|null const char*
            +
            +

            rd_kafka_log_syslog()

            +
            public static rd_kafka_log_syslog ( 
            +    \FFI\CData|null $rk, 
            +    int|null $level, 
            +    string|null $fac, 
            +    string|null $buf
            + ): void
            +
            +

            Builtin log sink: print to syslog.

            +
            Remarks
            This logger is only available if librdkafka was built with syslog support.
            + +
            +
            Parameters
            +
            rk \FFI\CData|null const rd_kafka_t*
            +
            level int|null int
            +
            fac string|null const char*
            +
            buf string|null const char*
            +
            + +

            rd_kafka_outq_len()

            +
            public static rd_kafka_outq_len ( 
            +    \FFI\CData|null $rk
            + ): int|null
            +
            +

            Returns the current out queue length.

            +

            The out queue length is the sum of:

            +
              +
            • number of messages waiting to be sent to, or acknowledged by, the broker.
            • +
            • number of delivery reports (e.g., dr_msg_cb) waiting to be served by rd_kafka_poll() or rd_kafka_flush().
            • +
            • number of callbacks (e.g., error_cb, stats_cb, etc) waiting to be served by rd_kafka_poll(), rd_kafka_consumer_poll() or rd_kafka_flush().
            • +
            • number of events waiting to be served by background_event_cb() in the background queue (see rd_kafka_conf_set_background_event_cb).
            • +
            +

            An application should wait for the return value of this function to reach zero before terminating to make sure outstanding messages, requests (such as offset commits), callbacks and events are fully processed. See rd_kafka_flush().

            + +
            See also
            rd_kafka_flush()
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - )
            +
            Returns
            +
            int|null int - number of messages and events waiting in queues.
            +
            + +
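
          Example: a hedged shutdown sketch built on the description above, polling until the out queue drains or a deadline passes (binding class name and $rk handle assumed).

```php
<?php
// Hedged shutdown sketch: binding class name and the $rk handle are assumed.
use RdKafka\FFI\Library;

$deadline = microtime(true) + 30.0; // wait at most 30 s

// Serve callbacks until all messages, delivery reports and events are processed.
while (Library::rd_kafka_outq_len($rk) > 0 && microtime(true) < $deadline) {
    Library::rd_kafka_poll($rk, 100);
}
```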

            rd_kafka_dump()

            +
            public static rd_kafka_dump ( 
            +    \FFI\CData|null $fp, 
            +    \FFI\CData|null $rk
            + ): void
            +
            +

            Dumps rdkafka's internal state for handle rk to stream fp.

            +

            This is only useful for debugging rdkafka, showing state and statistics for brokers, topics, partitions, etc.

            + +
            +
            Parameters
            +
            fp \FFI\CData|null FILE*
            +
            rk \FFI\CData|null rd_kafka_t*
            +
            + +

            rd_kafka_thread_cnt()

            +
            public static rd_kafka_thread_cnt (  ): int|null
            +
            +

            Retrieve the current number of threads in use by librdkafka.

            +

            Used by regression tests.

            + +
            +
            Returns
            +
int|null int - the current number of threads in use by librdkafka.
            +
            + +

            rd_kafka_wait_destroyed()

            +
            public static rd_kafka_wait_destroyed ( 
            +    int|null $timeout_ms
            + ): int|null
            +
            +

            Wait for all rd_kafka_t objects to be destroyed.

            +

            Returns 0 if all kafka objects are now destroyed, or -1 if the timeout was reached.

            +
            Remarks
            This function is deprecated.
            + +
            +
            Parameters
            +
            timeout_ms int|null int - )
            +
            Returns
            +
            int|null int
            +
            + +

            rd_kafka_unittest()

            +
            public static rd_kafka_unittest (  ): int|null
            +
            +

            Run librdkafka's built-in unit-tests.

            + +
            +
            Returns
            +
int|null int - the number of failures, or 0 if all tests passed.
            +
            + +

            rd_kafka_poll_set_consumer()

            +
            public static rd_kafka_poll_set_consumer ( 
            +    \FFI\CData|null $rk
            + ): int
            +
            +

            Redirect the main (rd_kafka_poll()) queue to the KafkaConsumer's queue (rd_kafka_consumer_poll()).

            +
            Warning
            It is not permitted to call rd_kafka_poll() after directing the main queue with rd_kafka_poll_set_consumer().
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - )
            +
            Returns
            +
            int rd_kafka_resp_err_t
            +
            + +
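
          Example: a minimal consumer-loop sketch for the redirect described above; after this call only rd_kafka_consumer_poll() is used. The binding class name, the $rk consumer handle, an existing subscription, and the exposure of rd_kafka_consumer_poll()/rd_kafka_message_destroy() in the same static-method style are assumptions.

```php
<?php
// Hedged sketch: see the assumptions stated above.
use RdKafka\FFI\Library;

// Redirect the main queue once, right after creating and subscribing the consumer.
Library::rd_kafka_poll_set_consumer($rk);

// From now on rd_kafka_poll() must not be called on this handle.
for ($i = 0; $i < 10; $i++) {
    $msg = Library::rd_kafka_consumer_poll($rk, 1000); // rd_kafka_message_t* or null
    if ($msg === null) { // assuming the binding maps NULL pointers to PHP null
        continue;
    }
    // ... handle $msg ...
    Library::rd_kafka_message_destroy($msg);
}
```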

            rd_kafka_event_type()

            +
            public static rd_kafka_event_type ( 
            +    \FFI\CData|null $rkev
            + ): int|null
            +
            +
            Remarks
            As a convenience it is okay to pass rkev as NULL in which case RD_KAFKA_EVENT_NONE is returned.
            + +
            +
            Parameters
            +
            rkev \FFI\CData|null const rd_kafka_event_t* - )
            +
            Returns
            +
            int|null rd_kafka_event_type_t - the event type for the given event.
            +
            + +

            rd_kafka_event_name()

            +
            public static rd_kafka_event_name ( 
            +    \FFI\CData|null $rkev
            + ): string|null
            +
            +
            Remarks
            As a convenience it is okay to pass rkev as NULL in which case the name for RD_KAFKA_EVENT_NONE is returned.
            + +
            +
            Parameters
            +
            rkev \FFI\CData|null const rd_kafka_event_t* - )
            +
            Returns
            +
            string|null const char* - the event type’s name for the given event.
            +
            + +

            rd_kafka_event_destroy()

            +
            public static rd_kafka_event_destroy ( 
            +    \FFI\CData|null $rkev
            + ): void
            +
            +

            Destroy an event.

            +
            Remarks
            Any references to this event, such as extracted messages, will not be usable after this call.
            +
            +As a convenience it is okay to pass rkev as NULL in which case no action is performed.
            + +
            +
            Parameters
            +
            rkev \FFI\CData|null rd_kafka_event_t* - )
            +
            + +

            rd_kafka_event_message_next()

            +
            public static rd_kafka_event_message_next ( 
            +    \FFI\CData|null $rkev
            + ): \FFI\CData|null
            +
            +

            Call repeatedly until it returns NULL.

            +

            Event types:

            +
              +
            • RD_KAFKA_EVENT_FETCH (1 message)
            • +
            • RD_KAFKA_EVENT_DR (>=1 message(s))
            • +
            +
            Remarks
            The returned message(s) MUST NOT be freed with rd_kafka_message_destroy().
            +
            +on_consume() interceptor may be called from this function prior to passing message to application.
            + +
            +
            Parameters
            +
            rkev \FFI\CData|null rd_kafka_event_t* - )
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_message_t* - the next message from an event.
            +
            + +
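
          Example: a hedged sketch of the repeat-until-NULL pattern above, draining a fetch or delivery-report event pulled from a queue. The binding class name, the $rkqu queue handle, the event type values (from rdkafka.h), and the mapping of NULL pointers to PHP null are assumptions.

```php
<?php
// Hedged sketch: see the assumptions stated above.
use RdKafka\FFI\Library;

$eventDr    = 0x1; // RD_KAFKA_EVENT_DR
$eventFetch = 0x2; // RD_KAFKA_EVENT_FETCH

$rkev = Library::rd_kafka_queue_poll($rkqu, 1000);
if ($rkev !== null) {
    $type = Library::rd_kafka_event_type($rkev);
    if ($type === $eventDr || $type === $eventFetch) {
        // Call repeatedly until NULL; the returned messages must not be destroyed individually.
        while (($rkmessage = Library::rd_kafka_event_message_next($rkev)) !== null) {
            // ... inspect $rkmessage ...
        }
    }
    Library::rd_kafka_event_destroy($rkev); // also invalidates the extracted messages
}
```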

            rd_kafka_event_message_array()

            +
            public static rd_kafka_event_message_array ( 
            +    \FFI\CData|null $rkev, 
            +    \FFI\CData|null $rkmessages, 
            +    int|null $size
            + ): int|null
            +
            +

Extracts size message(s) from the event into the pre-allocated array rkmessages.

            +

            Event types:

            +
              +
            • RD_KAFKA_EVENT_FETCH (1 message)
            • +
            • RD_KAFKA_EVENT_DR (>=1 message(s))
            • +
            + +
            Remarks
            on_consume() interceptor may be called from this function prior to passing message to application.
            + +
            +
            Parameters
            +
            rkev \FFI\CData|null rd_kafka_event_t*
            +
            rkmessages \FFI\CData|null const rd_kafka_message_t**
            +
            size int|null size_t
            +
            Returns
            +
            int|null size_t - the number of messages extracted.
            +
            + +

            rd_kafka_event_message_count()

            +
            public static rd_kafka_event_message_count ( 
            +    \FFI\CData|null $rkev
            + ): int|null
            +
            +

            Event types:

            +
              +
            • RD_KAFKA_EVENT_FETCH (1 message)
            • +
            • RD_KAFKA_EVENT_DR (>=1 message(s))
            • +
            + +
            +
            Parameters
            +
            rkev \FFI\CData|null rd_kafka_event_t* - )
            +
            Returns
            +
            int|null size_t - the number of remaining messages in the event.
            +
            + +

            rd_kafka_event_error()

            +
            public static rd_kafka_event_error ( 
            +    \FFI\CData|null $rkev
            + ): int
            +
            +

            Use rd_kafka_event_error_is_fatal() to detect if this is a fatal error.

            +

            Event types:

            +
              +
            • all
            • +
            + +
            +
            Parameters
            +
            rkev \FFI\CData|null rd_kafka_event_t* - )
            +
            Returns
            +
            int rd_kafka_resp_err_t - the error code for the event.
            +
            + +

            rd_kafka_event_error_string()

            +
            public static rd_kafka_event_error_string ( 
            +    \FFI\CData|null $rkev
            + ): string|null
            +
            +

            Event types:

            +
              +
            • all
            • +
            + +
            +
            Parameters
            +
            rkev \FFI\CData|null rd_kafka_event_t* - )
            +
            Returns
            +
            string|null const char* - the error string (if any). An application should check that rd_kafka_event_error() returns non-zero before calling this function.
            +
            + +
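
          Example: as noted above, check rd_kafka_event_error() before reading the error string; a minimal sketch (binding class name and the $rkev event are assumptions).

```php
<?php
// Hedged sketch: binding class name and the $rkev event are assumed.
use RdKafka\FFI\Library;

if (Library::rd_kafka_event_error($rkev) !== 0) { // 0 == RD_KAFKA_RESP_ERR_NO_ERROR
    echo 'event error: ' . Library::rd_kafka_event_error_string($rkev) . PHP_EOL;
}
```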

            rd_kafka_event_error_is_fatal()

            +
            public static rd_kafka_event_error_is_fatal ( 
            +    \FFI\CData|null $rkev
            + ): int|null
            +
            +

            Event types:

            +
              +
            • RD_KAFKA_EVENT_ERROR
            • +
            +
            See also
            rd_kafka_fatal_error()
            + +
            +
            Parameters
            +
            rkev \FFI\CData|null rd_kafka_event_t* - )
            +
            Returns
            +
            int|null int - 1 if the error is a fatal error, else 0.
            +
            + +

            rd_kafka_event_opaque()

            +
            public static rd_kafka_event_opaque ( 
            +    \FFI\CData|null $rkev
            + ): \FFI\CData|object|string|null
            +
            +

            Event types:

            +
              +
            • RD_KAFKA_EVENT_OFFSET_COMMIT
            • +
            • RD_KAFKA_EVENT_CREATETOPICS_RESULT
            • +
            • RD_KAFKA_EVENT_DELETETOPICS_RESULT
            • +
            • RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT
            • +
            • RD_KAFKA_EVENT_CREATEACLS_RESULT
            • +
            • RD_KAFKA_EVENT_DESCRIBEACLS_RESULT
            • +
            • RD_KAFKA_EVENT_DELETEACLS_RESULT
            • +
            • RD_KAFKA_EVENT_ALTERCONFIGS_RESULT
            • +
            • RD_KAFKA_EVENT_INCREMENTAL_ALTERCONFIGS_RESULT
            • +
            • RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT
            • +
            • RD_KAFKA_EVENT_DELETEGROUPS_RESULT
            • +
            • RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT
            • +
            • RD_KAFKA_EVENT_DELETERECORDS_RESULT
            • +
            • RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT
            • +
            • RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT
            • +
            • RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT
            • +
            • RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT
            • +
            + +
            +
            Parameters
            +
            rkev \FFI\CData|null rd_kafka_event_t* - )
            +
            Returns
            +
            \FFI\CData|object|string|null void* - the event opaque (if any) as passed to rd_kafka_commit() (et.al) or rd_kafka_AdminOptions_set_opaque(), depending on event type.
            +
            + +

            rd_kafka_event_log()

            +
            public static rd_kafka_event_log ( 
            +    \FFI\CData|null $rkev, 
            +    \FFI\CData|null $fac, 
            +    \FFI\CData|null $str, 
            +    \FFI\CData|null $level
            + ): int|null
            +
            +

            Extract log message from the event.

            +

            Event types:

            +
              +
            • RD_KAFKA_EVENT_LOG
            • +
            + +
            +
            Parameters
            +
            rkev \FFI\CData|null rd_kafka_event_t*
            +
            fac \FFI\CData|null const char**
            +
            str \FFI\CData|null const char**
            +
            level \FFI\CData|null int*
            +
            Returns
            +
            int|null int - 0 on success or -1 if unsupported event type.
            +
            + +

            rd_kafka_event_stats()

            +
            public static rd_kafka_event_stats ( 
            +    \FFI\CData|null $rkev
            + ): string|null
            +
            +

            Extract stats from the event.

            +

            Event types:

            +
              +
            • RD_KAFKA_EVENT_STATS
            • +
            + +
            Remarks
The returned string will be freed automatically along with the event object.
            + +
            +
            Parameters
            +
            rkev \FFI\CData|null rd_kafka_event_t* - )
            +
            Returns
            +
            string|null const char* - stats json string.
            +
            + +
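
          Example: a short sketch for the stats event above; the returned JSON is owned by the event, so decode it before destroying the event. The binding class name, the $rkev event and the event type value (from rdkafka.h) are assumptions.

```php
<?php
// Hedged sketch: see the assumptions stated above.
use RdKafka\FFI\Library;

$eventStats = 0x40; // RD_KAFKA_EVENT_STATS

if (Library::rd_kafka_event_type($rkev) === $eventStats) {
    $json  = Library::rd_kafka_event_stats($rkev); // freed together with the event
    $stats = json_decode($json, true);
    // ... use $stats before calling rd_kafka_event_destroy($rkev) ...
}
```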

            rd_kafka_event_topic_partition_list()

            +
            public static rd_kafka_event_topic_partition_list ( 
            +    \FFI\CData|null $rkev
            + ): \FFI\CData|null
            +
            +
            Remarks
            The list MUST NOT be freed with rd_kafka_topic_partition_list_destroy()
            +

            Event types:

            +
              +
            • RD_KAFKA_EVENT_REBALANCE
            • +
            • RD_KAFKA_EVENT_OFFSET_COMMIT
            • +
            + +
            +
            Parameters
            +
            rkev \FFI\CData|null rd_kafka_event_t* - )
            +
            Returns
            +
            \FFI\CData|null rd_kafka_topic_partition_list_t* - the topic partition list from the event.
            +
            + +

            rd_kafka_event_topic_partition()

            +
            public static rd_kafka_event_topic_partition ( 
            +    \FFI\CData|null $rkev
            + ): \FFI\CData|null
            +
            +
            Remarks
            The returned pointer MUST be freed with rd_kafka_topic_partition_destroy().
            +

            Event types: RD_KAFKA_EVENT_ERROR (for partition level errors)

            + +
            +
            Parameters
            +
            rkev \FFI\CData|null rd_kafka_event_t* - )
            +
            Returns
            +
            \FFI\CData|null rd_kafka_topic_partition_t* - a newly allocated topic_partition container, if applicable for the event type, else NULL.
            +
            + +

            rd_kafka_event_CreateTopics_result()

            +
            public static rd_kafka_event_CreateTopics_result ( 
            +    \FFI\CData|null $rkev
            + ): \FFI\CData|null
            +
            +

            Get CreateTopics result.

            + +

            Event types: RD_KAFKA_EVENT_CREATETOPICS_RESULT

            + +
            +
            Parameters
            +
            rkev \FFI\CData|null rd_kafka_event_t* - )
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_CreateTopics_result_t* - the result of a CreateTopics request, or NULL if event is of different type.
            +
            + +

            rd_kafka_event_DeleteTopics_result()

            +
            public static rd_kafka_event_DeleteTopics_result ( 
            +    \FFI\CData|null $rkev
            + ): \FFI\CData|null
            +
            +

            Get DeleteTopics result.

            + +

            Event types: RD_KAFKA_EVENT_DELETETOPICS_RESULT

            + +
            +
            Parameters
            +
            rkev \FFI\CData|null rd_kafka_event_t* - )
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_DeleteTopics_result_t* - the result of a DeleteTopics request, or NULL if event is of different type.
            +
            + +

            rd_kafka_event_CreatePartitions_result()

            +
            public static rd_kafka_event_CreatePartitions_result ( 
            +    \FFI\CData|null $rkev
            + ): \FFI\CData|null
            +
            +

            Get CreatePartitions result.

            + +

            Event types: RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT

            + +
            +
            Parameters
            +
            rkev \FFI\CData|null rd_kafka_event_t* - )
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_CreatePartitions_result_t* - the result of a CreatePartitions request, or NULL if event is of different type.
            +
            + +

            rd_kafka_event_AlterConfigs_result()

            +
            public static rd_kafka_event_AlterConfigs_result ( 
            +    \FFI\CData|null $rkev
            + ): \FFI\CData|null
            +
            +

            Get AlterConfigs result.

            + +

            Event types: RD_KAFKA_EVENT_ALTERCONFIGS_RESULT

            + +
            +
            Parameters
            +
            rkev \FFI\CData|null rd_kafka_event_t* - )
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_AlterConfigs_result_t* - the result of a AlterConfigs request, or NULL if event is of different type.
            +
            + +

            rd_kafka_event_DescribeConfigs_result()

            +
            public static rd_kafka_event_DescribeConfigs_result ( 
            +    \FFI\CData|null $rkev
            + ): \FFI\CData|null
            +
            +

            Get DescribeConfigs result.

            + +

            Event types: RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT

            + +
            +
            Parameters
            +
            rkev \FFI\CData|null rd_kafka_event_t* - )
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_DescribeConfigs_result_t* - the result of a DescribeConfigs request, or NULL if event is of different type.
            +
            + +

            rd_kafka_queue_poll()

            +
            public static rd_kafka_queue_poll ( 
            +    \FFI\CData|null $rkqu, 
            +    int|null $timeout_ms
            + ): \FFI\CData|null
            +
            +

            Poll a queue for an event for max timeout_ms.

            + +
            Remarks
            Use rd_kafka_event_destroy() to free the event.
            +
            See also
            rd_kafka_conf_set_background_event_cb()
            + +
            +
            Parameters
            +
            rkqu \FFI\CData|null rd_kafka_queue_t*
            +
            timeout_ms int|null int
            +
            Returns
            +
            \FFI\CData|null rd_kafka_event_t* - an event, or NULL.
            +
            + +

            rd_kafka_queue_poll_callback()

            +
            public static rd_kafka_queue_poll_callback ( 
            +    \FFI\CData|null $rkqu, 
            +    int|null $timeout_ms
            + ): int|null
            +
            +

            Poll a queue for events served through callbacks for max timeout_ms.

            + +
            Remarks
            This API must only be used for queues with callbacks registered for all expected event types. E.g., not a message queue.
            +
            +Also see rd_kafka_conf_set_background_event_cb() for triggering event callbacks from a librdkafka-managed background thread.
            +
            See also
            rd_kafka_conf_set_background_event_cb()
            + +
            +
            Parameters
            +
            rkqu \FFI\CData|null rd_kafka_queue_t*
            +
            timeout_ms int|null int
            +
            Returns
            +
            int|null int - the number of events served.
            +
            + +

            rd_kafka_plugin_f_conf_init_t()

            +
            public static rd_kafka_plugin_f_conf_init_t ( 
            +    \FFI\CData|null $conf, 
            +    \FFI\CData|object|string|null $plug_opaquep, 
            +    \FFI\CData|null $errstr, 
            +    int|null $errstr_size
            + ): int
            +
            +

            Plugin's configuration initializer method called each time the library is referenced from configuration (even if previously loaded by another client instance).

            +
            Remarks
            This method MUST be implemented by plugins and have the symbol name conf_init
            + +
            Remarks
            A plugin may add an on_conf_destroy() interceptor to clean up plugin-specific resources created in the plugin's conf_init() method.
            + +
            +
            Parameters
            +
            conf \FFI\CData|null rd_kafka_conf_t* - Configuration set up to this point.
            +
            plug_opaquep \FFI\CData|object|string|null void** - Plugin can set this pointer to a per-configuration opaque pointer.
            +
errstr \FFI\CData|null char* - String buffer of size errstr_size where the plugin must write a human readable error string in case the initializer fails (returns non-zero).
+
errstr_size int|null size_t - Maximum space (including \0) in errstr.
            +
            Returns
            +
            int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error.
            +
            + +

            rd_kafka_interceptor_f_on_conf_set_t()

            +
            public static rd_kafka_interceptor_f_on_conf_set_t ( 
            +    \FFI\CData|null $conf, 
            +    string|null $name, 
            +    string|null $val, 
            +    \FFI\CData|null $errstr, 
            +    int|null $errstr_size, 
            +    \FFI\CData|object|string|null $ic_opaque
            + ): int
            +
            +

            on_conf_set() is called from rd_kafka_*_conf_set() in the order the interceptors were added.

            + +
            +
            Parameters
            +
            conf \FFI\CData|null rd_kafka_conf_t* - Configuration object.
            +
            name string|null const char* - The configuration property to set.
            +
            val string|null const char* - The configuration value to set, or NULL for reverting to default in which case the previous value should be freed.
            +
            errstr \FFI\CData|null char* - A human readable error string in case the interceptor fails.
            +
            errstr_size int|null size_t - Maximum space (including \0) in errstr.
            +
            ic_opaque \FFI\CData|object|string|null void* - The interceptor’s opaque pointer specified in ..add..().
            +
            Returns
            +
int rd_kafka_conf_res_t - RD_KAFKA_CONF_OK if the property was known and successfully handled by the interceptor, RD_KAFKA_CONF_INVALID if the property was handled by the interceptor but the value was invalid, or RD_KAFKA_CONF_UNKNOWN if the interceptor did not handle this property, in which case the property is passed on to the next interceptor in the chain, finally ending up at the built-in configuration handler.
            +
            + +

            rd_kafka_interceptor_f_on_conf_dup_t()

            +
            public static rd_kafka_interceptor_f_on_conf_dup_t ( 
            +    \FFI\CData|null $new_conf, 
            +    \FFI\CData|null $old_conf, 
            +    int|null $filter_cnt, 
            +    \FFI\CData|null $filter, 
            +    \FFI\CData|object|string|null $ic_opaque
            + ): int
            +
            +

on_conf_dup() is called from rd_kafka_conf_dup() in the order the interceptors were added and is used to let an interceptor re-register its conf interceptors with a new opaque value. The on_conf_dup() method is called prior to the configuration from old_conf being copied to new_conf.

            + +
            Remarks
            No on_conf_* interceptors are copied to the new configuration object on rd_kafka_conf_dup().
            + +
            +
            Parameters
            +
            new_conf \FFI\CData|null rd_kafka_conf_t* - New configuration object.
            +
            old_conf \FFI\CData|null const rd_kafka_conf_t* - Old configuration object to copy properties from.
            +
filter_cnt int|null size_t - Number of property names to filter in filter.
+
filter \FFI\CData|null const char** - Property names to filter out (ignore) when setting up new_conf.
            +
            ic_opaque \FFI\CData|object|string|null void* - The interceptor’s opaque pointer specified in ..add..().
            +
            Returns
            +
            int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on failure (which is logged but otherwise ignored).
            +
            + +

            rd_kafka_interceptor_f_on_conf_destroy_t()

            +
            public static rd_kafka_interceptor_f_on_conf_destroy_t ( 
            +    \FFI\CData|object|string|null $ic_opaque
            + ): int
            +
            +

            on_conf_destroy() is called from rd_kafka_*_conf_destroy() in the order the interceptors were added.

            + +
            +
            Parameters
            +
            ic_opaque \FFI\CData|object|string|null void* - The interceptor’s opaque pointer specified in ..add..().
            +
            Returns
            +
            int rd_kafka_resp_err_t
            +
            + +

            rd_kafka_interceptor_f_on_new_t()

            +
            public static rd_kafka_interceptor_f_on_new_t ( 
            +    \FFI\CData|null $rk, 
            +    \FFI\CData|null $conf, 
            +    \FFI\CData|object|string|null $ic_opaque, 
            +    \FFI\CData|null $errstr, 
            +    int|null $errstr_size
            + ): int
            +
            +

on_new() is called from rd_kafka_new() prior to returning the newly created client instance to the application.

            + +
            Warning
            The rk client instance will not be fully set up when this interceptor is called and the interceptor MUST NOT call any other rk-specific APIs than rd_kafka_interceptor_add..().
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - The client instance.
            +
            conf \FFI\CData|null const rd_kafka_conf_t* - The client instance’s final configuration.
            +
            ic_opaque \FFI\CData|object|string|null void* - The interceptor’s opaque pointer specified in ..add..().
            +
            errstr \FFI\CData|null char* - A human readable error string in case the interceptor fails.
            +
            errstr_size int|null size_t - Maximum space (including \0) in errstr.
            +
            Returns
            +
            int rd_kafka_resp_err_t - an error code on failure, the error is logged but otherwise ignored.
            +
            + +

            rd_kafka_interceptor_f_on_destroy_t()

            +
            public static rd_kafka_interceptor_f_on_destroy_t ( 
            +    \FFI\CData|null $rk, 
            +    \FFI\CData|object|string|null $ic_opaque
            + ): int
            +
            +

on_destroy() is called from rd_kafka_destroy(), or from rd_kafka_new() if rd_kafka_new() fails during initialization.

            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - The client instance.
            +
            ic_opaque \FFI\CData|object|string|null void* - The interceptor’s opaque pointer specified in ..add..().
            +
            Returns
            +
            int rd_kafka_resp_err_t
            +
            + +

            rd_kafka_interceptor_f_on_send_t()

            +
            public static rd_kafka_interceptor_f_on_send_t ( 
            +    \FFI\CData|null $rk, 
            +    \FFI\CData|null $rkmessage, 
            +    \FFI\CData|object|string|null $ic_opaque
            + ): int
            +
            +

            on_send() is called from rd_kafka_produce*() (et.al) prior to the partitioner being called.

            + +
            Remarks
            This interceptor is only used by producer instances.
            +
            +The rkmessage object is NOT mutable and MUST NOT be modified by the interceptor.
            +
            +If the partitioner fails or an unknown partition was specified, the on_acknowledgement() interceptor chain will be called from within the rd_kafka_produce*() call to maintain send-acknowledgement symmetry.
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - The client instance.
            +
            rkmessage \FFI\CData|null rd_kafka_message_t* - The message being produced. Immutable.
            +
            ic_opaque \FFI\CData|object|string|null void* - The interceptor’s opaque pointer specified in ..add..().
            +
            Returns
            +
            int rd_kafka_resp_err_t - an error code on failure, the error is logged but otherwise ignored.
            +
            + +

            rd_kafka_interceptor_f_on_acknowledgement_t()

            +
            public static rd_kafka_interceptor_f_on_acknowledgement_t ( 
            +    \FFI\CData|null $rk, 
            +    \FFI\CData|null $rkmessage, 
            +    \FFI\CData|object|string|null $ic_opaque
            + ): int
            +
            +

on_acknowledgement() is called to inform interceptors that a message was successfully delivered or permanently failed delivery. The interceptor chain is called from internal librdkafka background threads, or from rd_kafka_produce*() if the partitioner failed.

            + +
            Remarks
            This interceptor is only used by producer instances.
            +
            +The rkmessage object is NOT mutable and MUST NOT be modified by the interceptor.
            +
            Warning
            The on_acknowledgement() method may be called from internal librdkafka threads. An on_acknowledgement() interceptor MUST NOT call any librdkafka API's associated with the rk, or perform any blocking or prolonged work.
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - The client instance.
            +
            rkmessage \FFI\CData|null rd_kafka_message_t* - The message being produced. Immutable.
            +
            ic_opaque \FFI\CData|object|string|null void* - The interceptor’s opaque pointer specified in ..add..().
            +
            Returns
            +
            int rd_kafka_resp_err_t - an error code on failure, the error is logged but otherwise ignored.
            +
            + +

            rd_kafka_interceptor_f_on_consume_t()

            +
            public static rd_kafka_interceptor_f_on_consume_t ( 
            +    \FFI\CData|null $rk, 
            +    \FFI\CData|null $rkmessage, 
            +    \FFI\CData|object|string|null $ic_opaque
            + ): int
            +
            +

            on_consume() is called just prior to passing the message to the application in rd_kafka_consumer_poll(), rd_kafka_consume*(), the event interface, etc.

            + +
            Remarks
            This interceptor is only used by consumer instances.
            +
            +The rkmessage object is NOT mutable and MUST NOT be modified by the interceptor.
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - The client instance.
            +
            rkmessage \FFI\CData|null rd_kafka_message_t* - The message being consumed. Immutable.
            +
            ic_opaque \FFI\CData|object|string|null void* - The interceptor’s opaque pointer specified in ..add..().
            +
            Returns
            +
            int rd_kafka_resp_err_t - an error code on failure, the error is logged but otherwise ignored.
            +
            + +

            rd_kafka_interceptor_f_on_commit_t()

            +
            public static rd_kafka_interceptor_f_on_commit_t ( 
            +    \FFI\CData|null $rk, 
            +    \FFI\CData|null $offsets, 
            +    int $err, 
            +    \FFI\CData|object|string|null $ic_opaque
            + ): int
            +
            +

            on_commit() is called on completed or failed offset commit. It is called from internal librdkafka threads.

            + +
            Remarks
            This interceptor is only used by consumer instances.
            +
            Warning
            The on_commit() interceptor is called from internal librdkafka threads. An on_commit() interceptor MUST NOT call any librdkafka API's associated with the rk, or perform any blocking or prolonged work.
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - The client instance.
            +
            offsets \FFI\CData|null const rd_kafka_topic_partition_list_t* - List of topic+partition+offset+error that were committed. The error message of each partition should be checked for error.
            +
            err int rd_kafka_resp_err_t - The commit error, if any.
            +
            ic_opaque \FFI\CData|object|string|null void* - The interceptor’s opaque pointer specified in ..add..().
            +
            Returns
            +
            int rd_kafka_resp_err_t - an error code on failure, the error is logged but otherwise ignored.
            +
            + +

            rd_kafka_interceptor_f_on_request_sent_t()

            +
            public static rd_kafka_interceptor_f_on_request_sent_t ( 
            +    \FFI\CData|null $rk, 
            +    int|null $sockfd, 
            +    string|null $brokername, 
            +    int|null $brokerid, 
            +    int|null $ApiKey, 
            +    int|null $ApiVersion, 
            +    int|null $CorrId, 
            +    int|null $size, 
            +    \FFI\CData|object|string|null $ic_opaque
            + ): int
            +
            +

on_request_sent() is called when a request has been fully written to a broker TCP connection's socket.

            + +
            Warning
            The on_request_sent() interceptor is called from internal librdkafka broker threads. An on_request_sent() interceptor MUST NOT call any librdkafka API's associated with the rk, or perform any blocking or prolonged work.
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - The client instance.
            +
            sockfd int|null int - Socket file descriptor.
            +
            brokername string|null const char* - Broker request is being sent to.
            +
            brokerid int|null int32_t - Broker request is being sent to.
            +
            ApiKey int|null int16_t - Kafka protocol request type.
            +
            ApiVersion int|null int16_t - Kafka protocol request type version.
            +
            CorrId int|null int32_t - Kafka protocol request correlation id.
            +
            size int|null size_t - Size of request.
            +
            ic_opaque \FFI\CData|object|string|null void* - The interceptor’s opaque pointer specified in ..add..().
            +
            Returns
            +
            int rd_kafka_resp_err_t - an error code on failure; the error is logged but otherwise ignored.
            +
            + +

            rd_kafka_conf_interceptor_add_on_conf_set()

            +
            public static rd_kafka_conf_interceptor_add_on_conf_set ( 
            +    \FFI\CData|null $conf, 
            +    string|null $ic_name, 
            +    \FFI\CData|\Closure $on_conf_set, 
            +    \FFI\CData|object|string|null $ic_opaque
            + ): int
            +
            +

            Append an on_conf_set() interceptor.

            + +
            +
            Parameters
            +
            conf \FFI\CData|null rd_kafka_conf_t* - Configuration object.
            +
            ic_name string|null const char* - Interceptor name, used in logging.
            +
            on_conf_set \FFI\CData|\Closure rd_kafka_conf_res_t(rd_kafka_interceptor_f_on_conf_set_t*)(rd_kafka_conf_t*, const char*, const char*, char*, size_t, void*) - Function pointer.
            +
            ic_opaque \FFI\CData|object|string|null void* - Opaque value that will be passed to the function.
            +
            Returns
            +
            int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing interceptor with the same ic_name and function has already been added to conf.
            +
            + +

            rd_kafka_conf_interceptor_add_on_conf_dup()

            +
            public static rd_kafka_conf_interceptor_add_on_conf_dup ( 
            +    \FFI\CData|null $conf, 
            +    string|null $ic_name, 
            +    \FFI\CData|\Closure $on_conf_dup, 
            +    \FFI\CData|object|string|null $ic_opaque
            + ): int
            +
            +

            Append an on_conf_dup() interceptor.

            + +
            +
            Parameters
            +
            conf \FFI\CData|null rd_kafka_conf_t* - Configuration object.
            +
            ic_name string|null const char* - Interceptor name, used in logging.
            +
            on_conf_dup \FFI\CData|\Closure rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_conf_dup_t*)(rd_kafka_conf_t*, const rd_kafka_conf_t*, size_t, const char**, void*) - Function pointer.
            +
            ic_opaque \FFI\CData|object|string|null void* - Opaque value that will be passed to the function.
            +
            Returns
            +
            int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing interceptor with the same ic_name and function has already been added to conf.
            +
            + +

            rd_kafka_conf_interceptor_add_on_conf_destroy()

            +
            public static rd_kafka_conf_interceptor_add_on_conf_destroy ( 
            +    \FFI\CData|null $conf, 
            +    string|null $ic_name, 
            +    \FFI\CData|\Closure $on_conf_destroy, 
            +    \FFI\CData|object|string|null $ic_opaque
            + ): int
            +
            +

            Append an on_conf_destroy() interceptor.

            + +
            Remarks
            Multiple on_conf_destroy() interceptors are allowed to be added to the same configuration object.
            + +
            +
            Parameters
            +
            conf \FFI\CData|null rd_kafka_conf_t* - Configuration object.
            +
            ic_name string|null const char* - Interceptor name, used in logging.
            +
            on_conf_destroy \FFI\CData|\Closure rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_conf_destroy_t*)(void*) - Function pointer.
            +
            ic_opaque \FFI\CData|object|string|null void* - Opaque value that will be passed to the function.
            +
            Returns
            +
            int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR
            +
            + +

            rd_kafka_conf_interceptor_add_on_new()

            +
            public static rd_kafka_conf_interceptor_add_on_new ( 
            +    \FFI\CData|null $conf, 
            +    string|null $ic_name, 
            +    \FFI\CData|\Closure $on_new, 
            +    \FFI\CData|object|string|null $ic_opaque
            + ): int
            +
            +

            Append an on_new() interceptor.

            + +
            Remarks
            Since the on_new() interceptor is added to the configuration object it may be copied by rd_kafka_conf_dup(). An interceptor implementation must thus be able to handle the same (interceptor, ic_opaque) tuple being used by multiple client instances.
            +
            +An interceptor plugin should check the return value to make sure it has not already been added.
            + +
            +
            Parameters
            +
            conf \FFI\CData|null rd_kafka_conf_t* - Configuration object.
            +
            ic_name string|null const char* - Interceptor name, used in logging.
            +
            on_new \FFI\CData|\Closure rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_new_t*)(rd_kafka_t*, const rd_kafka_conf_t*, void*, char*, size_t) - Function pointer.
            +
            ic_opaque \FFI\CData|object|string|null void* - Opaque value that will be passed to the function.
            +
            Returns
            +
            int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing interceptor with the same ic_name and function has already been added to conf.
            +
            + +
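
            Example: a minimal sketch of registering an on_new() interceptor from PHP. It assumes the static bindings documented here live on a wrapper class (\RdKafka\FFI\Library is used below purely as a placeholder name), that the plain librdkafka constructor rd_kafka_conf_new() is bound on the same class, and that a PHP closure may be passed wherever the signature allows \FFI\CData|\Closure.

            <?php
            use RdKafka\FFI\Library; // placeholder name for the binding class documented here

            $conf = Library::rd_kafka_conf_new();

            $err = Library::rd_kafka_conf_interceptor_add_on_new(
                $conf,
                'example-interceptor',  // ic_name, used in logging
                function ($rk, $conf, $icOpaque, $errstr, $errstrSize): int {
                    // Called when a client instance is created from this conf.
                    // Must return an rd_kafka_resp_err_t; 0 is RD_KAFKA_RESP_ERR_NO_ERROR.
                    return 0;
                },
                null                    // ic_opaque handed back to the closure
            );

            if ($err !== 0) {
                // e.g. RD_KAFKA_RESP_ERR__CONFLICT: the same interceptor was already added
            }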

            rd_kafka_interceptor_add_on_destroy()

            +
            public static rd_kafka_interceptor_add_on_destroy ( 
            +    \FFI\CData|null $rk, 
            +    string|null $ic_name, 
            +    \FFI\CData|\Closure $on_destroy, 
            +    \FFI\CData|object|string|null $ic_opaque
            + ): int
            +
            +

            Append an on_destroy() interceptor.

            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - Client instance.
            +
            ic_name string|null const char* - Interceptor name, used in logging.
            +
            on_destroy \FFI\CData|\Closure rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_destroy_t*)(rd_kafka_t*, void*) - Function pointer.
            +
            ic_opaque \FFI\CData|object|string|null void* - Opaque value that will be passed to the function.
            +
            Returns
            +
            int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing interceptor with the same ic_name and function has already been added to conf.
            +
            + +

            rd_kafka_interceptor_add_on_send()

            +
            public static rd_kafka_interceptor_add_on_send ( 
            +    \FFI\CData|null $rk, 
            +    string|null $ic_name, 
            +    \FFI\CData|\Closure $on_send, 
            +    \FFI\CData|object|string|null $ic_opaque
            + ): int
            +
            +

            Append an on_send() interceptor.

            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - Client instance.
            +
            ic_name string|null const char* - Interceptor name, used in logging.
            +
            on_send \FFI\CData|\Closure rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_send_t*)(rd_kafka_t*, rd_kafka_message_t*, void*) - Function pointer.
            +
            ic_opaque \FFI\CData|object|string|null void* - Opaque value that will be passed to the function.
            +
            Returns
            +
            int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing interceptor with the same ic_name and function has already been added to conf.
            +
            + +

            rd_kafka_interceptor_add_on_acknowledgement()

            +
            public static rd_kafka_interceptor_add_on_acknowledgement ( 
            +    \FFI\CData|null $rk, 
            +    string|null $ic_name, 
            +    \FFI\CData|\Closure $on_acknowledgement, 
            +    \FFI\CData|object|string|null $ic_opaque
            + ): int
            +
            +

            Append an on_acknowledgement() interceptor.

            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - Client instance.
            +
            ic_name string|null const char* - Interceptor name, used in logging.
            +
            on_acknowledgement \FFI\CData|\Closure rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_acknowledgement_t*)(rd_kafka_t*, rd_kafka_message_t*, void*) - Function pointer.
            +
            ic_opaque \FFI\CData|object|string|null void* - Opaque value that will be passed to the function.
            +
            Returns
            +
            int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing interceptor with the same ic_name and function has already been added to conf.
            +
            + +

            rd_kafka_interceptor_add_on_consume()

            +
            public static rd_kafka_interceptor_add_on_consume ( 
            +    \FFI\CData|null $rk, 
            +    string|null $ic_name, 
            +    \FFI\CData|\Closure $on_consume, 
            +    \FFI\CData|object|string|null $ic_opaque
            + ): int
            +
            +

            Append an on_consume() interceptor.

            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - Client instance.
            +
            ic_name string|null const char* - Interceptor name, used in logging.
            +
            on_consume \FFI\CData|\Closure rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_consume_t*)(rd_kafka_t*, rd_kafka_message_t*, void*) - Function pointer.
            +
            ic_opaque \FFI\CData|object|string|null void* - Opaque value that will be passed to the function.
            +
            Returns
            +
            int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing interceptor with the same ic_name and function has already been added to conf.
            +
            + +

            rd_kafka_interceptor_add_on_commit()

            +
            public static rd_kafka_interceptor_add_on_commit ( 
            +    \FFI\CData|null $rk, 
            +    string|null $ic_name, 
            +    \FFI\CData|\Closure $on_commit, 
            +    \FFI\CData|object|string|null $ic_opaque
            + ): int
            +
            +

            Append an on_commit() interceptor.

            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - Client instance.
            +
            ic_name string|null const char* - Interceptor name, used in logging.
            +
            on_commit \FFI\CData|\Closure rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_commit_t*)(rd_kafka_t*, const rd_kafka_topic_partition_list_t*, rd_kafka_resp_err_t, void*)
            +
            ic_opaque \FFI\CData|object|string|null void* - Opaque value that will be passed to the function.
            +
            Returns
            +
            int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing interceptor with the same ic_name and function has already been added to conf.
            +
            + +

            rd_kafka_interceptor_add_on_request_sent()

            +
            public static rd_kafka_interceptor_add_on_request_sent ( 
            +    \FFI\CData|null $rk, 
            +    string|null $ic_name, 
            +    \FFI\CData|\Closure $on_request_sent, 
            +    \FFI\CData|object|string|null $ic_opaque
            + ): int
            +
            +

            Append an on_request_sent() interceptor.

            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - Client instance.
            +
            ic_name string|null const char* - Interceptor name, used in logging.
            +
            on_request_sent \FFI\CData|\Closure rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_request_sent_t*)(rd_kafka_t*, int, const char*, int32_t, int16_t, int16_t, int32_t, size_t, void*)
            +
            ic_opaque \FFI\CData|object|string|null void* - Opaque value that will be passed to the function.
            +
            Returns
            +
            int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing interceptor with the same ic_name and function has already been added to conf.
            +
            + +

            rd_kafka_topic_result_error()

            +
            public static rd_kafka_topic_result_error ( 
            +    \FFI\CData|null $topicres
            + ): int
            +
            +

            Topic result provides per-topic operation result information.

            + +
            +
            Parameters
            +
            topicres \FFI\CData|null const rd_kafka_topic_result_t* - )
            +
            Returns
            +
            int rd_kafka_resp_err_t - the error code for the given topic result.
            +
            + +

            rd_kafka_topic_result_error_string()

            +
            public static rd_kafka_topic_result_error_string ( 
            +    \FFI\CData|null $topicres
            + ): string|null
            +
            +
            Remarks
            lifetime of the returned string is the same as the topicres.
            + +
            +
            Parameters
            +
            topicres \FFI\CData|null const rd_kafka_topic_result_t* - )
            +
            Returns
            +
            string|null const char* - the human readable error string for the given topic result, or NULL if there was no error.
            +
            + +

            rd_kafka_topic_result_name()

            +
            public static rd_kafka_topic_result_name ( 
            +    \FFI\CData|null $topicres
            + ): string|null
            +
            +
            Remarks
            lifetime of the returned string is the same as the topicres.
            + +
            +
            Parameters
            +
            topicres \FFI\CData|null const rd_kafka_topic_result_t* - )
            +
            Returns
            +
            string|null const char* - the name of the topic for the given topic result.
            +
            + +

            rd_kafka_AdminOptions_new()

            +
            public static rd_kafka_AdminOptions_new ( 
            +    \FFI\CData|null $rk, 
            +    int $for_api
            + ): \FFI\CData|null
            +
            +

            Create a new AdminOptions object.

            +
            +   The options object is not modified by the Admin API request APIs (e.g. CreateTopics) and may be reused for multiple calls.
            +
            +
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - Client instance.
            +
            for_api int rd_kafka_admin_op_t - Specifies what Admin API this AdminOptions object will be used for, which will enforce what AdminOptions_set_..() calls may be used based on the API, causing unsupported set..() calls to fail. Specifying RD_KAFKA_ADMIN_OP_ANY disables the enforcement allowing any option to be set, even if the option is not used in a future call to an Admin API method.
            +
            Returns
            +
            \FFI\CData|null rd_kafka_AdminOptions_t* - a new AdminOptions object (which must be freed with rd_kafka_AdminOptions_destroy()), or NULL if for_api was set to an unknown API op type.
            +
            + +

            rd_kafka_AdminOptions_destroy()

            +
            public static rd_kafka_AdminOptions_destroy ( 
            +    \FFI\CData|null $options
            + ): void
            +
            +
            +
            Parameters
            +
            options \FFI\CData|null rd_kafka_AdminOptions_t*
            +
            +

            rd_kafka_AdminOptions_set_request_timeout()

            +
            public static rd_kafka_AdminOptions_set_request_timeout ( 
            +    \FFI\CData|null $options, 
            +    int|null $timeout_ms, 
            +    \FFI\CData|null $errstr, 
            +    int|null $errstr_size
            + ): int
            +
            +

            Sets the overall request timeout, including broker lookup, request transmission, operation time on broker, and response.

            + +
            Remarks
            This option is valid for all Admin API requests.
            + +
            +
            Parameters
            +
            options \FFI\CData|null rd_kafka_AdminOptions_t* - Admin options.
            +
            timeout_ms int|null int - Timeout in milliseconds. Defaults to socket.timeout.ms.
            +
            errstr \FFI\CData|null char* - A human readable error string (nul-terminated) is written to this location that must be of at least errstr_size bytes. The errstr is only written in case of error.
            +
            errstr_size int|null size_t - Writable size in errstr.
            +
            Returns
            +
            int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success, or RD_KAFKA_RESP_ERR__INVALID_ARG if timeout was out of range, in which case an error string will be written to errstr.
            +
            + +
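
            Example: a sketch of the errstr/errstr_size pattern shared by the AdminOptions setters, under the same placeholder-class assumption as above. The error buffer is an ordinary PHP FFI char array and is only meaningful when the call returns an error; RD_KAFKA_ADMIN_OP_CREATETOPICS (enum value 1 in rdkafka.h) is assumed to be exposed as a PHP constant by the binding.

            <?php
            use RdKafka\FFI\Library; // placeholder binding class

            // $rk: an existing rd_kafka_t* client handle (e.g. from Library::rd_kafka_new()).
            $options = Library::rd_kafka_AdminOptions_new($rk, RD_KAFKA_ADMIN_OP_CREATETOPICS);

            $errstr = \FFI::new('char[512]');
            $err = Library::rd_kafka_AdminOptions_set_request_timeout($options, 5000, $errstr, 512);
            if ($err !== 0) { // RD_KAFKA_RESP_ERR__INVALID_ARG if the timeout is out of range
                echo \FFI::string($errstr), PHP_EOL;
            }

            // ... pass $options to the Admin API call, then release it:
            Library::rd_kafka_AdminOptions_destroy($options);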

            rd_kafka_AdminOptions_set_operation_timeout()

            +
            public static rd_kafka_AdminOptions_set_operation_timeout ( 
            +    \FFI\CData|null $options, 
            +    int|null $timeout_ms, 
            +    \FFI\CData|null $errstr, 
            +    int|null $errstr_size
            + ): int
            +
            +

            Sets the broker's operation timeout, such as the timeout for CreateTopics to complete the creation of topics on the controller before returning a result to the application.

            +

            CreateTopics: values <= 0 will return immediately after triggering topic creation, while > 0 will wait this long for topic creation to propagate in cluster. Default: 60 seconds.

            +

            DeleteTopics: same semantics as CreateTopics. CreatePartitions: same semantics as CreateTopics.

            + +
            Remarks
            This option is valid for CreateTopics, DeleteTopics, CreatePartitions, and DeleteRecords.
            + +
            +
            Parameters
            +
            options \FFI\CData|null rd_kafka_AdminOptions_t* - Admin options.
            +
            timeout_ms int|null int - Timeout in milliseconds.
            +
            errstr \FFI\CData|null char* - A human readable error string (nul-terminated) is written to this location that must be of at least errstr_size bytes. The errstr is only written in case of error.
            +
            errstr_size int|null size_t - Writable size in errstr.
            +
            Returns
            +
            int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success, or RD_KAFKA_RESP_ERR__INVALID_ARG if timeout was out of range, in which case an error string will be written to errstr.
            +
            + +

            rd_kafka_AdminOptions_set_validate_only()

            +
            public static rd_kafka_AdminOptions_set_validate_only ( 
            +    \FFI\CData|null $options, 
            +    int|null $true_or_false, 
            +    \FFI\CData|null $errstr, 
            +    int|null $errstr_size
            + ): int
            +
            +

            Tell broker to only validate the request, without performing the requested operation (create topics, etc).

            + +
            Remarks
            This option is valid for CreateTopics, CreatePartitions, AlterConfigs.
            + +
            +
            Parameters
            +
            options \FFI\CData|null rd_kafka_AdminOptions_t* - Admin options.
            +
            true_or_false int|null int - Defaults to false.
            +
            errstr \FFI\CData|null char* - A human readable error string (nul-terminated) is written to this location that must be of at least errstr_size bytes. The errstr is only written in case of error.
            +
            errstr_size int|null size_t - Writable size in errstr.
            +
            Returns
            +
            int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on failure, in which case an error string will be written to errstr.
            +
            + +

            rd_kafka_AdminOptions_set_broker()

            +
            public static rd_kafka_AdminOptions_set_broker ( 
            +    \FFI\CData|null $options, 
            +    int|null $broker_id, 
            +    \FFI\CData|null $errstr, 
            +    int|null $errstr_size
            + ): int
            +
            +

            Override what broker the Admin request will be sent to.

            +

            By default, Admin requests are sent to the controller broker, with the following exceptions:

            +
              +
            • AlterConfigs with a BROKER resource are sent to the broker id set as the resource name.
            • +
            • IncrementalAlterConfigs with a BROKER resource are sent to the broker id set as the resource name.
            • +
            • DescribeConfigs with a BROKER resource are sent to the broker id set as the resource name.
            • +
            + +
            Remarks
            This API should typically not be used, but serves as a workaround if new resource types are added for which the client does not know where to send the request.
            + +
            +
            Parameters
            +
            options \FFI\CData|null rd_kafka_AdminOptions_t* - Admin Options.
            +
            broker_id int|null int32_t - The broker to send the request to.
            +
            errstr \FFI\CData|null char* - A human readable error string (nul-terminated) is written to this location that must be of at least errstr_size bytes. The errstr is only written in case of error.
            +
            errstr_size int|null size_t - Writable size in errstr.
            +
            Returns
            +
            int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on failure, in which case an error string will be written to errstr.
            +
            + +

            rd_kafka_AdminOptions_set_opaque()

            +
            public static rd_kafka_AdminOptions_set_opaque ( 
            +    \FFI\CData|null $options, 
            +    \FFI\CData|object|string|null $opaque
            + ): void
            +
            +
            +
            Parameters
            +
            options \FFI\CData|null rd_kafka_AdminOptions_t*
            +
            opaque \FFI\CData|object|string|null void*
            +
            +

            rd_kafka_NewTopic_new()

            +
            public static rd_kafka_NewTopic_new ( 
            +    string|null $topic, 
            +    int|null $num_partitions, 
            +    int|null $replication_factor, 
            +    \FFI\CData|null $errstr, 
            +    int|null $errstr_size
            + ): \FFI\CData|null
            +
            +

            Create a new NewTopic object. This object is later passed to rd_kafka_CreateTopics().

            + +
            +
            Parameters
            +
            topic string|null const char* - Topic name to create.
            +
            num_partitions int|null int - Number of partitions in topic, or -1 to use the broker’s default partition count (>= 2.4.0).
            +
            replication_factor int|null int - Default replication factor for the topic’s partitions, or -1 to use the broker’s default replication factor (>= 2.4.0) or if set_replica_assignment() will be used.
            +
            errstr \FFI\CData|null char* - A human readable error string (nul-terminated) is written to this location that must be of at least errstr_size bytes. The errstr is only written in case of error.
            +
            errstr_size int|null size_t - Writable size in errstr.
            +
            Returns
            +
            \FFI\CData|null rd_kafka_NewTopic_t* - a new allocated NewTopic object, or NULL if the input parameters are invalid. Use rd_kafka_NewTopic_destroy() to free object when done.
            +
            + +
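
            Example: a sketch of creating a NewTopic object with the FFI error-buffer pattern shown above (same placeholder binding class; the topic name, partition count and replication factor are illustrative only).

            <?php
            use RdKafka\FFI\Library; // placeholder binding class

            $errstr = \FFI::new('char[512]');
            $newTopic = Library::rd_kafka_NewTopic_new('example-topic', 3, 1, $errstr, 512);
            if ($newTopic === null) {
                // invalid input parameters; $errstr holds the reason
                echo \FFI::string($errstr), PHP_EOL;
            }

            // ... pass $newTopic to rd_kafka_CreateTopics(), then free it:
            Library::rd_kafka_NewTopic_destroy($newTopic);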

            rd_kafka_NewTopic_destroy()

            +
            public static rd_kafka_NewTopic_destroy ( 
            +    \FFI\CData|null $new_topic
            + ): void
            +
            +
            +
            Parameters
            +
            new_topic \FFI\CData|null rd_kafka_NewTopic_t*
            +
            +

            rd_kafka_NewTopic_destroy_array()

            +
            public static rd_kafka_NewTopic_destroy_array ( 
            +    \FFI\CData|null $new_topics, 
            +    int|null $new_topic_cnt
            + ): void
            +
            +
            +
            Parameters
            +
            new_topics \FFI\CData|null rd_kafka_NewTopic_t**
            +
            new_topic_cnt int|null size_t
            +
            +

            rd_kafka_NewTopic_set_replica_assignment()

            +
            public static rd_kafka_NewTopic_set_replica_assignment ( 
            +    \FFI\CData|null $new_topic, 
            +    int|null $partition, 
            +    \FFI\CData|null $broker_ids, 
            +    int|null $broker_id_cnt, 
            +    \FFI\CData|null $errstr, 
            +    int|null $errstr_size
            + ): int
            +
            +

            Set the replica (broker) assignment for partition to the replica set in broker_ids (of broker_id_cnt elements).

            +
            Remarks
            When this method is used, rd_kafka_NewTopic_new() must have been called with a replication_factor of -1.
            +
            +An application must either set the replica assignment for all new partitions, or none.
            +
            +If called, this function must be called consecutively for each partition, starting at 0.
            +
            +Use rd_kafka_metadata() to retrieve the list of brokers in the cluster.
            + +
            See also
            rd_kafka_AdminOptions_set_validate_only()
            + +
            +
            Parameters
            +
            new_topic \FFI\CData|null rd_kafka_NewTopic_t*
            +
            partition int|null int32_t
            +
            broker_ids \FFI\CData|null int32_t*
            +
            broker_id_cnt int|null size_t
            +
            errstr \FFI\CData|null char*
            +
            errstr_size int|null size_t
            +
            Returns
            +
            int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success, or an error code if the arguments were invalid.
            +
            + +
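
            Example: a sketch of a manual replica assignment, assuming a $newTopic created with replication_factor -1 (see the remark above) and the placeholder binding class. The broker ids are illustrative; the int32_t array is an ordinary PHP FFI allocation that is passed where the binding expects an int32_t* argument.

            <?php
            use RdKafka\FFI\Library; // placeholder binding class

            // $newTopic: rd_kafka_NewTopic_t* created with replication_factor = -1.
            $brokerIds = \FFI::new('int32_t[3]');
            $brokerIds[0] = 101; // illustrative broker ids; use rd_kafka_metadata() to discover real ones
            $brokerIds[1] = 102;
            $brokerIds[2] = 103;

            $errstr = \FFI::new('char[512]');
            $err = Library::rd_kafka_NewTopic_set_replica_assignment(
                $newTopic,
                0,          // partition 0; must be called consecutively starting at 0
                $brokerIds,
                3,
                $errstr,
                512
            );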

            rd_kafka_NewTopic_set_config()

            +
            public static rd_kafka_NewTopic_set_config ( 
            +    \FFI\CData|null $new_topic, 
            +    string|null $name, 
            +    string|null $value
            + ): int
            +
            +

            Set (broker-side) topic configuration name/value pair.

            +
            Remarks
            The name and value are not validated by the client, the validation takes place on the broker.
            + +
            See also
            rd_kafka_AdminOptions_set_validate_only()
            +
            +http://kafka.apache.org/documentation.html#topicconfigs
            + +
            +
            Parameters
            +
            new_topic \FFI\CData|null rd_kafka_NewTopic_t*
            +
            name string|null const char*
            +
            value string|null const char*
            +
            Returns
            +
            int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success, or an error code if the arguments were invalid.
            +
            + +

            rd_kafka_CreateTopics()

            +
            public static rd_kafka_CreateTopics ( 
            +    \FFI\CData|null $rk, 
            +    \FFI\CData|null $new_topics, 
            +    int|null $new_topic_cnt, 
            +    \FFI\CData|null $options, 
            +    \FFI\CData|null $rkqu
            + ): void
            +
            +

            Create topics in cluster as specified by the new_topics array of size new_topic_cnt elements.

            + +

            Supported admin options:

            +
              +
            • rd_kafka_AdminOptions_set_validate_only() - default false
            • +
            • rd_kafka_AdminOptions_set_operation_timeout() - default 60 seconds
            • +
            • rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms
            • +
            +
            Remarks
            The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_CREATETOPICS_RESULT
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - Client instance.
            +
            new_topics \FFI\CData|null rd_kafka_NewTopic_t** - Array of new topics to create.
            +
            new_topic_cnt int|null size_t - Number of elements in new_topics array.
            +
            options \FFI\CData|null const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults.
            +
            rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to emit result on.
            +
            + +

            rd_kafka_CreateTopics_result_topics()

            +
            public static rd_kafka_CreateTopics_result_topics ( 
            +    \FFI\CData|null $result, 
            +    \FFI\CData|null $cntp
            + ): \FFI\CData|null
            +
            +

            Get an array of topic results from a CreateTopics result.

            +

            The returned topics life-time is the same as the result object.

            + +
            +
            Parameters
            +
            result \FFI\CData|null const rd_kafka_CreateTopics_result_t* - Result to get topics from.
            +
            cntp \FFI\CData|null size_t* - Updated to the number of elements in the array.
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_topic_result_t**
            +
            + +
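
            Example: a hedged end-to-end sketch of a CreateTopics call: dispatch the request on a dedicated queue, poll for the RD_KAFKA_EVENT_CREATETOPICS_RESULT event, and inspect the per-topic results. The queue and event functions used here (rd_kafka_queue_new(), rd_kafka_queue_poll(), rd_kafka_event_type(), rd_kafka_event_CreateTopics_result(), rd_kafka_event_destroy(), rd_kafka_queue_destroy()) are plain librdkafka APIs assumed to be bound on the same placeholder class, and the event-type constant is assumed to be exposed as a PHP constant. Building the rd_kafka_NewTopic_t*[] argument requires an FFI instance that knows the librdkafka types; how the binding exposes that instance is not shown here.

            <?php
            use RdKafka\FFI\Library; // placeholder binding class

            // $rk: client handle; $options: AdminOptions; $newTopics: CData array of rd_kafka_NewTopic_t*
            // holding one element (binding-specific construction, see note above).
            $queue = Library::rd_kafka_queue_new($rk);
            Library::rd_kafka_CreateTopics($rk, $newTopics, 1, $options, $queue);

            $event = Library::rd_kafka_queue_poll($queue, 10000); // block up to 10 seconds for the result event
            if ($event !== null
                && Library::rd_kafka_event_type($event) === RD_KAFKA_EVENT_CREATETOPICS_RESULT) {
                $result = Library::rd_kafka_event_CreateTopics_result($event);

                $cnt = \FFI::new('size_t');
                $topicResults = Library::rd_kafka_CreateTopics_result_topics($result, \FFI::addr($cnt));

                for ($i = 0; $i < $cnt->cdata; $i++) {
                    $err = Library::rd_kafka_topic_result_error($topicResults[$i]);
                    if ($err !== 0) {
                        echo Library::rd_kafka_topic_result_name($topicResults[$i]), ': ',
                            Library::rd_kafka_topic_result_error_string($topicResults[$i]), PHP_EOL;
                    }
                }
            }

            if ($event !== null) {
                Library::rd_kafka_event_destroy($event);
            }
            Library::rd_kafka_AdminOptions_destroy($options);
            Library::rd_kafka_queue_destroy($queue);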

            rd_kafka_DeleteTopic_new()

            +
            public static rd_kafka_DeleteTopic_new ( 
            +    string|null $topic
            + ): \FFI\CData|null
            +
            +

            Create a new DeleteTopic object. This object is later passed to rd_kafka_DeleteTopics().

            + +
            +
            Parameters
            +
            topic string|null const char* - Topic name to delete.
            +
            Returns
            +
            \FFI\CData|null rd_kafka_DeleteTopic_t* - a new allocated DeleteTopic object. Use rd_kafka_DeleteTopic_destroy() to free object when done.
            +
            + +

            rd_kafka_DeleteTopic_destroy()

            +
            public static rd_kafka_DeleteTopic_destroy ( 
            +    \FFI\CData|null $del_topic
            + ): void
            +
            +
            +
            Parameters
            +
            del_topic \FFI\CData|null rd_kafka_DeleteTopic_t*
            +
            +

            rd_kafka_DeleteTopic_destroy_array()

            +
            public static rd_kafka_DeleteTopic_destroy_array ( 
            +    \FFI\CData|null $del_topics, 
            +    int|null $del_topic_cnt
            + ): void
            +
            +
            +
            Parameters
            +
            del_topics \FFI\CData|null rd_kafka_DeleteTopic_t**
            +
            del_topic_cnt int|null size_t
            +
            +

            rd_kafka_DeleteTopics()

            +
            public static rd_kafka_DeleteTopics ( 
            +    \FFI\CData|null $rk, 
            +    \FFI\CData|null $del_topics, 
            +    int|null $del_topic_cnt, 
            +    \FFI\CData|null $options, 
            +    \FFI\CData|null $rkqu
            + ): void
            +
            +

            Delete topics from cluster as specified by the topics array of size topic_cnt elements.

            + +
            Remarks
            The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_DELETETOPICS_RESULT
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - Client instance.
            +
            del_topics \FFI\CData|null rd_kafka_DeleteTopic_t** - Array of topics to delete.
            +
            del_topic_cnt int|null size_t - Number of elements in topics array.
            +
            options \FFI\CData|null const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults.
            +
            rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to emit result on.
            +
            + +
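
            Example: a brief sketch of the DeleteTopics dispatch, which mirrors the CreateTopics flow above (same queue/options pattern, with RD_KAFKA_EVENT_DELETETOPICS_RESULT as the result event type); the placeholder-class and array-construction caveats from the CreateTopics sketch apply.

            <?php
            use RdKafka\FFI\Library; // placeholder binding class

            $delTopic = Library::rd_kafka_DeleteTopic_new('example-topic');
            // $delTopics: CData array of rd_kafka_DeleteTopic_t* holding $delTopic (binding-specific construction).
            Library::rd_kafka_DeleteTopics($rk, $delTopics, 1, $options, $queue);
            // ... poll $queue for the RD_KAFKA_EVENT_DELETETOPICS_RESULT event as shown above, then:
            Library::rd_kafka_DeleteTopic_destroy($delTopic);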

            rd_kafka_DeleteTopics_result_topics()

            +
            public static rd_kafka_DeleteTopics_result_topics ( 
            +    \FFI\CData|null $result, 
            +    \FFI\CData|null $cntp
            + ): \FFI\CData|null
            +
            +

            Get an array of topic results from a DeleteTopics result.

            +

            The returned topics life-time is the same as the result object.

            + +
            +
            Parameters
            +
            result \FFI\CData|null const rd_kafka_DeleteTopics_result_t* - Result to get topic results from.
            +
            cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_topic_result_t**
            +
            + +

            rd_kafka_NewPartitions_new()

            +
            public static rd_kafka_NewPartitions_new ( 
            +    string|null $topic, 
            +    int|null $new_total_cnt, 
            +    \FFI\CData|null $errstr, 
            +    int|null $errstr_size
            + ): \FFI\CData|null
            +
            +

            Create a new NewPartitions. This object is later passed to rd_kafka_CreatePartitions() to increase the number of partitions to new_total_cnt for an existing topic.

            + +
            +
            Parameters
            +
            topic string|null const char* - Topic name to create more partitions for.
            +
            new_total_cnt int|null size_t - Increase the topic’s partition count to this value.
            +
            errstr \FFI\CData|null char* - A human readable error string (nul-terminated) is written to this location that must be of at least errstr_size bytes. The errstr is only written in case of error.
            +
            errstr_size int|null size_t - Writable size in errstr.
            +
            Returns
            +
            \FFI\CData|null rd_kafka_NewPartitions_t* - a new allocated NewPartitions object, or NULL if the input parameters are invalid. Use rd_kafka_NewPartitions_destroy() to free object when done.
            +
            + +

            rd_kafka_NewPartitions_destroy()

            +
            public static rd_kafka_NewPartitions_destroy ( 
            +    \FFI\CData|null $new_parts
            + ): void
            +
            +
            +
            Parameters
            +
            new_parts \FFI\CData|null rd_kafka_NewPartitions_t*
            +
            +

            rd_kafka_NewPartitions_destroy_array()

            +
            public static rd_kafka_NewPartitions_destroy_array ( 
            +    \FFI\CData|null $new_parts, 
            +    int|null $new_parts_cnt
            + ): void
            +
            +
            +
            Parameters
            +
            new_parts \FFI\CData|null rd_kafka_NewPartitions_t**
            +
            new_parts_cnt int|null size_t
            +
            +

            rd_kafka_NewPartitions_set_replica_assignment()

            +
            public static rd_kafka_NewPartitions_set_replica_assignment ( 
            +    \FFI\CData|null $new_parts, 
            +    int|null $new_partition_idx, 
            +    \FFI\CData|null $broker_ids, 
            +    int|null $broker_id_cnt, 
            +    \FFI\CData|null $errstr, 
            +    int|null $errstr_size
            + ): int
            +
            +

            Set the replica (broker id) assignment for new_partition_idx to the replica set in broker_ids (of broker_id_cnt elements).

            +
            Remarks
            An application must either set the replica assignment for all new partitions, or none.
            +
            +If called, this function must be called consecutively for each new partition being created, where new_partition_idx 0 is the first new partition, 1 is the second, and so on.
            +
            +broker_id_cnt should match the topic's replication factor.
            +
            +Use rd_kafka_metadata() to retrieve the list of brokers in the cluster.
            + +
            See also
            rd_kafka_AdminOptions_set_validate_only()
            + +
            +
            Parameters
            +
            new_parts \FFI\CData|null rd_kafka_NewPartitions_t*
            +
            new_partition_idx int|null int32_t
            +
            broker_ids \FFI\CData|null int32_t*
            +
            broker_id_cnt int|null size_t
            +
            errstr \FFI\CData|null char*
            +
            errstr_size int|null size_t
            +
            Returns
            +
            int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success, or an error code if the arguments were invalid.
            +
            + +

            rd_kafka_CreatePartitions()

            +
            public static rd_kafka_CreatePartitions ( 
            +    \FFI\CData|null $rk, 
            +    \FFI\CData|null $new_parts, 
            +    int|null $new_parts_cnt, 
            +    \FFI\CData|null $options, 
            +    \FFI\CData|null $rkqu
            + ): void
            +
            +

            Create additional partitions for the given topics, as specified by the new_parts array of size new_parts_cnt elements.

            + +

            Supported admin options:

            +
              +
            • rd_kafka_AdminOptions_set_validate_only() - default false
            • +
            • rd_kafka_AdminOptions_set_operation_timeout() - default 60 seconds
            • +
            • rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms
            • +
            +
            Remarks
            The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - Client instance.
            +
            new_parts \FFI\CData|null rd_kafka_NewPartitions_t** - Array of topics for which new partitions are to be created.
            +
            new_parts_cnt int|null size_t - Number of elements in new_parts array.
            +
            options \FFI\CData|null const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults.
            +
            rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to emit result on.
            +
            + +
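
            Example: a sketch of growing a topic's partition count, again under the placeholder-class assumption; the flow mirrors CreateTopics above, with RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT as the result event type.

            <?php
            use RdKafka\FFI\Library; // placeholder binding class

            $errstr = \FFI::new('char[512]');
            $newParts = Library::rd_kafka_NewPartitions_new('example-topic', 6, $errstr, 512); // grow to 6 partitions in total
            if ($newParts === null) {
                echo \FFI::string($errstr), PHP_EOL;
            }

            // $newPartsArr: CData array of rd_kafka_NewPartitions_t* holding $newParts (binding-specific construction).
            Library::rd_kafka_CreatePartitions($rk, $newPartsArr, 1, $options, $queue);
            // ... poll $queue for RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT as shown above, then:
            Library::rd_kafka_NewPartitions_destroy($newParts);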

            rd_kafka_CreatePartitions_result_topics()

            +
            public static rd_kafka_CreatePartitions_result_topics ( 
            +    \FFI\CData|null $result, 
            +    \FFI\CData|null $cntp
            + ): \FFI\CData|null
            +
            +

            Get an array of topic results from a CreatePartitions result.

            +

            The returned topics life-time is the same as the result object.

            + +
            +
            Parameters
            +
            result \FFI\CData|null const rd_kafka_CreatePartitions_result_t* - Result to get topic results from.
            +
            cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_topic_result_t**
            +
            + +

            rd_kafka_ConfigSource_name()

            +
            public static rd_kafka_ConfigSource_name ( 
            +    int $confsource
            + ): string|null
            +
            +
            +
            Parameters
            +
            confsource int rd_kafka_ConfigSource_t - )
            +
            Returns
            +
            string|null const char* - a string representation of the confsource.
            +
            + +

            rd_kafka_ConfigEntry_name()

            +
            public static rd_kafka_ConfigEntry_name ( 
            +    \FFI\CData|null $entry
            + ): string|null
            +
            +
            +
            Parameters
            +
            entry \FFI\CData|null const rd_kafka_ConfigEntry_t* - )
            +
            Returns
            +
            string|null const char* - the configuration property name
            +
            + +

            rd_kafka_ConfigEntry_value()

            +
            public static rd_kafka_ConfigEntry_value ( 
            +    \FFI\CData|null $entry
            + ): string|null
            +
            +
            +
            Parameters
            +
            entry \FFI\CData|null const rd_kafka_ConfigEntry_t* - )
            +
            Returns
            +
            string|null const char* - the configuration value, may be NULL for sensitive or unset properties.
            +
            + +

            rd_kafka_ConfigEntry_source()

            +
            public static rd_kafka_ConfigEntry_source ( 
            +    \FFI\CData|null $entry
            + ): int
            +
            +
            +
            Parameters
            +
            entry \FFI\CData|null const rd_kafka_ConfigEntry_t* - )
            +
            Returns
            +
            int rd_kafka_ConfigSource_t - the config source.
            +
            + +

            rd_kafka_ConfigEntry_is_read_only()

            +
            public static rd_kafka_ConfigEntry_is_read_only ( 
            +    \FFI\CData|null $entry
            + ): int|null
            +
            +
            Remarks
            Shall only be used on a DescribeConfigs result, otherwise returns -1.
            + +
            +
            Parameters
            +
            entry \FFI\CData|null const rd_kafka_ConfigEntry_t* - )
            +
            Returns
            +
            int|null int - 1 if the config property is read-only on the broker, else 0.
            +
            + +

            rd_kafka_ConfigEntry_is_default()

            +
            public static rd_kafka_ConfigEntry_is_default ( 
            +    \FFI\CData|null $entry
            + ): int|null
            +
            +
            Remarks
            Shall only be used on a DescribeConfigs result, otherwise returns -1.
            + +
            +
            Parameters
            +
            entry \FFI\CData|null const rd_kafka_ConfigEntry_t* - )
            +
            Returns
            +
            int|null int - 1 if the config property is set to its default value on the broker, else 0.
            +
            + +

            rd_kafka_ConfigEntry_is_sensitive()

            +
            public static rd_kafka_ConfigEntry_is_sensitive ( 
            +    \FFI\CData|null $entry
            + ): int|null
            +
            +
            Remarks
            An application should take care not to include the value of sensitive configuration entries in its output.
            +
            +Shall only be used on a DescribeConfigs result, otherwise returns -1.
            + +
            +
            Parameters
            +
            entry \FFI\CData|null const rd_kafka_ConfigEntry_t* - )
            +
            Returns
            +
            int|null int - 1 if the config property contains sensitive information (such as security configuration), else 0.
            +
            + +

            rd_kafka_ConfigEntry_is_synonym()

            +
            public static rd_kafka_ConfigEntry_is_synonym ( 
            +    \FFI\CData|null $entry
            + ): int|null
            +
            +
            +
            Parameters
            +
            entry \FFI\CData|null const rd_kafka_ConfigEntry_t* - )
            +
            Returns
            +
            int|null int - 1 if this entry is a synonym, else 0.
            +
            + +

            rd_kafka_ConfigEntry_synonyms()

            +
            public static rd_kafka_ConfigEntry_synonyms ( 
            +    \FFI\CData|null $entry, 
            +    \FFI\CData|null $cntp
            + ): \FFI\CData|null
            +
            +
            Remarks
            The lifetime of the returned entry is the same as conf.
            +
            +Shall only be used on a DescribeConfigs result, otherwise returns NULL.
            + +
            +
            Parameters
            +
            entry \FFI\CData|null const rd_kafka_ConfigEntry_t* - Entry to get synonyms for.
            +
            cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_ConfigEntry_t** - the synonym config entry array.
            +
            + +

            rd_kafka_ResourceType_name()

            +
            public static rd_kafka_ResourceType_name ( 
            +    int $restype
            + ): string|null
            +
            +
            +
            Parameters
            +
            restype int rd_kafka_ResourceType_t - )
            +
            Returns
            +
            string|null const char* - a string representation of the restype
            +
            + +

            rd_kafka_ConfigResource_new()

            +
            public static rd_kafka_ConfigResource_new ( 
            +    int $restype, 
            +    string|null $resname
            + ): \FFI\CData|null
            +
            +

            Create new ConfigResource object.

            + +
            +
            Parameters
            +
            restype int rd_kafka_ResourceType_t - The resource type (e.g., RD_KAFKA_RESOURCE_TOPIC)
            +
            resname string|null const char* - The resource name (e.g., the topic name)
            +
            Returns
            +
            \FFI\CData|null rd_kafka_ConfigResource_t* - a newly allocated object
            +
            + +

            rd_kafka_ConfigResource_destroy()

            +
            public static rd_kafka_ConfigResource_destroy ( 
            +    \FFI\CData|null $config
            + ): void
            +
            +
            +
            Parameters
            +
            config \FFI\CData|null rd_kafka_ConfigResource_t*
            +
            +

            rd_kafka_ConfigResource_destroy_array()

            +
            public static rd_kafka_ConfigResource_destroy_array ( 
            +    \FFI\CData|null $config, 
            +    int|null $config_cnt
            + ): void
            +
            +
            +
            Parameters
            +
            config \FFI\CData|null rd_kafka_ConfigResource_t**
            +
            config_cnt int|null size_t
            +
            +

            rd_kafka_ConfigResource_set_config()

            +
            public static rd_kafka_ConfigResource_set_config ( 
            +    \FFI\CData|null $config, 
            +    string|null $name, 
            +    string|null $value
            + ): int
            +
            +

            Set configuration name value pair.

            + +

            This will overwrite the current value.

            + +
            +
            Parameters
            +
            config \FFI\CData|null rd_kafka_ConfigResource_t* - ConfigResource to set config property on.
            +
            name string|null const char* - Configuration name, depends on resource type.
            +
            value string|null const char* - Configuration value, depends on resource type and name. Set to NULL to revert configuration value to default.
            +
            Returns
            +
            int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR if config was added to resource, or RD_KAFKA_RESP_ERR__INVALID_ARG on invalid input.
            +
            + +
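
            Example: a sketch of building a ConfigResource for AlterConfigs, assuming the placeholder binding class; RD_KAFKA_RESOURCE_TOPIC (enum value 2 in rdkafka.h) is assumed to be exposed as a PHP constant, and the property name/value are illustrative.

            <?php
            use RdKafka\FFI\Library; // placeholder binding class

            $resource = Library::rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_TOPIC, 'example-topic');
            $err = Library::rd_kafka_ConfigResource_set_config($resource, 'retention.ms', '86400000');
            if ($err !== 0) { // RD_KAFKA_RESP_ERR__INVALID_ARG on invalid input
                // handle error
            }

            // ... pass $resource to rd_kafka_AlterConfigs() (see below), then free it:
            Library::rd_kafka_ConfigResource_destroy($resource);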

            rd_kafka_ConfigResource_configs()

            +
            public static rd_kafka_ConfigResource_configs ( 
            +    \FFI\CData|null $config, 
            +    \FFI\CData|null $cntp
            + ): \FFI\CData|null
            +
            +

            Get an array of config entries from a ConfigResource object.

            +

            The returned object life-times are the same as the config object.

            + +
            +
            Parameters
            +
            config \FFI\CData|null const rd_kafka_ConfigResource_t* - ConfigResource to get configs from.
            +
            cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_ConfigEntry_t**
            +
            + +

            rd_kafka_ConfigResource_type()

            +
            public static rd_kafka_ConfigResource_type ( 
            +    \FFI\CData|null $config
            + ): int
            +
            +
            +
            Parameters
            +
            config \FFI\CData|null const rd_kafka_ConfigResource_t* - )
            +
            Returns
            +
            int rd_kafka_ResourceType_t - the ResourceType for config
            +
            + +

            rd_kafka_ConfigResource_name()

            +
            public static rd_kafka_ConfigResource_name ( 
            +    \FFI\CData|null $config
            + ): string|null
            +
            +
            +
            Parameters
            +
            config \FFI\CData|null const rd_kafka_ConfigResource_t* - )
            +
            Returns
            +
            string|null const char* - the name for config
            +
            + +

            rd_kafka_ConfigResource_error()

            +
            public static rd_kafka_ConfigResource_error ( 
            +    \FFI\CData|null $config
            + ): int
            +
            +
            +
            Parameters
            +
            config \FFI\CData|null const rd_kafka_ConfigResource_t* - )
            +
            Returns
            +
            int rd_kafka_resp_err_t - the error for this resource from an AlterConfigs request
            +
            + +

            rd_kafka_ConfigResource_error_string()

            +
            public static rd_kafka_ConfigResource_error_string ( 
            +    \FFI\CData|null $config
            + ): string|null
            +
            +
            +
            Parameters
            +
            config \FFI\CData|null const rd_kafka_ConfigResource_t* - )
            +
            Returns
            +
            string|null const char* - the error string for this resource from an AlterConfigs request, or NULL if no error.
            +
            + +

            rd_kafka_AlterConfigs()

            +
            public static rd_kafka_AlterConfigs ( 
            +    \FFI\CData|null $rk, 
            +    \FFI\CData|null $configs, 
            +    int|null $config_cnt, 
            +    \FFI\CData|null $options, 
            +    \FFI\CData|null $rkqu
            + ): void
            +
            +

            Update the configuration for the specified resources. Updates are not transactional so they may succeed for a subset of the provided resources while the others fail. The configuration for a particular resource is updated atomically, replacing values using the provided ConfigEntrys and reverting unspecified ConfigEntrys to their default values.

            +
            Remarks
            Requires broker version >=0.11.0.0
            +
            Warning
            AlterConfigs will replace all existing configuration for the provided resources with the new configuration given, reverting all other configuration to their default values.
            +
            Remarks
            Multiple resources and resource types may be set, but at most one resource of type RD_KAFKA_RESOURCE_BROKER is allowed per call since these resource requests must be sent to the broker specified in the resource.
            +
            Deprecated:
            Use rd_kafka_IncrementalAlterConfigs().
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t*
            +
            configs \FFI\CData|null rd_kafka_ConfigResource_t**
            +
            config_cnt int|null size_t
            +
            options \FFI\CData|null const rd_kafka_AdminOptions_t*
            +
            rkqu \FFI\CData|null rd_kafka_queue_t*
            +
            + +

            rd_kafka_AlterConfigs_result_resources()

            +
            public static rd_kafka_AlterConfigs_result_resources ( 
            +    \FFI\CData|null $result, 
            +    \FFI\CData|null $cntp
            + ): \FFI\CData|null
            +
            +

            Get an array of resource results from a AlterConfigs result.

            +

            Use rd_kafka_ConfigResource_error() and rd_kafka_ConfigResource_error_string() to extract per-resource error results on the returned array elements.

            +

            The returned object life-times are the same as the result object.

            + +
            +
            Parameters
            +
            result \FFI\CData|null const rd_kafka_AlterConfigs_result_t* - Result object to get resource results from.
            +
            cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_ConfigResource_t** - an array of ConfigResource elements, or NULL if not available.
            +
            + +
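
            Example: a hedged sketch of reading per-resource results from an AlterConfigs request, following the same queue/event pattern as the sketches above. rd_kafka_event_AlterConfigs_result() is the plain librdkafka accessor, assumed to be bound on the placeholder class, and RD_KAFKA_EVENT_ALTERCONFIGS_RESULT is assumed to be exposed as a PHP constant.

            <?php
            use RdKafka\FFI\Library; // placeholder binding class

            // $configs: CData array of rd_kafka_ConfigResource_t* (binding-specific construction).
            Library::rd_kafka_AlterConfigs($rk, $configs, 1, $options, $queue);

            $event = Library::rd_kafka_queue_poll($queue, 10000);
            if ($event !== null
                && Library::rd_kafka_event_type($event) === RD_KAFKA_EVENT_ALTERCONFIGS_RESULT) {
                $result = Library::rd_kafka_event_AlterConfigs_result($event);

                $cnt = \FFI::new('size_t');
                $resources = Library::rd_kafka_AlterConfigs_result_resources($result, \FFI::addr($cnt));
                for ($i = 0; $i < $cnt->cdata; $i++) {
                    if (Library::rd_kafka_ConfigResource_error($resources[$i]) !== 0) {
                        echo Library::rd_kafka_ConfigResource_name($resources[$i]), ': ',
                            Library::rd_kafka_ConfigResource_error_string($resources[$i]), PHP_EOL;
                    }
                }
            }

            if ($event !== null) {
                Library::rd_kafka_event_destroy($event);
            }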

            rd_kafka_DescribeConfigs()

            +
            public static rd_kafka_DescribeConfigs ( 
            +    \FFI\CData|null $rk, 
            +    \FFI\CData|null $configs, 
            +    int|null $config_cnt, 
            +    \FFI\CData|null $options, 
            +    \FFI\CData|null $rkqu
            + ): void
            +
            +

            Get configuration for the specified resources in configs.

            +

            The returned configuration includes default values and the rd_kafka_ConfigEntry_is_default() or rd_kafka_ConfigEntry_source() methods may be used to distinguish them from user supplied values.

            +

            The value of config entries where rd_kafka_ConfigEntry_is_sensitive() is true will always be NULL to avoid disclosing sensitive information, such as security settings.

            +

            Configuration entries where rd_kafka_ConfigEntry_is_read_only() is true can't be updated (with rd_kafka_AlterConfigs()).

            +

            Synonym configuration entries are returned if the broker supports it (broker version >= 1.1.0). See rd_kafka_ConfigEntry_synonyms().

            +
            Remarks
            Requires broker version >=0.11.0.0
            +
            +Multiple resources and resource types may be requested, but at most one resource of type RD_KAFKA_RESOURCE_BROKER is allowed per call since these resource requests must be sent to the broker specified in the resource.
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t*
            +
            configs \FFI\CData|null rd_kafka_ConfigResource_t**
            +
            config_cnt int|null size_t
            +
            options \FFI\CData|null const rd_kafka_AdminOptions_t*
            +
            rkqu \FFI\CData|null rd_kafka_queue_t*
            +
            + +

            rd_kafka_DescribeConfigs_result_resources()

            +
            public static rd_kafka_DescribeConfigs_result_resources ( 
            +    \FFI\CData|null $result, 
            +    \FFI\CData|null $cntp
            + ): \FFI\CData|null
            +
            +

            Get an array of resource results from a DescribeConfigs result.

            +

            The returned resources life-time is the same as the result object.

            + +
            +
            Parameters
            +
            result \FFI\CData|null const rd_kafka_DescribeConfigs_result_t* - Result object to get resource results from.
            +
            cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_ConfigResource_t**
            +
            + +
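
            Under the same assumptions as the AlterConfigs sketch above (Library class name, hypothetical getFFI() accessor, supporting librdkafka Admin API helpers available through the bindings), reading back a topic's configuration and flagging broker defaults might look roughly like this:

            $resource = Library::rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_TOPIC, 'my-topic');
            $resources = Library::getFFI()->new('rd_kafka_ConfigResource_t*[1]'); // hypothetical accessor
            $resources[0] = $resource;

            $queue = Library::rd_kafka_queue_new($rk);
            Library::rd_kafka_DescribeConfigs($rk, $resources, 1, null, $queue);

            $event = Library::rd_kafka_queue_poll($queue, 10000);
            $result = Library::rd_kafka_event_DescribeConfigs_result($event);
            $cntp = \FFI::new('size_t');
            $results = Library::rd_kafka_DescribeConfigs_result_resources($result, \FFI::addr($cntp));

            // Walk the config entries of the first (and only) requested resource.
            $entryCnt = \FFI::new('size_t');
            $entries = Library::rd_kafka_ConfigResource_configs($results[0], \FFI::addr($entryCnt));
            for ($i = 0; $i < $entryCnt->cdata; $i++) {
                printf(
                    "%s=%s%s\n",
                    Library::rd_kafka_ConfigEntry_name($entries[$i]),
                    Library::rd_kafka_ConfigEntry_value($entries[$i]) ?? 'NULL', // NULL for sensitive entries
                    Library::rd_kafka_ConfigEntry_is_default($entries[$i]) ? ' (default)' : ''
                );
            }
            Library::rd_kafka_event_destroy($event);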

            rd_kafka_conf()

            +
            public static rd_kafka_conf ( 
            +    \FFI\CData|null $rk
            + ): \FFI\CData|null
            +
            +
            Remarks
            the returned object is read-only and its lifetime is the same as the rd_kafka_t object.
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t*
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_conf_t* - the configuration object used by an rd_kafka_t instance. For use with rd_kafka_conf_get(), et.al., to extract configuration properties from a running client.
            +
            + +
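
            As a small, hedged sketch of that usage (static Library bindings assumed; rd_kafka_conf_get() as referenced above; RD_KAFKA_CONF_OK compared by its numeric value 0), reading a property back from a running client could look like this:

            $conf = Library::rd_kafka_conf($rk); // read-only view, do not destroy or modify
            $dest = \FFI::new('char[512]');
            $size = \FFI::new('size_t');
            $size->cdata = 512;
            if (Library::rd_kafka_conf_get($conf, 'bootstrap.servers', $dest, \FFI::addr($size)) === 0) { // RD_KAFKA_CONF_OK
                echo \FFI::string($dest), PHP_EOL;
            }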

            rd_kafka_conf_set_oauthbearer_token_refresh_cb()

            +
            public static rd_kafka_conf_set_oauthbearer_token_refresh_cb ( 
            +    \FFI\CData|null $conf, 
            +    \FFI\CData|\Closure $oauthbearer_token_refresh_cb
            + ): void
            +
            +

            Set SASL/OAUTHBEARER token refresh callback in provided conf object.

            + +

            The SASL/OAUTHBEARER token refresh callback is triggered via rd_kafka_poll() whenever OAUTHBEARER is the SASL mechanism and a token needs to be retrieved, typically based on the configuration defined in sasl.oauthbearer.config.

            +

            The callback should invoke rd_kafka_oauthbearer_set_token() or rd_kafka_oauthbearer_set_token_failure() to indicate success or failure, respectively.

            +

            The refresh operation is eventable and may be received via rd_kafka_queue_poll() with an event type of RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH.

            +

            Note that before any SASL/OAUTHBEARER broker connection can succeed the application must call rd_kafka_oauthbearer_set_token() once – either directly or, more typically, by invoking either rd_kafka_poll(), rd_kafka_consumer_poll(), rd_kafka_queue_poll(), etc, in order to cause retrieval of an initial token to occur.

            +

            Alternatively, the application can enable the SASL queue by calling rd_kafka_conf_enable_sasl_queue() on the configuration object prior to creating the client instance, get the SASL queue with rd_kafka_queue_get_sasl(), and either serve the queue manually by calling rd_kafka_queue_poll(), or redirecting the queue to the background thread to have the queue served automatically. For the latter case the SASL queue must be forwarded to the background queue with rd_kafka_queue_forward(). A convenience function is available to automatically forward the SASL queue to librdkafka's background thread, see rd_kafka_sasl_background_callbacks_enable().

            +

            An unsecured JWT refresh handler is provided by librdkafka for development and testing purposes, it is enabled by setting the enable.sasl.oauthbearer.unsecure.jwt property to true and is mutually exclusive to using a refresh callback.

            +
            See also
            rd_kafka_sasl_background_callbacks_enable()
            +
            +rd_kafka_queue_get_sasl()
            + +
            +
            Parameters
            +
            conf \FFI\CData|null rd_kafka_conf_t* - the configuration to mutate.
            +
            oauthbearer_token_refresh_cb \FFI\CData|\Closure void(*)(rd_kafka_t*, const char*, void*) - the callback to set; callback function arguments:
            rk - Kafka handle
            oauthbearer_config - Value of configuration property sasl.oauthbearer.config.
            opaque - Application-provided opaque set via rd_kafka_conf_set_opaque()
            +
            + +
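
            To illustrate the refresh flow end to end (a sketch only: the static Library bindings and the exact closure signature accepted by the binding are assumptions, and the token, principal and lifetime values are placeholders):

            use RdKafka\FFI\Library; // class name assumed

            Library::rd_kafka_conf_set_oauthbearer_token_refresh_cb(
                $conf,
                function ($rk, ?string $oauthbearerConfig, $opaque): void {
                    // Obtain a token from your identity provider here (omitted); placeholders below.
                    $token      = 'eyJhbGciOi...';
                    $lifetimeMs = (time() + 3600) * 1000; // absolute expiry, ms since epoch
                    $errstr     = \FFI::new('char[512]');

                    $err = Library::rd_kafka_oauthbearer_set_token(
                        $rk, $token, $lifetimeMs, 'my-principal', null, 0, $errstr, 512
                    );
                    if ($err !== 0) { // != RD_KAFKA_RESP_ERR_NO_ERROR
                        Library::rd_kafka_oauthbearer_set_token_failure($rk, \FFI::string($errstr));
                    }
                }
            );

            The callback fires from rd_kafka_poll()/rd_kafka_consumer_poll()/rd_kafka_queue_poll(), so poll early to make an initial token available before the first broker connection is attempted.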

            rd_kafka_conf_set_ssl_cert_verify_cb()

            +
            public static rd_kafka_conf_set_ssl_cert_verify_cb ( 
            +    \FFI\CData|null $conf, 
            +    \FFI\CData|\Closure $ssl_cert_verify_cb
            + ): int
            +
            +

            Sets the verification callback of the broker certificate.

            +

            The verification callback is triggered from internal librdkafka threads upon connecting to a broker. On each connection attempt the callback will be called for each certificate in the broker's certificate chain, starting at the root certificate, as long as the application callback returns 1 (valid certificate). broker_name and broker_id correspond to the broker the connection is being made to. The x509_error argument indicates whether OpenSSL's verification of the certificate succeeded (0) or failed (an OpenSSL error code). The application may set the SSL context error code by returning 0 from the verify callback and providing a non-zero SSL context error code in x509_error. If the verify callback sets x509_error to 0, returns 1, and the original x509_error was non-zero, the error on the SSL context will be cleared. x509_error is always a valid pointer to an int.

            +

            depth is the depth of the current certificate in the chain, starting at the root certificate.

            +

            The certificate itself is passed in binary DER format in buf of size size.

            +

            The callback must return 1 if verification succeeds, or 0 if verification fails and then write a human-readable error message to errstr (limited to errstr_size bytes, including nul-term).

            +

            The callback's opaque argument is the opaque set with rd_kafka_conf_set_opaque().

            + +
            Warning
            This callback will be called from internal librdkafka threads.
            +
            Remarks
            See <openssl/x509_vfy.h> in the OpenSSL source distribution for a list of x509_error codes.
            + +
            +
            Parameters
            +
            conf \FFI\CData|null rd_kafka_conf_t*
            +
            ssl_cert_verify_cb \FFI\CData|\Closure int(*)(rd_kafka_t*, const char*, int32_t, int*, int, const char*, size_t, char*, size_t, void*)
            +
            Returns
            +
            int rd_kafka_conf_res_t - RD_KAFKA_CONF_OK if SSL is supported in this build, else RD_KAFKA_CONF_INVALID.
            +
            + +
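
            A minimal sketch of such a callback (Library bindings and the closure signature the binding accepts are assumptions; the x509_error argument is treated here as an int* CData), simply deferring to OpenSSL's own verdict:

            Library::rd_kafka_conf_set_ssl_cert_verify_cb(
                $conf,
                function ($rk, ?string $brokerName, int $brokerId, $x509error, int $depth, $buf, $size, $errstr, $errstrSize, $opaque): int {
                    // Keep this fast and non-blocking: it runs on internal librdkafka threads.
                    return $x509error[0] === 0 ? 1 : 0; // 1 = certificate accepted, 0 = rejected
                }
            );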

            rd_kafka_conf_set_ssl_cert()

            +
            public static rd_kafka_conf_set_ssl_cert ( 
            +    \FFI\CData|null $conf, 
            +    int $cert_type, 
            +    int $cert_enc, 
            +    \FFI\CData|object|string|null $buffer, 
            +    int|null $size, 
            +    \FFI\CData|null $errstr, 
            +    int|null $errstr_size
            + ): int
            +
            +

            Set certificate/key cert_type from the cert_enc encoded memory at buffer of size bytes.

            + +
            Remarks
            Calling this method multiple times with the same cert_type will replace the previous value.
            +
            +Calling this method with buffer set to NULL will clear the configuration for cert_type.
            +
            +The private key may require a password, which must be specified with the ssl.key.password configuration property prior to calling this function.
            +
            +Private and public keys in PEM format may also be set with the ssl.key.pem and ssl.certificate.pem configuration properties.
            +
            +CA certificate in PEM format may also be set with the ssl.ca.pem configuration property.
            +
            +When librdkafka is linked to OpenSSL 3.0 and the certificate is encoded using an obsolete cipher, it might be necessary to set up an OpenSSL configuration file to load the "legacy" provider and set the OPENSSL_CONF environment variable. See https://github.com/openssl/openssl/blob/master/README-PROVIDERS.md for more information.
            + +
            +
            Parameters
            +
            conf \FFI\CData|null rd_kafka_conf_t* - Configuration object.
            +
            cert_type int rd_kafka_cert_type_t - Certificate or key type to configure.
            +
            cert_enc int rd_kafka_cert_enc_t - Buffer encoding type.
            +
            buffer \FFI\CData|object|string|null const void* - Memory pointer to encoded certificate or key. The memory is not referenced after this function returns.
            +
            size int|null size_t - Size of memory at buffer.
            +
            errstr \FFI\CData|null char* - Memory where a human-readable error string will be written on failure.
            +
            errstr_size int|null size_t - Size of errstr, including space for nul-terminator.
            +
            Returns
            +
            int rd_kafka_conf_res_t - RD_KAFKA_CONF_OK on success or RD_KAFKA_CONF_INVALID if the memory in buffer is of incorrect encoding, or if librdkafka was not built with SSL support.
            +
            + +
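
            For example, loading a PEM-encoded client certificate from memory might look roughly like this (Library bindings assumed; the RD_KAFKA_CERT_* constant names come from librdkafka and are assumed to be exposed by the bindings; the file path is a placeholder):

            $pem = file_get_contents('/path/to/client-cert.pem'); // placeholder path
            $errstr = \FFI::new('char[512]');
            $res = Library::rd_kafka_conf_set_ssl_cert(
                $conf,
                RD_KAFKA_CERT_PUBLIC_KEY, // cert_type
                RD_KAFKA_CERT_ENC_PEM,    // cert_enc
                $pem,
                strlen($pem),
                $errstr,
                512
            );
            if ($res !== 0) { // != RD_KAFKA_CONF_OK
                throw new \RuntimeException(\FFI::string($errstr));
            }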

            rd_kafka_event_config_string()

            +
            public static rd_kafka_event_config_string ( 
            +    \FFI\CData|null $rkev
            + ): string|null
            +
            +

            The returned memory is read-only and its lifetime is the same as the event object.

            +

            Event types:

            +
              +
            • RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH: value of sasl.oauthbearer.config
            • +
            + +
            +
            Parameters
            +
            rkev \FFI\CData|null rd_kafka_event_t*
            +
            Returns
            +
            string|null const char* - the associated configuration string for the event, or NULL if the configuration property is not set or if not applicable for the given event type.
            +
            + +

            rd_kafka_oauthbearer_set_token()

            +
            public static rd_kafka_oauthbearer_set_token ( 
            +    \FFI\CData|null $rk, 
            +    string|null $token_value, 
            +    int|null $md_lifetime_ms, 
            +    string|null $md_principal_name, 
            +    \FFI\CData|null $extensions, 
            +    int|null $extension_size, 
            +    \FFI\CData|null $errstr, 
            +    int|null $errstr_size
            + ): int
            +
            +

            Set SASL/OAUTHBEARER token and metadata.

            + +

            The SASL/OAUTHBEARER token refresh callback or event handler should invoke this method upon success. The extension keys must not include the reserved key "`auth`", and all extension keys and values must conform to the required format as per https://tools.ietf.org/html/rfc7628#section-3.1:

            +
            key            = 1*(ALPHA)
            +value          = *(VCHAR / SP / HTAB / CR / LF )
            +
            +
            See also
            rd_kafka_oauthbearer_set_token_failure
            +
            +rd_kafka_conf_set_oauthbearer_token_refresh_cb
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - Client instance.
            +
            token_value string|null const char* - the mandatory token value to set, often (but not necessarily) a JWS compact serialization as per https://tools.ietf.org/html/rfc7515#section-3.1.
            +
            md_lifetime_ms int|null int64_t - when the token expires, in terms of the number of milliseconds since the epoch.
            +
            md_principal_name string|null const char* - the mandatory Kafka principal name associated with the token.
            +
            extensions \FFI\CData|null const char** - optional SASL extensions key-value array with extensions_size elements (number of keys * 2), where [i] is the key and [i+1] is the key’s value, to be communicated to the broker as additional key-value pairs during the initial client response as per https://tools.ietf.org/html/rfc7628#section-3.1. The key-value pairs are copied.
            +
            extension_size int|null size_t - the number of SASL extension keys plus values, which must be a non-negative multiple of 2.
            +
            errstr \FFI\CData|null char* - A human readable error string (nul-terminated) is written to this location that must be of at least errstr_size bytes. The errstr is only written in case of error.
            +
            errstr_size int|null size_t - Writable size in errstr.
            +
            Returns
            +
            int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success, otherwise errstr set and:
            +RD_KAFKA_RESP_ERR__INVALID_ARG if any of the arguments are invalid;
            +RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED if SASL/OAUTHBEARER is not supported by this build;
            +RD_KAFKA_RESP_ERR__STATE if SASL/OAUTHBEARER is supported but is not configured as the client’s authentication mechanism.
            +
            + +

            rd_kafka_oauthbearer_set_token_failure()

            +
            public static rd_kafka_oauthbearer_set_token_failure ( 
            +    \FFI\CData|null $rk, 
            +    string|null $errstr
            + ): int
            +
            +

            SASL/OAUTHBEARER token refresh failure indicator.

            + +

            The SASL/OAUTHBEARER token refresh callback or event handler should invoke this method upon failure.

            + +
            See also
            rd_kafka_oauthbearer_set_token
            +
            +rd_kafka_conf_set_oauthbearer_token_refresh_cb
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - Client instance.
            +
            errstr string|null const char* - mandatory human readable error reason for failing to acquire a token.
            +
            Returns
            +
            int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success, otherwise:
            +RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED if SASL/OAUTHBEARER is not supported by this build;
            +RD_KAFKA_RESP_ERR__STATE if SASL/OAUTHBEARER is supported but is not configured as the client’s authentication mechanism,
            +RD_KAFKA_RESP_ERR__INVALID_ARG if no error string is supplied.
            +
            + +

            rd_kafka_interceptor_f_on_thread_start_t()

            +
            public static rd_kafka_interceptor_f_on_thread_start_t ( 
            +    \FFI\CData|null $rk, 
            +    int $thread_type, 
            +    string|null $thread_name, 
            +    \FFI\CData|object|string|null $ic_opaque
            + ): int
            +
            +

            on_thread_start() is called from a newly created librdkafka-managed thread.

            + +
            Warning
            The on_thread_start() interceptor is called from internal librdkafka threads. An on_thread_start() interceptor MUST NOT call any librdkafka APIs associated with the rk, or perform any blocking or prolonged work.
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - The client instance.
            +
            thread_type int rd_kafka_thread_type_t - Thread type.
            +
            thread_name string|null const char* - Human-readable thread name, may not be unique.
            +
            ic_opaque \FFI\CData|object|string|null void* - The interceptor’s opaque pointer specified in ..add..().
            +
            Returns
            +
            int rd_kafka_resp_err_t - an error code on failure, the error is logged but otherwise ignored.
            +
            + +

            rd_kafka_interceptor_f_on_thread_exit_t()

            +
            public static rd_kafka_interceptor_f_on_thread_exit_t ( 
            +    \FFI\CData|null $rk, 
            +    int $thread_type, 
            +    string|null $thread_name, 
            +    \FFI\CData|object|string|null $ic_opaque
            + ): int
            +
            +

            on_thread_exit() is called just prior to a librdkafka-managed thread exiting from the exiting thread itself.

            + +
            Remarks
            Depending on the thread type, librdkafka may execute additional code on the thread after on_thread_exit() returns.
            +
            Warning
            The on_thread_exit() interceptor is called from internal librdkafka threads. An on_thread_exit() interceptor MUST NOT call any librdkafka APIs associated with the rk, or perform any blocking or prolonged work.
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - The client instance.
            +
            thread_type int rd_kafka_thread_type_t - Thread type.
            +
            thread_name string|null const char* - Human-readable thread name, may not be unique.
            +
            ic_opaque \FFI\CData|object|string|null void* - The interceptor’s opaque pointer specified in ..add..().
            +
            Returns
            +
            int rd_kafka_resp_err_t - an error code on failure, the error is logged but otherwise ignored.
            +
            + +

            rd_kafka_interceptor_add_on_thread_start()

            +
            public static rd_kafka_interceptor_add_on_thread_start ( 
            +    \FFI\CData|null $rk, 
            +    string|null $ic_name, 
            +    \FFI\CData|\Closure $on_thread_start, 
            +    \FFI\CData|object|string|null $ic_opaque
            + ): int
            +
            +

            Append an on_thread_start() interceptor.

            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - Client instance.
            +
            ic_name string|null const char* - Interceptor name, used in logging.
            +
            on_thread_start \FFI\CData|\Closure rd_kafka_interceptor_f_on_thread_start_t* - rd_kafka_resp_err_t(*)(rd_kafka_t*, rd_kafka_thread_type_t, const char*, void*)
            +
            ic_opaque \FFI\CData|object|string|null void* - Opaque value that will be passed to the function.
            +
            Returns
            +
            int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing interceptor with the same ic_name and function has already been added to conf.
            +
            + +
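
            A small sketch (Library bindings and the closure signature accepted by the binding are assumptions) that logs every librdkafka-managed thread as it starts:

            $err = Library::rd_kafka_interceptor_add_on_thread_start(
                $rk,
                'thread-logger',
                function ($rk, int $threadType, ?string $threadName, $icOpaque): int {
                    // Must not block or call back into librdkafka APIs for this $rk.
                    error_log('librdkafka thread started: ' . $threadName);
                    return 0; // RD_KAFKA_RESP_ERR_NO_ERROR
                },
                null
            );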

            rd_kafka_interceptor_add_on_thread_exit()

            +
            public static rd_kafka_interceptor_add_on_thread_exit ( 
            +    \FFI\CData|null $rk, 
            +    string|null $ic_name, 
            +    \FFI\CData|\Closure $on_thread_exit, 
            +    \FFI\CData|object|string|null $ic_opaque
            + ): int
            +
            +

            Append an on_thread_exit() interceptor.

            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - Client instance.
            +
            ic_name string|null const char* - Interceptor name, used in logging.
            +
            on_thread_exit \FFI\CData|\Closure rd_kafka_interceptor_f_on_thread_exit_t* - rd_kafka_resp_err_t(*)(rd_kafka_t*, rd_kafka_thread_type_t, const char*, void*)
            +
            ic_opaque \FFI\CData|object|string|null void* - Opaque value that will be passed to the function.
            +
            Returns
            +
            int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing interceptor with the same ic_name and function has already been added to conf.
            +
            + +

            rd_kafka_mock_cluster_new()

            +
            public static rd_kafka_mock_cluster_new ( 
            +    \FFI\CData|null $rk, 
            +    int|null $broker_cnt
            + ): \FFI\CData|null
            +
            +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t*
            +
            broker_cnt int|null int
            +
            Returns
            +
            \FFI\CData|null rd_kafka_mock_cluster_t*
            +
            +

            rd_kafka_mock_cluster_destroy()

            +
            public static rd_kafka_mock_cluster_destroy ( 
            +    \FFI\CData|null $mcluster
            + ): void
            +
            +
            +
            Parameters
            +
            mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
            +
            +

            rd_kafka_mock_cluster_handle()

            +
            public static rd_kafka_mock_cluster_handle ( 
            +    \FFI\CData|null $mcluster
            + ): \FFI\CData|null
            +
            +
            +
            Parameters
            +
            mcluster \FFI\CData|null const rd_kafka_mock_cluster_t*
            +
            Returns
            +
            \FFI\CData|null rd_kafka_t*
            +
            +

            rd_kafka_mock_cluster_bootstraps()

            +
            public static rd_kafka_mock_cluster_bootstraps ( 
            +    \FFI\CData|null $mcluster
            + ): string|null
            +
            +
            +
            Parameters
            +
            mcluster \FFI\CData|null const rd_kafka_mock_cluster_t*
            +
            Returns
            +
            string|null const char*
            +
            +

            rd_kafka_mock_push_request_errors()

            +
            public static rd_kafka_mock_push_request_errors ( 
            +    \FFI\CData|null $mcluster, 
            +    int|null $ApiKey, 
            +    int|null $cnt, 
            +    mixed $args
            + ): void
            +
            +
            +
            Parameters
            +
            mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
            +
            ApiKey int|null int16_t
            +
            cnt int|null size_t
            +
            args mixed
            +
            +

            rd_kafka_mock_topic_set_error()

            +
            public static rd_kafka_mock_topic_set_error ( 
            +    \FFI\CData|null $mcluster, 
            +    string|null $topic, 
            +    int $err
            + ): void
            +
            +
            +
            Parameters
            +
            mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
            +
            topic string|null const char*
            +
            err int rd_kafka_resp_err_t
            +
            +

            rd_kafka_mock_partition_set_leader()

            +
            public static rd_kafka_mock_partition_set_leader ( 
            +    \FFI\CData|null $mcluster, 
            +    string|null $topic, 
            +    int|null $partition, 
            +    int|null $broker_id
            + ): int
            +
            +
            +
            Parameters
            +
            mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
            +
            topic string|null const char*
            +
            partition int|null int32_t
            +
            broker_id int|null int32_t
            +
            Returns
            +
            int rd_kafka_resp_err_t
            +
            +

            rd_kafka_mock_partition_set_follower()

            +
            public static rd_kafka_mock_partition_set_follower ( 
            +    \FFI\CData|null $mcluster, 
            +    string|null $topic, 
            +    int|null $partition, 
            +    int|null $broker_id
            + ): int
            +
            +
            +
            Parameters
            +
            mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
            +
            topic string|null const char*
            +
            partition int|null int32_t
            +
            broker_id int|null int32_t
            +
            Returns
            +
            int rd_kafka_resp_err_t
            +
            +

            rd_kafka_mock_partition_set_follower_wmarks()

            +
            public static rd_kafka_mock_partition_set_follower_wmarks ( 
            +    \FFI\CData|null $mcluster, 
            +    string|null $topic, 
            +    int|null $partition, 
            +    int|null $lo, 
            +    int|null $hi
            + ): int
            +
            +
            +
            Parameters
            +
            mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
            +
            topic string|null const char*
            +
            partition int|null int32_t
            +
            lo int|null int64_t
            +
            hi int|null int64_t
            +
            Returns
            +
            int rd_kafka_resp_err_t
            +
            +

            rd_kafka_mock_broker_set_rack()

            +
            public static rd_kafka_mock_broker_set_rack ( 
            +    \FFI\CData|null $mcluster, 
            +    int|null $broker_id, 
            +    string|null $rack
            + ): int
            +
            +
            +
            Parameters
            +
            mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
            +
            broker_id int|null int32_t
            +
            rack string|null const char*
            +
            Returns
            +
            int rd_kafka_resp_err_t
            +
            +

            rd_kafka_error_code()

            +
            public static rd_kafka_error_code ( 
            +    \FFI\CData|null $error
            + ): int
            +
            +
            +
            Parameters
            +
            error \FFI\CData|null const rd_kafka_error_t*
            +
            Returns
            +
            int rd_kafka_resp_err_t - the error code for error or RD_KAFKA_RESP_ERR_NO_ERROR if error is NULL.
            +
            + +

            rd_kafka_error_name()

            +
            public static rd_kafka_error_name ( 
            +    \FFI\CData|null $error
            + ): string|null
            +
            +
            Remarks
            The lifetime of the returned pointer is the same as the error object.
            +
            See also
            rd_kafka_err2name()
            + +
            +
            Parameters
            +
            error \FFI\CData|null const rd_kafka_error_t*
            +
            Returns
            +
            string|null const char* - the error code name for error, e.g., “ERR_UNKNOWN_MEMBER_ID”, or an empty string if error is NULL.
            +
            + +

            rd_kafka_error_string()

            +
            public static rd_kafka_error_string ( 
            +    \FFI\CData|null $error
            + ): string|null
            +
            +
            Remarks
            The lifetime of the returned pointer is the same as the error object.
            + +
            +
            Parameters
            +
            error \FFI\CData|null const rd_kafka_error_t*
            +
            Returns
            +
            string|null const char* - a human readable error string for error, or an empty string if error is NULL.
            +
            + +

            rd_kafka_error_is_fatal()

            +
            public static rd_kafka_error_is_fatal ( 
            +    \FFI\CData|null $error
            + ): int|null
            +
            +
            +
            Parameters
            +
            error \FFI\CData|null const rd_kafka_error_t*
            +
            Returns
            +
            int|null int - 1 if the error is a fatal error, indicating that the client instance is no longer usable, else 0 (also if error is NULL).
            +
            + +

            rd_kafka_error_is_retriable()

            +
            public static rd_kafka_error_is_retriable ( 
            +    \FFI\CData|null $error
            + ): int|null
            +
            +
            +
            Parameters
            +
            error \FFI\CData|null const rd_kafka_error_t*
            +
            Returns
            +
            int|null int - 1 if the operation may be retried, else 0 (also if error is NULL).
            +
            + +

            rd_kafka_error_txn_requires_abort()

            +
            public static rd_kafka_error_txn_requires_abort ( 
            +    \FFI\CData|null $error
            + ): int|null
            +
            +
            Remarks
            The return value of this method is only valid for errors returned by the transactional API.
            + +
            +
            Parameters
            +
            error \FFI\CData|null const rd_kafka_error_t*
            +
            Returns
            +
            int|null int - 1 if the error is an abortable transaction error in which case the application must call rd_kafka_abort_transaction() and start a new transaction with rd_kafka_begin_transaction() if it wishes to proceed with transactions. Else returns 0 (also if error is NULL).
            +
            + +

            rd_kafka_error_destroy()

            +
            public static rd_kafka_error_destroy ( 
            +    \FFI\CData|null $error
            + ): void
            +
            +

            Free and destroy an error object.

            +
            Remarks
            As a convenience it is permitted to pass a NULL error.
            + +
            +
            Parameters
            +
            error \FFI\CData|null rd_kafka_error_t*
            +
            + +

            rd_kafka_error_new()

            +
            public static rd_kafka_error_new ( 
            +    int $code, 
            +    string|null $fmt, 
            +    mixed $args
            + ): \FFI\CData|null
            +
            +

            Create a new error object with error code and optional human readable error string in fmt.

            +

            This method is mainly to be used for mocking errors in application test code.

            +

            The returned object must be destroyed with rd_kafka_error_destroy().

            + +
            +
            Parameters
            +
            code int rd_kafka_resp_err_t
            +
            fmt string|null const char*
            +
            args mixed
            +
            Returns
            +
            \FFI\CData|null rd_kafka_error_t*
            +
            + +
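
            For example, a test might fabricate an error and probe it with the accessors documented above (Library bindings assumed; the RD_KAFKA_RESP_ERR__TIMED_OUT constant name comes from librdkafka and is assumed to be exposed; the variadic format arguments are omitted here):

            $error = Library::rd_kafka_error_new(RD_KAFKA_RESP_ERR__TIMED_OUT, 'simulated timeout');
            printf(
                "%s (%s) fatal=%d retriable=%d\n",
                Library::rd_kafka_error_string($error),
                Library::rd_kafka_error_name($error),
                Library::rd_kafka_error_is_fatal($error),
                Library::rd_kafka_error_is_retriable($error)
            );
            Library::rd_kafka_error_destroy($error);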

            rd_kafka_msg_partitioner_fnv1a()

            +
            public static rd_kafka_msg_partitioner_fnv1a ( 
            +    \FFI\CData|null $rkt, 
            +    \FFI\CData|object|string|null $key, 
            +    int|null $keylen, 
            +    int|null $partition_cnt, 
            +    \FFI\CData|object|string|null $rkt_opaque, 
            +    \FFI\CData|object|string|null $msg_opaque
            + ): int|null
            +
            +

            FNV-1a partitioner.

            +

            Uses consistent hashing to map identical keys onto identical partitions using FNV-1a hashing.

            +

            The rkt_opaque argument is the opaque set by rd_kafka_topic_conf_set_opaque(). The msg_opaque argument is the per-message opaque passed to produce().

            + +
            +
            Parameters
            +
            rkt \FFI\CData|null const rd_kafka_topic_t*
            +
            key \FFI\CData|object|string|null const void*
            +
            keylen int|null size_t
            +
            partition_cnt int|null int32_t
            +
            rkt_opaque \FFI\CData|object|string|null void*
            +
            msg_opaque \FFI\CData|object|string|null void*
            +
            Returns
            +
            int|null int32_t - a partition between 0 and partition_cnt - 1.
            +
            + +

            rd_kafka_msg_partitioner_fnv1a_random()

            +
            public static rd_kafka_msg_partitioner_fnv1a_random ( 
            +    \FFI\CData|null $rkt, 
            +    \FFI\CData|object|string|null $key, 
            +    int|null $keylen, 
            +    int|null $partition_cnt, 
            +    \FFI\CData|object|string|null $rkt_opaque, 
            +    \FFI\CData|object|string|null $msg_opaque
            + ): int|null
            +
            +

            Consistent-Random FNV-1a partitioner.

            +

            Uses consistent hashing to map identical keys onto identical partitions using FNV-1a hashing. Messages without keys will be assigned via the random partitioner.

            +

            The rkt_opaque argument is the opaque set by rd_kafka_topic_conf_set_opaque(). The msg_opaque argument is the per-message opaque passed to produce().

            + +
            +
            Parameters
            +
            rkt \FFI\CData|null const rd_kafka_topic_t*
            +
            key \FFI\CData|object|string|null const void*
            +
            keylen int|null size_t
            +
            partition_cnt int|null int32_t
            +
            rkt_opaque \FFI\CData|object|string|null void*
            +
            msg_opaque \FFI\CData|object|string|null void*
            +
            Returns
            +
            int|null int32_t - a partition between 0 and partition_cnt - 1.
            +
            + +
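
            In practice this partitioner is usually selected through topic configuration rather than called directly. A hedged sketch (Library bindings assumed; rd_kafka_topic_conf_new() and rd_kafka_topic_conf_set() are standard librdkafka calls assumed to be exposed by the bindings; "partitioner" is a documented librdkafka topic property):

            $topicConf = Library::rd_kafka_topic_conf_new();
            $errstr = \FFI::new('char[512]');
            if (Library::rd_kafka_topic_conf_set($topicConf, 'partitioner', 'fnv1a_random', $errstr, 512) !== 0) { // != RD_KAFKA_CONF_OK
                throw new \RuntimeException(\FFI::string($errstr));
            }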

            rd_kafka_consumer_group_metadata()

            +
            public static rd_kafka_consumer_group_metadata ( 
            +    \FFI\CData|null $rk
            + ): \FFI\CData|null
            +
            +
            Remarks
            The returned pointer must be freed by the application using rd_kafka_consumer_group_metadata_destroy().
            +
            See also
            rd_kafka_send_offsets_to_transaction()
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t*
            +
            Returns
            +
            \FFI\CData|null rd_kafka_consumer_group_metadata_t* - the current consumer group metadata associated with this consumer, or NULL if rk is not a consumer configured with a group.id. This metadata object should be passed to the transactional producer’s rd_kafka_send_offsets_to_transaction() API.
            +
            + +

            rd_kafka_consumer_group_metadata_new()

            +
            public static rd_kafka_consumer_group_metadata_new ( 
            +    string|null $group_id
            + ): \FFI\CData|null
            +
            +

            Create a new consumer group metadata object. This is typically only used for writing tests.

            + +
            Remarks
            The returned pointer must be freed by the application using rd_kafka_consumer_group_metadata_destroy().
            + +
            +
            Parameters
            +
            group_id string|null const char* - The group id.
            +
            Returns
            +
            \FFI\CData|null rd_kafka_consumer_group_metadata_t*
            +
            + +

            rd_kafka_consumer_group_metadata_destroy()

            +
            public static rd_kafka_consumer_group_metadata_destroy ( 
            +    \FFI\CData|null $arg0
            + ): void
            +
            +
            +
            Parameters
            +
            arg0 \FFI\CData|null rd_kafka_consumer_group_metadata_t*
            +
            +

            rd_kafka_consumer_group_metadata_write()

            +
            public static rd_kafka_consumer_group_metadata_write ( 
            +    \FFI\CData|null $cgmd, 
            +    \FFI\CData|object|string|null $bufferp, 
            +    \FFI\CData|null $sizep
            + ): \FFI\CData|null
            +
            +

            Serialize the consumer group metadata to a binary format. This is mainly for client binding use and not for application use.

            +
            Remarks
            The serialized metadata format is private and is not compatible across different versions or even builds of librdkafka. It should only be used in the same process runtime and must only be passed to rd_kafka_consumer_group_metadata_read().
            + +
            See also
            rd_kafka_consumer_group_metadata_read()
            + +
            +
            Parameters
            +
            cgmd \FFI\CData|null const rd_kafka_consumer_group_metadata_t* - Metadata to be serialized.
            +
            bufferp \FFI\CData|object|string|null void** - On success this pointer will be updated to point to an allocated buffer containing the serialized metadata. The buffer must be freed with rd_kafka_mem_free().
            +
            sizep \FFI\CData|null size_t* - The pointed to size will be updated with the size of the serialized buffer.
            +
            Returns
            +
            \FFI\CData|null rd_kafka_error_t* - NULL on success or an error object on failure.
            +
            + +

            rd_kafka_consumer_group_metadata_read()

            +
            public static rd_kafka_consumer_group_metadata_read ( 
            +    \FFI\CData|null $cgmdp, 
            +    \FFI\CData|object|string|null $buffer, 
            +    int|null $size
            + ): \FFI\CData|null
            +
            +

            Reads serialized consumer group metadata and returns a consumer group metadata object. This is mainly for client binding use and not for application use.

            +
            Remarks
            The serialized metadata format is private and is not compatible across different versions or even builds of librdkafka. It should only be used in the same process runtime and must only be passed to rd_kafka_consumer_group_metadata_read().
            + +
            See also
            rd_kafka_consumer_group_metadata_write()
            + +
            +
            Parameters
            +
            cgmdp \FFI\CData|null rd_kafka_consumer_group_metadata_t** - On success this pointer will be updated to point to a new consumer group metadata object which must be freed with rd_kafka_consumer_group_metadata_destroy().
            +
            buffer \FFI\CData|object|string|null const void* - Pointer to the serialized data.
            +
            size int|null size_t - Size of the serialized data.
            +
            Returns
            +
            \FFI\CData|null rd_kafka_error_t* - NULL on success or an error object on failure.
            +
            + +

            rd_kafka_init_transactions()

            +
            public static rd_kafka_init_transactions ( 
            +    \FFI\CData|null $rk, 
            +    int|null $timeout_ms
            + ): \FFI\CData|null
            +
            +

            Initialize transactions for the producer instance.

            +

            This function ensures any transactions initiated by previous instances of the producer with the same transactional.id are completed. If the previous instance failed with a transaction in progress the previous transaction will be aborted. This function needs to be called before any other transactional or produce functions are called when the transactional.id is configured.

            +

            If the last transaction had begun completion (following transaction commit) but not yet finished, this function will await the previous transaction's completion.

            +

            When any previous transactions have been fenced this function will acquire the internal producer id and epoch, used in all future transactional messages issued by this producer instance.

            + +
            Remarks
            This function may block up to timeout_ms milliseconds.
            +
            +This call is resumable when a retriable timeout error is returned. Calling the function again will resume the operation that is progressing in the background.
            + +
            Remarks
            The returned error object (if not NULL) must be destroyed with rd_kafka_error_destroy().
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - Producer instance.
            +
            timeout_ms int|null int - The maximum time to block. On timeout the operation may continue in the background, depending on state, and it is okay to call init_transactions() again. If an infinite timeout (-1) is passed, the timeout will be adjusted to 2 * transaction.timeout.ms.
            +
            Returns
            +
            \FFI\CData|null rd_kafka_error_t* - NULL on success or an error object on failure. Check whether the returned error object permits retrying by calling rd_kafka_error_is_retriable(), or whether a fatal error has been raised by calling rd_kafka_error_is_fatal(). Error codes: RD_KAFKA_RESP_ERR__TIMED_OUT if the transaction coordinator could not be contacted within timeout_ms (retriable), RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE if the transaction coordinator is not available (retriable), RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS if a previous transaction would not complete within timeout_ms (retriable), RD_KAFKA_RESP_ERR__STATE if transactions have already been started or upon fatal error, RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE if the broker(s) do not support transactions (<Apache Kafka 0.11), this also raises a fatal error, RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT if the configured transaction.timeout.ms is outside the broker-configured range, this also raises a fatal error, RD_KAFKA_RESP_ERR__NOT_CONFIGURED if transactions have not been configured for the producer instance, RD_KAFKA_RESP_ERR__INVALID_ARG if rk is not a producer instance, or timeout_ms is out of range. Other error codes not listed here may be returned, depending on broker version.
            +
            + +

            rd_kafka_begin_transaction()

            +
            public static rd_kafka_begin_transaction ( 
            +    \FFI\CData|null $rk
            + ): \FFI\CData|null
            +
            +

            Begin a new transaction.

            +

            rd_kafka_init_transactions() must have been called successfully (once) before this function is called.

            +

            Upon successful return from this function the application has to perform at least one of the following operations within transaction.timeout.ms to avoid timing out the transaction on the broker:

            +
              +
            • rd_kafka_produce() (et.al)
            • +
            • rd_kafka_send_offsets_to_transaction()
            • +
            • rd_kafka_commit_transaction()
            • +
            • rd_kafka_abort_transaction()
            • +
            +

            Any messages produced, offsets sent (rd_kafka_send_offsets_to_transaction()), etc, after the successful return of this function will be part of the transaction and committed or aborted atomically.

            +

            Finish the transaction by calling rd_kafka_commit_transaction() or abort the transaction by calling rd_kafka_abort_transaction().

            + +
            Remarks
            With the transactional producer, rd_kafka_produce(), rd_kafka_producev(), et.al, are only allowed during an on-going transaction, as started with this function. Any produce call outside an on-going transaction, or for a failed transaction, will fail.
            +
            +The returned error object (if not NULL) must be destroyed with rd_kafka_error_destroy().
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - Producer instance.
            +
            Returns
            +
            \FFI\CData|null rd_kafka_error_t* - NULL on success or an error object on failure. Check whether a fatal error has been raised by calling rd_kafka_error_is_fatal(). Error codes: RD_KAFKA_RESP_ERR__STATE if a transaction is already in progress or upon fatal error, RD_KAFKA_RESP_ERR__NOT_CONFIGURED if transactions have not been configured for the producer instance, RD_KAFKA_RESP_ERR__INVALID_ARG if rk is not a producer instance. Other error codes not listed here may be returned, depending on broker version.
            +
            + +

            rd_kafka_send_offsets_to_transaction()

            +
            public static rd_kafka_send_offsets_to_transaction ( 
            +    \FFI\CData|null $rk, 
            +    \FFI\CData|null $offsets, 
            +    \FFI\CData|null $cgmetadata, 
            +    int|null $timeout_ms
            + ): \FFI\CData|null
            +
            +

            Sends a list of topic partition offsets to the consumer group coordinator for cgmetadata, and marks the offsets as part of the current transaction. These offsets will be considered committed only if the transaction is committed successfully.

            +

            The offsets should be the next message your application will consume, i.e., the last processed message's offset + 1 for each partition. Either track the offsets manually during processing or use rd_kafka_position() (on the consumer) to get the current offsets for the partitions assigned to the consumer.

            +

            Use this method at the end of a consume-transform-produce loop prior to committing the transaction with rd_kafka_commit_transaction().

            + +
            Remarks
            This function must be called on the transactional producer instance, not the consumer.
            +
            +The consumer must disable auto commits (set enable.auto.commit to false on the consumer).
            +
            +Logical and invalid offsets (such as RD_KAFKA_OFFSET_INVALID) in offsets will be ignored, if there are no valid offsets in offsets the function will return NULL and no action will be taken.
            +
            +This call is retriable but not resumable, which means a new request with a new set of provided offsets and group metadata will be sent to the transaction coordinator if the call is retried.
            +
            +It is highly recommended to retry the call (upon retriable error) with identical offsets and cgmetadata parameters. Failure to do so risks inconsistent state between what is actually included in the transaction and what the application thinks is included in the transaction.
            + +
            Remarks
            The returned error object (if not NULL) must be destroyed with rd_kafka_error_destroy().
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - Producer instance.
            +
            offsets \FFI\CData|null const rd_kafka_topic_partition_list_t* - List of offsets to commit to the consumer group upon successful commit of the transaction. Offsets should be the next message to consume, e.g., last processed message + 1.
            +
            cgmetadata \FFI\CData|null const rd_kafka_consumer_group_metadata_t* - The current consumer group metadata as returned by rd_kafka_consumer_group_metadata() on the consumer instance the provided offsets were consumed from.
            +
            timeout_ms int|null int - Maximum time allowed to register the offsets on the broker.
            +
            Returns
            +
            \FFI\CData|null rd_kafka_error_t* - NULL on success or an error object on failure. Check whether the returned error object permits retrying by calling rd_kafka_error_is_retriable(), or whether an abortable or fatal error has been raised by calling rd_kafka_error_txn_requires_abort() or rd_kafka_error_is_fatal() respectively. Error codes: RD_KAFKA_RESP_ERR__STATE if not currently in a transaction, RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH if the current producer transaction has been fenced by a newer producer instance, RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED if the producer is no longer authorized to perform transactional operations, RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED if the producer is not authorized to write the consumer offsets to the group coordinator, RD_KAFKA_RESP_ERR__NOT_CONFIGURED if transactions have not been configured for the producer instance, RD_KAFKA_RESP_ERR__INVALID_ARG if rk is not a producer instance, or if the consumer_group_id or offsets are empty. Other error codes not listed here may be returned, depending on broker version.
            +
            + +

            rd_kafka_commit_transaction()

            +
            public static rd_kafka_commit_transaction ( 
            +    \FFI\CData|null $rk, 
            +    int|null $timeout_ms
            + ): \FFI\CData|null
            +
            +

            Commit the current transaction (as started with rd_kafka_begin_transaction()).

            +

            Any outstanding messages will be flushed (delivered) before actually committing the transaction.

            +

            If any of the outstanding messages fail permanently the current transaction will enter the abortable error state and this function will return an abortable error, in this case the application must call rd_kafka_abort_transaction() before attempting a new transaction with rd_kafka_begin_transaction().

            + +
            Remarks
            It is strongly recommended to always pass -1 (remaining transaction time) as the timeout_ms. Using other values risk internal state desynchronization in case any of the underlying protocol requests fail.
            +
            +This function will block until all outstanding messages are delivered and the transaction commit request has been successfully handled by the transaction coordinator, or until timeout_ms expires, whichever comes first. On timeout the application may call the function again.
            +
            +Will automatically call rd_kafka_flush() to ensure all queued messages are delivered before attempting to commit the transaction. If the application has enabled RD_KAFKA_EVENT_DR it must serve the event queue in a separate thread since rd_kafka_flush() will not serve delivery reports in this mode.
            +
            +This call is resumable when a retriable timeout error is returned. Calling the function again will resume the operation that is progressing in the background.
            + +
            Remarks
            The returned error object (if not NULL) must be destroyed with rd_kafka_error_destroy().
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - Producer instance.
            +
            timeout_ms int|null int - The maximum time to block. On timeout the operation may continue in the background, depending on state, and it is okay to call this function again. Pass -1 to use the remaining transaction timeout, this is the recommended use.
            +
            Returns
            +
            \FFI\CData|null rd_kafka_error_t* - NULL on success or an error object on failure. Check whether the returned error object permits retrying by calling rd_kafka_error_is_retriable(), or whether an abortable or fatal error has been raised by calling rd_kafka_error_txn_requires_abort() or rd_kafka_error_is_fatal() respectively. Error codes: RD_KAFKA_RESP_ERR__STATE if not currently in a transaction, RD_KAFKA_RESP_ERR__TIMED_OUT if the transaction could not be completely committed within timeout_ms, this is a retriable error as the commit continues in the background, RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH if the current producer transaction has been fenced by a newer producer instance, RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED if the producer is no longer authorized to perform transactional operations, RD_KAFKA_RESP_ERR__NOT_CONFIGURED if transactions have not been configured for the producer instance, RD_KAFKA_RESP_ERR__INVALID_ARG if rk is not a producer instance. Other error codes not listed here may be returned, depending on broker version.
            +
            + +

            rd_kafka_abort_transaction()

            +
            public static rd_kafka_abort_transaction ( 
            +    \FFI\CData|null $rk, 
            +    int|null $timeout_ms
            + ): \FFI\CData|null
            +
            +

            Aborts the ongoing transaction.

            +
            This function should also be used to recover from non-fatal abortable transaction errors.
            +
            +Any outstanding messages will be purged and fail with RD_KAFKA_RESP_ERR__PURGE_INFLIGHT or RD_KAFKA_RESP_ERR__PURGE_QUEUE. See rd_kafka_purge() for details.
            +
            +
            Remarks
            It is strongly recommended to always pass -1 (remaining transaction time) as the timeout_ms. Using other values risk internal state desynchronization in case any of the underlying protocol requests fail.
            +
            +This function will block until all outstanding messages are purged and the transaction abort request has been successfully handled by the transaction coordinator, or until timeout_ms expires, whichever comes first. On timeout the application may call the function again. If the application has enabled RD_KAFKA_EVENT_DR it must serve the event queue in a separate thread since rd_kafka_flush() will not serve delivery reports in this mode.
            +
            +This call is resumable when a retriable timeout error is returned. Calling the function again will resume the operation that is progressing in the background.
            + +
            Remarks
            The returned error object (if not NULL) must be destroyed with rd_kafka_error_destroy().
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - Producer instance.
            +
            timeout_ms int|null int - The maximum time to block. On timeout the operation may continue in the background, depending on state, and it is okay to call this function again. Pass -1 to use the remaining transaction timeout, this is the recommended use.
            +
            Returns
            +
            \FFI\CData|null rd_kafka_error_t* - NULL on success or an error object on failure. Check whether the returned error object permits retrying by calling rd_kafka_error_is_retriable(), or whether a fatal error has been raised by calling rd_kafka_error_is_fatal(). Error codes: RD_KAFKA_RESP_ERR__STATE if not currently in a transaction, RD_KAFKA_RESP_ERR__TIMED_OUT if the transaction could not be completely aborted within timeout_ms, this is a retriable error as the abort continues in the background, RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH if the current producer transaction has been fenced by a newer producer instance, RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED if the producer is no longer authorized to perform transactional operations, RD_KAFKA_RESP_ERR__NOT_CONFIGURED if transactions have not been configured for the producer instance, RD_KAFKA_RESP_ERR__INVALID_ARG if rk is not a producer instance. Other error codes not listed here may be returned, depending on broker version.
            +
            + +
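A minimal usage sketch follows. It assumes the FFI binding class documented in this reference (referred to below as \RdKafka\FFI\Library, an assumed name) also exposes the librdkafka error helpers rd_kafka_error_is_retriable(), rd_kafka_error_is_fatal() and rd_kafka_error_destroy() as static methods; $producer is a placeholder for an already configured transactional producer handle (rd_kafka_t*).

use RdKafka\FFI\Library;

// -1 = use the remaining transaction timeout (the recommended value)
$error = Library::rd_kafka_abort_transaction($producer, -1);
if ($error !== null) {
    if (Library::rd_kafka_error_is_retriable($error)) {
        // timed out: the abort continues in the background, call again later
    } elseif (Library::rd_kafka_error_is_fatal($error)) {
        // fatal: the producer instance must be recreated
    }
    Library::rd_kafka_error_destroy($error); // returned error objects must be destroyed
}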

            rd_kafka_handle_mock_cluster()

            +
            public static rd_kafka_handle_mock_cluster ( 
            +    \FFI\CData|null $rk
            + ): \FFI\CData|null
            +
            +
            +
            Parameters
            +
            rk \FFI\CData|null const rd_kafka_t*
            +
            Returns
            +
            \FFI\CData|null rd_kafka_mock_cluster_t*
            +
            +

            rd_kafka_mock_topic_create()

            +
            public static rd_kafka_mock_topic_create ( 
            +    \FFI\CData|null $mcluster, 
            +    string|null $topic, 
            +    int|null $partition_cnt, 
            +    int|null $replication_factor
            + ): int
            +
            +
            +
            Parameters
            +
            mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
            +
            topic string|null const char*
            +
            partition_cnt int|null int
            +
            replication_factor int|null int
            +
            Returns
            +
            int rd_kafka_resp_err_t
            +
            +

            rd_kafka_mock_broker_set_down()

            +
            public static rd_kafka_mock_broker_set_down ( 
            +    \FFI\CData|null $mcluster, 
            +    int|null $broker_id
            + ): int
            +
            +
            +
            Parameters
            +
            mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
            +
            broker_id int|null int32_t
            +
            Returns
            +
            int rd_kafka_resp_err_t
            +
            +

            rd_kafka_mock_broker_set_up()

            +
            public static rd_kafka_mock_broker_set_up ( 
            +    \FFI\CData|null $mcluster, 
            +    int|null $broker_id
            + ): int
            +
            +
            +
            Parameters
            +
            mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
            +
            broker_id int|null int32_t
            +
            Returns
            +
            int rd_kafka_resp_err_t
            +
            +

            rd_kafka_mock_coordinator_set()

            +
            public static rd_kafka_mock_coordinator_set ( 
            +    \FFI\CData|null $mcluster, 
            +    string|null $key_type, 
            +    string|null $key, 
            +    int|null $broker_id
            + ): int
            +
            +
            +
            Parameters
            +
            mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
            +
            key_type string|null const char*
            +
            key string|null const char*
            +
            broker_id int|null int32_t
            +
            Returns
            +
            int rd_kafka_resp_err_t
            +
            +

            rd_kafka_mock_set_apiversion()

            +
            public static rd_kafka_mock_set_apiversion ( 
            +    \FFI\CData|null $mcluster, 
            +    int|null $ApiKey, 
            +    int|null $MinVersion, 
            +    int|null $MaxVersion
            + ): int
            +
            +
            +
            Parameters
            +
            mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
            +
            ApiKey int|null int16_t
            +
            MinVersion int|null int16_t
            +
            MaxVersion int|null int16_t
            +
            Returns
            +
            int rd_kafka_resp_err_t
            +
            +

            rd_kafka_mock_broker_set_rtt()

            +
            public static rd_kafka_mock_broker_set_rtt ( 
            +    \FFI\CData|null $mcluster, 
            +    int|null $broker_id, 
            +    int|null $rtt_ms
            + ): int
            +
            +
            +
            Parameters
            +
            mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
            +
            broker_id int|null int32_t
            +
            rtt_ms int|null int
            +
            Returns
            +
            int rd_kafka_resp_err_t
            +
            +
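The mock cluster helpers above are typically combined as in the following sketch. It assumes a client created with the test.mock.num.brokers configuration property and the assumed binding class \RdKafka\FFI\Library; $producer and the topic name 'playground' are placeholders.

use RdKafka\FFI\Library;

$mcluster = Library::rd_kafka_handle_mock_cluster($producer);
Library::rd_kafka_mock_topic_create($mcluster, 'playground', 3, 1); // 3 partitions, replication factor 1
Library::rd_kafka_mock_broker_set_down($mcluster, 1);               // simulate an outage of broker 1
Library::rd_kafka_mock_broker_set_rtt($mcluster, 2, 500);           // add 500 ms latency to broker 2
Library::rd_kafka_mock_broker_set_up($mcluster, 1);                 // bring broker 1 back up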

            rd_kafka_message_errstr()

            +
            public static rd_kafka_message_errstr ( 
            +    \FFI\CData|null $rkmessage
            + ): string|null
            +
            +

            Returns the error string for an errored rd_kafka_message_t or NULL if there was no error.

            +
            Remarks
            This function MUST NOT be used with the producer.
            + +
            +
            Parameters
            +
            rkmessage \FFI\CData|null const rd_kafka_message_t* - )
            +
            Returns
            +
            string|null const char*
            +
            + +

            rd_kafka_message_broker_id()

            +
            public static rd_kafka_message_broker_id ( 
            +    \FFI\CData|null $rkmessage
            + ): int|null
            +
            +

            Returns the broker id of the broker the message was produced to or fetched from.

            + +
            +
            Parameters
            +
            rkmessage \FFI\CData|null const rd_kafka_message_t* - )
            +
            Returns
            +
            int|null int32_t - a broker id if known, else -1.
            +
            + +

            rd_kafka_produceva()

            +
            public static rd_kafka_produceva ( 
            +    \FFI\CData|null $rk, 
            +    \FFI\CData|null $vus, 
            +    int|null $cnt
            + ): \FFI\CData|null
            +
            +

            Produce and send a single message to broker.

            +

            The message is defined by an array of rd_kafka_vu_t of count cnt.

            + +
            See also
            rd_kafka_produce, rd_kafka_producev, RD_KAFKA_V_END
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t*
            +
            vus \FFI\CData|null const rd_kafka_vu_t*
            +
            cnt int|null size_t
            +
            Returns
            +
            \FFI\CData|null rd_kafka_error_t* - an error object on failure or NULL on success. See rd_kafka_producev() for specific error codes.
            +
            + +

            rd_kafka_event_debug_contexts()

            +
            public static rd_kafka_event_debug_contexts ( 
            +    \FFI\CData|null $rkev, 
            +    \FFI\CData|null $dst, 
            +    int|null $dstsize
            + ): int|null
            +
            +

            Extract log debug context from event.

            +

            Event types:

            +
              +
            • RD_KAFKA_EVENT_LOG
            • +
            + +
            +
            Parameters
            +
            rkev \FFI\CData|null rd_kafka_event_t* - the event to extract data from.
            +
            dst \FFI\CData|null char* - destination string for comma separated list.
            +
            dstsize int|null size_t - size of provided dst buffer.
            +
            Returns
            +
            int|null int - 0 on success or -1 if unsupported event type.
            +
            + +
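A short sketch, assuming the same binding class and that $event is a placeholder for an RD_KAFKA_EVENT_LOG event obtained from a queue poll:

use RdKafka\FFI\Library;

$dst = \FFI::new('char[256]');
if (Library::rd_kafka_event_debug_contexts($event, $dst, 256) === 0) {
    $contexts = \FFI::string($dst); // comma separated list, e.g. "broker,protocol"
}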

            rd_kafka_mock_broker_push_request_errors()

            +
            public static rd_kafka_mock_broker_push_request_errors ( 
            +    \FFI\CData|null $mcluster, 
            +    int|null $broker_id, 
            +    int|null $ApiKey, 
            +    int|null $cnt, 
            +    mixed $args
            + ): int
            +
            +
            +
            Parameters
            +
            mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
            +
            broker_id int|null int32_t
            +
            ApiKey int|null int16_t
            +
            cnt int|null size_t
            +
            args mixed
            +
            Returns
            +
            int rd_kafka_resp_err_t
            +
            +

            rd_kafka_conf_get_default_topic_conf()

            +
            public static rd_kafka_conf_get_default_topic_conf ( 
            +    \FFI\CData|null $conf
            + ): \FFI\CData|null
            +
            +

            Gets the default topic configuration as previously set with rd_kafka_conf_set_default_topic_conf() or that was implicitly created by configuring a topic-level property on the global conf object.

            + +
            Warning
            The returned topic configuration object is owned by the conf object. It may be modified but not destroyed and its lifetime is the same as the conf object or the next call to rd_kafka_conf_set_default_topic_conf().
            + +
            +
            Parameters
            +
            conf \FFI\CData|null rd_kafka_conf_t* - )
            +
            Returns
            +
            \FFI\CData|null rd_kafka_topic_conf_t* - the conf’s default topic configuration (if any), or NULL.
            +
            + +

            rd_kafka_queue_yield()

            +
            public static rd_kafka_queue_yield ( 
            +    \FFI\CData|null $rkqu
            + ): void
            +
            +

            Cancels the current rd_kafka_queue_poll() on rkqu.

            +

            An application may use this from another thread to force an immediate return to the calling code (caller of rd_kafka_queue_poll()). Must not be used from signal handlers since that may cause deadlocks.

            + +
            +
            Parameters
            +
            rkqu \FFI\CData|null rd_kafka_queue_t* - )
            +
            + +

            rd_kafka_seek_partitions()

            +
            public static rd_kafka_seek_partitions ( 
            +    \FFI\CData|null $rk, 
            +    \FFI\CData|null $partitions, 
            +    int|null $timeout_ms
            + ): \FFI\CData|null
            +
            +

            Seek consumer for partitions in partitions to the per-partition offset in the .offset field of partitions.

            +

            The offset may be either absolute (>= 0) or a logical offset.

            +

            If timeout_ms is specified (not 0) the seek call will wait this long for the consumer to update its fetcher state for the given partition with the new offset. This guarantees that no previously fetched messages for the old offset (or fetch position) will be passed to the application.

            +

            If the timeout is reached the internal state will be unknown to the caller and this function returns RD_KAFKA_RESP_ERR__TIMED_OUT.

            +

            If timeout_ms is 0 it will initiate the seek but return immediately without any error reporting (e.g., async).

            +

            This call will purge all pre-fetched messages for the given partition, which may be up to queued.max.message.kbytes in size. Repeated use of seek may thus lead to increased network usage as messages are re-fetched from the broker.

            +

            Individual partition errors are reported in the per-partition .err field of partitions.

            +
            Remarks
Seek must only be performed for already assigned/consumed partitions; use rd_kafka_assign() (et al.) to set the initial starting offset for a new assignment.
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t*
            +
            partitions \FFI\CData|null rd_kafka_topic_partition_list_t*
            +
            timeout_ms int|null int
            +
            Returns
            +
            \FFI\CData|null rd_kafka_error_t* - NULL on success or an error object on failure.
            +
            + +
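A sketch of seeking a single assigned partition, assuming the binding class \RdKafka\FFI\Library (assumed name) also exposes the topic partition list helpers; $consumer and the topic name 'playground' are placeholders.

use RdKafka\FFI\Library;

$partitions = Library::rd_kafka_topic_partition_list_new(1);
Library::rd_kafka_topic_partition_list_add($partitions, 'playground', 0);
Library::rd_kafka_topic_partition_list_set_offset($partitions, 'playground', 0, 1000);

$error = Library::rd_kafka_seek_partitions($consumer, $partitions, 10000); // wait up to 10 s
// per-partition errors are reported in the .err field of $partitions
if ($error !== null) {
    Library::rd_kafka_error_destroy($error);
}
Library::rd_kafka_topic_partition_list_destroy($partitions);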

            rd_kafka_incremental_assign()

            +
            public static rd_kafka_incremental_assign ( 
            +    \FFI\CData|null $rk, 
            +    \FFI\CData|null $partitions
            + ): \FFI\CData|null
            +
            +

            Incrementally add partitions to the current assignment.

            +

            If a COOPERATIVE assignor (i.e. incremental rebalancing) is being used, this method should be used in a rebalance callback to adjust the current assignment appropriately in the case where the rebalance type is RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS. The application must pass the partition list passed to the callback (or a copy of it), even if the list is empty. partitions must not be NULL. This method may also be used outside the context of a rebalance callback.

            + +
            Remarks
            The returned error object (if not NULL) must be destroyed with rd_kafka_error_destroy().
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t*
            +
            partitions \FFI\CData|null const rd_kafka_topic_partition_list_t*
            +
            Returns
            +
            \FFI\CData|null rd_kafka_error_t* - NULL on success, or an error object if the operation was unsuccessful.
            +
            + +

            rd_kafka_incremental_unassign()

            +
            public static rd_kafka_incremental_unassign ( 
            +    \FFI\CData|null $rk, 
            +    \FFI\CData|null $partitions
            + ): \FFI\CData|null
            +
            +

            Incrementally remove partitions from the current assignment.

            +

            If a COOPERATIVE assignor (i.e. incremental rebalancing) is being used, this method should be used in a rebalance callback to adjust the current assignment appropriately in the case where the rebalance type is RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS. The application must pass the partition list passed to the callback (or a copy of it), even if the list is empty. partitions must not be NULL. This method may also be used outside the context of a rebalance callback.

            + +
            Remarks
            The returned error object (if not NULL) must be destroyed with rd_kafka_error_destroy().
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t*
            +
            partitions \FFI\CData|null const rd_kafka_topic_partition_list_t*
            +
            Returns
            +
            \FFI\CData|null rd_kafka_error_t* - NULL on success, or an error object if the operation was unsuccessful.
            +
            + +
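A rebalance callback sketch for a COOPERATIVE assignor. It assumes the callback is registered via rd_kafka_conf_set_rebalance_cb() through the assumed binding class and that the librdkafka error constants are available as PHP constants; $rk and $partitions are the FFI\CData values passed in by librdkafka.

use RdKafka\FFI\Library;

$rebalanceCb = function ($rk, int $err, $partitions, $opaque = null): void {
    if ($err === RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
        $error = Library::rd_kafka_incremental_assign($rk, $partitions);
    } else { // RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS (or lost partitions)
        $error = Library::rd_kafka_incremental_unassign($rk, $partitions);
    }
    if ($error !== null) {
        Library::rd_kafka_error_destroy($error);
    }
};
// register before creating the consumer:
// Library::rd_kafka_conf_set_rebalance_cb($conf, $rebalanceCb);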

            rd_kafka_rebalance_protocol()

            +
            public static rd_kafka_rebalance_protocol ( 
            +    \FFI\CData|null $rk
            + ): string|null
            +
            +

The rebalance protocol currently in use. This will be "NONE" if the consumer has not (yet) joined a group, else it will match the rebalance protocol ("EAGER", "COOPERATIVE") of the configured and selected assignor(s). All configured assignors must have the same protocol type, meaning online migration of a consumer group from using one protocol to another (in particular upgrading from EAGER to COOPERATIVE) without a restart is not currently supported.

            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - )
            +
            Returns
            +
            string|null const char* - NULL on error, or one of “NONE”, “EAGER”, “COOPERATIVE” on success.
            +
            + +

            rd_kafka_assignment_lost()

            +
            public static rd_kafka_assignment_lost ( 
            +    \FFI\CData|null $rk
            + ): int|null
            +
            +

Check whether the consumer considers the current assignment to have been lost involuntarily. This method is only applicable for use with a high level subscribing consumer. Assignments are revoked immediately when determined to have been lost, so this method is only useful when reacting to a RD_KAFKA_EVENT_REBALANCE event or from within a rebalance_cb. Partitions that have been lost may already be owned by other members in the group and therefore committing offsets, for example, may fail.

            +
            Remarks
            Calling rd_kafka_assign(), rd_kafka_incremental_assign() or rd_kafka_incremental_unassign() resets this flag.
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - )
            +
            Returns
            +
            int|null int - Returns 1 if the current partition assignment is considered lost, 0 otherwise.
            +
            + +
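Inside a revoke handler the two functions above are typically combined, as in this sketch (same assumptions as the earlier rebalance callback sketch):

use RdKafka\FFI\Library;

if (Library::rd_kafka_assignment_lost($rk) === 1) {
    // the partitions may already be owned by other group members;
    // skip committing offsets for them
}
if (Library::rd_kafka_rebalance_protocol($rk) === 'COOPERATIVE') {
    // use rd_kafka_incremental_assign()/rd_kafka_incremental_unassign()
    // instead of rd_kafka_assign()
}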

            rd_kafka_consumer_group_metadata_new_with_genid()

            +
            public static rd_kafka_consumer_group_metadata_new_with_genid ( 
            +    string|null $group_id, 
            +    int|null $generation_id, 
            +    string|null $member_id, 
            +    string|null $group_instance_id
            + ): \FFI\CData|null
            +
            +

            Create a new consumer group metadata object. This is typically only used for writing tests.

            + +
            Remarks
            The returned pointer must be freed by the application using rd_kafka_consumer_group_metadata_destroy().
            + +
            +
            Parameters
            +
            group_id string|null const char* - The group id.
            +
            generation_id int|null int32_t - The group generation id.
            +
            member_id string|null const char* - The group member id.
            +
            group_instance_id string|null const char* - The group instance id (may be NULL).
            +
            Returns
            +
            \FFI\CData|null rd_kafka_consumer_group_metadata_t*
            +
            + +

            rd_kafka_event_DeleteRecords_result()

            +
            public static rd_kafka_event_DeleteRecords_result ( 
            +    \FFI\CData|null $rkev
            + ): \FFI\CData|null
            +
            +

            Event types: RD_KAFKA_EVENT_DELETERECORDS_RESULT

            + +
            +
            Parameters
            +
            rkev \FFI\CData|null rd_kafka_event_t* - )
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_DeleteRecords_result_t* - the result of a DeleteRecords request, or NULL if event is of different type.
            +
            + +

            rd_kafka_event_DeleteGroups_result()

            +
            public static rd_kafka_event_DeleteGroups_result ( 
            +    \FFI\CData|null $rkev
            + ): \FFI\CData|null
            +
            +

            Get DeleteGroups result.

            + +

            Event types: RD_KAFKA_EVENT_DELETEGROUPS_RESULT

            + +
            +
            Parameters
            +
            rkev \FFI\CData|null rd_kafka_event_t* - )
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_DeleteGroups_result_t* - the result of a DeleteGroups request, or NULL if event is of different type.
            +
            + +

            rd_kafka_event_DeleteConsumerGroupOffsets_result()

            +
            public static rd_kafka_event_DeleteConsumerGroupOffsets_result ( 
            +    \FFI\CData|null $rkev
            + ): \FFI\CData|null
            +
            +

            Get DeleteConsumerGroupOffsets result.

            + +

            Event types: RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT

            + +
            +
            Parameters
            +
            rkev \FFI\CData|null rd_kafka_event_t* - )
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_DeleteConsumerGroupOffsets_result_t* - the result of a DeleteConsumerGroupOffsets request, or NULL if event is of different type.
            +
            + +

            rd_kafka_group_result_error()

            +
            public static rd_kafka_group_result_error ( 
            +    \FFI\CData|null $groupres
            + ): \FFI\CData|null
            +
            +

            Group result provides per-group operation result information.

            + +
            Remarks
            lifetime of the returned error is the same as the groupres.
            + +
            +
            Parameters
            +
            groupres \FFI\CData|null const rd_kafka_group_result_t* - )
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_error_t* - the error for the given group result, or NULL on success.
            +
            + +

            rd_kafka_group_result_name()

            +
            public static rd_kafka_group_result_name ( 
            +    \FFI\CData|null $groupres
            + ): string|null
            +
            +
            Remarks
            lifetime of the returned string is the same as the groupres.
            + +
            +
            Parameters
            +
            groupres \FFI\CData|null const rd_kafka_group_result_t* - )
            +
            Returns
            +
            string|null const char* - the name of the group for the given group result.
            +
            + +

            rd_kafka_group_result_partitions()

            +
            public static rd_kafka_group_result_partitions ( 
            +    \FFI\CData|null $groupres
            + ): \FFI\CData|null
            +
            +
            Remarks
            lifetime of the returned list is the same as the groupres.
            + +
            +
            Parameters
            +
            groupres \FFI\CData|null const rd_kafka_group_result_t* - )
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_topic_partition_list_t* - the partitions/offsets for the given group result, if applicable to the request type, else NULL.
            +
            + +

            rd_kafka_DeleteRecords_new()

            +
            public static rd_kafka_DeleteRecords_new ( 
            +    \FFI\CData|null $before_offsets
            + ): \FFI\CData|null
            +
            +

            Create a new DeleteRecords object. This object is later passed to rd_kafka_DeleteRecords().

            +

For each entry in before_offsets, topic and partition identify the partition, and offset is the offset before which the messages will be deleted (exclusive). Set offset to RD_KAFKA_OFFSET_END (high-watermark) in order to delete all data in the partition.

            + +
            +
            Parameters
            +
            before_offsets \FFI\CData|null const rd_kafka_topic_partition_list_t* - ) - For each partition delete all messages up to but not including the specified offset.
            +
            Returns
            +
\FFI\CData|null rd_kafka_DeleteRecords_t* - a newly allocated DeleteRecords object. Use rd_kafka_DeleteRecords_destroy() to free the object when done.
            +
            + +

            rd_kafka_DeleteRecords_destroy()

            +
            public static rd_kafka_DeleteRecords_destroy ( 
            +    \FFI\CData|null $del_records
            + ): void
            +
            +
            +
            Parameters
            +
            del_records \FFI\CData|null rd_kafka_DeleteRecords_t*
            +
            +

            rd_kafka_DeleteRecords_destroy_array()

            +
            public static rd_kafka_DeleteRecords_destroy_array ( 
            +    \FFI\CData|null $del_records, 
            +    int|null $del_record_cnt
            + ): void
            +
            +
            +
            Parameters
            +
            del_records \FFI\CData|null rd_kafka_DeleteRecords_t**
            +
            del_record_cnt int|null size_t
            +
            +

            rd_kafka_DeleteRecords()

            +
            public static rd_kafka_DeleteRecords ( 
            +    \FFI\CData|null $rk, 
            +    \FFI\CData|null $del_records, 
            +    int|null $del_record_cnt, 
            +    \FFI\CData|null $options, 
            +    \FFI\CData|null $rkqu
            + ): void
            +
            +

            Delete records (messages) in topic partitions older than the offsets provided.

            + +

            Supported admin options:

            +
              +
            • rd_kafka_AdminOptions_set_operation_timeout() - default 60 seconds. Controls how long the brokers will wait for records to be deleted.
            • +
            • rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms. Controls how long rdkafka will wait for the request to complete.
            • +
            +
            Remarks
            The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_DELETERECORDS_RESULT
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - Client instance.
            +
            del_records \FFI\CData|null rd_kafka_DeleteRecords_t** - The offsets to delete (up to). Currently only one DeleteRecords_t (but containing multiple offsets) is supported.
            +
            del_record_cnt int|null size_t - The number of elements in del_records, must be 1.
            +
            options \FFI\CData|null const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults.
            +
            rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to emit result on.
            +
            + +

            rd_kafka_DeleteRecords_result_offsets()

            +
            public static rd_kafka_DeleteRecords_result_offsets ( 
            +    \FFI\CData|null $result
            + ): \FFI\CData|null
            +
            +

            Get a list of topic and partition results from a DeleteRecords result. The returned objects will contain topic, partition, offset and err. offset will be set to the post-deletion low-watermark (smallest available offset of all live replicas). err will be set per-partition if deletion failed.

            +

            The returned object's life-time is the same as the result object.

            + +
            +
            Parameters
            +
            result \FFI\CData|null const rd_kafka_DeleteRecords_result_t* - )
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_topic_partition_list_t*
            +
            + +
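The DeleteRecords functions above combine as in the following sketch. It assumes the binding class \RdKafka\FFI\Library (assumed name) also exposes the queue, event and topic partition list helpers, plus a Library::new() helper for allocating typed FFI\CData (an assumption); $rk, the topic 'playground' and the offsets are placeholders, and NULL checks are omitted for brevity.

use RdKafka\FFI\Library;

$queue = Library::rd_kafka_queue_new($rk);

$before = Library::rd_kafka_topic_partition_list_new(1);
Library::rd_kafka_topic_partition_list_add($before, 'playground', 0);
Library::rd_kafka_topic_partition_list_set_offset($before, 'playground', 0, 100); // delete everything below offset 100

$delRecords = Library::new('rd_kafka_DeleteRecords_t*[1]'); // exactly one element is supported
$delRecords[0] = Library::rd_kafka_DeleteRecords_new($before);
Library::rd_kafka_DeleteRecords($rk, $delRecords, 1, null, $queue);

$event = Library::rd_kafka_queue_poll($queue, 10000); // RD_KAFKA_EVENT_DELETERECORDS_RESULT
$result = Library::rd_kafka_event_DeleteRecords_result($event);
$offsets = Library::rd_kafka_DeleteRecords_result_offsets($result); // per-partition err and post-deletion low watermark

Library::rd_kafka_event_destroy($event);
Library::rd_kafka_DeleteRecords_destroy($delRecords[0]);
Library::rd_kafka_topic_partition_list_destroy($before);
Library::rd_kafka_queue_destroy($queue);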

            rd_kafka_DeleteGroup_new()

            +
            public static rd_kafka_DeleteGroup_new ( 
            +    string|null $group
            + ): \FFI\CData|null
            +
            +

            Create a new DeleteGroup object. This object is later passed to rd_kafka_DeleteGroups().

            + +
            +
            Parameters
            +
            group string|null const char* - ) - Name of group to delete.
            +
            Returns
            +
\FFI\CData|null rd_kafka_DeleteGroup_t* - a newly allocated DeleteGroup object. Use rd_kafka_DeleteGroup_destroy() to free the object when done.
            +
            + +

            rd_kafka_DeleteGroup_destroy()

            +
            public static rd_kafka_DeleteGroup_destroy ( 
            +    \FFI\CData|null $del_group
            + ): void
            +
            +
            +
            Parameters
            +
            del_group \FFI\CData|null rd_kafka_DeleteGroup_t*
            +
            +

            rd_kafka_DeleteGroup_destroy_array()

            +
            public static rd_kafka_DeleteGroup_destroy_array ( 
            +    \FFI\CData|null $del_groups, 
            +    int|null $del_group_cnt
            + ): void
            +
            +
            +
            Parameters
            +
            del_groups \FFI\CData|null rd_kafka_DeleteGroup_t**
            +
            del_group_cnt int|null size_t
            +
            +

            rd_kafka_DeleteGroups()

            +
            public static rd_kafka_DeleteGroups ( 
            +    \FFI\CData|null $rk, 
            +    \FFI\CData|null $del_groups, 
            +    int|null $del_group_cnt, 
            +    \FFI\CData|null $options, 
            +    \FFI\CData|null $rkqu
            + ): void
            +
            +

            Delete groups from cluster as specified by the del_groups array of size del_group_cnt elements.

            + +
            Remarks
            The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_DELETEGROUPS_RESULT
            +
+This function is called deleteConsumerGroups in the Java client.
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - Client instance.
            +
            del_groups \FFI\CData|null rd_kafka_DeleteGroup_t** - Array of groups to delete.
            +
            del_group_cnt int|null size_t - Number of elements in del_groups array.
            +
            options \FFI\CData|null const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults.
            +
            rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to emit result on.
            +
            + +

            rd_kafka_DeleteGroups_result_groups()

            +
            public static rd_kafka_DeleteGroups_result_groups ( 
            +    \FFI\CData|null $result, 
            +    \FFI\CData|null $cntp
            + ): \FFI\CData|null
            +
            +

            Get an array of group results from a DeleteGroups result.

            +

            The returned groups life-time is the same as the result object.

            + +
            +
            Parameters
            +
            result \FFI\CData|null const rd_kafka_DeleteGroups_result_t* - Result to get group results from.
            +
            cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_group_result_t**
            +
            + +
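A corresponding sketch for DeleteGroups, under the same assumptions as the DeleteRecords sketch above (including the assumed Library::new() helper); 'test-group' is a placeholder group id and NULL checks are omitted.

use RdKafka\FFI\Library;

$queue = Library::rd_kafka_queue_new($rk);
$delGroups = Library::new('rd_kafka_DeleteGroup_t*[1]');
$delGroups[0] = Library::rd_kafka_DeleteGroup_new('test-group');
Library::rd_kafka_DeleteGroups($rk, $delGroups, 1, null, $queue);

$event = Library::rd_kafka_queue_poll($queue, 10000); // RD_KAFKA_EVENT_DELETEGROUPS_RESULT
$result = Library::rd_kafka_event_DeleteGroups_result($event);
$cnt = Library::new('size_t');
$groups = Library::rd_kafka_DeleteGroups_result_groups($result, \FFI::addr($cnt));
for ($i = 0; $i < $cnt->cdata; $i++) {
    $error = Library::rd_kafka_group_result_error($groups[$i]); // NULL on success, lifetime bound to $result
}
Library::rd_kafka_event_destroy($event);
Library::rd_kafka_DeleteGroup_destroy($delGroups[0]);
Library::rd_kafka_queue_destroy($queue);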

            rd_kafka_DeleteConsumerGroupOffsets_new()

            +
            public static rd_kafka_DeleteConsumerGroupOffsets_new ( 
            +    string|null $group, 
            +    \FFI\CData|null $partitions
            + ): \FFI\CData|null
            +
            +

            Create a new DeleteConsumerGroupOffsets object. This object is later passed to rd_kafka_DeleteConsumerGroupOffsets().

            + +
            +
            Parameters
            +
            group string|null const char* - Consumer group id.
            +
            partitions \FFI\CData|null const rd_kafka_topic_partition_list_t* - Partitions to delete committed offsets for. Only the topic and partition fields are used.
            +
            Returns
            +
\FFI\CData|null rd_kafka_DeleteConsumerGroupOffsets_t* - a newly allocated DeleteConsumerGroupOffsets object. Use rd_kafka_DeleteConsumerGroupOffsets_destroy() to free the object when done.
            +
            + +

            rd_kafka_DeleteConsumerGroupOffsets_destroy()

            +
            public static rd_kafka_DeleteConsumerGroupOffsets_destroy ( 
            +    \FFI\CData|null $del_grpoffsets
            + ): void
            +
            +
            +
            Parameters
            +
            del_grpoffsets \FFI\CData|null rd_kafka_DeleteConsumerGroupOffsets_t*
            +
            +

            rd_kafka_DeleteConsumerGroupOffsets_destroy_array()

            +
            public static rd_kafka_DeleteConsumerGroupOffsets_destroy_array ( 
            +    \FFI\CData|null $del_grpoffsets, 
            +    int|null $del_grpoffset_cnt
            + ): void
            +
            +
            +
            Parameters
            +
            del_grpoffsets \FFI\CData|null rd_kafka_DeleteConsumerGroupOffsets_t**
            +
            del_grpoffset_cnt int|null size_t
            +
            +

            rd_kafka_DeleteConsumerGroupOffsets()

            +
            public static rd_kafka_DeleteConsumerGroupOffsets ( 
            +    \FFI\CData|null $rk, 
            +    \FFI\CData|null $del_grpoffsets, 
            +    int|null $del_grpoffsets_cnt, 
            +    \FFI\CData|null $options, 
            +    \FFI\CData|null $rkqu
            + ): void
            +
            +

            Delete committed offsets for a set of partitions in a consumer group. This will succeed at the partition level only if the group is not actively subscribed to the corresponding topic.

            + +
            Remarks
            The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT
            +
            +The current implementation only supports one group per invocation.
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - Client instance.
            +
            del_grpoffsets \FFI\CData|null rd_kafka_DeleteConsumerGroupOffsets_t** - Array of group committed offsets to delete. MUST only be one single element.
            +
            del_grpoffsets_cnt int|null size_t - Number of elements in del_grpoffsets array. MUST always be 1.
            +
            options \FFI\CData|null const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults.
            +
            rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to emit result on.
            +
            + +

            rd_kafka_DeleteConsumerGroupOffsets_result_groups()

            +
            public static rd_kafka_DeleteConsumerGroupOffsets_result_groups ( 
            +    \FFI\CData|null $result, 
            +    \FFI\CData|null $cntp
            + ): \FFI\CData|null
            +
            +

            Get an array of results from a DeleteConsumerGroupOffsets result.

            +

            The returned groups life-time is the same as the result object.

            + +
            +
            Parameters
            +
            result \FFI\CData|null const rd_kafka_DeleteConsumerGroupOffsets_result_t* - Result to get group results from.
            +
            cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_group_result_t**
            +
            + +
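The request objects for this API are built the same way; a short sketch (same assumptions about the binding class, placeholder topic and group names):

use RdKafka\FFI\Library;

$partitions = Library::rd_kafka_topic_partition_list_new(1);
Library::rd_kafka_topic_partition_list_add($partitions, 'playground', 0); // only topic and partition are used

$delOffsets = Library::rd_kafka_DeleteConsumerGroupOffsets_new('test-group', $partitions);
// pass a single-element array of this object to rd_kafka_DeleteConsumerGroupOffsets(),
// then read the RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT event from the queue
// as shown in the DeleteGroups sketch above.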

            rd_kafka_mock_clear_request_errors()

            +
            public static rd_kafka_mock_clear_request_errors ( 
            +    \FFI\CData|null $mcluster, 
            +    int|null $ApiKey
            + ): void
            +
            +
            +
            Parameters
            +
            mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
            +
            ApiKey int|null int16_t
            +
            +

            rd_kafka_mock_push_request_errors_array()

            +
            public static rd_kafka_mock_push_request_errors_array ( 
            +    \FFI\CData|null $mcluster, 
            +    int|null $ApiKey, 
            +    int|null $cnt, 
            +    \FFI\CData|null $errors
            + ): void
            +
            +
            +
            Parameters
            +
            mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
            +
            ApiKey int|null int16_t
            +
            cnt int|null size_t
            +
            errors \FFI\CData|null const rd_kafka_resp_err_t*
            +
            +

            rd_kafka_interceptor_f_on_response_received_t()

            +
            public static rd_kafka_interceptor_f_on_response_received_t ( 
            +    \FFI\CData|null $rk, 
            +    int|null $sockfd, 
            +    string|null $brokername, 
            +    int|null $brokerid, 
            +    int|null $ApiKey, 
            +    int|null $ApiVersion, 
            +    int|null $CorrId, 
            +    int|null $size, 
            +    int|null $rtt, 
            +    int $err, 
            +    \FFI\CData|object|string|null $ic_opaque
            + ): int
            +
            +

            on_response_received() is called when a protocol response has been fully received from a broker TCP connection socket but before the response payload is parsed.

            + +
            Warning
The on_response_received() interceptor is called from internal librdkafka broker threads. An on_response_received() interceptor MUST NOT call any librdkafka APIs associated with the rk, or perform any blocking or prolonged work.
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - The client instance.
            +
            sockfd int|null int - Socket file descriptor (always -1).
            +
            brokername string|null const char* - Broker response was received from, possibly empty string on error.
            +
            brokerid int|null int32_t - Broker response was received from.
            +
            ApiKey int|null int16_t - Kafka protocol request type or -1 on error.
            +
            ApiVersion int|null int16_t - Kafka protocol request type version or -1 on error.
            +
            CorrId int|null int32_t - Kafka protocol request correlation id, possibly -1 on error.
            +
            size int|null size_t - Size of response, possibly 0 on error.
            +
            rtt int|null int64_t - Request round-trip-time in microseconds, possibly -1 on error.
            +
            err int rd_kafka_resp_err_t - Receive error.
            +
            ic_opaque \FFI\CData|object|string|null void* - The interceptor’s opaque pointer specified in ..add..().
            +
            Returns
            +
            int rd_kafka_resp_err_t - an error code on failure, the error is logged but otherwise ignored.
            +
            + +

            rd_kafka_interceptor_add_on_response_received()

            +
            public static rd_kafka_interceptor_add_on_response_received ( 
            +    \FFI\CData|null $rk, 
            +    string|null $ic_name, 
            +    \FFI\CData|\Closure $on_response_received, 
            +    \FFI\CData|object|string|null $ic_opaque
            + ): int
            +
            +

            Append an on_response_received() interceptor.

            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - Client instance.
            +
            ic_name string|null const char* - Interceptor name, used in logging.
            +
            on_response_received \FFI\CData|\Closure rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_response_received_t*)(rd_kafka_t*, int, const char*, int32_t, int16_t, int16_t, int32_t, size_t, int64_t, rd_kafka_resp_err_t, void*)
            +
            ic_opaque \FFI\CData|object|string|null void* - Opaque value that will be passed to the function.
            +
            Returns
            +
            int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing interceptor with the same ic_name and function has already been added to conf.
            +
            + +
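A sketch of a response-time logging interceptor. It assumes the binding accepts a PHP Closure for the callback parameter (as the signature above suggests) and that RD_KAFKA_RESP_ERR_NO_ERROR is available as a PHP constant; remember that the callback runs on internal broker threads and must not call back into librdkafka or block.

use RdKafka\FFI\Library;

$onResponseReceived = function (
    $rk, $sockfd, $brokername, $brokerid, $apiKey, $apiVersion, $corrId, $size, $rtt, $err, $opaque
): int {
    error_log(sprintf('broker %d: ApiKey %d took %.1f ms (err %d)', $brokerid, $apiKey, $rtt / 1000, $err));
    return RD_KAFKA_RESP_ERR_NO_ERROR;
};
Library::rd_kafka_interceptor_add_on_response_received($rk, 'rtt-logger', $onResponseReceived, null);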

            rd_kafka_conf_set_engine_callback_data()

            +
            public static rd_kafka_conf_set_engine_callback_data ( 
            +    \FFI\CData|null $conf, 
            +    \FFI\CData|object|string|null $callback_data
            + ): void
            +
            +

            Set callback_data for OpenSSL engine.

            + +
            Remarks
The ssl.engine.location configuration must be set for this to have effect.
            +
            +The memory pointed to by value must remain valid for the lifetime of the configuration object and any Kafka clients that use it.
            + +
            +
            Parameters
            +
            conf \FFI\CData|null rd_kafka_conf_t* - Configuration object.
            +
            callback_data \FFI\CData|object|string|null void* - passed to engine callbacks, e.g. ENGINE_load_ssl_client_cert.
            +
            + +

            rd_kafka_mem_calloc()

            +
            public static rd_kafka_mem_calloc ( 
            +    \FFI\CData|null $rk, 
            +    int|null $num, 
            +    int|null $size
            + ): \FFI\CData|object|string|null
            +
            +

            Allocate and zero memory using the same allocator librdkafka uses.

            +

            This is typically an abstraction for the calloc(3) call and makes sure the application can use the same memory allocator as librdkafka for allocating pointers that are used by librdkafka.

            +

rk can be set to return memory allocated by a specific rk instance; otherwise pass NULL for rk.

            +
            Remarks
            Memory allocated by rd_kafka_mem_calloc() must be freed using rd_kafka_mem_free()
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t*
            +
            num int|null size_t
            +
            size int|null size_t
            +
            Returns
            +
            \FFI\CData|object|string|null void*
            +
            + +

            rd_kafka_mem_malloc()

            +
            public static rd_kafka_mem_malloc ( 
            +    \FFI\CData|null $rk, 
            +    int|null $size
            + ): \FFI\CData|object|string|null
            +
            +

            Allocate memory using the same allocator librdkafka uses.

            +

            This is typically an abstraction for the malloc(3) call and makes sure the application can use the same memory allocator as librdkafka for allocating pointers that are used by librdkafka.

            +

rk can be set to return memory allocated by a specific rk instance; otherwise pass NULL for rk.

            +
            Remarks
            Memory allocated by rd_kafka_mem_malloc() must be freed using rd_kafka_mem_free()
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t*
            +
            size int|null size_t
            +
            Returns
            +
            \FFI\CData|object|string|null void*
            +
            + +
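A short sketch of allocating and releasing memory with librdkafka's allocator; rd_kafka_mem_free() is the matching free function named in the remarks above.

use RdKafka\FFI\Library;

$buf = Library::rd_kafka_mem_calloc(null, 1, 1024); // 1 KiB, zero-initialized, not tied to a client instance
// ... hand $buf to an API that expects librdkafka-allocated memory ...
Library::rd_kafka_mem_free(null, $buf);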

            rd_kafka_mock_broker_push_request_error_rtts()

            +
            public static rd_kafka_mock_broker_push_request_error_rtts ( 
            +    \FFI\CData|null $mcluster, 
            +    int|null $broker_id, 
            +    int|null $ApiKey, 
            +    int|null $cnt, 
            +    mixed $args
            + ): int
            +
            +
            +
            Parameters
            +
            mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
            +
            broker_id int|null int32_t
            +
            ApiKey int|null int16_t
            +
            cnt int|null size_t
            +
            args mixed
            +
            Returns
            +
            int rd_kafka_resp_err_t
            +
            +

            rd_kafka_conf_enable_sasl_queue()

            +
            public static rd_kafka_conf_enable_sasl_queue ( 
            +    \FFI\CData|null $conf, 
            +    int|null $enable
            + ): void
            +
            +

            Enable/disable creation of a queue specific to SASL events and callbacks.

            +

            For SASL mechanisms that trigger callbacks (currently OAUTHBEARER) this configuration API allows an application to get a dedicated queue for the SASL events/callbacks. After enabling the queue with this API the application can retrieve the queue by calling rd_kafka_queue_get_sasl() on the client instance. This queue may then be served directly by the application (with rd_kafka_queue_poll(), et.al) or forwarded to another queue, such as the background queue.

            +

            A convenience function is available to automatically forward the SASL queue to librdkafka's background thread, see rd_kafka_sasl_background_callbacks_enable().

            +

            By default (enable = 0) the main queue (as served by rd_kafka_poll(), et.al.) is used for SASL callbacks.

            +
            Remarks
            The SASL queue is currently only used by the SASL OAUTHBEARER mechanism's token_refresh_cb().
            +
            See also
            rd_kafka_queue_get_sasl()
            +
            +rd_kafka_sasl_background_callbacks_enable()
            + +
            +
            Parameters
            +
            conf \FFI\CData|null rd_kafka_conf_t*
            +
            enable int|null int
            +
            + +

            rd_kafka_queue_get_sasl()

            +
            public static rd_kafka_queue_get_sasl ( 
            +    \FFI\CData|null $rk
            + ): \FFI\CData|null
            +
            +

Use rd_kafka_queue_destroy() to lose the reference.

            +
            See also
            rd_kafka_sasl_background_callbacks_enable()
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - )
            +
            Returns
            +
            \FFI\CData|null rd_kafka_queue_t* - a reference to the SASL callback queue, if a SASL mechanism with callbacks is configured (currently only OAUTHBEARER), else returns NULL.
            +
            + +

            rd_kafka_sasl_background_callbacks_enable()

            +
            public static rd_kafka_sasl_background_callbacks_enable ( 
            +    \FFI\CData|null $rk
            + ): \FFI\CData|null
            +
            +

            Enable SASL OAUTHBEARER refresh callbacks on the librdkafka background thread.

            +

This serves as an alternative for applications that do not call rd_kafka_poll() (et.al.) at regular intervals (or not at all), as a means of automatically triggering the refresh callbacks, which are needed to initiate connections to the brokers when a custom OAUTHBEARER refresh callback is configured.

            + +
            See also
            rd_kafka_queue_get_sasl()
            +
            +rd_kafka_conf_set_oauthbearer_token_refresh_cb()
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - )
            +
            Returns
            +
            \FFI\CData|null rd_kafka_error_t* - NULL on success or an error object on error.
            +
            + +
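A configuration sketch combining the three SASL queue functions above (same assumptions about the assumed binding class \RdKafka\FFI\Library; $conf and $rk are placeholders):

use RdKafka\FFI\Library;

Library::rd_kafka_conf_enable_sasl_queue($conf, 1); // before the client is created
// ... create $rk from $conf ...

// either let librdkafka's background thread serve the OAUTHBEARER refresh callbacks:
$error = Library::rd_kafka_sasl_background_callbacks_enable($rk);
if ($error !== null) {
    Library::rd_kafka_error_destroy($error);
}
// ... or serve the dedicated queue yourself:
// $saslQueue = Library::rd_kafka_queue_get_sasl($rk);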

            rd_kafka_consumer_close_queue()

            +
            public static rd_kafka_consumer_close_queue ( 
            +    \FFI\CData|null $rk, 
            +    \FFI\CData|null $rkqu
            + ): \FFI\CData|null
            +
            +

            Asynchronously close the consumer.

            +

            Performs the same actions as rd_kafka_consumer_close() but in a background thread.

            +

            Rebalance events/callbacks (etc) will be forwarded to the application-provided rkqu. The application must poll/serve this queue until rd_kafka_consumer_closed() returns true.

            +
            Remarks
            Depending on consumer group join state there may or may not be rebalance events emitted on rkqu.
            + +
            See also
            rd_kafka_consumer_closed()
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t*
            +
            rkqu \FFI\CData|null rd_kafka_queue_t*
            +
            Returns
            +
            \FFI\CData|null rd_kafka_error_t* - an error object if the consumer close failed, else NULL.
            +
            + +

            rd_kafka_consumer_closed()

            +
            public static rd_kafka_consumer_closed ( 
            +    \FFI\CData|null $rk
            + ): int|null
            +
            +

            Should be used in conjunction with rd_kafka_consumer_close_queue() to know when the consumer has been closed.

            +
            See also
            rd_kafka_consumer_close_queue()
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - )
            +
            Returns
            +
            int|null int - 1 if the consumer is closed, else 0.
            +
            + +
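The two functions above are used together as in this sketch (same assumptions; $rk is a placeholder consumer handle):

use RdKafka\FFI\Library;

$closeQueue = Library::rd_kafka_queue_new($rk);
$error = Library::rd_kafka_consumer_close_queue($rk, $closeQueue);
if ($error === null) {
    while (Library::rd_kafka_consumer_closed($rk) === 0) {
        $event = Library::rd_kafka_queue_poll($closeQueue, 100); // serve rebalance events etc.
        if ($event !== null) {
            Library::rd_kafka_event_destroy($event);
        }
    }
} else {
    Library::rd_kafka_error_destroy($error);
}
Library::rd_kafka_queue_destroy($closeQueue);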

            rd_kafka_event_CreateAcls_result()

            +
            public static rd_kafka_event_CreateAcls_result ( 
            +    \FFI\CData|null $rkev
            + ): \FFI\CData|null
            +
            +

            Event types: RD_KAFKA_EVENT_CREATEACLS_RESULT

            + +
            +
            Parameters
            +
            rkev \FFI\CData|null rd_kafka_event_t* - )
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_CreateAcls_result_t* - the result of a CreateAcls request, or NULL if event is of different type.
            +
            + +

            rd_kafka_event_DescribeAcls_result()

            +
            public static rd_kafka_event_DescribeAcls_result ( 
            +    \FFI\CData|null $rkev
            + ): \FFI\CData|null
            +
            +

            Event types: RD_KAFKA_EVENT_DESCRIBEACLS_RESULT

            + +
            +
            Parameters
            +
            rkev \FFI\CData|null rd_kafka_event_t* - )
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_DescribeAcls_result_t* - the result of a DescribeAcls request, or NULL if event is of different type.
            +
            + +

            rd_kafka_event_DeleteAcls_result()

            +
            public static rd_kafka_event_DeleteAcls_result ( 
            +    \FFI\CData|null $rkev
            + ): \FFI\CData|null
            +
            +

            Event types: RD_KAFKA_EVENT_DELETEACLS_RESULT

            + +
            +
            Parameters
            +
            rkev \FFI\CData|null rd_kafka_event_t* - )
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_DeleteAcls_result_t* - the result of a DeleteAcls request, or NULL if event is of different type.
            +
            + +

            rd_kafka_ResourcePatternType_name()

            +
            public static rd_kafka_ResourcePatternType_name ( 
            +    int $resource_pattern_type
            + ): string|null
            +
            +
            +
            Parameters
            +
            resource_pattern_type int rd_kafka_ResourcePatternType_t - )
            +
            Returns
            +
            string|null const char* - a string representation of the resource_pattern_type
            +
            + +

            rd_kafka_acl_result_error()

            +
            public static rd_kafka_acl_result_error ( 
            +    \FFI\CData|null $aclres
            + ): \FFI\CData|null
            +
            +
            +
            Parameters
            +
            aclres \FFI\CData|null const rd_kafka_acl_result_t* - )
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_error_t* - the error object for the given acl result, or NULL on success.
            +
            + +

            rd_kafka_AclOperation_name()

            +
            public static rd_kafka_AclOperation_name ( 
            +    int $acl_operation
            + ): string|null
            +
            +
            +
            Parameters
            +
            acl_operation int rd_kafka_AclOperation_t - )
            +
            Returns
            +
            string|null const char* - a string representation of the acl_operation
            +
            + +

            rd_kafka_AclPermissionType_name()

            +
            public static rd_kafka_AclPermissionType_name ( 
            +    int $acl_permission_type
            + ): string|null
            +
            +
            +
            Parameters
            +
            acl_permission_type int rd_kafka_AclPermissionType_t - )
            +
            Returns
            +
            string|null const char* - a string representation of the acl_permission_type
            +
            + +

            rd_kafka_AclBinding_new()

            +
            public static rd_kafka_AclBinding_new ( 
            +    int $restype, 
            +    string|null $name, 
            +    int $resource_pattern_type, 
            +    string|null $principal, 
            +    string|null $host, 
            +    int $operation, 
            +    int $permission_type, 
            +    \FFI\CData|null $errstr, 
            +    int|null $errstr_size
            + ): \FFI\CData|null
            +
            +

            Create a new AclBinding object. This object is later passed to rd_kafka_CreateAcls().

            + +
            +
            Parameters
            +
            restype int rd_kafka_ResourceType_t - The ResourceType.
            +
            name string|null const char* - The resource name.
            +
            resource_pattern_type int rd_kafka_ResourcePatternType_t - The pattern type.
            +
            principal string|null const char* - A principal, following the kafka specification.
            +
host string|null const char* - A hostname or IP.
            +
            operation int rd_kafka_AclOperation_t - A Kafka operation.
            +
            permission_type int rd_kafka_AclPermissionType_t - A Kafka permission type.
            +
            errstr \FFI\CData|null char* - An error string for returning errors or NULL to not use it.
            +
            errstr_size int|null size_t - The errstr size or 0 to not use it.
            +
            Returns
            +
\FFI\CData|null rd_kafka_AclBinding_t* - a newly allocated AclBinding object, or NULL if the input parameters are invalid. Use rd_kafka_AclBinding_destroy() to free the object when done.
            +
            + +
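A sketch of building an ACL binding that allows a user to write to a topic. It assumes the RD_KAFKA_* enum values are available as PHP constants; the principal and topic names are placeholders.

use RdKafka\FFI\Library;

$errstr = \FFI::new('char[512]');
$acl = Library::rd_kafka_AclBinding_new(
    RD_KAFKA_RESOURCE_TOPIC,
    'playground',
    RD_KAFKA_RESOURCE_PATTERN_LITERAL,
    'User:alice',
    '*',
    RD_KAFKA_ACL_OPERATION_WRITE,
    RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
    $errstr,
    512
);
if ($acl === null) {
    throw new \RuntimeException(\FFI::string($errstr));
}
// pass $acl (in a one-element array) to rd_kafka_CreateAcls() and free it afterwards
// with rd_kafka_AclBinding_destroy()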

            rd_kafka_AclBindingFilter_new()

            +
            public static rd_kafka_AclBindingFilter_new ( 
            +    int $restype, 
            +    string|null $name, 
            +    int $resource_pattern_type, 
            +    string|null $principal, 
            +    string|null $host, 
            +    int $operation, 
            +    int $permission_type, 
            +    \FFI\CData|null $errstr, 
            +    int|null $errstr_size
            + ): \FFI\CData|null
            +
            +

Create a new AclBindingFilter object. This object is later passed to rd_kafka_DescribeAcls() or rd_kafka_DeleteAcls() in order to filter the ACLs to retrieve or to delete. Use the same rd_kafka_AclBinding functions to query or destroy it.

            + +
            +
            Parameters
            +
            restype int rd_kafka_ResourceType_t - The ResourceType or RD_KAFKA_RESOURCE_ANY if not filtering by this field.
            +
            name string|null const char* - The resource name or NULL if not filtering by this field.
            +
            resource_pattern_type int rd_kafka_ResourcePatternType_t - The pattern type or RD_KAFKA_RESOURCE_PATTERN_ANY if not filtering by this field.
            +
            principal string|null const char* - A principal or NULL if not filtering by this field.
            +
host string|null const char* - A hostname or IP, or NULL if not filtering by this field.
            +
            operation int rd_kafka_AclOperation_t - A Kafka operation or RD_KAFKA_ACL_OPERATION_ANY if not filtering by this field.
            +
            permission_type int rd_kafka_AclPermissionType_t - A Kafka permission type or RD_KAFKA_ACL_PERMISSION_TYPE_ANY if not filtering by this field.
            +
            errstr \FFI\CData|null char* - An error string for returning errors or NULL to not use it.
            +
            errstr_size int|null size_t - The errstr size or 0 to not use it.
            +
            Returns
            +
\FFI\CData|null rd_kafka_AclBindingFilter_t* - a newly allocated AclBindingFilter object, or NULL if the input parameters are invalid. Use rd_kafka_AclBinding_destroy() to free the object when done.
            +
            + +

            rd_kafka_AclBinding_restype()

            +
            public static rd_kafka_AclBinding_restype ( 
            +    \FFI\CData|null $acl
            + ): int
            +
            +
            +
            Parameters
            +
            acl \FFI\CData|null const rd_kafka_AclBinding_t* - )
            +
            Returns
            +
            int rd_kafka_ResourceType_t - the resource type for the given acl binding.
            +
            + +

            rd_kafka_AclBinding_name()

            +
            public static rd_kafka_AclBinding_name ( 
            +    \FFI\CData|null $acl
            + ): string|null
            +
            +
            Remarks
            lifetime of the returned string is the same as the acl.
            + +
            +
            Parameters
            +
            acl \FFI\CData|null const rd_kafka_AclBinding_t* - )
            +
            Returns
            +
            string|null const char* - the resource name for the given acl binding.
            +
            + +

            rd_kafka_AclBinding_principal()

            +
            public static rd_kafka_AclBinding_principal ( 
            +    \FFI\CData|null $acl
            + ): string|null
            +
            +
            Remarks
            lifetime of the returned string is the same as the acl.
            + +
            +
            Parameters
            +
            acl \FFI\CData|null const rd_kafka_AclBinding_t* - )
            +
            Returns
            +
            string|null const char* - the principal for the given acl binding.
            +
            + +

            rd_kafka_AclBinding_host()

            +
            public static rd_kafka_AclBinding_host ( 
            +    \FFI\CData|null $acl
            + ): string|null
            +
            +
            Remarks
            lifetime of the returned string is the same as the acl.
            + +
            +
            Parameters
            +
            acl \FFI\CData|null const rd_kafka_AclBinding_t* - )
            +
            Returns
            +
            string|null const char* - the host for the given acl binding.
            +
            + +

            rd_kafka_AclBinding_operation()

            +
            public static rd_kafka_AclBinding_operation ( 
            +    \FFI\CData|null $acl
            + ): int
            +
            +
            +
            Parameters
            +
            acl \FFI\CData|null const rd_kafka_AclBinding_t* - )
            +
            Returns
            +
            int rd_kafka_AclOperation_t - the acl operation for the given acl binding.
            +
            + +

            rd_kafka_AclBinding_permission_type()

            +
            public static rd_kafka_AclBinding_permission_type ( 
            +    \FFI\CData|null $acl
            + ): int
            +
            +
            +
            Parameters
            +
            acl \FFI\CData|null const rd_kafka_AclBinding_t* - )
            +
            Returns
            +
            int rd_kafka_AclPermissionType_t - the permission type for the given acl binding.
            +
            + +

            rd_kafka_AclBinding_resource_pattern_type()

            +
            public static rd_kafka_AclBinding_resource_pattern_type ( 
            +    \FFI\CData|null $acl
            + ): int
            +
            +
            +
            Parameters
            +
            acl \FFI\CData|null const rd_kafka_AclBinding_t* - )
            +
            Returns
            +
            int rd_kafka_ResourcePatternType_t - the resource pattern type for the given acl binding.
            +
            + +

            rd_kafka_AclBinding_error()

            +
            public static rd_kafka_AclBinding_error ( 
            +    \FFI\CData|null $acl
            + ): \FFI\CData|null
            +
            +
            +
            Parameters
            +
            acl \FFI\CData|null const rd_kafka_AclBinding_t* - )
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_error_t* - the error object for the given acl binding, or NULL on success.
            +
            + +

            rd_kafka_AclBinding_destroy()

            +
            public static rd_kafka_AclBinding_destroy ( 
            +    \FFI\CData|null $acl_binding
            + ): void
            +
            +
            +
            Parameters
            +
            acl_binding \FFI\CData|null rd_kafka_AclBinding_t*
            +
            +

            rd_kafka_AclBinding_destroy_array()

            +
            public static rd_kafka_AclBinding_destroy_array ( 
            +    \FFI\CData|null $acl_bindings, 
            +    int|null $acl_bindings_cnt
            + ): void
            +
            +
            +
            Parameters
            +
            acl_bindings \FFI\CData|null rd_kafka_AclBinding_t**
            +
            acl_bindings_cnt int|null size_t
            +
            +
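
The functions above are enough to build, inspect and release an ACL binding end to end. The following is a minimal sketch only: `Library` is a stand-in for the binding class that exposes these static methods, and the RD_KAFKA_* values are the librdkafka enum constants, assumed to be defined by the binding; the topic name and principal are placeholders.

```php
// Minimal sketch: "Library" stands in for the binding class exposing these
// static methods; the RD_KAFKA_* enum constants are assumed to be defined by it.
$binding = Library::rd_kafka_AclBinding_new(
    RD_KAFKA_RESOURCE_TOPIC,
    'purchases',                          // resource name (placeholder)
    RD_KAFKA_RESOURCE_PATTERN_LITERAL,
    'User:alice',                         // principal (placeholder)
    '*',                                  // host
    RD_KAFKA_ACL_OPERATION_READ,
    RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
    null,                                 // errstr: not used
    0                                     // errstr_size
);

if ($binding === null) {
    throw new \RuntimeException('Invalid AclBinding parameters');
}

// Read the fields back through the accessors.
var_dump(
    Library::rd_kafka_AclBinding_name($binding),       // "purchases"
    Library::rd_kafka_AclBinding_principal($binding),  // "User:alice"
    Library::rd_kafka_AclBinding_operation($binding)   // RD_KAFKA_ACL_OPERATION_READ
);

Library::rd_kafka_AclBinding_destroy($binding);
```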

            rd_kafka_CreateAcls_result_acls()

            +
            public static rd_kafka_CreateAcls_result_acls ( 
            +    \FFI\CData|null $result, 
            +    \FFI\CData|null $cntp
            + ): \FFI\CData|null
            +
            +

            Get an array of acl results from a CreateAcls result.

            +

            The returned acl result life-time is the same as the result object.

            + +
            +
            Parameters
            +
            result \FFI\CData|null const rd_kafka_CreateAcls_result_t* - CreateAcls result to get acl results from.
            +
            cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_acl_result_t**
            +
            + +

            rd_kafka_CreateAcls()

            +
            public static rd_kafka_CreateAcls ( 
            +    \FFI\CData|null $rk, 
            +    \FFI\CData|null $new_acls, 
            +    int|null $new_acls_cnt, 
            +    \FFI\CData|null $options, 
            +    \FFI\CData|null $rkqu
            + ): void
            +
            +

Create acls as specified by the new_acls array of size new_acls_cnt elements.

            + +

            Supported admin options:

            +
              +
            • rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms
            • +
            +
            Remarks
            The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_CREATEACLS_RESULT
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - Client instance.
            +
            new_acls \FFI\CData|null rd_kafka_AclBinding_t** - Array of new acls to create.
            +
            new_acls_cnt int|null size_t - Number of elements in new_acls array.
            +
            options \FFI\CData|null const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults.
            +
            rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to emit result on.
            +
            + +

            rd_kafka_DescribeAcls_result_acls()

            +
            public static rd_kafka_DescribeAcls_result_acls ( 
            +    \FFI\CData|null $result, 
            +    \FFI\CData|null $cntp
            + ): \FFI\CData|null
            +
            +

            Get an array of resource results from a DescribeAcls result.

            +

            DescribeAcls - describe access control lists.

            +

            The returned resources life-time is the same as the result object.

            + +
            +
            Parameters
            +
            result \FFI\CData|null const rd_kafka_DescribeAcls_result_t* - DescribeAcls result to get acls from.
            +
            cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_AclBinding_t**
            +
            + +

            rd_kafka_DescribeAcls()

            +
            public static rd_kafka_DescribeAcls ( 
            +    \FFI\CData|null $rk, 
            +    \FFI\CData|null $acl_filter, 
            +    \FFI\CData|null $options, 
            +    \FFI\CData|null $rkqu
            + ): void
            +
            +

            Describe acls matching the filter provided in acl_filter.

            + +

            Supported admin options:

            +
              +
            • rd_kafka_AdminOptions_set_operation_timeout() - default 0
            • +
            +
            Remarks
            The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_DESCRIBEACLS_RESULT
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - Client instance.
            +
            acl_filter \FFI\CData|null rd_kafka_AclBindingFilter_t* - Filter for the returned acls.
            +
            options \FFI\CData|null const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults.
            +
            rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to emit result on.
            +
            + +
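
Putting the filter, the request and the result accessors together, a DescribeAcls round trip looks roughly like the sketch below. It assumes `$rk` is an existing rd_kafka_t* client handle, that `Library` is the binding class exposing these static methods, and that rd_kafka_queue_new(), rd_kafka_queue_poll(), rd_kafka_event_DescribeAcls_result(), rd_kafka_event_destroy(), rd_kafka_queue_destroy() and the RD_KAFKA_* enum constants are bound/defined the same way.

```php
// Sketch only: $rk, "Library", the queue/event helpers and the RD_KAFKA_* enum
// constants are assumptions about the binding.
$filter = Library::rd_kafka_AclBindingFilter_new(
    RD_KAFKA_RESOURCE_TOPIC,            // only topic ACLs
    null,                               // any resource name
    RD_KAFKA_RESOURCE_PATTERN_ANY,
    null,                               // any principal
    null,                               // any host
    RD_KAFKA_ACL_OPERATION_ANY,
    RD_KAFKA_ACL_PERMISSION_TYPE_ANY,
    null,                               // errstr not used
    0
);

$queue = Library::rd_kafka_queue_new($rk);
Library::rd_kafka_DescribeAcls($rk, $filter, null, $queue);

$event = Library::rd_kafka_queue_poll($queue, 10000);          // null on timeout
$result = Library::rd_kafka_event_DescribeAcls_result($event); // assumed bound; null if other event type

$cnt = \FFI::new('size_t');
$acls = Library::rd_kafka_DescribeAcls_result_acls($result, \FFI::addr($cnt));
for ($i = 0; $i < $cnt->cdata; $i++) {
    printf(
        "%s -> %s\n",
        Library::rd_kafka_AclBinding_name($acls[$i]),
        Library::rd_kafka_AclBinding_principal($acls[$i])
    );
}

Library::rd_kafka_AclBinding_destroy($filter);
Library::rd_kafka_event_destroy($event);   // also releases the result and its acls
Library::rd_kafka_queue_destroy($queue);
```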

            rd_kafka_DeleteAcls_result_responses()

            +
            public static rd_kafka_DeleteAcls_result_responses ( 
            +    \FFI\CData|null $result, 
            +    \FFI\CData|null $cntp
            + ): \FFI\CData|null
            +
            +

            Get an array of DeleteAcls result responses from a DeleteAcls result.

            +

            The returned responses life-time is the same as the result object.

            + +
            +
            Parameters
            +
            result \FFI\CData|null const rd_kafka_DeleteAcls_result_t* - DeleteAcls result to get responses from.
            +
            cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_DeleteAcls_result_response_t**
            +
            + +

            rd_kafka_DeleteAcls_result_response_error()

            +
            public static rd_kafka_DeleteAcls_result_response_error ( 
            +    \FFI\CData|null $result_response
            + ): \FFI\CData|null
            +
            +
            +
            Parameters
            +
            result_response \FFI\CData|null const rd_kafka_DeleteAcls_result_response_t* - )
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_error_t* - the error object for the given DeleteAcls result response, or NULL on success.
            +
            + +

            rd_kafka_DeleteAcls_result_response_matching_acls()

            +
            public static rd_kafka_DeleteAcls_result_response_matching_acls ( 
            +    \FFI\CData|null $result_response, 
            +    \FFI\CData|null $matching_acls_cntp
            + ): \FFI\CData|null
            +
            +
            Remarks
            lifetime of the returned acl bindings is the same as the result_response.
            + +
            +
            Parameters
            +
            result_response \FFI\CData|null const rd_kafka_DeleteAcls_result_response_t*
            +
            matching_acls_cntp \FFI\CData|null size_t*
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_AclBinding_t** - the matching acls array for the given DeleteAcls result response.
            +
            + +

            rd_kafka_DeleteAcls()

            +
            public static rd_kafka_DeleteAcls ( 
            +    \FFI\CData|null $rk, 
            +    \FFI\CData|null $del_acls, 
            +    int|null $del_acls_cnt, 
            +    \FFI\CData|null $options, 
            +    \FFI\CData|null $rkqu
            + ): void
            +
            +

Delete acls matching the filters provided in the del_acls array of size del_acls_cnt.

            + +

            Supported admin options:

            +
              +
            • rd_kafka_AdminOptions_set_operation_timeout() - default 0
            • +
            +
            Remarks
            The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_DELETEACLS_RESULT
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - Client instance.
            +
            del_acls \FFI\CData|null rd_kafka_AclBindingFilter_t** - Filters for the acls to delete.
            +
            del_acls_cnt int|null size_t - Number of elements in del_acls array.
            +
            options \FFI\CData|null const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults.
            +
            rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to emit result on.
            +
            + +

            rd_kafka_conf_set_resolve_cb()

            +
            public static rd_kafka_conf_set_resolve_cb ( 
            +    \FFI\CData|null $conf, 
            +    \FFI\CData|\Closure $resolve_cb
            + ): void
            +
            +

            Set address resolution callback.

            +

            The callback is responsible for resolving the hostname node and the service service into a list of socket addresses as getaddrinfo(3) would. The hints and res parameters function as they do for getaddrinfo(3). The callback's opaque argument is the opaque set with rd_kafka_conf_set_opaque().

            +

            If the callback is invoked with a NULL node, service, and hints, the callback should instead free the addrinfo struct specified in res. In this case the callback must succeed; the return value will not be checked by the caller.

            +

            The callback's return value is interpreted as the return value of getaddrinfo(3).

            +
            Remarks
            The callback will be called from an internal librdkafka thread.
            + +
            +
            Parameters
            +
            conf \FFI\CData|null rd_kafka_conf_t*
            +
resolve_cb \FFI\CData|\Closure int(*)(const char*, const char*, const struct addrinfo*, struct addrinfo**, void*)
            +
            + +

            rd_kafka_sasl_set_credentials()

            +
            public static rd_kafka_sasl_set_credentials ( 
            +    \FFI\CData|null $rk, 
            +    string|null $username, 
            +    string|null $password
            + ): \FFI\CData|null
            +
            +

            Sets SASL credentials used for SASL PLAIN and SCRAM mechanisms by this Kafka client.

            +

            This function sets or resets the SASL username and password credentials used by this Kafka client. The new credentials will be used the next time this client needs to authenticate to a broker. This function will not disconnect existing connections that might have been made using the old credentials.

            +
            Remarks
            This function only applies to the SASL PLAIN and SCRAM mechanisms.
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t*
            +
            username string|null const char*
            +
            password string|null const char*
            +
            Returns
            +
            \FFI\CData|null rd_kafka_error_t* - NULL on success or an error object on error.
            +
            + +
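
A typical use is credential rotation for a long-running client. Minimal sketch, assuming `$rk` is the rd_kafka_t* handle of a client configured for SASL PLAIN or SCRAM, `Library` is the binding class exposing these static methods, and rd_kafka_error_string()/rd_kafka_error_destroy() are further librdkafka functions assumed to be bound the same way.

```php
// $newPassword: freshly rotated secret fetched from your secret store (placeholder)
$error = Library::rd_kafka_sasl_set_credentials($rk, 'svc-orders', $newPassword);
if ($error !== null) {
    $message = Library::rd_kafka_error_string($error);   // assumed to be bound
    Library::rd_kafka_error_destroy($error);
    throw new \RuntimeException('SASL credential rotation failed: ' . $message);
}
// Existing connections keep working; the new credentials are used on the next
// authentication attempt.
```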

            rd_kafka_Node_id()

            +
            public static rd_kafka_Node_id ( 
            +    \FFI\CData|null $node
            + ): int|null
            +
            +

            Get the id of node.

            + +
            +
            Parameters
            +
node \FFI\CData|null const rd_kafka_Node_t* - The Node instance.
            +
            Returns
            +
            int|null int - The node id.
            +
            + +

            rd_kafka_Node_host()

            +
            public static rd_kafka_Node_host ( 
            +    \FFI\CData|null $node
            + ): string|null
            +
            +

            Get the host of node.

            + +
            Remarks
            The lifetime of the returned memory is the same as the lifetime of the node object.
            + +
            +
            Parameters
            +
node \FFI\CData|null const rd_kafka_Node_t* - The Node instance.
            +
            Returns
            +
            string|null const char* - The node host.
            +
            + +

            rd_kafka_Node_port()

            +
            public static rd_kafka_Node_port ( 
            +    \FFI\CData|null $node
            + ): int|null
            +
            +

            Get the port of node.

            + +
            +
            Parameters
            +
node \FFI\CData|null const rd_kafka_Node_t* - The Node instance.
            +
            Returns
            +
            int|null uint16_t - The node port.
            +
            + +

            rd_kafka_consumer_group_state_name()

            +
            public static rd_kafka_consumer_group_state_name ( 
            +    int $state
            + ): string|null
            +
            +

            Returns a name for a state code.

            + +
            +
            Parameters
            +
state int rd_kafka_consumer_group_state_t - The state value.
            +
            Returns
            +
            string|null const char* - The group state name corresponding to the provided group state value.
            +
            + +

            rd_kafka_consumer_group_state_code()

            +
            public static rd_kafka_consumer_group_state_code ( 
            +    string|null $name
            + ): int
            +
            +

            Returns a code for a state name.

            + +
            +
            Parameters
            +
name string|null const char* - The state name.
            +
            Returns
            +
            int rd_kafka_consumer_group_state_t - The group state value corresponding to the provided group state name.
            +
            + +
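
The two helpers are inverses of each other, which is handy when accepting state filters from configuration. Minimal sketch; `Library` is a stand-in for the binding class, and RD_KAFKA_CONSUMER_GROUP_STATE_STABLE is the librdkafka enum constant, assumed to be exposed by the binding.

```php
// "Library" is a stand-in for the binding class; the constant is assumed defined.
$name = Library::rd_kafka_consumer_group_state_name(RD_KAFKA_CONSUMER_GROUP_STATE_STABLE);
// $name === 'Stable'

$code = Library::rd_kafka_consumer_group_state_code('Stable');
// $code === RD_KAFKA_CONSUMER_GROUP_STATE_STABLE
```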

            rd_kafka_event_ListConsumerGroups_result()

            +
            public static rd_kafka_event_ListConsumerGroups_result ( 
            +    \FFI\CData|null $rkev
            + ): \FFI\CData|null
            +
            +

            Get ListConsumerGroups result.

            + +
            Remarks
            The lifetime of the returned memory is the same as the lifetime of the rkev object.
            +

            Event types: RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT

            + +
            +
            Parameters
            +
            rkev \FFI\CData|null rd_kafka_event_t* - )
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_ListConsumerGroups_result_t* - the result of a ListConsumerGroups request, or NULL if event is of different type.
            +
            + +

            rd_kafka_event_DescribeConsumerGroups_result()

            +
            public static rd_kafka_event_DescribeConsumerGroups_result ( 
            +    \FFI\CData|null $rkev
            + ): \FFI\CData|null
            +
            +

            Get DescribeConsumerGroups result.

            + +
            Remarks
            The lifetime of the returned memory is the same as the lifetime of the rkev object.
            +

            Event types: RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT

            + +
            +
            Parameters
            +
            rkev \FFI\CData|null rd_kafka_event_t* - )
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_DescribeConsumerGroups_result_t* - the result of a DescribeConsumerGroups request, or NULL if event is of different type.
            +
            + +

            rd_kafka_event_AlterConsumerGroupOffsets_result()

            +
            public static rd_kafka_event_AlterConsumerGroupOffsets_result ( 
            +    \FFI\CData|null $rkev
            + ): \FFI\CData|null
            +
            +

            Get AlterConsumerGroupOffsets result.

            + +
            Remarks
            The lifetime of the returned memory is the same as the lifetime of the rkev object.
            +

            Event types: RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT

            + +
            +
            Parameters
            +
            rkev \FFI\CData|null rd_kafka_event_t* - )
            +
            Returns
            +
\FFI\CData|null const rd_kafka_AlterConsumerGroupOffsets_result_t* - the result of an AlterConsumerGroupOffsets request, or NULL if event is of different type.
            +
            + +

            rd_kafka_event_ListConsumerGroupOffsets_result()

            +
            public static rd_kafka_event_ListConsumerGroupOffsets_result ( 
            +    \FFI\CData|null $rkev
            + ): \FFI\CData|null
            +
            +

            Get ListConsumerGroupOffsets result.

            + +
            Remarks
            The lifetime of the returned memory is the same as the lifetime of the rkev object.
            +

            Event types: RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT

            + +
            +
            Parameters
            +
            rkev \FFI\CData|null rd_kafka_event_t* - )
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_ListConsumerGroupOffsets_result_t* - the result of a ListConsumerGroupOffsets request, or NULL if event is of different type.
            +
            + +

            rd_kafka_interceptor_f_on_broker_state_change_t()

            +
            public static rd_kafka_interceptor_f_on_broker_state_change_t ( 
            +    \FFI\CData|null $rk, 
            +    int|null $broker_id, 
            +    string|null $secproto, 
            +    string|null $name, 
            +    int|null $port, 
            +    string|null $state, 
            +    \FFI\CData|object|string|null $ic_opaque
            + ): int
            +
            +

            on_broker_state_change() is called just after a broker has been created or its state has been changed.

            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - The client instance.
            +
            broker_id int|null int32_t - The broker id (-1 is used for bootstrap brokers).
            +
            secproto string|null const char* - The security protocol.
            +
            name string|null const char* - The original name of the broker.
            +
            port int|null int - The port of the broker.
            +
            state string|null const char* - Broker state name.
            +
            ic_opaque \FFI\CData|object|string|null void* - The interceptor’s opaque pointer specified in ..add..().
            +
            Returns
            +
            int rd_kafka_resp_err_t - an error code on failure, the error is logged but otherwise ignored.
            +
            + +

            rd_kafka_interceptor_add_on_broker_state_change()

            +
            public static rd_kafka_interceptor_add_on_broker_state_change ( 
            +    \FFI\CData|null $rk, 
            +    string|null $ic_name, 
            +    \FFI\CData|\Closure $on_broker_state_change, 
            +    \FFI\CData|object|string|null $ic_opaque
            + ): int
            +
            +

            Append an on_broker_state_change() interceptor.

            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - Client instance.
            +
            ic_name string|null const char* - Interceptor name, used in logging.
            +
on_broker_state_change \FFI\CData|\Closure rd_kafka_interceptor_f_on_broker_state_change_t, i.e. rd_kafka_resp_err_t (*)(rd_kafka_t*, int32_t, const char*, const char*, int, const char*, void*)
            +
            ic_opaque \FFI\CData|object|string|null void* - Opaque value that will be passed to the function.
            +
            Returns
            +
            int rd_kafka_resp_err_t - RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT if an existing interceptor with the same ic_name and function has already been added to conf.
            +
            + +
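
A sketch of a logging interceptor follows. The binding accepts a \Closure for the callback; pointer arguments (the client handle and the C strings) arrive as \FFI\CData, so the C strings are converted explicitly. `$rk`, the `Library` class name and the RD_KAFKA_RESP_ERR_NO_ERROR constant are assumptions about the binding.

```php
// Sketch only: $rk, "Library" and RD_KAFKA_RESP_ERR_NO_ERROR are assumptions.
$err = Library::rd_kafka_interceptor_add_on_broker_state_change(
    $rk,
    'state-logger',
    function ($rk, $brokerId, $secproto, $name, $port, $state, $icOpaque): int {
        // Pointer arguments arrive as \FFI\CData; convert the C strings explicitly.
        error_log(sprintf(
            'broker %d (%s:%d) -> %s',
            $brokerId,
            \FFI::string($name),
            $port,
            \FFI::string($state)
        ));
        return RD_KAFKA_RESP_ERR_NO_ERROR;
    },
    null
);
```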

            rd_kafka_AdminOptions_set_require_stable_offsets()

            +
            public static rd_kafka_AdminOptions_set_require_stable_offsets ( 
            +    \FFI\CData|null $options, 
            +    int|null $true_or_false
            + ): \FFI\CData|null
            +
            +

            Whether broker should return stable offsets (transaction-committed).

            + +
            Remarks
            This option is valid for ListConsumerGroupOffsets.
            + +
            +
            Parameters
            +
            options \FFI\CData|null rd_kafka_AdminOptions_t* - Admin options.
            +
            true_or_false int|null int - Defaults to false.
            +
            Returns
            +
            \FFI\CData|null rd_kafka_error_t* - NULL on success, a new error instance that must be released with rd_kafka_error_destroy() in case of error.
            +
            + +

            rd_kafka_AdminOptions_set_match_consumer_group_states()

            +
            public static rd_kafka_AdminOptions_set_match_consumer_group_states ( 
            +    \FFI\CData|null $options, 
            +    \FFI\CData|null $consumer_group_states, 
            +    int|null $consumer_group_states_cnt
            + ): \FFI\CData|null
            +
            +

Set the consumer group states to query for.

            + +
            Remarks
            This option is valid for ListConsumerGroups.
            + +
            +
            Parameters
            +
            options \FFI\CData|null rd_kafka_AdminOptions_t* - Admin options.
            +
            consumer_group_states \FFI\CData|null const rd_kafka_consumer_group_state_t* - Array of consumer group states.
            +
            consumer_group_states_cnt int|null size_t - Size of the consumer_group_states array.
            +
            Returns
            +
            \FFI\CData|null rd_kafka_error_t* - NULL on success, a new error instance that must be released with rd_kafka_error_destroy() in case of error.
            +
            + +

            rd_kafka_ListConsumerGroups()

            +
            public static rd_kafka_ListConsumerGroups ( 
            +    \FFI\CData|null $rk, 
            +    \FFI\CData|null $options, 
            +    \FFI\CData|null $rkqu
            + ): void
            +
            +

            List the consumer groups available in the cluster.

            + +
            Remarks
            The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - Client instance.
            +
            options \FFI\CData|null const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults.
            +
            rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to emit result on.
            +
            + +

            rd_kafka_ConsumerGroupListing_group_id()

            +
            public static rd_kafka_ConsumerGroupListing_group_id ( 
            +    \FFI\CData|null $grplist
            + ): string|null
            +
            +

            Gets the group id for the grplist group.

            + +
            Remarks
            The lifetime of the returned memory is the same as the lifetime of the grplist object.
            + +
            +
            Parameters
            +
grplist \FFI\CData|null const rd_kafka_ConsumerGroupListing_t* - The group listing.
            +
            Returns
            +
            string|null const char* - The group id.
            +
            + +

            rd_kafka_ConsumerGroupListing_is_simple_consumer_group()

            +
            public static rd_kafka_ConsumerGroupListing_is_simple_consumer_group ( 
            +    \FFI\CData|null $grplist
            + ): int|null
            +
            +

            Is the grplist group a simple consumer group.

            + +
            +
            Parameters
            +
grplist \FFI\CData|null const rd_kafka_ConsumerGroupListing_t* - The group listing.
            +
            Returns
            +
            int|null int - 1 if the group is a simple consumer group, else 0.
            +
            + +

            rd_kafka_ConsumerGroupListing_state()

            +
            public static rd_kafka_ConsumerGroupListing_state ( 
            +    \FFI\CData|null $grplist
            + ): int
            +
            +

            Gets state for the grplist group.

            + +
            +
            Parameters
            +
grplist \FFI\CData|null const rd_kafka_ConsumerGroupListing_t* - The group listing.
            +
            Returns
            +
            int rd_kafka_consumer_group_state_t - A group state.
            +
            + +

            rd_kafka_ListConsumerGroups_result_valid()

            +
            public static rd_kafka_ListConsumerGroups_result_valid ( 
            +    \FFI\CData|null $result, 
            +    \FFI\CData|null $cntp
            + ): \FFI\CData|null
            +
            +

            Get an array of valid list groups from a ListConsumerGroups result.

            +

            The returned groups life-time is the same as the result object.

            + +
            Remarks
            The lifetime of the returned memory is the same as the lifetime of the result object.
            + +
            +
            Parameters
            +
            result \FFI\CData|null const rd_kafka_ListConsumerGroups_result_t* - Result to get group results from.
            +
            cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_ConsumerGroupListing_t**
            +
            + +

            rd_kafka_ListConsumerGroups_result_errors()

            +
            public static rd_kafka_ListConsumerGroups_result_errors ( 
            +    \FFI\CData|null $result, 
            +    \FFI\CData|null $cntp
            + ): \FFI\CData|null
            +
            +

            Get an array of errors from a ListConsumerGroups call result.

            +

            The returned errors life-time is the same as the result object.

            + +
            Remarks
            The lifetime of the returned memory is the same as the lifetime of the result object.
            + +
            +
            Parameters
            +
            result \FFI\CData|null const rd_kafka_ListConsumerGroups_result_t* - ListConsumerGroups result.
            +
            cntp \FFI\CData|null size_t* - Is updated to the number of elements in the array.
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_error_t** - Array of errors in result.
            +
            + +
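
Combining the request with the result accessors above, listing all groups in the cluster looks roughly like this sketch. `$rk` (an existing rd_kafka_t* handle), the `Library` class name and the queue/event helpers (rd_kafka_queue_new(), rd_kafka_queue_poll(), rd_kafka_event_destroy(), rd_kafka_queue_destroy()) are assumptions about the binding.

```php
// Sketch only: $rk, "Library" and the queue/event helpers are assumptions.
$queue = Library::rd_kafka_queue_new($rk);
Library::rd_kafka_ListConsumerGroups($rk, null, $queue);

$event  = Library::rd_kafka_queue_poll($queue, 10000);                 // null on timeout
$result = Library::rd_kafka_event_ListConsumerGroups_result($event);   // see above

$cnt = \FFI::new('size_t');
$groups = Library::rd_kafka_ListConsumerGroups_result_valid($result, \FFI::addr($cnt));
for ($i = 0; $i < $cnt->cdata; $i++) {
    printf(
        "%s (%s)\n",
        Library::rd_kafka_ConsumerGroupListing_group_id($groups[$i]),
        Library::rd_kafka_consumer_group_state_name(
            Library::rd_kafka_ConsumerGroupListing_state($groups[$i])
        )
    );
}

Library::rd_kafka_event_destroy($event);   // also releases the result and its listings
Library::rd_kafka_queue_destroy($queue);
```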

            rd_kafka_DescribeConsumerGroups()

            +
            public static rd_kafka_DescribeConsumerGroups ( 
            +    \FFI\CData|null $rk, 
            +    \FFI\CData|null $groups, 
            +    int|null $groups_cnt, 
            +    \FFI\CData|null $options, 
            +    \FFI\CData|null $rkqu
            + ): void
            +
            +

Describe groups from the cluster as specified by the groups array of size groups_cnt elements.

            + +
            Remarks
            The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - Client instance.
            +
            groups \FFI\CData|null const char** - Array of groups to describe.
            +
            groups_cnt int|null size_t - Number of elements in groups array.
            +
            options \FFI\CData|null const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults.
            +
            rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to emit result on.
            +
            + +

            rd_kafka_DescribeConsumerGroups_result_groups()

            +
            public static rd_kafka_DescribeConsumerGroups_result_groups ( 
            +    \FFI\CData|null $result, 
            +    \FFI\CData|null $cntp
            + ): \FFI\CData|null
            +
            +

            Get an array of group results from a DescribeConsumerGroups result.

            +

            The returned groups life-time is the same as the result object.

            + +
            Remarks
            The lifetime of the returned memory is the same as the lifetime of the result object.
            + +
            +
            Parameters
            +
            result \FFI\CData|null const rd_kafka_DescribeConsumerGroups_result_t* - Result to get group results from.
            +
            cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_ConsumerGroupDescription_t**
            +
            + +

            rd_kafka_ConsumerGroupDescription_group_id()

            +
            public static rd_kafka_ConsumerGroupDescription_group_id ( 
            +    \FFI\CData|null $grpdesc
            + ): string|null
            +
            +

            Gets the group id for the grpdesc group.

            + +
            Remarks
            The lifetime of the returned memory is the same as the lifetime of the grpdesc object.
            + +
            +
            Parameters
            +
grpdesc \FFI\CData|null const rd_kafka_ConsumerGroupDescription_t* - The group description.
            +
            Returns
            +
            string|null const char* - The group id.
            +
            + +

            rd_kafka_ConsumerGroupDescription_error()

            +
            public static rd_kafka_ConsumerGroupDescription_error ( 
            +    \FFI\CData|null $grpdesc
            + ): \FFI\CData|null
            +
            +

            Gets the error for the grpdesc group.

            + +
            Remarks
            The lifetime of the returned memory is the same as the lifetime of the grpdesc object.
            + +
            +
            Parameters
            +
grpdesc \FFI\CData|null const rd_kafka_ConsumerGroupDescription_t* - The group description.
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_error_t* - The group description error.
            +
            + +

            rd_kafka_ConsumerGroupDescription_is_simple_consumer_group()

            +
            public static rd_kafka_ConsumerGroupDescription_is_simple_consumer_group ( 
            +    \FFI\CData|null $grpdesc
            + ): int|null
            +
            +

            Is the grpdesc group a simple consumer group.

            + +
            +
            Parameters
            +
grpdesc \FFI\CData|null const rd_kafka_ConsumerGroupDescription_t* - The group description.
            +
            Returns
            +
            int|null int - 1 if the group is a simple consumer group, else 0.
            +
            + +

            rd_kafka_ConsumerGroupDescription_partition_assignor()

            +
            public static rd_kafka_ConsumerGroupDescription_partition_assignor ( 
            +    \FFI\CData|null $grpdesc
            + ): string|null
            +
            +

            Gets the partition assignor for the grpdesc group.

            + +
            Remarks
            The lifetime of the returned memory is the same as the lifetime of the grpdesc object.
            + +
            +
            Parameters
            +
grpdesc \FFI\CData|null const rd_kafka_ConsumerGroupDescription_t* - The group description.
            +
            Returns
            +
            string|null const char* - The partition assignor.
            +
            + +

            rd_kafka_ConsumerGroupDescription_state()

            +
            public static rd_kafka_ConsumerGroupDescription_state ( 
            +    \FFI\CData|null $grpdesc
            + ): int
            +
            +

            Gets state for the grpdesc group.

            + +
            +
            Parameters
            +
grpdesc \FFI\CData|null const rd_kafka_ConsumerGroupDescription_t* - The group description.
            +
            Returns
            +
            int rd_kafka_consumer_group_state_t - A group state.
            +
            + +

            rd_kafka_ConsumerGroupDescription_coordinator()

            +
            public static rd_kafka_ConsumerGroupDescription_coordinator ( 
            +    \FFI\CData|null $grpdesc
            + ): \FFI\CData|null
            +
            +

            Gets the coordinator for the grpdesc group.

            + +
            Remarks
            The lifetime of the returned memory is the same as the lifetime of the grpdesc object.
            + +
            +
            Parameters
            +
grpdesc \FFI\CData|null const rd_kafka_ConsumerGroupDescription_t* - The group description.
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_Node_t* - The group coordinator.
            +
            + +
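
The returned coordinator can be inspected with the rd_kafka_Node_*() accessors documented earlier. Minimal sketch, assuming `$grpdesc` is a group description taken from a DescribeConsumerGroups result and `Library` stands in for the binding class.

```php
// Sketch only: $grpdesc and "Library" are assumptions about the calling code.
$node = Library::rd_kafka_ConsumerGroupDescription_coordinator($grpdesc);
printf(
    "coordinator: broker %d at %s:%d\n",
    Library::rd_kafka_Node_id($node),
    Library::rd_kafka_Node_host($node),
    Library::rd_kafka_Node_port($node)
);
```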

            rd_kafka_ConsumerGroupDescription_member_count()

            +
            public static rd_kafka_ConsumerGroupDescription_member_count ( 
            +    \FFI\CData|null $grpdesc
            + ): int|null
            +
            +

Gets the member count of the grpdesc group.

            + +
            +
            Parameters
            +
grpdesc \FFI\CData|null const rd_kafka_ConsumerGroupDescription_t* - The group description.
            +
            Returns
            +
            int|null size_t - The member count.
            +
            + +

            rd_kafka_ConsumerGroupDescription_member()

            +
            public static rd_kafka_ConsumerGroupDescription_member ( 
            +    \FFI\CData|null $grpdesc, 
            +    int|null $idx
            + ): \FFI\CData|null
            +
            +

Gets a member of the grpdesc group.

            + +
            Remarks
            The lifetime of the returned memory is the same as the lifetime of the grpdesc object.
            + +
            +
            Parameters
            +
            grpdesc \FFI\CData|null const rd_kafka_ConsumerGroupDescription_t* - The group description.
            +
            idx int|null size_t - The member idx.
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_MemberDescription_t* - A member at index idx, or NULL if idx is out of range.
            +
            + +

            rd_kafka_MemberDescription_client_id()

            +
            public static rd_kafka_MemberDescription_client_id ( 
            +    \FFI\CData|null $member
            + ): string|null
            +
            +

            Gets client id of member.

            + +
            Remarks
            The lifetime of the returned memory is the same as the lifetime of the member object.
            + +
            +
            Parameters
            +
member \FFI\CData|null const rd_kafka_MemberDescription_t* - The group member.
            +
            Returns
            +
            string|null const char* - The client id.
            +
            + +

            rd_kafka_MemberDescription_group_instance_id()

            +
            public static rd_kafka_MemberDescription_group_instance_id ( 
            +    \FFI\CData|null $member
            + ): string|null
            +
            +

            Gets group instance id of member.

            + +
            Remarks
            The lifetime of the returned memory is the same as the lifetime of the member object.
            + +
            +
            Parameters
            +
member \FFI\CData|null const rd_kafka_MemberDescription_t* - The group member.
            +
            Returns
            +
            string|null const char* - The group instance id, or NULL if not available.
            +
            + +

            rd_kafka_MemberDescription_consumer_id()

            +
            public static rd_kafka_MemberDescription_consumer_id ( 
            +    \FFI\CData|null $member
            + ): string|null
            +
            +

            Gets consumer id of member.

            + +
            Remarks
            The lifetime of the returned memory is the same as the lifetime of the member object.
            + +
            +
            Parameters
            +
member \FFI\CData|null const rd_kafka_MemberDescription_t* - The group member.
            +
            Returns
            +
            string|null const char* - The consumer id.
            +
            + +

            rd_kafka_MemberDescription_host()

            +
            public static rd_kafka_MemberDescription_host ( 
            +    \FFI\CData|null $member
            + ): string|null
            +
            +

            Gets host of member.

            + +
            Remarks
            The lifetime of the returned memory is the same as the lifetime of the member object.
            + +
            +
            Parameters
            +
member \FFI\CData|null const rd_kafka_MemberDescription_t* - The group member.
            +
            Returns
            +
            string|null const char* - The host.
            +
            + +

            rd_kafka_MemberDescription_assignment()

            +
            public static rd_kafka_MemberDescription_assignment ( 
            +    \FFI\CData|null $member
            + ): \FFI\CData|null
            +
            +

            Gets assignment of member.

            + +
            Remarks
            The lifetime of the returned memory is the same as the lifetime of the member object.
            + +
            +
            Parameters
            +
member \FFI\CData|null const rd_kafka_MemberDescription_t* - The group member.
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_MemberAssignment_t* - The member assignment.
            +
            + +

            rd_kafka_MemberAssignment_partitions()

            +
            public static rd_kafka_MemberAssignment_partitions ( 
            +    \FFI\CData|null $assignment
            + ): \FFI\CData|null
            +
            +

            Gets assigned partitions of a member assignment.

            + +
            Remarks
            The lifetime of the returned memory is the same as the lifetime of the assignment object.
            + +
            +
            Parameters
            +
assignment \FFI\CData|null const rd_kafka_MemberAssignment_t* - The group member assignment.
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_topic_partition_list_t* - The assigned partitions.
            +
            + +
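
Together, the member accessors allow walking a group description member by member. Minimal sketch; `$grpdesc` (a rd_kafka_ConsumerGroupDescription_t* taken from a DescribeConsumerGroups result) and the `Library` class name are assumptions, and the ->cnt access relies on plain FFI struct field access on rd_kafka_topic_partition_list_t.

```php
// Sketch only: $grpdesc and "Library" are assumptions about the calling code.
$memberCount = Library::rd_kafka_ConsumerGroupDescription_member_count($grpdesc);
for ($i = 0; $i < $memberCount; $i++) {
    $member     = Library::rd_kafka_ConsumerGroupDescription_member($grpdesc, $i);
    $assignment = Library::rd_kafka_MemberDescription_assignment($member);
    $partitions = Library::rd_kafka_MemberAssignment_partitions($assignment);

    printf(
        "%s (client %s) on %s: %d assigned partition(s)\n",
        Library::rd_kafka_MemberDescription_consumer_id($member),
        Library::rd_kafka_MemberDescription_client_id($member),
        Library::rd_kafka_MemberDescription_host($member),
        $partitions->cnt   // struct field of rd_kafka_topic_partition_list_t
    );
}
```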

            rd_kafka_ListConsumerGroupOffsets_new()

            +
            public static rd_kafka_ListConsumerGroupOffsets_new ( 
            +    string|null $group_id, 
            +    \FFI\CData|null $partitions
            + ): \FFI\CData|null
            +
            +

            Create a new ListConsumerGroupOffsets object. This object is later passed to rd_kafka_ListConsumerGroupOffsets().

            + +
            +
            Parameters
            +
            group_id string|null const char* - Consumer group id.
            +
            partitions \FFI\CData|null const rd_kafka_topic_partition_list_t* - Partitions to list committed offsets for. Only the topic and partition fields are used.
            +
            Returns
            +
\FFI\CData|null rd_kafka_ListConsumerGroupOffsets_t* - a newly allocated ListConsumerGroupOffsets object. Use rd_kafka_ListConsumerGroupOffsets_destroy() to free the object when done.
            +
            + +

            rd_kafka_ListConsumerGroupOffsets_destroy()

            +
            public static rd_kafka_ListConsumerGroupOffsets_destroy ( 
            +    \FFI\CData|null $list_grpoffsets
            + ): void
            +
            +
            +
            Parameters
            +
            list_grpoffsets \FFI\CData|null rd_kafka_ListConsumerGroupOffsets_t*
            +
            +

            rd_kafka_ListConsumerGroupOffsets_destroy_array()

            +
            public static rd_kafka_ListConsumerGroupOffsets_destroy_array ( 
            +    \FFI\CData|null $list_grpoffsets, 
            +    int|null $list_grpoffset_cnt
            + ): void
            +
            +
            +
            Parameters
            +
            list_grpoffsets \FFI\CData|null rd_kafka_ListConsumerGroupOffsets_t**
            +
            list_grpoffset_cnt int|null size_t
            +
            +

            rd_kafka_ListConsumerGroupOffsets()

            +
            public static rd_kafka_ListConsumerGroupOffsets ( 
            +    \FFI\CData|null $rk, 
            +    \FFI\CData|null $list_grpoffsets, 
            +    int|null $list_grpoffsets_cnt, 
            +    \FFI\CData|null $options, 
            +    \FFI\CData|null $rkqu
            + ): void
            +
            +

            List committed offsets for a set of partitions in a consumer group.

            + +
            Remarks
            The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT
            +
            +The current implementation only supports one group per invocation.
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - Client instance.
            +
            list_grpoffsets \FFI\CData|null rd_kafka_ListConsumerGroupOffsets_t** - Array of group committed offsets to list. MUST only be one single element.
            +
            list_grpoffsets_cnt int|null size_t - Number of elements in list_grpoffsets array. MUST always be 1.
            +
            options \FFI\CData|null const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults.
            +
            rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to emit result on.
            +
            + +

            rd_kafka_ListConsumerGroupOffsets_result_groups()

            +
            public static rd_kafka_ListConsumerGroupOffsets_result_groups ( 
            +    \FFI\CData|null $result, 
            +    \FFI\CData|null $cntp
            + ): \FFI\CData|null
            +
            +

            Get an array of results from a ListConsumerGroupOffsets result.

            +

            The returned groups life-time is the same as the result object.

            + +
            Remarks
            The lifetime of the returned memory is the same as the lifetime of the result object.
            + +
            +
            Parameters
            +
            result \FFI\CData|null const rd_kafka_ListConsumerGroupOffsets_result_t* - Result to get group results from.
            +
            cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_group_result_t**
            +
            + +
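
End to end, listing the committed offsets of one group for one partition looks roughly like the sketch below. Besides `$rk` and the `Library` class name, it assumes the rd_kafka_topic_partition_list_*() helpers, the queue/event helpers and rd_kafka_group_result_partitions() are bound the same way, that `$ffi` gives access to the underlying \FFI instance for allocating the one-element C array, and that 'example-group' and 'purchases' are placeholders.

```php
// Sketch only: $rk, $ffi, "Library" and the extra librdkafka helpers are assumptions.
$parts = Library::rd_kafka_topic_partition_list_new(1);
Library::rd_kafka_topic_partition_list_add($parts, 'purchases', 0); // only topic/partition are used

$request = Library::rd_kafka_ListConsumerGroupOffsets_new('example-group', $parts);

$requests = $ffi->new('rd_kafka_ListConsumerGroupOffsets_t*[1]');   // binding-specific array allocation
$requests[0] = $request;

$queue = Library::rd_kafka_queue_new($rk);
Library::rd_kafka_ListConsumerGroupOffsets($rk, $requests, 1, null, $queue);

$event  = Library::rd_kafka_queue_poll($queue, 10000);
$result = Library::rd_kafka_event_ListConsumerGroupOffsets_result($event);

$cnt = \FFI::new('size_t');
$groups = Library::rd_kafka_ListConsumerGroupOffsets_result_groups($result, \FFI::addr($cnt));
// rd_kafka_group_result_partitions() (assumed bound) yields the committed offsets list.
$offsets = Library::rd_kafka_group_result_partitions($groups[0]);

Library::rd_kafka_ListConsumerGroupOffsets_destroy($request);
Library::rd_kafka_topic_partition_list_destroy($parts);
Library::rd_kafka_event_destroy($event);
Library::rd_kafka_queue_destroy($queue);
```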

            rd_kafka_AlterConsumerGroupOffsets_new()

            +
            public static rd_kafka_AlterConsumerGroupOffsets_new ( 
            +    string|null $group_id, 
            +    \FFI\CData|null $partitions
            + ): \FFI\CData|null
            +
            +

            Create a new AlterConsumerGroupOffsets object. This object is later passed to rd_kafka_AlterConsumerGroupOffsets().

            + +
            +
            Parameters
            +
            group_id string|null const char* - Consumer group id.
            +
            partitions \FFI\CData|null const rd_kafka_topic_partition_list_t* - Partitions to alter committed offsets for. Only the topic and partition fields are used.
            +
            Returns
            +
\FFI\CData|null rd_kafka_AlterConsumerGroupOffsets_t* - a newly allocated AlterConsumerGroupOffsets object. Use rd_kafka_AlterConsumerGroupOffsets_destroy() to free the object when done.
            +
            + +

            rd_kafka_AlterConsumerGroupOffsets_destroy()

            +
            public static rd_kafka_AlterConsumerGroupOffsets_destroy ( 
            +    \FFI\CData|null $alter_grpoffsets
            + ): void
            +
            +
            +
            Parameters
            +
            alter_grpoffsets \FFI\CData|null rd_kafka_AlterConsumerGroupOffsets_t*
            +
            +

            rd_kafka_AlterConsumerGroupOffsets_destroy_array()

            +
            public static rd_kafka_AlterConsumerGroupOffsets_destroy_array ( 
            +    \FFI\CData|null $alter_grpoffsets, 
            +    int|null $alter_grpoffset_cnt
            + ): void
            +
            +
            +
            Parameters
            +
            alter_grpoffsets \FFI\CData|null rd_kafka_AlterConsumerGroupOffsets_t**
            +
            alter_grpoffset_cnt int|null size_t
            +
            +

            rd_kafka_AlterConsumerGroupOffsets()

            +
            public static rd_kafka_AlterConsumerGroupOffsets ( 
            +    \FFI\CData|null $rk, 
            +    \FFI\CData|null $alter_grpoffsets, 
            +    int|null $alter_grpoffsets_cnt, 
            +    \FFI\CData|null $options, 
            +    \FFI\CData|null $rkqu
            + ): void
            +
            +

            Alter committed offsets for a set of partitions in a consumer group. This will succeed at the partition level only if the group is not actively subscribed to the corresponding topic.

            + +
            Remarks
            The result event type emitted on the supplied queue is of type RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT
            +
            +The current implementation only supports one group per invocation.
            + +
            +
            Parameters
            +
            rk \FFI\CData|null rd_kafka_t* - Client instance.
            +
            alter_grpoffsets \FFI\CData|null rd_kafka_AlterConsumerGroupOffsets_t** - Array of group committed offsets to alter. MUST only be one single element.
            +
            alter_grpoffsets_cnt int|null size_t - Number of elements in alter_grpoffsets array. MUST always be 1.
            +
            options \FFI\CData|null const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults.
            +
            rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to emit result on.
            +
            + +

            rd_kafka_AlterConsumerGroupOffsets_result_groups()

            +
            public static rd_kafka_AlterConsumerGroupOffsets_result_groups ( 
            +    \FFI\CData|null $result, 
            +    \FFI\CData|null $cntp
            + ): \FFI\CData|null
            +
            +

Get an array of results from an AlterConsumerGroupOffsets result.

            +

            The returned groups life-time is the same as the result object.

            + +
            Remarks
            The lifetime of the returned memory is the same as the lifetime of the result object.
            + +
            +
            Parameters
            +
            result \FFI\CData|null const rd_kafka_AlterConsumerGroupOffsets_result_t* - Result to get group results from.
            +
            cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
            +
            Returns
            +
            \FFI\CData|null const rd_kafka_group_result_t**
            +
            + +

            rd_kafka_mock_broker_error_stack_cnt()

            +
            public static rd_kafka_mock_broker_error_stack_cnt ( 
            +    \FFI\CData|null $mcluster, 
            +    int|null $broker_id, 
            +    int|null $ApiKey, 
            +    \FFI\CData|null $cntp
            + ): int
            +
            +
            +
            Parameters
            +
            mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
            +
            broker_id int|null int32_t
            +
            ApiKey int|null int16_t
            +
            cntp \FFI\CData|null size_t*
            +
            Returns
            +
            int rd_kafka_resp_err_t
            +
            +

            rd_kafka_topic_partition_set_leader_epoch()

            +
            public static rd_kafka_topic_partition_set_leader_epoch ( 
            +    \FFI\CData|null $rktpar, 
            +    int|null $leader_epoch
            + ): void
            +
            +

            Sets the offset leader epoch (use -1 to clear).

            + +
            Remarks
            See KIP-320 for more information.
            + +
            +
            Parameters
            +
            rktpar \FFI\CData|null rd_kafka_topic_partition_t* - Partition object.
            +
            leader_epoch int|null int32_t - Offset leader epoch, use -1 to reset.
            +
            + +

            rd_kafka_topic_partition_get_leader_epoch()

            +
            public static rd_kafka_topic_partition_get_leader_epoch ( 
            +    \FFI\CData|null $rktpar
            + ): int|null
            +
            +
            Remarks
            See KIP-320 for more information.
            + +
            +
            Parameters
            +
rktpar \FFI\CData|null const rd_kafka_topic_partition_t* - Partition object.
            +
            Returns
            +
            int|null int32_t - the offset leader epoch, if relevant and known, else -1.
            +
            + +
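
Minimal sketch of setting and reading back a leader epoch on a partition entry. `Library` is a stand-in for the binding class, the rd_kafka_topic_partition_list_*() helpers are further librdkafka functions assumed to be bound the same way, and the topic name is a placeholder.

```php
// Sketch only: "Library" and the list helpers are assumptions about the binding.
$list = Library::rd_kafka_topic_partition_list_new(1);
// rd_kafka_topic_partition_list_add() returns the added rd_kafka_topic_partition_t*.
$tp = Library::rd_kafka_topic_partition_list_add($list, 'purchases', 0);

Library::rd_kafka_topic_partition_set_leader_epoch($tp, 5);
var_dump(Library::rd_kafka_topic_partition_get_leader_epoch($tp)); // int(5); -1 when unknown/cleared

Library::rd_kafka_topic_partition_list_destroy($list);
```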

            rd_kafka_message_leader_epoch()

            +
            public static rd_kafka_message_leader_epoch ( 
            +    \FFI\CData|null $rkmessage
            + ): int|null
            +
            +
            Remarks
            This API must only be used on consumed messages without error.
            +
+Requires broker version >= 2.1.0 (KIP-320).
            + +
            +
            Parameters
            +
            rkmessage \FFI\CData|null const rd_kafka_message_t* - )
            +
            Returns
            +
int|null int32_t - the message's partition leader epoch at the time the message was fetched, if known, else -1.
            +
            + +

            rd_kafka_offset_store_message()

            +
            public static rd_kafka_offset_store_message ( 
            +    \FFI\CData|null $rkmessage
            + ): \FFI\CData|null
            +
            +

            Store offset +1 for the consumed message.

            +

The message offset + 1 will be committed to the broker according to auto.commit.interval.ms or by a manual offset-less commit().

            +
            Warning
            This method may only be called for partitions that are currently assigned. Non-assigned partitions will fail with RD_KAFKA_RESP_ERR__STATE. Since v1.9.0.
            +
+Avoid storing offsets after calling rd_kafka_seek() (et al.), as this may later interfere with resuming a paused partition; instead, store offsets prior to calling seek.
            +
            Remarks
            enable.auto.offset.store must be set to "false" when using this API.
            + +
            +
            Parameters
            +
rkmessage \FFI\CData|null rd_kafka_message_t*
            +
            Returns
            +
            \FFI\CData|null rd_kafka_error_t* - NULL on success or an error object on failure.
            +
            + +
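A hedged sketch of manual offset storage. It assumes $rkmessage is the native rd_kafka_message_t* of a successfully consumed message from an assigned partition, and that the consumer was configured with enable.auto.offset.store=false; rd_kafka_error_string()/rd_kafka_error_destroy() are standard librdkafka error helpers exposed by the same binding.

```php
use RdKafka\FFI\Library;

// $rkmessage: native handle of a consumed, error-free message (assumption, see above).
$error = Library::rd_kafka_offset_store_message($rkmessage);
if ($error !== null) {
    $reason = Library::rd_kafka_error_string($error);
    Library::rd_kafka_error_destroy($error);
    throw new \RuntimeException('Storing the offset failed: ' . $reason);
}
// The stored offset (message offset + 1) is committed later by the auto-committer
// or by an offset-less commit().
```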

            rd_kafka_event_IncrementalAlterConfigs_result()

            +
            public static rd_kafka_event_IncrementalAlterConfigs_result ( 
            +    \FFI\CData|null $rkev
            + ): \FFI\CData|null
            +
            +

            Get IncrementalAlterConfigs result.

            + +

            Event types: RD_KAFKA_EVENT_INCREMENTALALTERCONFIGS_RESULT

            + +
            +
            Parameters
            +
rkev \FFI\CData|null rd_kafka_event_t*
            +
            Returns
            +
\FFI\CData|null const rd_kafka_IncrementalAlterConfigs_result_t* - the result of an IncrementalAlterConfigs request, or NULL if the event is of a different type.
            +
            + +

            rd_kafka_event_DescribeUserScramCredentials_result()

            +
            public static rd_kafka_event_DescribeUserScramCredentials_result ( 
            +    \FFI\CData|null $rkev
            + ): \FFI\CData|null
            +
            +

            Get DescribeUserScramCredentials result.

            + +
            Remarks
            The lifetime of the returned memory is the same as the lifetime of the rkev object.
            +

            Event types: RD_KAFKA_EVENT_DESCRIBEUSERSCRAMCREDENTIALS_RESULT

            + +
            +
            Parameters
            +
rkev \FFI\CData|null rd_kafka_event_t*
            +
            Returns
            +
\FFI\CData|null const rd_kafka_DescribeUserScramCredentials_result_t* - the result of a DescribeUserScramCredentials request, or NULL if the event is of a different type.
            +
            + +

            rd_kafka_event_AlterUserScramCredentials_result()

            +
            public static rd_kafka_event_AlterUserScramCredentials_result ( 
            +    \FFI\CData|null $rkev
            + ): \FFI\CData|null
            +
            +

            Get AlterUserScramCredentials result.

            + +
            Remarks
            The lifetime of the returned memory is the same as the lifetime of the rkev object.
            +

            Event types: RD_KAFKA_EVENT_ALTERUSERSCRAMCREDENTIALS_RESULT

            + +
            +
            Parameters
            +
rkev \FFI\CData|null rd_kafka_event_t*
            +
            Returns
            +
\FFI\CData|null const rd_kafka_AlterUserScramCredentials_result_t* - the result of an AlterUserScramCredentials request, or NULL if the event is of a different type.
            +
            + +

            rd_kafka_ConfigResource_add_incremental_config()

            +
            public static rd_kafka_ConfigResource_add_incremental_config ( 
            +    \FFI\CData|null $config, 
            +    string|null $name, 
            +    int $op_type, 
            +    string|null $value
            + ): \FFI\CData|null
            +
            +

            Add the value of the configuration entry for a subsequent incremental alter config operation. APPEND and SUBTRACT are possible for list-type configuration entries only.

            + +
            +
            Parameters
            +
            config \FFI\CData|null rd_kafka_ConfigResource_t* - ConfigResource to add config property to.
            +
            name string|null const char* - Configuration name, depends on resource type.
            +
            op_type int rd_kafka_AlterConfigOpType_t - Operation type, one of rd_kafka_AlterConfigOpType_t.
            +
value string|null const char* - Configuration value, depends on resource type and name. Set to NULL, only with op_type set to DELETE, to revert the configuration value to its default.
            +
            Returns
            +
            \FFI\CData|null rd_kafka_error_t* - NULL on success, or an rd_kafka_error_t * with the corresponding error code and string. Error ownership belongs to the caller. Possible error codes:
              +
              +
            • RD_KAFKA_RESP_ERR__INVALID_ARG on invalid input.
            • + + +

              rd_kafka_IncrementalAlterConfigs()

              +
              public static rd_kafka_IncrementalAlterConfigs ( 
              +    \FFI\CData|null $rk, 
              +    \FFI\CData|null $configs, 
              +    int|null $config_cnt, 
              +    \FFI\CData|null $options, 
              +    \FFI\CData|null $rkqu
              + ): void
              +
              +

Incrementally update the configuration for the specified resources. Updates are not transactional, so they may succeed for some resources while failing for others. The configs for a particular resource are updated atomically, executing the corresponding incremental operations on the provided configurations.

              +
              Remarks
              Requires broker version >=2.3.0
              +
+Multiple resources and resource types may be set, but at most one resource of type RD_KAFKA_RESOURCE_BROKER is allowed per call, since these resource requests must be sent to the broker specified in the resource. The broker option will be ignored in this case.
              + +
              +
              Parameters
              +
              rk \FFI\CData|null rd_kafka_t* - Client instance.
              +
              configs \FFI\CData|null rd_kafka_ConfigResource_t** - Array of config entries to alter.
              +
              config_cnt int|null size_t - Number of elements in configs array.
              +
              options \FFI\CData|null const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults.
              +
              rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to emit result on.
              +
              + +

              rd_kafka_IncrementalAlterConfigs_result_resources()

              +
              public static rd_kafka_IncrementalAlterConfigs_result_resources ( 
              +    \FFI\CData|null $result, 
              +    \FFI\CData|null $cntp
              + ): \FFI\CData|null
              +
              +

Get an array of resource results from an IncrementalAlterConfigs result.

              +

              Use rd_kafka_ConfigResource_error() and rd_kafka_ConfigResource_error_string() to extract per-resource error results on the returned array elements.

              +

The lifetime of the returned objects is the same as that of the result object.

              + +
              +
              Parameters
              +
              result \FFI\CData|null const rd_kafka_IncrementalAlterConfigs_result_t* - Result object to get resource results from.
              +
              cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
              +
              Returns
              +
              \FFI\CData|null const rd_kafka_ConfigResource_t** - an array of ConfigResource elements, or NULL if not available.
              +
              + +
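Putting the incremental-alter calls together, as a hedged sketch: $rk is assumed to be the underlying rd_kafka_t* handle of a connected client, the queue/event/ConfigResource helpers are standard librdkafka functions exposed by the binding, and enum values are written as plain integers in case the matching RD_KAFKA_* constants are not defined in your environment.

```php
use RdKafka\FFI\Library;

// Describe the change: SET retention.ms=86400000 on topic "playground".
$resource = Library::rd_kafka_ConfigResource_new(2 /* RD_KAFKA_RESOURCE_TOPIC */, 'playground');
$error = Library::rd_kafka_ConfigResource_add_incremental_config(
    $resource,
    'retention.ms',
    0 /* RD_KAFKA_ALTER_CONFIG_OP_TYPE_SET */,
    '86400000'
);
if ($error !== null) {
    // invalid input; inspect with rd_kafka_error_string(), then rd_kafka_error_destroy()
}

// Issue the request on a dedicated queue and wait for the result event.
$queue = Library::rd_kafka_queue_new($rk);
Library::rd_kafka_IncrementalAlterConfigs($rk, \FFI::addr($resource), 1, null, $queue);
$event = Library::rd_kafka_queue_poll($queue, 10000); // null on timeout; check in real code

// Extract per-resource results.
$result = Library::rd_kafka_event_IncrementalAlterConfigs_result($event);
$count = \FFI::new('size_t'); // out-parameter holder (size_t is a PHP FFI built-in type)
$resources = Library::rd_kafka_IncrementalAlterConfigs_result_resources($result, \FFI::addr($count));
for ($i = 0; $i < $count->cdata; $i++) {
    // Per-resource status via rd_kafka_ConfigResource_error() / rd_kafka_ConfigResource_error_string().
}

Library::rd_kafka_event_destroy($event);
Library::rd_kafka_queue_destroy($queue);
Library::rd_kafka_ConfigResource_destroy($resource);
```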

              rd_kafka_ScramCredentialInfo_mechanism()

              +
              public static rd_kafka_ScramCredentialInfo_mechanism ( 
              +    \FFI\CData|null $scram_credential_info
              + ): int
              +
              +
              +
              Parameters
              +
              scram_credential_info \FFI\CData|null const rd_kafka_ScramCredentialInfo_t*
              +
              Returns
              +
              int rd_kafka_ScramMechanism_t
              +
              +

              rd_kafka_ScramCredentialInfo_iterations()

              +
              public static rd_kafka_ScramCredentialInfo_iterations ( 
              +    \FFI\CData|null $scram_credential_info
              + ): int|null
              +
              +
              +
              Parameters
              +
              scram_credential_info \FFI\CData|null const rd_kafka_ScramCredentialInfo_t*
              +
              Returns
              +
              int|null int32_t
              +
              +

              rd_kafka_UserScramCredentialsDescription_user()

              +
              public static rd_kafka_UserScramCredentialsDescription_user ( 
              +    \FFI\CData|null $description
              + ): string|null
              +
              +
              +
              Parameters
              +
              description \FFI\CData|null const rd_kafka_UserScramCredentialsDescription_t*
              +
              Returns
              +
              string|null const char*
              +
              +

              rd_kafka_UserScramCredentialsDescription_error()

              +
              public static rd_kafka_UserScramCredentialsDescription_error ( 
              +    \FFI\CData|null $description
              + ): \FFI\CData|null
              +
              +
              +
              Parameters
              +
              description \FFI\CData|null const rd_kafka_UserScramCredentialsDescription_t*
              +
              Returns
              +
              \FFI\CData|null const rd_kafka_error_t*
              +
              +

              rd_kafka_UserScramCredentialsDescription_scramcredentialinfo_count()

              +
              public static rd_kafka_UserScramCredentialsDescription_scramcredentialinfo_count ( 
              +    \FFI\CData|null $description
              + ): int|null
              +
              +
              +
              Parameters
              +
              description \FFI\CData|null const rd_kafka_UserScramCredentialsDescription_t*
              +
              Returns
              +
              int|null size_t
              +
              +

              rd_kafka_UserScramCredentialsDescription_scramcredentialinfo()

              +
              public static rd_kafka_UserScramCredentialsDescription_scramcredentialinfo ( 
              +    \FFI\CData|null $description, 
              +    int|null $idx
              + ): \FFI\CData|null
              +
              +
              +
              Parameters
              +
              description \FFI\CData|null const rd_kafka_UserScramCredentialsDescription_t*
              +
              idx int|null size_t
              +
              Returns
              +
              \FFI\CData|null const rd_kafka_ScramCredentialInfo_t*
              +
              +

              rd_kafka_DescribeUserScramCredentials_result_descriptions()

              +
              public static rd_kafka_DescribeUserScramCredentials_result_descriptions ( 
              +    \FFI\CData|null $result, 
              +    \FFI\CData|null $cntp
              + ): \FFI\CData|null
              +
              +

              Get an array of descriptions from a DescribeUserScramCredentials result.

              +

The lifetime of the returned value is the same as that of the result object.

              + +
              +
              Parameters
              +
              result \FFI\CData|null const rd_kafka_DescribeUserScramCredentials_result_t* - Result to get descriptions from.
              +
              cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
              +
              Returns
              +
              \FFI\CData|null const rd_kafka_UserScramCredentialsDescription_t**
              +
              + +

              rd_kafka_DescribeUserScramCredentials()

              +
              public static rd_kafka_DescribeUserScramCredentials ( 
              +    \FFI\CData|null $rk, 
              +    \FFI\CData|null $users, 
              +    int|null $user_cnt, 
              +    \FFI\CData|null $options, 
              +    \FFI\CData|null $rkqu
              + ): void
              +
              +

              Describe SASL/SCRAM credentials. This operation is supported by brokers with version 2.7.0 or higher.

              + +
              +
              Parameters
              +
              rk \FFI\CData|null rd_kafka_t* - Client instance.
              +
              users \FFI\CData|null const char** - The users for which credentials are to be described. All users’ credentials are described if NULL.
              +
              user_cnt int|null size_t - Number of elements in users array.
              +
              options \FFI\CData|null const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults.
              +
              rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to emit result on.
              +
              + +
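A hedged sketch of describing all SCRAM credentials, assuming $rk is the underlying rd_kafka_t* handle of a connected client; the queue and event helpers are standard librdkafka functions exposed by the binding.

```php
use RdKafka\FFI\Library;

$queue = Library::rd_kafka_queue_new($rk);
Library::rd_kafka_DescribeUserScramCredentials($rk, null, 0, null, $queue); // NULL users = all users

$event = Library::rd_kafka_queue_poll($queue, 10000); // null on timeout; check in real code
$result = Library::rd_kafka_event_DescribeUserScramCredentials_result($event);

$count = \FFI::new('size_t');
$descriptions = Library::rd_kafka_DescribeUserScramCredentials_result_descriptions($result, \FFI::addr($count));
for ($i = 0; $i < $count->cdata; $i++) {
    $description = $descriptions[$i];
    $user       = Library::rd_kafka_UserScramCredentialsDescription_user($description);
    $mechanisms = Library::rd_kafka_UserScramCredentialsDescription_scramcredentialinfo_count($description);
    // rd_kafka_UserScramCredentialsDescription_scramcredentialinfo($description, $m) per mechanism
}

// The descriptions share the event's lifetime, so read them before destroying it.
Library::rd_kafka_event_destroy($event);
Library::rd_kafka_queue_destroy($queue);
```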

              rd_kafka_UserScramCredentialUpsertion_new()

              +
              public static rd_kafka_UserScramCredentialUpsertion_new ( 
              +    string|null $username, 
              +    int $mechanism, 
              +    int|null $iterations, 
              +    \FFI\CData|null $password, 
              +    int|null $password_size, 
              +    \FFI\CData|null $salt, 
              +    int|null $salt_size
              + ): \FFI\CData|null
              +
              +

Allocates a new UserScramCredentialUpsertion given its fields. If no salt is given, a 64-byte salt is generated using OpenSSL RAND_priv_bytes, if available.

              + +
              Remarks
A random salt is generated when NULL only if OpenSSL >= 1.1.1; otherwise it is a required parameter.
              + +
              +
              Parameters
              +
              username string|null const char* - The username (not empty).
              +
              mechanism int rd_kafka_ScramMechanism_t - SASL/SCRAM mechanism.
              +
              iterations int|null int32_t - SASL/SCRAM iterations.
              +
              password \FFI\CData|null const unsigned char* - Password bytes (not empty).
              +
              password_size int|null size_t - Size of password (greater than 0).
              +
              salt \FFI\CData|null const unsigned char* - Salt bytes (optional).
              +
              salt_size int|null size_t - Size of salt (optional).
              +
              Returns
              +
\FFI\CData|null rd_kafka_UserScramCredentialAlteration_t* - A newly created instance of rd_kafka_UserScramCredentialAlteration_t. Ownership belongs to the caller; use rd_kafka_UserScramCredentialAlteration_destroy to destroy it.
              +
              + +

              rd_kafka_UserScramCredentialDeletion_new()

              +
              public static rd_kafka_UserScramCredentialDeletion_new ( 
              +    string|null $username, 
              +    int $mechanism
              + ): \FFI\CData|null
              +
              +

              Allocates a new UserScramCredentialDeletion given its fields.

              + +
              +
              Parameters
              +
              username string|null const char* - The username (not empty).
              +
              mechanism int rd_kafka_ScramMechanism_t - SASL/SCRAM mechanism.
              +
              Returns
              +
\FFI\CData|null rd_kafka_UserScramCredentialAlteration_t* - A newly created instance of rd_kafka_UserScramCredentialAlteration_t. Ownership belongs to the caller; use rd_kafka_UserScramCredentialAlteration_destroy to destroy it.
              +
              + +

              rd_kafka_UserScramCredentialAlteration_destroy()

              +
              public static rd_kafka_UserScramCredentialAlteration_destroy ( 
              +    \FFI\CData|null $alteration
              + ): void
              +
              +
              +
              Parameters
              +
              alteration \FFI\CData|null rd_kafka_UserScramCredentialAlteration_t*
              +
              +

              rd_kafka_UserScramCredentialAlteration_destroy_array()

              +
              public static rd_kafka_UserScramCredentialAlteration_destroy_array ( 
              +    \FFI\CData|null $alterations, 
              +    int|null $alteration_cnt
              + ): void
              +
              +
              +
              Parameters
              +
              alterations \FFI\CData|null rd_kafka_UserScramCredentialAlteration_t**
              +
              alteration_cnt int|null size_t
              +
              +

              rd_kafka_AlterUserScramCredentials_result_response_user()

              +
              public static rd_kafka_AlterUserScramCredentials_result_response_user ( 
              +    \FFI\CData|null $response
              + ): string|null
              +
              +
              +
              Parameters
              +
              response \FFI\CData|null const rd_kafka_AlterUserScramCredentials_result_response_t*
              +
              Returns
              +
              string|null const char*
              +
              +

              rd_kafka_AlterUserScramCredentials_result_response_error()

              +
              public static rd_kafka_AlterUserScramCredentials_result_response_error ( 
              +    \FFI\CData|null $response
              + ): \FFI\CData|null
              +
              +
              +
              Parameters
              +
              response \FFI\CData|null const rd_kafka_AlterUserScramCredentials_result_response_t*
              +
              Returns
              +
              \FFI\CData|null const rd_kafka_error_t*
              +
              +

              rd_kafka_AlterUserScramCredentials_result_responses()

              +
              public static rd_kafka_AlterUserScramCredentials_result_responses ( 
              +    \FFI\CData|null $result, 
              +    \FFI\CData|null $cntp
              + ): \FFI\CData|null
              +
              +

Get an array of responses from an AlterUserScramCredentials result.

              +

The lifetime of the returned value is the same as that of the result object.

              + +
              +
              Parameters
              +
              result \FFI\CData|null const rd_kafka_AlterUserScramCredentials_result_t* - Result to get responses from.
              +
              cntp \FFI\CData|null size_t* - is updated to the number of elements in the array.
              +
              Returns
              +
              \FFI\CData|null const rd_kafka_AlterUserScramCredentials_result_response_t**
              +
              + +

              rd_kafka_AlterUserScramCredentials()

              +
              public static rd_kafka_AlterUserScramCredentials ( 
              +    \FFI\CData|null $rk, 
              +    \FFI\CData|null $alterations, 
              +    int|null $alteration_cnt, 
              +    \FFI\CData|null $options, 
              +    \FFI\CData|null $rkqu
              + ): void
              +
              +

              Alter SASL/SCRAM credentials. This operation is supported by brokers with version 2.7.0 or higher.

              +
              Remarks
For upsertions to be processed, librdkafka must be built with OpenSSL support; it is needed to calculate the HMAC.
              + +
              +
              Parameters
              +
              rk \FFI\CData|null rd_kafka_t* - Client instance.
              +
              alterations \FFI\CData|null rd_kafka_UserScramCredentialAlteration_t** - The alterations to be applied.
              +
              alteration_cnt int|null size_t - Number of elements in alterations array.
              +
              options \FFI\CData|null const rd_kafka_AdminOptions_t* - Optional admin options, or NULL for defaults.
              +
              rkqu \FFI\CData|null rd_kafka_queue_t* - Queue to emit result on.
              +
              + +
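A hedged sketch of upserting one SCRAM credential and reading the per-user outcome. $rk is assumed to be the underlying rd_kafka_t* handle of a connected client; the SCRAM mechanism is given as its integer enum value (2 = RD_KAFKA_SCRAM_MECHANISM_SHA_512) in case the constant is not defined in your environment, and the username/password are placeholders.

```php
use RdKafka\FFI\Library;

// Copy the password into a native byte buffer (the binding expects CData here).
$password = 'secret-password';
$buf = \FFI::new('uint8_t[' . strlen($password) . ']');
\FFI::memcpy($buf, $password, strlen($password));

$upsertion = Library::rd_kafka_UserScramCredentialUpsertion_new(
    'alice',
    2,                      // SCRAM-SHA-512
    8192,                   // iterations
    $buf,
    strlen($password),
    null,                   // salt: let librdkafka generate one (OpenSSL >= 1.1.1)
    0
);

$queue = Library::rd_kafka_queue_new($rk);
Library::rd_kafka_AlterUserScramCredentials($rk, \FFI::addr($upsertion), 1, null, $queue);

$event = Library::rd_kafka_queue_poll($queue, 10000); // null on timeout; check in real code
$result = Library::rd_kafka_event_AlterUserScramCredentials_result($event);

$count = \FFI::new('size_t');
$responses = Library::rd_kafka_AlterUserScramCredentials_result_responses($result, \FFI::addr($count));
for ($i = 0; $i < $count->cdata; $i++) {
    $user  = Library::rd_kafka_AlterUserScramCredentials_result_response_user($responses[$i]);
    $error = Library::rd_kafka_AlterUserScramCredentials_result_response_error($responses[$i]); // null on success
}

Library::rd_kafka_event_destroy($event);
Library::rd_kafka_queue_destroy($queue);
Library::rd_kafka_UserScramCredentialAlteration_destroy($upsertion);
```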

              rd_kafka_Uuid_base64str()

              +
              public static rd_kafka_Uuid_base64str ( 
              +    \FFI\CData|null $uuid
              + ): string|null
              +
              +
              +
              Parameters
              +
              uuid \FFI\CData|null const rd_kafka_Uuid_t*
              +
              Returns
              +
              string|null const char*
              +
              +

              rd_kafka_Uuid_least_significant_bits()

              +
              public static rd_kafka_Uuid_least_significant_bits ( 
              +    \FFI\CData|null $uuid
              + ): int|null
              +
              +
              +
              Parameters
              +
              uuid \FFI\CData|null const rd_kafka_Uuid_t*
              +
              Returns
              +
              int|null int64_t
              +
              +

              rd_kafka_Uuid_most_significant_bits()

              +
              public static rd_kafka_Uuid_most_significant_bits ( 
              +    \FFI\CData|null $uuid
              + ): int|null
              +
              +
              +
              Parameters
              +
              uuid \FFI\CData|null const rd_kafka_Uuid_t*
              +
              Returns
              +
              int|null int64_t
              +
              +

              rd_kafka_Uuid_new()

              +
              public static rd_kafka_Uuid_new ( 
              +    int|null $most_significant_bits, 
              +    int|null $least_significant_bits
              + ): \FFI\CData|null
              +
              +
              +
              Parameters
              +
              most_significant_bits int|null int64_t
              +
              least_significant_bits int|null int64_t
              +
              Returns
              +
              \FFI\CData|null rd_kafka_Uuid_t*
              +
              +

              rd_kafka_Uuid_copy()

              +
              public static rd_kafka_Uuid_copy ( 
              +    \FFI\CData|null $uuid
              + ): \FFI\CData|null
              +
              +
              +
              Parameters
              +
              uuid \FFI\CData|null const rd_kafka_Uuid_t*
              +
              Returns
              +
              \FFI\CData|null rd_kafka_Uuid_t*
              +
              +

              rd_kafka_Uuid_destroy()

              +
              public static rd_kafka_Uuid_destroy ( 
              +    \FFI\CData|null $uuid
              + ): void
              +
              +
              +
              Parameters
              +
              uuid \FFI\CData|null rd_kafka_Uuid_t*
              +
              +
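A small sketch of the UUID helpers above: build a UUID from its two 64-bit halves, read it back, and free it.

```php
use RdKafka\FFI\Library;

$uuid = Library::rd_kafka_Uuid_new(0x0123456789ABCDEF, 0x0FEDCBA987654321);

$base64 = Library::rd_kafka_Uuid_base64str($uuid);             // textual form, e.g. for logging topic ids
$msb    = Library::rd_kafka_Uuid_most_significant_bits($uuid);
$lsb    = Library::rd_kafka_Uuid_least_significant_bits($uuid);

$copy = Library::rd_kafka_Uuid_copy($uuid);

Library::rd_kafka_Uuid_destroy($copy);
Library::rd_kafka_Uuid_destroy($uuid);
```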

              rd_kafka_Node_rack()

              +
              public static rd_kafka_Node_rack ( 
              +    \FFI\CData|null $node
              + ): string|null
              +
              +
              +
              Parameters
              +
              node \FFI\CData|null const rd_kafka_Node_t*
              +
              Returns
              +
              string|null const char*
              +
              +

              rd_kafka_event_DescribeTopics_result()

              +
              public static rd_kafka_event_DescribeTopics_result ( 
              +    \FFI\CData|null $rkev
              + ): \FFI\CData|null
              +
              +
              +
              Parameters
              +
              rkev \FFI\CData|null rd_kafka_event_t*
              +
              Returns
              +
              \FFI\CData|null const rd_kafka_DescribeTopics_result_t*
              +
              +

              rd_kafka_event_DescribeCluster_result()

              +
              public static rd_kafka_event_DescribeCluster_result ( 
              +    \FFI\CData|null $rkev
              + ): \FFI\CData|null
              +
              +
              +
              Parameters
              +
              rkev \FFI\CData|null rd_kafka_event_t*
              +
              Returns
              +
              \FFI\CData|null const rd_kafka_DescribeCluster_result_t*
              +
              +

              rd_kafka_event_ListOffsets_result()

              +
              public static rd_kafka_event_ListOffsets_result ( 
              +    \FFI\CData|null $rkev
              + ): \FFI\CData|null
              +
              +
              +
              Parameters
              +
              rkev \FFI\CData|null rd_kafka_event_t*
              +
              Returns
              +
              \FFI\CData|null const rd_kafka_ListOffsets_result_t*
              +
              +

              rd_kafka_AdminOptions_set_include_authorized_operations()

              +
              public static rd_kafka_AdminOptions_set_include_authorized_operations ( 
              +    \FFI\CData|null $options, 
              +    int|null $true_or_false
              + ): \FFI\CData|null
              +
              +
              +
              Parameters
              +
              options \FFI\CData|null rd_kafka_AdminOptions_t*
              +
              true_or_false int|null int
              +
              Returns
              +
              \FFI\CData|null rd_kafka_error_t*
              +
              +

              rd_kafka_AdminOptions_set_isolation_level()

              +
              public static rd_kafka_AdminOptions_set_isolation_level ( 
              +    \FFI\CData|null $options, 
              +    int $value
              + ): \FFI\CData|null
              +
              +
              +
              Parameters
              +
              options \FFI\CData|null rd_kafka_AdminOptions_t*
              +
              value int rd_kafka_IsolationLevel_t
              +
              Returns
              +
              \FFI\CData|null rd_kafka_error_t*
              +
              +

              rd_kafka_TopicCollection_of_topic_names()

              +
              public static rd_kafka_TopicCollection_of_topic_names ( 
              +    \FFI\CData|null $topics, 
              +    int|null $topics_cnt
              + ): \FFI\CData|null
              +
              +
              +
              Parameters
              +
              topics \FFI\CData|null const char**
              +
              topics_cnt int|null size_t
              +
              Returns
              +
              \FFI\CData|null rd_kafka_TopicCollection_t*
              +
              +

              rd_kafka_TopicCollection_destroy()

              +
              public static rd_kafka_TopicCollection_destroy ( 
              +    \FFI\CData|null $topics
              + ): void
              +
              +
              +
              Parameters
              +
              topics \FFI\CData|null rd_kafka_TopicCollection_t*
              +
              +

              rd_kafka_DescribeTopics()

              +
              public static rd_kafka_DescribeTopics ( 
              +    \FFI\CData|null $rk, 
              +    \FFI\CData|null $topics, 
              +    \FFI\CData|null $options, 
              +    \FFI\CData|null $rkqu
              + ): void
              +
              +
              +
              Parameters
              +
              rk \FFI\CData|null rd_kafka_t*
              +
              topics \FFI\CData|null const rd_kafka_TopicCollection_t*
              +
              options \FFI\CData|null const rd_kafka_AdminOptions_t*
              +
              rkqu \FFI\CData|null rd_kafka_queue_t*
              +
              +

              rd_kafka_DescribeTopics_result_topics()

              +
              public static rd_kafka_DescribeTopics_result_topics ( 
              +    \FFI\CData|null $result, 
              +    \FFI\CData|null $cntp
              + ): \FFI\CData|null
              +
              +
              +
              Parameters
              +
              result \FFI\CData|null const rd_kafka_DescribeTopics_result_t*
              +
              cntp \FFI\CData|null size_t*
              +
              Returns
              +
              \FFI\CData|null const rd_kafka_TopicDescription_t**
              +
              +

              rd_kafka_TopicDescription_partitions()

              +
              public static rd_kafka_TopicDescription_partitions ( 
              +    \FFI\CData|null $topicdesc, 
              +    \FFI\CData|null $cntp
              + ): \FFI\CData|null
              +
              +
              +
              Parameters
              +
              topicdesc \FFI\CData|null const rd_kafka_TopicDescription_t*
              +
              cntp \FFI\CData|null size_t*
              +
              Returns
              +
              \FFI\CData|null const rd_kafka_TopicPartitionInfo_t**
              +
              +

              rd_kafka_TopicPartitionInfo_partition()

              +
              public static rd_kafka_TopicPartitionInfo_partition ( 
              +    \FFI\CData|null $partition
              + ): int|null
              +
              +
              +
              Parameters
              +
              partition \FFI\CData|null const rd_kafka_TopicPartitionInfo_t*
              +
              Returns
              +
              int|null const int
              +
              +

              rd_kafka_TopicPartitionInfo_leader()

              +
              public static rd_kafka_TopicPartitionInfo_leader ( 
              +    \FFI\CData|null $partition
              + ): \FFI\CData|null
              +
              +
              +
              Parameters
              +
              partition \FFI\CData|null const rd_kafka_TopicPartitionInfo_t*
              +
              Returns
              +
              \FFI\CData|null const rd_kafka_Node_t*
              +
              +

              rd_kafka_TopicPartitionInfo_isr()

              +
              public static rd_kafka_TopicPartitionInfo_isr ( 
              +    \FFI\CData|null $partition, 
              +    \FFI\CData|null $cntp
              + ): \FFI\CData|null
              +
              +
              +
              Parameters
              +
              partition \FFI\CData|null const rd_kafka_TopicPartitionInfo_t*
              +
              cntp \FFI\CData|null size_t*
              +
              Returns
              +
              \FFI\CData|null const rd_kafka_Node_t**
              +
              +

              rd_kafka_TopicPartitionInfo_replicas()

              +
              public static rd_kafka_TopicPartitionInfo_replicas ( 
              +    \FFI\CData|null $partition, 
              +    \FFI\CData|null $cntp
              + ): \FFI\CData|null
              +
              +
              +
              Parameters
              +
              partition \FFI\CData|null const rd_kafka_TopicPartitionInfo_t*
              +
              cntp \FFI\CData|null size_t*
              +
              Returns
              +
              \FFI\CData|null const rd_kafka_Node_t**
              +
              +

              rd_kafka_TopicDescription_authorized_operations()

              +
              public static rd_kafka_TopicDescription_authorized_operations ( 
              +    \FFI\CData|null $topicdesc, 
              +    \FFI\CData|null $cntp
              + ): \FFI\CData|null
              +
              +
              +
              Parameters
              +
              topicdesc \FFI\CData|null const rd_kafka_TopicDescription_t*
              +
              cntp \FFI\CData|null size_t*
              +
              Returns
              +
              \FFI\CData|null const rd_kafka_AclOperation_t*
              +
              +

              rd_kafka_TopicDescription_name()

              +
              public static rd_kafka_TopicDescription_name ( 
              +    \FFI\CData|null $topicdesc
              + ): string|null
              +
              +
              +
              Parameters
              +
              topicdesc \FFI\CData|null const rd_kafka_TopicDescription_t*
              +
              Returns
              +
              string|null const char*
              +
              +

              rd_kafka_TopicDescription_topic_id()

              +
              public static rd_kafka_TopicDescription_topic_id ( 
              +    \FFI\CData|null $topicdesc
              + ): \FFI\CData|null
              +
              +
              +
              Parameters
              +
              topicdesc \FFI\CData|null const rd_kafka_TopicDescription_t*
              +
              Returns
              +
              \FFI\CData|null const rd_kafka_Uuid_t*
              +
              +

              rd_kafka_TopicDescription_is_internal()

              +
              public static rd_kafka_TopicDescription_is_internal ( 
              +    \FFI\CData|null $topicdesc
              + ): int|null
              +
              +
              +
              Parameters
              +
              topicdesc \FFI\CData|null const rd_kafka_TopicDescription_t*
              +
              Returns
              +
              int|null int
              +
              +

              rd_kafka_TopicDescription_error()

              +
              public static rd_kafka_TopicDescription_error ( 
              +    \FFI\CData|null $topicdesc
              + ): \FFI\CData|null
              +
              +
              +
              Parameters
              +
              topicdesc \FFI\CData|null const rd_kafka_TopicDescription_t*
              +
              Returns
              +
              \FFI\CData|null const rd_kafka_error_t*
              +
              +

              rd_kafka_DescribeCluster()

              +
              public static rd_kafka_DescribeCluster ( 
              +    \FFI\CData|null $rk, 
              +    \FFI\CData|null $options, 
              +    \FFI\CData|null $rkqu
              + ): void
              +
              +
              +
              Parameters
              +
              rk \FFI\CData|null rd_kafka_t*
              +
              options \FFI\CData|null const rd_kafka_AdminOptions_t*
              +
              rkqu \FFI\CData|null rd_kafka_queue_t*
              +
              +

              rd_kafka_DescribeCluster_result_nodes()

              +
              public static rd_kafka_DescribeCluster_result_nodes ( 
              +    \FFI\CData|null $result, 
              +    \FFI\CData|null $cntp
              + ): \FFI\CData|null
              +
              +
              +
              Parameters
              +
              result \FFI\CData|null const rd_kafka_DescribeCluster_result_t*
              +
              cntp \FFI\CData|null size_t*
              +
              Returns
              +
              \FFI\CData|null const rd_kafka_Node_t**
              +
              +

              rd_kafka_DescribeCluster_result_authorized_operations()

              +
              public static rd_kafka_DescribeCluster_result_authorized_operations ( 
              +    \FFI\CData|null $result, 
              +    \FFI\CData|null $cntp
              + ): \FFI\CData|null
              +
              +
              +
              Parameters
              +
              result \FFI\CData|null const rd_kafka_DescribeCluster_result_t*
              +
              cntp \FFI\CData|null size_t*
              +
              Returns
              +
              \FFI\CData|null const rd_kafka_AclOperation_t*
              +
              +

              rd_kafka_DescribeCluster_result_controller()

              +
              public static rd_kafka_DescribeCluster_result_controller ( 
              +    \FFI\CData|null $result
              + ): \FFI\CData|null
              +
              +
              +
              Parameters
              +
              result \FFI\CData|null const rd_kafka_DescribeCluster_result_t*
              +
              Returns
              +
              \FFI\CData|null const rd_kafka_Node_t*
              +
              +

              rd_kafka_DescribeCluster_result_cluster_id()

              +
              public static rd_kafka_DescribeCluster_result_cluster_id ( 
              +    \FFI\CData|null $result
              + ): string|null
              +
              +
              +
              Parameters
              +
              result \FFI\CData|null const rd_kafka_DescribeCluster_result_t*
              +
              Returns
              +
              string|null const char*
              +
              +
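A hedged sketch of describing the cluster, assuming $rk is the underlying rd_kafka_t* handle of a connected client; rd_kafka_Node_id()/_host() are standard librdkafka node accessors exposed by the same binding.

```php
use RdKafka\FFI\Library;

$queue = Library::rd_kafka_queue_new($rk);
Library::rd_kafka_DescribeCluster($rk, null, $queue);

$event = Library::rd_kafka_queue_poll($queue, 10000); // null on timeout; check in real code
$result = Library::rd_kafka_event_DescribeCluster_result($event);

$clusterId  = Library::rd_kafka_DescribeCluster_result_cluster_id($result);
$controller = Library::rd_kafka_DescribeCluster_result_controller($result);

$count = \FFI::new('size_t');
$nodes = Library::rd_kafka_DescribeCluster_result_nodes($result, \FFI::addr($count));
for ($i = 0; $i < $count->cdata; $i++) {
    $brokerId = Library::rd_kafka_Node_id($nodes[$i]);
    $host     = Library::rd_kafka_Node_host($nodes[$i]);
    $rack     = Library::rd_kafka_Node_rack($nodes[$i]); // null if the broker has no rack configured
}

Library::rd_kafka_event_destroy($event);
Library::rd_kafka_queue_destroy($queue);
```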

              rd_kafka_ConsumerGroupDescription_authorized_operations()

              +
              public static rd_kafka_ConsumerGroupDescription_authorized_operations ( 
              +    \FFI\CData|null $grpdesc, 
              +    \FFI\CData|null $cntp
              + ): \FFI\CData|null
              +
              +
              +
              Parameters
              +
              grpdesc \FFI\CData|null const rd_kafka_ConsumerGroupDescription_t*
              +
              cntp \FFI\CData|null size_t*
              +
              Returns
              +
              \FFI\CData|null const rd_kafka_AclOperation_t*
              +
              +

              rd_kafka_ListOffsetsResultInfo_topic_partition()

              +
              public static rd_kafka_ListOffsetsResultInfo_topic_partition ( 
              +    \FFI\CData|null $result_info
              + ): \FFI\CData|null
              +
              +
              +
              Parameters
              +
              result_info \FFI\CData|null const rd_kafka_ListOffsetsResultInfo_t*
              +
              Returns
              +
              \FFI\CData|null const rd_kafka_topic_partition_t*
              +
              +

              rd_kafka_ListOffsetsResultInfo_timestamp()

              +
              public static rd_kafka_ListOffsetsResultInfo_timestamp ( 
              +    \FFI\CData|null $result_info
              + ): int|null
              +
              +
              +
              Parameters
              +
              result_info \FFI\CData|null const rd_kafka_ListOffsetsResultInfo_t*
              +
              Returns
              +
              int|null int64_t
              +
              +

              rd_kafka_ListOffsets_result_infos()

              +
              public static rd_kafka_ListOffsets_result_infos ( 
              +    \FFI\CData|null $result, 
              +    \FFI\CData|null $cntp
              + ): \FFI\CData|null
              +
              +
              +
              Parameters
              +
              result \FFI\CData|null const rd_kafka_ListOffsets_result_t*
              +
              cntp \FFI\CData|null size_t*
              +
              Returns
              +
              \FFI\CData|null const rd_kafka_ListOffsetsResultInfo_t**
              +
              +

              rd_kafka_ListOffsets()

              +
              public static rd_kafka_ListOffsets ( 
              +    \FFI\CData|null $rk, 
              +    \FFI\CData|null $topic_partitions, 
              +    \FFI\CData|null $options, 
              +    \FFI\CData|null $rkqu
              + ): void
              +
              +
              +
              Parameters
              +
              rk \FFI\CData|null rd_kafka_t*
              +
              topic_partitions \FFI\CData|null rd_kafka_topic_partition_list_t*
              +
              options \FFI\CData|null const rd_kafka_AdminOptions_t*
              +
              rkqu \FFI\CData|null rd_kafka_queue_t*
              +
              +
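A hedged sketch of listing the latest offset of one partition. $rk is assumed to be the underlying rd_kafka_t* handle; the offset field of each partition selects what is looked up (-1/-2 are the RD_KAFKA_OFFSET_SPEC_LATEST/_EARLIEST enum values from rdkafka.h), and the list helpers are standard librdkafka functions exposed by the binding.

```php
use RdKafka\FFI\Library;

$partitions = Library::rd_kafka_topic_partition_list_new(1);
$tp = Library::rd_kafka_topic_partition_list_add($partitions, 'playground', 0);
$tp->offset = -1; // RD_KAFKA_OFFSET_SPEC_LATEST

$queue = Library::rd_kafka_queue_new($rk);
Library::rd_kafka_ListOffsets($rk, $partitions, null, $queue);

$event = Library::rd_kafka_queue_poll($queue, 10000); // null on timeout; check in real code
$result = Library::rd_kafka_event_ListOffsets_result($event);

$count = \FFI::new('size_t');
$infos = Library::rd_kafka_ListOffsets_result_infos($result, \FFI::addr($count));
for ($i = 0; $i < $count->cdata; $i++) {
    $partition = Library::rd_kafka_ListOffsetsResultInfo_topic_partition($infos[$i]);
    $timestamp = Library::rd_kafka_ListOffsetsResultInfo_timestamp($infos[$i]);
    // $partition->offset holds the looked-up offset for that topic/partition
}

Library::rd_kafka_event_destroy($event);
Library::rd_kafka_queue_destroy($queue);
Library::rd_kafka_topic_partition_list_destroy($partitions);
```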

              rd_kafka_mock_start_request_tracking()

              +
              public static rd_kafka_mock_start_request_tracking ( 
              +    \FFI\CData|null $mcluster
              + ): void
              +
              +
              +
              Parameters
              +
              mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
              +
              +

              rd_kafka_mock_stop_request_tracking()

              +
              public static rd_kafka_mock_stop_request_tracking ( 
              +    \FFI\CData|null $mcluster
              + ): void
              +
              +
              +
              Parameters
              +
              mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
              +
              +

              rd_kafka_mock_request_destroy()

              +
              public static rd_kafka_mock_request_destroy ( 
              +    \FFI\CData|null $mreq
              + ): void
              +
              +
              +
              Parameters
              +
              mreq \FFI\CData|null rd_kafka_mock_request_t*
              +
              +

              rd_kafka_mock_request_id()

              +
              public static rd_kafka_mock_request_id ( 
              +    \FFI\CData|null $mreq
              + ): int|null
              +
              +
              +
              Parameters
              +
              mreq \FFI\CData|null rd_kafka_mock_request_t*
              +
              Returns
              +
              int|null int32_t
              +
              +

              rd_kafka_mock_request_api_key()

              +
              public static rd_kafka_mock_request_api_key ( 
              +    \FFI\CData|null $mreq
              + ): int|null
              +
              +
              +
              Parameters
              +
              mreq \FFI\CData|null rd_kafka_mock_request_t*
              +
              Returns
              +
              int|null int16_t
              +
              +

              rd_kafka_mock_request_timestamp()

              +
              public static rd_kafka_mock_request_timestamp ( 
              +    \FFI\CData|null $mreq
              + ): int|null
              +
              +
              +
              Parameters
              +
              mreq \FFI\CData|null rd_kafka_mock_request_t*
              +
              Returns
              +
              int|null int64_t
              +
              +

              rd_kafka_mock_get_requests()

              +
              public static rd_kafka_mock_get_requests ( 
              +    \FFI\CData|null $mcluster, 
              +    \FFI\CData|null $cntp
              + ): \FFI\CData|null
              +
              +
              +
              Parameters
              +
              mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
              +
              cntp \FFI\CData|null size_t*
              +
              Returns
              +
              \FFI\CData|null rd_kafka_mock_request_t**
              +
              +

              rd_kafka_mock_clear_requests()

              +
              public static rd_kafka_mock_clear_requests ( 
              +    \FFI\CData|null $mcluster
              + ): void
              +
              +
              +
              Parameters
              +
              mcluster \FFI\CData|null rd_kafka_mock_cluster_t*
              +
              +
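A hedged sketch of request tracking on a mock cluster, assuming $mcluster is a rd_kafka_mock_cluster_t* handle set up elsewhere (e.g. in an integration test).

```php
use RdKafka\FFI\Library;

Library::rd_kafka_mock_start_request_tracking($mcluster);

// ... exercise the client under test against the mock cluster ...

$count = \FFI::new('size_t');
$requests = Library::rd_kafka_mock_get_requests($mcluster, \FFI::addr($count));
for ($i = 0; $i < $count->cdata; $i++) {
    $brokerId  = Library::rd_kafka_mock_request_id($requests[$i]);        // broker the request was sent to
    $apiKey    = Library::rd_kafka_mock_request_api_key($requests[$i]);   // Kafka protocol ApiKey
    $timestamp = Library::rd_kafka_mock_request_timestamp($requests[$i]);
    Library::rd_kafka_mock_request_destroy($requests[$i]);
}

Library::rd_kafka_mock_clear_requests($mcluster);
Library::rd_kafka_mock_stop_request_tracking($mcluster);
```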

              Used by

              + + + + + + + + + + + + + + + + + +
              +
              + + + +
              + +
              + + + +
              +
              +
              +
              + + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/FFI/NativePartitionerCallbackProxy/index.html b/api/RdKafka/FFI/NativePartitionerCallbackProxy/index.html new file mode 100644 index 00000000..2d452701 --- /dev/null +++ b/api/RdKafka/FFI/NativePartitionerCallbackProxy/index.html @@ -0,0 +1,2955 @@ + + + + + + + + + + + + + + + + + + + + + + + + + NativePartitionerCallbackProxy - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
              + + + + Skip to content + + +
              +
              + +
              + + + + +
              + + +
              + +
              + + + + + + + + + +
              +
              + + + +
              +
              +
              + + + + + + + +
              +
              +
              + + + +
              +
              +
              + + + +
              +
              +
              + + + +
              +
              + + + + + + + +

              Class NativePartitionerCallbackProxy

              +

              Class \RdKafka\FFI\NativePartitionerCallbackProxy

              +

              Methods

              +

              __construct()

              +
              public __construct ( 
              +    string $partitionerMethod
              + ): 
              +
              +
              +
              Parameters
              +
              partitionerMethod string
              +
              +

              __invoke()

              +
              public __invoke ( 
              +    ?\FFI\CData $topic, 
              +    ?\FFI\CData $keydata, 
              +    int $keylen, 
              +    int $partition_cnt, 
              +    ?\FFI\CData $topic_opaque = null, 
              +    ?\FFI\CData $msg_opaque = null
              + ): int
              +
              +
              +
              Parameters
              +
              topic ?\FFI\CData
              +
              keydata ?\FFI\CData
              +
              keylen int
              +
              partition_cnt int
              +
              topic_opaque ?\FFI\CData
              +
              msg_opaque ?\FFI\CData
              +
              Returns
              +
              int
              +
              +

              create()

              +
              public static create ( 
              +    string $partitionerMethod
              + ): \Closure
              +
              +
              +
              Parameters
              +
              partitionerMethod string
              +
              Returns
              +
              \Closure
              +
              +

              Test Coverage 💚

              +
                +
              • 💚 + Lines: 100% (11 / 11)
              • +
              • 💚 + Methods: 100% (3 / 3)
              • +
              + + + + + + + + + + + + + + + + +
              +
              + + + +
              + +
              + + + +
              +
              +
              +
              + + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/FFI/OffsetCommitCallbackProxy/index.html b/api/RdKafka/FFI/OffsetCommitCallbackProxy/index.html new file mode 100644 index 00000000..751d08aa --- /dev/null +++ b/api/RdKafka/FFI/OffsetCommitCallbackProxy/index.html @@ -0,0 +1,2896 @@ + + + + + + + + + + + + + + + + + + + + + + + + + OffsetCommitCallbackProxy - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
              + + + + Skip to content + + +
              +
              + +
              + + + + +
              + + +
              + +
              + + + + + + + + + +
              +
              + + + +
              +
              +
              + + + + + + + +
              +
              +
              + + + +
              +
              +
              + + + +
              +
              +
              + + + +
              +
              + + + + + + + +

              Class OffsetCommitCallbackProxy

              +

              Class \RdKafka\FFI\OffsetCommitCallbackProxy

              + +

              Methods

              +

              __invoke()

              +
              public __invoke ( 
              +    \FFI\CData $consumer, 
              +    int $err, 
              +    \FFI\CData $nativeTopicPartitionList, 
              +    ?\FFI\CData $opaque = null
              + ): void
              +
              +
              +
              Parameters
              +
              consumer \FFI\CData
              +
              err int
              +
              nativeTopicPartitionList \FFI\CData
              +
              opaque ?\FFI\CData
              +
              +

              Test Coverage 💚

              +
                +
              • 💚 + Lines: 100% (6 / 6)
              • +
              • 💚 + Methods: 100% (1 / 1)
              • +
              + + + + + + + + + + + + + + + + +
              +
              + + + +
              + +
              + + + +
              +
              +
              +
              + + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/FFI/OpaqueMap/index.html b/api/RdKafka/FFI/OpaqueMap/index.html new file mode 100644 index 00000000..5d5f7d8d --- /dev/null +++ b/api/RdKafka/FFI/OpaqueMap/index.html @@ -0,0 +1,2947 @@ + + + + + + + + + + + + + + + + + + + + + + + + + OpaqueMap - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
              + + + + Skip to content + + +
              +
              + +
              + + + + +
              + + +
              + +
              + + + + + + + + + +
              +
              + + + +
              +
              +
              + + + + + + + +
              +
              +
              + + + +
              +
              +
              + + + +
              +
              +
              + + + +
              +
              + + + + + + + +

              Class OpaqueMap

              +

              Class \RdKafka\FFI\OpaqueMap

              +

              Methods

              +

              get()

              +
              public static get ( 
              +    ?\FFI\CData $cOpaque
              + ): mixed|null
              +
              +
              +
              Parameters
              +
              cOpaque ?\FFI\CData
              +
              Returns
              +
              mixed|null
              +
              +

              pull()

              +
              public static pull ( 
              +    ?\FFI\CData $cOpaque
              + ): mixed|null
              +
              +
              +
              Parameters
              +
              cOpaque ?\FFI\CData
              +
              Returns
              +
              mixed|null
              +
              +

              push()

              +
              public static push ( 
              +    mixed|null $opaque
              + ): ?\FFI\CData
              +
              +
              +
              Parameters
              +
              opaque mixed|null
              +
              Returns
              +
              ?\FFI\CData
              +
              +

              Test Coverage 💚

              +
                +
              • 💚 + Lines: 96.3% (26 / 27)
              • +
              • 🧡 + Methods: 66.67% (2 / 3)
              • +
              + + + + + + + + + + + + + + + + +
              +
              + + + +
              + +
              + + + +
              +
              +
              +
              + + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/FFI/PartitionerCallbackProxy/index.html b/api/RdKafka/FFI/PartitionerCallbackProxy/index.html new file mode 100644 index 00000000..d9be7310 --- /dev/null +++ b/api/RdKafka/FFI/PartitionerCallbackProxy/index.html @@ -0,0 +1,2902 @@ + + + + + + + + + + + + + + + + + + + + + + + + + PartitionerCallbackProxy - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
              + + + + Skip to content + + +
              +
              + +
              + + + + +
              + + +
              + +
              + + + + + + + + + +
              +
              + + + +
              +
              +
              + + + + + + + +
              +
              +
              + + + +
              +
              +
              + + + +
              +
              +
              + + + +
              +
              + + + + + + + +

              Class PartitionerCallbackProxy

              +

              Class \RdKafka\FFI\PartitionerCallbackProxy

              + +

              Methods

              +

              __invoke()

              +
              public __invoke ( 
              +    ?\FFI\CData $topic, 
              +    ?\FFI\CData $keydata, 
              +    int $keylen, 
              +    int $partition_cnt, 
              +    ?\FFI\CData $topic_opaque = null, 
              +    ?\FFI\CData $msg_opaque = null
              + ): int
              +
              +
              +
              Parameters
              +
              topic ?\FFI\CData
              +
              keydata ?\FFI\CData
              +
              keylen int
              +
              partition_cnt int
              +
              topic_opaque ?\FFI\CData
              +
              msg_opaque ?\FFI\CData
              +
              Returns
              +
              int
              +
              +

              Test Coverage 💚

              +
                +
              • 💚 + Lines: 100% (6 / 6)
              • +
              • 💚 + Methods: 100% (1 / 1)
              • +
              + + + + + + + + + + + + + + + + +
              +
              + + + +
              + +
              + + + +
              +
              +
              +
              + + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/FFI/RebalanceCallbackProxy/index.html b/api/RdKafka/FFI/RebalanceCallbackProxy/index.html new file mode 100644 index 00000000..c37030c7 --- /dev/null +++ b/api/RdKafka/FFI/RebalanceCallbackProxy/index.html @@ -0,0 +1,2896 @@ + + + + + + + + + + + + + + + + + + + + + + + + + RebalanceCallbackProxy - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
              + + + + Skip to content + + +
              +
              + +
              + + + + +
              + + +
              + +
              + + + + + + + + + +
              +
              + + + +
              +
              +
              + + + + + + + +
              +
              +
              + + + +
              +
              +
              + + + +
              +
              +
              + + + +
              +
              + + + + + + + +

              Class RebalanceCallbackProxy

              +

              Class \RdKafka\FFI\RebalanceCallbackProxy

              + +

              Methods

              +

              __invoke()

              +
              public __invoke ( 
              +    \FFI\CData $consumer, 
              +    int $err, 
              +    \FFI\CData $nativeTopicPartitionList, 
              +    ?\FFI\CData $opaque = null
              + ): void
              +
              +
              +
              Parameters
              +
              consumer \FFI\CData
              +
              err int
              +
              nativeTopicPartitionList \FFI\CData
              +
              opaque ?\FFI\CData
              +
              +
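This proxy forwards librdkafka's native rebalance events to the callback registered on the configuration. A typical callback, sketched against the php-rdkafka-compatible Conf::setRebalanceCb() API (broker address and group id are placeholders):

```php
use RdKafka\Conf;
use RdKafka\KafkaConsumer;

$conf = new Conf();
$conf->set('bootstrap.servers', 'localhost:9092');
$conf->set('group.id', 'example-group');
$conf->setRebalanceCb(
    function (KafkaConsumer $consumer, int $err, array $partitions = null): void {
        switch ($err) {
            case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
                $consumer->assign($partitions);   // take over the newly assigned partitions
                break;
            case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
                $consumer->assign(null);          // drop the revoked assignment
                break;
            default:
                throw new \RuntimeException('Unexpected rebalance error: ' . $err);
        }
    }
);
```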

              Test Coverage 💚

              +
                +
              • 💚 + Lines: 100% (6 / 6)
              • +
              • 💚 + Methods: 100% (1 / 1)
              • +

Class StatsCallbackProxy

Class \RdKafka\FFI\StatsCallbackProxy

Methods

__invoke()

public __invoke ( \FFI\CData $consumerOrProducer, \FFI\CData $json, int $json_len, ?\FFI\CData $opaque = null ): int

Parameters
consumerOrProducer \FFI\CData
json \FFI\CData
json_len int
opaque ?\FFI\CData

Returns
int

Test Coverage 💚

• 💚 Lines: 100% (7 / 7)
• 💚 Methods: 100% (1 / 1)
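
Example

A sketch of the user-land statistics callback that this proxy feeds with librdkafka's stats JSON. Conf::setStatsCb() and the statistics.interval.ms property are assumptions taken from the rdkafka extension API that this library mirrors.

```php
<?php
// Sketch only: Conf::setStatsCb() and 'statistics.interval.ms' are assumed
// (standard rdkafka extension / librdkafka conventions). The callback receives
// the raw stats document emitted by librdkafka as a JSON string.
use RdKafka\Conf;

$conf = new Conf();
$conf->set('statistics.interval.ms', '5000'); // emit stats every 5 seconds
$conf->setStatsCb(function ($kafka, string $json, int $jsonLength, $opaque = null): void {
    $stats = json_decode($json, true);
    error_log(sprintf('librdkafka reports %d broker(s)', count($stats['brokers'] ?? [])));
});
```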

Class KafkaConsumer

Class \RdKafka\KafkaConsumer

Methods

__construct()

public __construct ( \RdKafka\Conf $conf ): 

Parameters
conf \RdKafka\Conf

__destruct()

public __destruct (  ): 

assign()

public assign ( \RdKafka\TopicPartition[] $topic_partitions = null ): void

Parameters
topic_partitions \RdKafka\TopicPartition[]

close()

public close (  ): void

commit()

public commit ( \RdKafka\Message|\RdKafka\TopicPartition[]|null $message_or_offsets = null ): void

Parameters
message_or_offsets \RdKafka\Message|\RdKafka\TopicPartition[]|null

commitAsync()

public commitAsync ( \RdKafka\Message|\RdKafka\TopicPartition[]|null $message_or_offsets = null ): void

Parameters
message_or_offsets \RdKafka\Message|\RdKafka\TopicPartition[]|null

consume()

public consume ( int $timeout_ms ): \RdKafka\Message

Parameters
timeout_ms int

Returns
\RdKafka\Message

getAssignment()

public getAssignment (  ): \RdKafka\TopicPartition[]

Returns
\RdKafka\TopicPartition[]

getCommittedOffsets()

public getCommittedOffsets ( \RdKafka\TopicPartition[] $topics, int $timeout_ms ): \RdKafka\TopicPartition[]

Parameters
topics \RdKafka\TopicPartition[]
timeout_ms int

Returns
\RdKafka\TopicPartition[]

getSubscription()

public getSubscription (  ): array

Returns
array

newTopic()

public newTopic ( string $topic_name, ?\RdKafka\TopicConf $topic_conf = null ): \RdKafka\KafkaConsumerTopic

Parameters
topic_name string
topic_conf ?\RdKafka\TopicConf

Returns
\RdKafka\KafkaConsumerTopic

offsetsForTimes()

public offsetsForTimes ( \RdKafka\TopicPartition[] $topicPartitions, int $timeout_ms ): \RdKafka\TopicPartition[]

Parameters
topicPartitions \RdKafka\TopicPartition[]
timeout_ms int

Returns
\RdKafka\TopicPartition[]

subscribe()

public subscribe ( array $topics ): void

Parameters
topics array

unsubscribe()

public unsubscribe (  ): void

Test Coverage 💛

• 💛 Lines: 87.94% (124 / 141)
• ❤️ Methods: 41.18% (7 / 17)
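
Example

A minimal subscribe/consume/commit loop sketched from the methods documented above. Conf::set() and the configuration property names are assumptions (standard librdkafka properties, as in the rdkafka extension API this library mirrors); broker address, group id, and topic name are placeholders.

```php
<?php
// Sketch: bounded consume loop with manual synchronous commits.
// Conf::set() and the property names are assumed; subscribe(), consume(),
// commit(), and close() are documented on this page.
use RdKafka\Conf;
use RdKafka\KafkaConsumer;

$conf = new Conf();
$conf->set('bootstrap.servers', 'localhost:9092'); // placeholder broker
$conf->set('group.id', 'example-group');
$conf->set('enable.auto.commit', 'false');

$consumer = new KafkaConsumer($conf);
$consumer->subscribe(['example-topic']);

for ($i = 0; $i < 100; $i++) {
    $message = $consumer->consume(1000); // wait up to 1s for the next message
    if ($message->err !== 0) {
        continue; // timeouts and partition EOF are also delivered as "error" messages
    }
    printf(
        "%s [%d] @%d: %s\n",
        $message->topic_name,
        $message->partition,
        $message->offset,
        $message->payload
    );
    $consumer->commit($message); // synchronous commit of this message's offset
}

$consumer->close();
```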

Class KafkaConsumerTopic

Class \RdKafka\KafkaConsumerTopic

Methods

__construct()

public __construct ( \RdKafka\KafkaConsumer $consumer, string $name, ?\RdKafka\TopicConf $conf = null ): 

Parameters
consumer \RdKafka\KafkaConsumer
name string
conf ?\RdKafka\TopicConf

offsetStore()

public offsetStore ( int $partition, int $offset ): void

Parameters
partition int
offset int

Test Coverage 💛

• 💛 Lines: 90% (9 / 10)
• ❤️ Methods: 50% (1 / 2)
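
Example

A sketch of explicit offset storage using the documented KafkaConsumer::newTopic() and offsetStore() methods. The enable.auto.offset.store property and Conf::set() are assumptions (standard librdkafka conventions); the topic name is a placeholder.

```php
<?php
// Sketch: disable automatic offset storage and store offsets only after a
// message has actually been processed.
$conf = new RdKafka\Conf();
$conf->set('bootstrap.servers', 'localhost:9092');      // placeholder broker
$conf->set('group.id', 'example-group');
$conf->set('enable.auto.offset.store', 'false');         // assumed standard property

$consumer = new RdKafka\KafkaConsumer($conf);
$consumer->subscribe(['example-topic']);

$topic = $consumer->newTopic('example-topic');            // documented above

$message = $consumer->consume(1000);
if ($message->err === 0) {
    // ... process the message, then mark it as done; the stored offset is
    // committed later according to the commit settings.
    $topic->offsetStore($message->partition, $message->offset);
}
```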

Class KafkaErrorException

Class \RdKafka\KafkaErrorException

Methods

__construct()

public __construct ( 
    string $message, 
    int $code, 
    string $error_string, 
    bool $isFatal = false, 
    bool $isRetriable = false, 
    bool $transactionRequiresAbort = false
 ): 

Parameters
message string
code int
error_string string
isFatal bool
isRetriable bool
transactionRequiresAbort bool

fromCData()

public static fromCData ( \FFI\CData $error ): mixed

Parameters
error \FFI\CData

Returns
mixed

getErrorString()

public getErrorString (  ): string

Returns
string

isFatal()

public isFatal (  ): bool

Returns
bool

isRetriable()

public isRetriable (  ): bool

Returns
bool

transactionRequiresAbort()

public transactionRequiresAbort (  ): bool

Returns
bool

Test Coverage 💚

• 💚 Lines: 90.91% (20 / 22)
• 💛 Methods: 83.33% (5 / 6)
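
Example

A sketch of classifying a failure with the documented accessor methods. It assumes that the transactional Producer methods throw KafkaErrorException; the thrown exception type is not spelled out on this page, so treat that as an assumption to verify.

```php
<?php
// Sketch: decide between abort, retry, and recreate based on the exception's
// documented flags. $producer is an assumed, already configured \RdKafka\Producer.
use RdKafka\KafkaErrorException;

try {
    $producer->commitTransaction(10000);
} catch (KafkaErrorException $e) {
    if ($e->transactionRequiresAbort()) {
        $producer->abortTransaction(10000);          // give up on this transaction
    } elseif ($e->isRetriable()) {
        // the same call may simply be retried
    } elseif ($e->isFatal()) {
        // the producer instance is unusable; log and recreate it
        error_log('Fatal Kafka error: ' . $e->getErrorString());
    }
}
```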

Class Message

Class \RdKafka\Message

Properties

public int brokerId
public int err
public array headers
public ?string key
public int latency
public ?int len
public int offset
public mixed|null opaque
public int partition
public ?string payload
public int status
public int timestamp
public int timestampType
public ?string topic_name

Methods

__construct()

public __construct ( \FFI\CData $nativeMessage ): 

Parameters
nativeMessage \FFI\CData

errstr()

public errstr (  ): string

Returns
string

Test Coverage 💛

• 💛 Lines: 89.09% (49 / 55)
• ❤️ Methods: 33.33% (1 / 3)
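
Example

A sketch of inspecting a consumed Message through its documented public properties and errstr(); $consumer is assumed to be an existing \RdKafka\KafkaConsumer.

```php
<?php
// Sketch: read the documented Message properties after a consume() call.
// $consumer is an assumed, already subscribed \RdKafka\KafkaConsumer.
$message = $consumer->consume(1000);

if ($message->err !== 0) {
    // errstr() renders the numeric err code as a human-readable string
    error_log('Consume failed: ' . $message->errstr());
} else {
    printf(
        "topic=%s partition=%d offset=%d key=%s timestamp=%d\n",
        $message->topic_name,
        $message->partition,
        $message->offset,
        $message->key ?? '(none)',
        $message->timestamp
    );
    $payload = $message->payload;       // raw message bytes, may be null
    $headers = $message->headers ?? []; // message headers as an array
}
```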

Class Broker

Class \RdKafka\Metadata\Broker

Methods

__construct()

public __construct ( int $id, string $host, int $port ): 

Parameters
id int
host string
port int

getHost()

public getHost (  ): string

Returns
string

getId()

public getId (  ): int

Returns
int

getPort()

public getPort (  ): int

Returns
int

Test Coverage 💚

• 💚 Lines: 100% (6 / 6)
• 💚 Methods: 100% (4 / 4)

Class Collection

Class \RdKafka\Metadata\Collection

• implements \Iterator
• implements \Countable

Methods

__construct()

public __construct ( array $items = [] ): 

Parameters
items array

count()

public count (  ): int

Returns
int

current()

public current (  ): mixed

Returns
mixed

key()

public key (  ): int

Returns
int

next()

public next (  ): void

rewind()

public rewind (  ): void

valid()

public valid (  ): bool

Returns
bool

Test Coverage 💚

• 💚 Lines: 100% (7 / 7)
• 💚 Methods: 100% (7 / 7)
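
Example

Because Collection implements \Iterator and \Countable, it works directly with foreach and count(). A short sketch; $metadata is assumed to be an existing \RdKafka\Metadata instance.

```php
<?php
// Sketch: Collection supports count() and foreach via \Countable and \Iterator.
// $metadata is an assumed \RdKafka\Metadata instance; getBrokers() and the
// Broker getters are documented on their respective pages.
$brokers = $metadata->getBrokers();

printf("cluster has %d broker(s)\n", count($brokers));

foreach ($brokers as $broker) {
    printf("broker %d at %s:%d\n", $broker->getId(), $broker->getHost(), $broker->getPort());
}
```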

Class Partition

Class \RdKafka\Metadata\Partition

Methods

__construct()

public __construct ( 
    int $id, 
    int $err, 
    int $leader, 
    \RdKafka\Metadata\Collection $replicas, 
    \RdKafka\Metadata\Collection $isrs
 ): 

Parameters
id int
err int
leader int
replicas \RdKafka\Metadata\Collection
isrs \RdKafka\Metadata\Collection

getErr()

public getErr (  ): int

Returns
int

getId()

public getId (  ): int

Returns
int

getIsrs()

public getIsrs (  ): \RdKafka\Metadata\Collection

Returns
\RdKafka\Metadata\Collection

getLeader()

public getLeader (  ): int

Returns
int

getReplicas()

public getReplicas (  ): \RdKafka\Metadata\Collection

Returns
\RdKafka\Metadata\Collection

Test Coverage 💛

• 💛 Lines: 90% (9 / 10)
• 💛 Methods: 83.33% (5 / 6)

Class Topic

Class \RdKafka\Metadata\Topic

Methods

__construct()

public __construct ( string $topic, \RdKafka\Metadata\Collection $partitions, int $err ): 

Parameters
topic string
partitions \RdKafka\Metadata\Collection
err int

getErr()

public getErr (  ): int

Returns
int

getPartitions()

public getPartitions (  ): \RdKafka\Metadata\Collection|\RdKafka\Metadata\Partition[]

Returns
\RdKafka\Metadata\Collection|\RdKafka\Metadata\Partition[]

getTopic()

public getTopic (  ): string

Returns
string

Test Coverage 💚

• 💚 Lines: 100% (6 / 6)
• 💚 Methods: 100% (4 / 4)

Class Metadata

Class \RdKafka\Metadata

Methods

__construct()

public __construct ( 
    \RdKafka $kafka, 
    bool $all_topics, 
    ?\RdKafka\Topic $only_topic, 
    int $timeout_ms
 ): 

Parameters
kafka \RdKafka
all_topics bool
only_topic ?\RdKafka\Topic
timeout_ms int

__destruct()

public __destruct (  ): 

getBrokers()

public getBrokers (  ): \RdKafka\Metadata\Collection|\RdKafka\Metadata\Broker[]

Returns
\RdKafka\Metadata\Collection|\RdKafka\Metadata\Broker[]

getOrigBrokerId()

public getOrigBrokerId (  ): int

Returns
int

getOrigBrokerName()

public getOrigBrokerName (  ): string

Returns
string

getTopics()

public getTopics (  ): \RdKafka\Metadata\Collection|\RdKafka\Metadata\Topic[]

Returns
\RdKafka\Metadata\Collection|\RdKafka\Metadata\Topic[]

Test Coverage 💚

• 💚 Lines: 96.15% (50 / 52)
• 💛 Methods: 81.82% (9 / 11)
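
Example

A sketch of requesting metadata for all topics with the documented constructor and walking the topic/partition tree via the Topic and Partition getters; $producer is assumed to be an existing \RdKafka\Producer (any \RdKafka handle should do).

```php
<?php
// Sketch: all_topics = true, only_topic = null, 10s timeout; then iterate the
// Collections returned by getTopics()/getPartitions().
use RdKafka\Metadata;

$metadata = new Metadata($producer, true, null, 10000);

printf("answered by broker %d (%s)\n", $metadata->getOrigBrokerId(), $metadata->getOrigBrokerName());

foreach ($metadata->getTopics() as $topic) {
    printf("topic %s (err=%d)\n", $topic->getTopic(), $topic->getErr());
    foreach ($topic->getPartitions() as $partition) {
        printf("  partition %d, leader broker %d\n", $partition->getId(), $partition->getLeader());
    }
}
```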

Class Producer

Class \RdKafka\Producer

Methods

__construct()

public __construct ( ?\RdKafka\Conf $conf = null ): 

Parameters
conf ?\RdKafka\Conf

abortTransaction()

public abortTransaction ( int $timeout_ms ): void

Parameters
timeout_ms int

addBrokers()

public addBrokers ( string $broker_list ): int

Parameters
broker_list string

Returns
int

beginTransaction()

public beginTransaction (  ): void

commitTransaction()

public commitTransaction ( int $timeout_ms ): void

Parameters
timeout_ms int

flush()

public flush ( int $timeout_ms ): int

Parameters
timeout_ms int

Returns
int

getOutQLen()

public getOutQLen (  ): int

Returns
int

initTransactions()

public initTransactions ( int $timeout_ms ): void

Initializing transactions must be done before producing and starting a transaction.

Parameters
timeout_ms int

newTopic()

public newTopic ( string $topic_name, ?\RdKafka\TopicConf $topic_conf = null ): \RdKafka\ProducerTopic

Parameters
topic_name string
topic_conf ?\RdKafka\TopicConf

Returns
\RdKafka\ProducerTopic

poll()

public poll ( int $timeout_ms ): int

Parameters
timeout_ms int

Returns
int

purge()

public purge ( int $purge_flags ): int

Parameters
purge_flags int

Returns
int

Test Coverage 💛

• 💛 Lines: 78.95% (15 / 19)
• 🧡 Methods: 63.64% (7 / 11)
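
Example

A sketch of a transactional produce built from the Producer methods above and ProducerTopic::produce(). Conf::set() and the transactional.id / bootstrap.servers property names are assumptions (standard librdkafka conventions); broker and topic names are placeholders.

```php
<?php
// Sketch: produce two messages atomically inside one transaction.
// initTransactions() must run before the first beginTransaction(), as noted above.
use RdKafka\Conf;
use RdKafka\Producer;

$conf = new Conf();
$conf->set('bootstrap.servers', 'localhost:9092');   // placeholder broker
$conf->set('transactional.id', 'example-tx-producer');

$producer = new Producer($conf);
$producer->initTransactions(10000);

$topic = $producer->newTopic('example-topic');

$producer->beginTransaction();
$topic->produce(0, 0, 'first payload', 'key-1');      // partition 0, msgflags 0
$topic->produce(0, 0, 'second payload', 'key-2');
$producer->poll(0);                                    // serve delivery callbacks
$producer->commitTransaction(10000);

$producer->flush(10000);                               // wait for remaining deliveries
```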

Class ProducerTopic

Class \RdKafka\ProducerTopic

Methods

__construct()

public __construct ( \RdKafka\Producer $producer, string $name, ?\RdKafka\TopicConf $conf = null ): 

Parameters
producer \RdKafka\Producer
name string
conf ?\RdKafka\TopicConf

produce()

public produce ( 
    int $partition, 
    int $msgflags, 
    ?string $payload = null, 
    ?string $key = null, 
    mixed $opaque = null
 ): void

Parameters
partition int
msgflags int
payload ?string
key ?string
opaque mixed

producev()

public producev ( 
    int $partition, 
    int $msgflags, 
    ?string $payload = null, 
    ?string $key = null, 
    ?array $headers = null, 
    ?int $timestamp_ms = null, 
    mixed $opaque = null
 ): void

Parameters
partition int
msgflags int
payload ?string
key ?string
headers ?array
timestamp_ms ?int
opaque mixed

Test Coverage 💛

• 💛 Lines: 89.66% (52 / 58)
• 🧡 Methods: 60% (3 / 5)
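
Example

A sketch of producev(), which extends produce() with headers and an explicit timestamp. $producer is assumed to be an existing, configured \RdKafka\Producer; the topic name, header name, and payload are placeholders.

```php
<?php
// Sketch: produce a message with headers and an explicit millisecond timestamp,
// following the documented producev() parameter order. Partition 0 and
// msgflags 0 are used for brevity.
$topic = $producer->newTopic('example-topic');

$topic->producev(
    0,                                          // partition
    0,                                          // msgflags
    json_encode(['event' => 'signup']),         // payload
    'user-42',                                  // key
    ['trace-id' => bin2hex(random_bytes(8))],   // headers
    (int) (microtime(true) * 1000)              // timestamp in milliseconds
);

$producer->poll(0);      // serve delivery report callbacks
$producer->flush(10000); // wait for delivery before shutting down
```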

Class Queue

Class \RdKafka\Queue

Methods

__construct()

public __construct ( \FFI\CData $queue ): 

Parameters
queue \FFI\CData

__destruct()

public __destruct (  ): 

consume()

public consume ( int $timeout_ms ): ?\RdKafka\Message

Parameters
timeout_ms int

Returns
?\RdKafka\Message

fromRdKafka()

public static fromRdKafka ( \RdKafka $kafka ): self

Parameters
kafka \RdKafka

Returns
self

getCData()

public getCData (  ): \FFI\CData

Returns
\FFI\CData

poll()

public poll ( int $timeout_ms ): ?\RdKafka\Event

Parameters
timeout_ms int

Returns
?\RdKafka\Event

Test Coverage 💛

• 💛 Lines: 76.92% (20 / 26)
• ❤️ Methods: 50% (3 / 6)
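
Example

A sketch of obtaining a queue from an existing handle with the documented fromRdKafka() factory and draining it. $producer is assumed to be an existing \RdKafka handle (here a Producer); whether a given queue carries Messages (consume()) or Events (poll()) depends on how the queue is used, so treat this as an illustration of the calls rather than a recommended pattern.

```php
<?php
// Sketch: wrap a client handle's queue and drain it with the documented
// poll()/consume() methods. The 500 ms timeouts are arbitrary.
use RdKafka\Queue;

$queue = Queue::fromRdKafka($producer);

// Wait up to 500 ms for the next Event on the queue (null on timeout).
$event = $queue->poll(500);

// Queues that carry consumed messages can be drained with consume() instead.
$message = $queue->consume(500);
if ($message !== null) {
    printf("err=%d payload=%s\n", $message->err, (string) $message->payload);
}
```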

Class ApiKey

Class \RdKafka\Test\ApiKey

Constants

public AddOffsetsToTxn = 25
public AddPartitionsToTxn = 24
public AlterConfigs = 33
public AlterReplicaLogDirs = 34
public ApiVersion = 18
public ControlledShutdown = 7
public CreateAcls = 30
public CreateDelegationToken = 38
public CreatePartitions = 37
public CreateTopics = 19
public DeleteAcls = 31
public DeleteGroups = 42
public DeleteRecords = 21
public DeleteTopics = 20
public DescribeAcls = 29
public DescribeConfigs = 32
public DescribeDelegationToken = 41
public DescribeGroups = 15
public DescribeLogDirs = 35
public EndTxn = 26
public ExpireDelegationToken = 40
public Fetch = 1
public FindCoordinator = 10
public Heartbeat = 12
public InitProducerId = 22
public JoinGroup = 11
public LeaderAndIsr = 4
public LeaveGroup = 13
public ListGroups = 16
public Metadata = 3
public None = -1
public Offset = 2
public OffsetCommit = 8
public OffsetFetch = 9
public OffsetForLeaderEpoch = 23
public Produce = 0
public RenewDelegationToken = 39
public SaslAuthenticate = 36
public SaslHandshake = 17
public StopReplica = 5
public SyncGroup = 14
public TxnOffsetCommit = 28
public UpdateMetadata = 6
public WriteTxnMarkers = 27
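
Example

These constants identify Kafka protocol request types and are used mainly for error injection on a mock cluster. A sketch using the MockCluster methods documented on the next page; error code 7 (RequestTimedOut) is an arbitrary example value.

```php
<?php
// Sketch: make the next two Produce requests against a mock cluster fail.
// MockCluster::create() and pushRequestErrorsArray() are documented on the
// MockCluster page; error code 7 is used purely as an example.
use RdKafka\Test\ApiKey;
use RdKafka\Test\MockCluster;

$cluster = MockCluster::create(1);
$cluster->pushRequestErrorsArray(ApiKey::Produce, 2, [7, 7]);
```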

Class MockCluster

Class \RdKafka\Test\MockCluster

Note: MockCluster is experimental - even in librdkafka! Expect breaking changes within minor versions of this library.

Methods

__destruct()

public __destruct (  ): 

create()

public static create ( int $brokerCount, ?\RdKafka\Conf $conf = null ): self

Parameters
brokerCount int
conf ?\RdKafka\Conf

Returns
self

createTopic()

public createTopic ( string $topic, int $partitionCount, int $replicationFactor ): void

Creates a topic.

This is an alternative to automatic topic creation as performed by the client itself. The Topic Admin API (CreateTopics) is not supported by the mock broker.

Parameters
topic string
partitionCount int
replicationFactor int

fromProducer()

public static fromProducer ( \RdKafka\Producer $producer ): self

Derive the mock cluster from a Producer created by setting the test.mock.num.brokers configuration property.

Parameters
producer \RdKafka\Producer

Returns
self

getBootstraps()

public getBootstraps (  ): string

Returns
string the mock cluster’s bootstrap.servers list
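
Example

A sketch of wiring a producer to a mock cluster via getBootstraps(). Conf::set() and the bootstrap.servers property are assumptions (standard librdkafka conventions); create(), createTopic(), and getBootstraps() are the methods documented above.

```php
<?php
// Sketch: spin up a 3-broker mock cluster, pre-create a topic, and point a
// real producer instance at it.
use RdKafka\Conf;
use RdKafka\Producer;
use RdKafka\Test\MockCluster;

$cluster = MockCluster::create(3);
$cluster->createTopic('test-topic', 12, 3); // 12 partitions, replication factor 3

$conf = new Conf();
$conf->set('bootstrap.servers', $cluster->getBootstraps());

$producer = new Producer($conf);
$topic = $producer->newTopic('test-topic');
$topic->produce(0, 0, 'hello mock cluster');
$producer->flush(10000);
```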

              pushBrokerRequestErrorRtts()

              +
              public pushBrokerRequestErrorRtts ( 
              +    int $brokerId, 
              +    int $apiKey, 
              +    int $count, 
              +    int[] $errorCodeAndRttTuples
              + ): void
              +
              +

              Same as {@link MockCluster::pushBrokerRequestErrors()} but for a specific broker.

              +

              The broker errors take precedence over the cluster errors.

              +
              +
              Parameters
              +
              brokerId int
              +
              apiKey int
              +
              count int
              +
              errorCodeAndRttTuples int[] plain tuples of error code or 0 (int) and response RTT/delay in millisecond (int)
              +
              +

              pushBrokerRequestErrors()

              +
              public pushBrokerRequestErrors ( 
              +    int $brokerId, 
              +    int $apiKey, 
              +    int $count, 
              +    int[] $errorCodes
              + ): void
              +
              +

              Same as {@link MockCluster::pushRequestErrors()} but for a specific broker.

              +

              The broker errors take precedence over the cluster errors.

              +
              +
              Parameters
              +
              brokerId int
              +
              apiKey int
              +
              count int
              +
              errorCodes int[] a list of error codes or 0
              +
              +

              pushRequestErrors()

              +
              public pushRequestErrors ( 
              +    int $apiKey, 
              +    int $count, 
              +    int $errorCodes
              + ): void
              +
              +

              Push cnt errors onto the cluster’s error stack for the given apiKey.

              +

              ApiKey is the Kafka protocol request type, e.g., Produce (0).

              +

              The following cnt protocol requests matching apiKey will fail with the +provided error code and removed from the stack, starting with +the first error code, then the second, etc.

              +
              +
              Parameters
              +
              apiKey int
              +
              count int
              +
              errorCodes int
              +
              +

              pushRequestErrorsArray()

              +
              public pushRequestErrorsArray ( 
              +    int $apiKey, 
              +    int $count, 
              +    int[] $errorCodes
              + ): void
              +
              +

              See {@link MockCluster::pushRequestErrors()}

              +
              +
              Parameters
              +
              apiKey int
              +
              count int
              +
              errorCodes int[]
              +
              +

              setApiVersion()

              +
              public setApiVersion ( 
              +    int $apiKey, 
              +    int $minVersion, 
              +    int $maxVersion
              + ): void
              +
              +

              Set the allowed ApiVersion range for apiKey.

              +

              Set minVersion and maxVersion to -1 to disable the API completely. +MaxVersion MUST not exceed the maximum implemented value.

              +
              +
              Parameters
              +
              apiKey int Protocol request type/key
              +
              minVersion int Minimum version supported (or -1 to disable).
              +
              maxVersion int Maximum version supported (or -1 to disable).
              +
              +

              setBrokerDown()

              +
              public setBrokerDown ( 
              +    int $brokerId
              + ): void
              +
              +

              Disconnects the broker and disallows any new connections.

              +

              This does NOT trigger leader change.

              +
              +
              Parameters
              +
              brokerId int
              +
              +

              setBrokerRack()

              +
              public setBrokerRack ( 
              +    int $brokerId, 
              +    string $rack
              + ): void
              +
              +

              Sets the broker’s rack as reported in Metadata to the client.

              +
              +
              Parameters
              +
              brokerId int
              +
              rack string
              +
              +

              setBrokerUp()

              +
              public setBrokerUp ( 
              +    int $brokerId
              + ): void
              +
              +

              Makes the broker accept connections again.

              +

              This does NOT trigger leader change.

              +
              +
              Parameters
              +
              brokerId int
              +
              +
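              A small sketch simulating a temporary broker outage with the two methods above (assuming $cluster is an existing MockCluster instance and broker 1 exists):

              // Take broker 1 offline, exercise the client's retry/failover path, then bring it back.
              $cluster->setBrokerDown(1);
              // ... run the code under test against the mock cluster ...
              $cluster->setBrokerUp(1);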

              setCoordinator()

              +
              public setCoordinator ( 
              +    string $keyType, 
              +    string $key, 
              +    int $brokerId
              + ): void
              +
              +

              Explicitly sets the coordinator.

              +

              If this API is not used, a standard hashing scheme will be used.

              +
              +
              Parameters
              +
              keyType string “transaction” or “group”
              +
              key string The transactional.id or group.id
              +
              brokerId int The new coordinator, does not have to be a valid broker.
              +
              +
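              For example (assuming $cluster is an existing MockCluster instance; the group name is arbitrary):

              // Make broker 2 the group coordinator for the consumer group "example-group".
              $cluster->setCoordinator('group', 'example-group', 2);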

              setPartitionFollower()

              +
              public setPartitionFollower ( 
              +    string $topic, 
              +    int $partition, 
              +    int $brokerId
              + ): void
              +
              +

              Sets the partition’s preferred replica / follower.

              +

              The topic will be created if it does not exist.

              +
              +
              Parameters
              +
              topic string
              +
              partition int
              +
              brokerId int does not need to point to an existing broker.
              +
              +

              setPartitionFollowerWatermarks()

              +
              public setPartitionFollowerWatermarks ( 
              +    string $topic, 
              +    int $partition, 
              +    int $low, 
              +    int $high
              + ): void
              +
              +

              Sets the partition’s preferred replica / follower low and high watermarks.

              +

              The topic will be created if it does not exist. Setting an offset to -1 will revert to the leader’s corresponding watermark.

              +
              +
              Parameters
              +
              topic string
              +
              partition int
              +
              low int
              +
              high int
              +
              +

              setPartitionLeader()

              +
              public setPartitionLeader ( 
              +    string $topic, 
              +    int $partition, 
              +    int $brokerId
              + ): void
              +
              +

              Sets the partition leader.

              +

              The topic will be created if it does not exist.

              +
              +
              Parameters
              +
              topic string
              +
              partition int
              +
              brokerId int needs to be an existing broker
              +
              +
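              A minimal sketch (assuming $cluster is an existing MockCluster instance and broker 1 exists; the topic name is arbitrary):

              // Assign broker 1 as leader for partition 0 of "playground"; the topic is created if missing.
              $cluster->setPartitionLeader('playground', 0, 1);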

              setRtt()

              +
              public setRtt ( 
              +    int $brokerId, 
              +    int $roundTripTimeDelayMs
              + ): void
              +
              +

              Set broker round-trip-time delay in milliseconds.

              +
              +
              Parameters
              +
              brokerId int
              +
              roundTripTimeDelayMs int
              +
              +

              setTopicError()

              +
              public setTopicError ( 
              +    string $topic, 
              +    int $errorCode
              + ): void
              +
              +

              Set the topic error to return in protocol requests.

              +

              Currently only used for TopicMetadataRequest and AddPartitionsToTxnRequest.

              +
              +
              Parameters
              +
              topic string
              +
              errorCode int
              +
              +
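              For example (assuming $cluster is an existing MockCluster instance; the topic name is arbitrary and the error constant is documented on the constants page):

              // Have metadata requests for "playground" report an authorization failure.
              $cluster->setTopicError('playground', RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED);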

              Test Coverage 🧡

              +
                +
              • 🧡 + Lines: 67.71% (65 / 96)
              • +
              • ❤️ + Methods: 35% (7 / 20)
              • +
              + + + + + + + + + + + + + + + + +
              +
              + + + +
              + +
              + + + +
              +
              +
              +
              + + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/Topic/index.html b/api/RdKafka/Topic/index.html new file mode 100644 index 00000000..e0eec450 --- /dev/null +++ b/api/RdKafka/Topic/index.html @@ -0,0 +1,2984 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Topic - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
              + + + + Skip to content + + +
              +
              + +
              + + + + +
              + + +
              + +
              + + + + + + + + + +
              +
              + + + +
              +
              +
              + + + + + + + +
              +
              +
              + + + +
              +
              +
              + + + +
              +
              +
              + + + +
              +
              + + + + + + + +

              Class Topic

              +

              abstract Class \RdKafka\Topic

              +

              Methods

              +

              __construct()

              +
              public __construct ( 
              +    \RdKafka $kafka, 
              +    string $name, 
              +    ?\RdKafka\TopicConf $conf = null
              + ): 
              +
              +
              +
              Parameters
              +
              kafka \RdKafka
              +
              name string
              +
              conf ?\RdKafka\TopicConf
              +
              +

              __destruct()

              +
              public __destruct (  ): 
              +
              +

              getCData()

              +
              public getCData (  ): \FFI\CData
              +
              +
              +
              Returns
              +
              \FFI\CData
              +
              +

              getName()

              +
              public getName (  ): string
              +
              +
              +
              Returns
              +
              string
              +
              +

              Test Coverage 💛

              +
                +
              • 💛 + Lines: 88.24% (15 / 17)
              • +
              • 💛 + Methods: 80% (4 / 5)
              • +
              +

              Extended by

              + + + + + + + + + + + + + + + + + +
              +
              + + + +
              + +
              + + + +
              +
              +
              +
              + + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/TopicConf/index.html b/api/RdKafka/TopicConf/index.html new file mode 100644 index 00000000..0f7c4273 --- /dev/null +++ b/api/RdKafka/TopicConf/index.html @@ -0,0 +1,3091 @@ + + + + + + + + + + + + + + + + + + + + + + + + + TopicConf - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
              + + + + Skip to content + + +
              +
              + +
              + + + + +
              + + +
              + +
              + + + + + + + + + +
              +
              + + + +
              +
              +
              + + + + + + + +
              +
              +
              + + + +
              +
              +
              + + + +
              +
              +
              + + + +
              +
              + + + + + + + +

              Class TopicConf

              +

              Class \RdKafka\TopicConf

              + +

              Methods

              +

              __construct()

              +
              public __construct (  ): 
              +
              +

              __destruct()

              +
              public __destruct (  ): 
              +
              +

              dump()

              +
              public dump (  ): array
              +
              +
              +
              Returns
              +
              array
              +
              +

              getCData()

              +
              public getCData (  ): \FFI\CData
              +
              +
              +
              Returns
              +
              \FFI\CData
              +
              +

              getOpaque()

              +
              public getOpaque (  ): mixed|null
              +
              +
              +
              Returns
              +
              mixed|null
              +
              +

              set()

              +
              public set ( 
              +    string $name, 
              +    string $value
              + ): void
              +
              +
              +
              Parameters
              +
              name string
              +
              value string
              +
              +

              setOpaque()

              +
              public setOpaque ( 
              +    mixed $opaque
              + ): void
              +
              +
              +
              Parameters
              +
              opaque mixed
              +
              +

              setPartitioner()

              +
              public setPartitioner ( 
              +    int $partitioner
              + ): void
              +
              +
              +
              Parameters
              +
              partitioner int
              +
              +

              setPartitionerCb()

              +
              public setPartitionerCb ( 
              +    callable $callback
              + ): void
              +
              +
              +
              Parameters
              +
              callback callable
              +
              +
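              A short usage sketch combining the setters above (the configuration property name is only an example, and the partitioner constant is documented on the constants page):

              $topicConf = new \RdKafka\TopicConf();
              $topicConf->set('message.timeout.ms', '5000');
              // Select the murmur2 partitioner for this topic.
              $topicConf->setPartitioner(RD_KAFKA_MSG_PARTITIONER_MURMUR2);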

              Test Coverage 💛

              +
                +
              • 💛 + Lines: 76.19% (48 / 63)
              • +
              • 🧡 + Methods: 66.67% (6 / 9)
              • +
              + + + + + + + + + + + + + + + + +
              +
              + + + +
              + +
              + + + +
              +
              +
              +
              + + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/TopicPartition/index.html b/api/RdKafka/TopicPartition/index.html new file mode 100644 index 00000000..e40b85d8 --- /dev/null +++ b/api/RdKafka/TopicPartition/index.html @@ -0,0 +1,3203 @@ + + + + + + + + + + + + + + + + + + + + + + + + + TopicPartition - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
              + + + + Skip to content + + +
              +
              + +
              + + + + +
              + + +
              + +
              + + + + + + + + + +
              +
              + + + +
              +
              +
              + + + + + + + +
              +
              +
              + + + + + + + +
              +
              + + + + + + + +

              Class TopicPartition

              +

              Class \RdKafka\TopicPartition

              +

              Methods

              +

              __construct()

              +
              public __construct ( 
              +    string $topic, 
              +    int $partition, 
              +    int $offset, 
              +    string|null $metadata = null
              + ): 
              +
              +
              +
              Parameters
              +
              topic string
              +
              partition int
              +
              offset int
              +
              metadata string|null requires librdkafka >= 1.2.0
              +
              +
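              For example (topic name and partition are arbitrary; RD_KAFKA_OFFSET_BEGINNING is documented on the constants page):

              $topicPartition = new \RdKafka\TopicPartition('playground', 0, RD_KAFKA_OFFSET_BEGINNING);
              echo $topicPartition->getTopic(), ':', $topicPartition->getPartition();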

              fromCData()

              +
              public static fromCData ( 
              +    \FFI\CData $topicPartition
              + ): self
              +
              +
              +
              Parameters
              +
              topicPartition \FFI\CData
              +
              Returns
              +
              self
              +
              +

              getErr()

              +
              public getErr (  ): ?int
              +
              +
              +
              Returns
              +
              ?int
              +
              +

              getMetadata()

              +
              public getMetadata (  ): ?string
              +
              +
              +
              Returns
              +
              ?string
              +
              +

              getMetadataSize()

              +
              public getMetadataSize (  ): int
              +
              +
              +
              Returns
              +
              int
              +
              +

              getOffset()

              +
              public getOffset (  ): int
              +
              +
              +
              Returns
              +
              int
              +
              +

              getOpqaque()

              +
              public getOpqaque (  ): ?object
              +
              +
              +
              Returns
              +
              ?object
              +
              +

              getPartition()

              +
              public getPartition (  ): int
              +
              +
              +
              Returns
              +
              int
              +
              +

              getTopic()

              +
              public getTopic (  ): string
              +
              +
              +
              Returns
              +
              string
              +
              +

              setMetadata()

              +
              public setMetadata ( 
              +    ?string $metadata
              + ): void
              +
              +
              +
              Parameters
              +
              metadata ?string
              +
              +

              setOffset()

              +
              public setOffset ( 
              +    int $offset
              + ): void
              +
              +
              +
              Parameters
              +
              offset int
              +
              +

              setPartition()

              +
              public setPartition ( 
              +    int $partition
              + ): void
              +
              +
              +
              Parameters
              +
              partition int
              +
              +

              setTopic()

              +
              public setTopic ( 
              +    string $topic_name
              + ): void
              +
              +
              +
              Parameters
              +
              topic_name string
              +
              +

              Test Coverage 💚

              +
                +
              • 💚 + Lines: 100% (30 / 30)
              • +
              • 💚 + Methods: 100% (13 / 13)
              • +
              + + + + + + + + + + + + + + + + +
              +
              + + + +
              + +
              + + + +
              +
              +
              +
              + + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/TopicPartitionList/index.html b/api/RdKafka/TopicPartitionList/index.html new file mode 100644 index 00000000..e754611a --- /dev/null +++ b/api/RdKafka/TopicPartitionList/index.html @@ -0,0 +1,3110 @@ + + + + + + + + + + + + + + + + + + + + + + + + + TopicPartitionList - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
              + + + + Skip to content + + +
              +
              + +
              + + + + +
              + + +
              + +
              + + + + + + + + + +
              +
              + + + +
              +
              +
              + + + + + + + +
              +
              +
              + + + +
              +
              +
              + + + +
              +
              +
              + + + +
              +
              + + + + + + + +

              Class TopicPartitionList

              +

              Class \RdKafka\TopicPartitionList

              +
                +
              • implements \Iterator
              • +
              • implements \Countable
              • +
              +

              Methods

              +

              __construct()

              +
              public __construct ( 
              +    \RdKafka\TopicPartition[] $items
              + ): 
              +
              +
              +
              Parameters
              +
              items \RdKafka\TopicPartition[]
              +
              +

              asArray()

              +
              public asArray (  ): \RdKafka\TopicPartition[]
              +
              +
              +
              Returns
              +
              \RdKafka\TopicPartition[]
              +
              +

              count()

              +
              public count (  ): int
              +
              +
              +
              Returns
              +
              int
              +
              +

              current()

              +
              public current (  ): \RdKafka\TopicPartition
              +
              +
              +
              Returns
              +
              \RdKafka\TopicPartition
              +
              +

              fromCData()

              +
              public static fromCData ( 
              +    \FFI\CData $topicPartitionList
              + ): self
              +
              +
              +
              Parameters
              +
              topicPartitionList \FFI\CData
              +
              Returns
              +
              self
              +
              +

              getCData()

              +
              public getCData (  ): \FFI\CData
              +
              +
              +
              Returns
              +
              \FFI\CData
              +
              +

              key()

              +
              public key (  ): int
              +
              +
              +
              Returns
              +
              int
              +
              +

              next()

              +
              public next (  ): void
              +
              +

              rewind()

              +
              public rewind (  ): void
              +
              +

              valid()

              +
              public valid (  ): bool
              +
              +
              +
              Returns
              +
              bool
              +
              +
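              Because the class implements \Iterator and \Countable, a list can be consumed with foreach and count() directly. A minimal sketch following the constructor signature documented above (topic names and offsets are arbitrary):

              // Per the signature above, the constructor takes an array of TopicPartition items.
              $list = new \RdKafka\TopicPartitionList([
                  new \RdKafka\TopicPartition('playground', 0, RD_KAFKA_OFFSET_BEGINNING),
                  new \RdKafka\TopicPartition('playground', 1, RD_KAFKA_OFFSET_BEGINNING),
              ]);
              echo count($list); // 2
              foreach ($list as $topicPartition) {
                  echo $topicPartition->getPartition(), PHP_EOL;
              }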

              Test Coverage 💚

              +
                +
              • 💚 + Lines: 96.3% (26 / 27)
              • +
              • 💛 + Methods: 90% (9 / 10)
              • +
              + + + + + + + + + + + + + + + + +
              +
              + + + +
              + +
              + + + +
              +
              +
              +
              + + + + + + + + + + \ No newline at end of file diff --git a/api/RdKafka/index.html b/api/RdKafka/index.html new file mode 100644 index 00000000..34f4e372 --- /dev/null +++ b/api/RdKafka/index.html @@ -0,0 +1,3197 @@ + + + + + + + + + + + + + + + + + + + + + + + + + RdKafka - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
              + + + + Skip to content + + +
              +
              + +
              + + + + +
              + + +
              + +
              + + + + + + + + + +
              +
              + + + +
              +
              +
              + + + + + + + +
              +
              +
              + + + + + + + +
              +
              + + + + + + + +

              Class RdKafka

              +

              abstract Class \RdKafka

              +

              Methods

              +

              __construct()

              +
              public __construct ( 
              +    int $type, 
              +    ?\RdKafka\Conf $conf = null
              + ): 
              +
              +
              +
              Parameters
              +
              type int
              +
              conf ?\RdKafka\Conf
              +
              +

              __destruct()

              +
              public __destruct (  ): 
              +
              +

              getCData()

              +
              public getCData (  ): \FFI\CData
              +
              +
              +
              Returns
              +
              \FFI\CData
              +
              +

              getMetadata()

              +
              public getMetadata ( 
              +    bool $all_topics, 
              +    ?\RdKafka\Topic $only_topic, 
              +    int $timeout_ms
              + ): \RdKafka\Metadata
              +
              +
              +
              Parameters
              +
              all_topics bool
              +
              only_topic ?\RdKafka\Topic
              +
              timeout_ms int
              +
              Returns
              +
              \RdKafka\Metadata
              +
              +

              getOpaque()

              +
              public getOpaque (  ): mixed|null
              +
              +
              +
              Returns
              +
              mixed|null
              +
              +

              pausePartitions()

              +
              public pausePartitions ( 
              +    \TopicPartition[] $topicPartitions
              + ): \TopicPartition[]
              +
              +
              +
              Parameters
              +
              topicPartitions \TopicPartition[]
              +
              Returns
              +
              \TopicPartition[]
              +
              +

              queryWatermarkOffsets()

              +
              public queryWatermarkOffsets ( 
              +    string $topic, 
              +    int $partition, 
              +    int &$low, 
              +    int &$high, 
              +    int $timeout_ms
              + ): void
              +
              +
              +
              Parameters
              +
              topic string
              +
              partition int
              +
              low int
              +
              high int
              +
              timeout_ms int
              +
              +
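              A hedged sketch (assuming $kafka is a concrete \RdKafka instance such as a consumer or producer; topic, partition, and timeout are placeholders):

              $low = 0;
              $high = 0;
              // Low and high watermarks are returned via the by-reference parameters.
              $kafka->queryWatermarkOffsets('playground', 0, $low, $high, 10000);
              echo "low=$low high=$high";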

              resolveFromCData()

              +
              public static resolveFromCData ( 
              +    ?\FFI\CData $kafka = null
              + ): ?self
              +
              +
              +
              Parameters
              +
              kafka ?\FFI\CData
              +
              Returns
              +
              ?self
              +
              +

              resumePartitions()

              +
              public resumePartitions ( 
              +    \TopicPartition[] $topicPartitions
              + ): \TopicPartition[]
              +
              +
              +
              Parameters
              +
              topicPartitions \TopicPartition[]
              +
              Returns
              +
              \TopicPartition[]
              +
              +
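              A minimal sketch of pausing and resuming (assuming $kafka is a concrete \RdKafka instance; the TopicPartition offset is a placeholder since only topic and partition matter here):

              $partitions = [new \RdKafka\TopicPartition('playground', 0, 0)];
              $kafka->pausePartitions($partitions);
              // ... later, resume fetching/producing for the same partitions ...
              $kafka->resumePartitions($partitions);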

              setLogLevel()

              +
              public setLogLevel ( 
              +    int $level
              + ): void
              +
              +
              +
              Parameters
              +
              level int
              +
              +
              +

              Deprecated

              +

              Set via Conf parameter log_level instead

              +
              +

              setLogger()

              +
              public setLogger ( 
              +    int $logger
              + ): void
              +
              +
              +
              Parameters
              +
              logger int
              +
              +
              +

              Deprecated

              +

              Use Conf::setLogCb instead

              +
              +

              Test Coverage 💛

              +
                +
              • 💛 + Lines: 85.33% (64 / 75)
              • +
              • ❤️ + Methods: 40% (6 / 15)
              • +
              +

              Extended by

              + + + + + + + + + + + + + + + + + +
              +
              + + + +
              + +
              + + + +
              +
              +
              +
              + + + + + + + + + + \ No newline at end of file diff --git a/api/constants/index.html b/api/constants/index.html new file mode 100644 index 00000000..a30f500a --- /dev/null +++ b/api/constants/index.html @@ -0,0 +1,37205 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Constants - PHP Kafka Client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
              + + + + Skip to content + + +
              +
              + +
              + + + + +
              + + +
              + +
              + + + + + + + + + +
              +
              + + + +
              +
              +
              + + + + + + + +
              +
              +
              + + + +
              +
              +
              + + + +
              +
              +
              + + + +
              +
              + + + + + + + +

              Constants

              +

              RD_KAFKA_LOG_PRINT

              +
              public RD_KAFKA_LOG_PRINT = 100
              +
              +

              RD_KAFKA_LOG_SYSLOG

              +
              public RD_KAFKA_LOG_SYSLOG = 101
              +
              +

              RD_KAFKA_LOG_SYSLOG_PRINT

              +
              public RD_KAFKA_LOG_SYSLOG_PRINT = 102
              +
              +

              RD_KAFKA_MSG_PARTITIONER_RANDOM

              +
              public RD_KAFKA_MSG_PARTITIONER_RANDOM = 2
              +
              +

              RD_KAFKA_MSG_PARTITIONER_CONSISTENT

              +
              public RD_KAFKA_MSG_PARTITIONER_CONSISTENT = 3
              +
              +

              RD_KAFKA_MSG_PARTITIONER_CONSISTENT_RANDOM

              +
              public RD_KAFKA_MSG_PARTITIONER_CONSISTENT_RANDOM = 4
              +
              +

              RD_KAFKA_MSG_PARTITIONER_MURMUR2

              +
              public RD_KAFKA_MSG_PARTITIONER_MURMUR2 = 5
              +
              +

              RD_KAFKA_MSG_PARTITIONER_MURMUR2_RANDOM

              +
              public RD_KAFKA_MSG_PARTITIONER_MURMUR2_RANDOM = 6
              +
              +

              RD_KAFKA_MSG_PARTITIONER_FNV1A

              +
              public RD_KAFKA_MSG_PARTITIONER_FNV1A = 7
              +
              +

              RD_KAFKA_MSG_PARTITIONER_FNV1A_RANDOM

              +
              public RD_KAFKA_MSG_PARTITIONER_FNV1A_RANDOM = 8
              +
              +

              RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE

              +
              public RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE = 8
              +
              +

              Flags for rd_kafka_destroy_flags()

              +

              Don't call consumer_close() to leave group and commit final offsets.

              +

              This also prevents consumer callbacks, such as rebalance_cb, from being called during rd_kafka_destroy*().

              +

              The consumer group handler is still closed internally, but from an application perspective none of the functionality from consumer_close() is performed.

              + + +

              RD_KAFKA_PARTITION_UA

              +
              public RD_KAFKA_PARTITION_UA = -1
              +
              +

              Unassigned partition.

              +

              The unassigned partition is used by the producer API for messages that should be partitioned using the configured or default partitioner.

              + + +
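              A hedged producer sketch (the Producer class, Conf::set(), newTopic(), produce(), and flush() are assumed from the wider php-rdkafka style API and are not documented on this page; the broker address and topic are placeholders):

              $conf = new \RdKafka\Conf();
              $conf->set('bootstrap.servers', '127.0.0.1:9092');
              $producer = new \RdKafka\Producer($conf);
              $topic = $producer->newTopic('playground');
              // RD_KAFKA_PARTITION_UA lets the configured or default partitioner pick the partition.
              $topic->produce(RD_KAFKA_PARTITION_UA, 0, 'payload', 'key');
              $producer->flush(10000);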

              RD_KAFKA_OFFSET_BEGINNING

              +
              public RD_KAFKA_OFFSET_BEGINNING = -2
              +
              +

              Start consuming from beginning of kafka partition queue: oldest msg

              + + +

              RD_KAFKA_OFFSET_END

              +
              public RD_KAFKA_OFFSET_END = -1
              +
              +

              Start consuming from end of kafka partition queue: next msg

              + + +

              RD_KAFKA_OFFSET_STORED

              +
              public RD_KAFKA_OFFSET_STORED = -1000
              +
              +

              Start consuming from offset retrieved from offset store

              + + +
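              A hedged low-level consumer sketch using these offset constants (the Consumer class, newTopic(), consumeStart(), consume(), and consumeStop() are assumed from the wider php-rdkafka style API; the broker address and topic are placeholders):

              $conf = new \RdKafka\Conf();
              $conf->set('bootstrap.servers', '127.0.0.1:9092');
              $consumer = new \RdKafka\Consumer($conf);
              $topic = $consumer->newTopic('playground');
              // Start reading partition 0 from the oldest available message.
              $topic->consumeStart(0, RD_KAFKA_OFFSET_BEGINNING);
              $message = $topic->consume(0, 10000);
              $topic->consumeStop(0);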

              RD_KAFKA_OFFSET_INVALID

              +
              public RD_KAFKA_OFFSET_INVALID = -1001
              +
              +

              Invalid offset

              + + +

              RD_KAFKA_OFFSET_TAIL_BASE

              +

              public RD_KAFKA_OFFSET_TAIL_BASE = -2000
              +
              +define

              +

              RD_KAFKA_MSG_F_FREE

              +
              public RD_KAFKA_MSG_F_FREE = 1
              +
              +

              Producer message flags.

              +

              Delegate freeing of payload to rdkafka.

              + + +

              RD_KAFKA_MSG_F_COPY

              +
              public RD_KAFKA_MSG_F_COPY = 2
              +
              +

              rdkafka will make a copy of the payload.

              + + +

              RD_KAFKA_MSG_F_BLOCK

              +
              public RD_KAFKA_MSG_F_BLOCK = 4
              +
              +

              Block produce*() on message queue full. WARNING: If a delivery report callback is used, the application MUST call rd_kafka_poll() (or equiv.) to make sure delivered messages are drained from the internal delivery report queue. Failure to do so will result in indefinitely blocking on the produce() call when the message queue is full.

              + + +

              RD_KAFKA_MSG_F_PARTITION

              +
              public RD_KAFKA_MSG_F_PARTITION = 8
              +
              +

              produce_batch() will honor per-message partition.

              + + +

              RD_KAFKA_PURGE_F_QUEUE

              +
              public RD_KAFKA_PURGE_F_QUEUE = 1
              +
              +

              Flags for rd_kafka_purge()

              +

              Purge messages in internal queues.

              + + +

              RD_KAFKA_PURGE_F_INFLIGHT

              +
              public RD_KAFKA_PURGE_F_INFLIGHT = 2
              +
              +

              Purge messages in-flight to or from the broker. Purging these messages will void any future acknowledgements from the broker, making it impossible for the application to know if these messages were successfully delivered or not. Retrying these messages may lead to duplicates.

              + + +

              RD_KAFKA_PURGE_F_NON_BLOCKING

              +
              public RD_KAFKA_PURGE_F_NON_BLOCKING = 4
              +
              +

              Don't wait for background thread queue purging to finish.

              + + +
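              A hedged sketch combining the purge flags above (Producer::purge() is assumed from the wider php-rdkafka style API; $producer stands for an existing producer instance):

              // Drop queued and in-flight messages without waiting for the background purge to finish.
              $producer->purge(RD_KAFKA_PURGE_F_QUEUE | RD_KAFKA_PURGE_F_INFLIGHT | RD_KAFKA_PURGE_F_NON_BLOCKING);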

              RD_KAFKA_EVENT_NONE

              +
              public RD_KAFKA_EVENT_NONE = 0
              +
              +

              Unset value

              + + +

              RD_KAFKA_EVENT_DR

              +
              public RD_KAFKA_EVENT_DR = 1
              +
              +

              Producer Delivery report batch

              + + +

              RD_KAFKA_EVENT_FETCH

              +
              public RD_KAFKA_EVENT_FETCH = 2
              +
              +

              Fetched message (consumer)

              + + +

              RD_KAFKA_EVENT_LOG

              +
              public RD_KAFKA_EVENT_LOG = 4
              +
              +

              Log message

              + + +

              RD_KAFKA_EVENT_ERROR

              +
              public RD_KAFKA_EVENT_ERROR = 8
              +
              +

              Error

              + + +

              RD_KAFKA_EVENT_REBALANCE

              +
              public RD_KAFKA_EVENT_REBALANCE = 16
              +
              +

              Group rebalance (consumer)

              + + +

              RD_KAFKA_EVENT_OFFSET_COMMIT

              +
              public RD_KAFKA_EVENT_OFFSET_COMMIT = 32
              +
              +

              Offset commit result

              + + +

              RD_KAFKA_EVENT_STATS

              +
              public RD_KAFKA_EVENT_STATS = 64
              +
              +

              Stats

              + + +

              RD_KAFKA_EVENT_CREATETOPICS_RESULT

              +
              public RD_KAFKA_EVENT_CREATETOPICS_RESULT = 100
              +
              +

              CreateTopics_result_t

              + + +

              RD_KAFKA_EVENT_DELETETOPICS_RESULT

              +
              public RD_KAFKA_EVENT_DELETETOPICS_RESULT = 101
              +
              +

              DeleteTopics_result_t

              + + +

              RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT

              +
              public RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT = 102
              +
              +

              CreatePartitions_result_t

              + + +

              RD_KAFKA_EVENT_ALTERCONFIGS_RESULT

              +
              public RD_KAFKA_EVENT_ALTERCONFIGS_RESULT = 103
              +
              +

              AlterConfigs_result_t

              + + +

              RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT

              +
              public RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT = 104
              +
              +

              DescribeConfigs_result_t

              + + +

              RD_KAFKA_PRODUCER

              +
              public RD_KAFKA_PRODUCER = 0
              +
              +

              Producer client

              + + +

              RD_KAFKA_CONSUMER

              +
              public RD_KAFKA_CONSUMER = 1
              +
              +

              Consumer client

              + + +

              RD_KAFKA_TIMESTAMP_NOT_AVAILABLE

              +
              public RD_KAFKA_TIMESTAMP_NOT_AVAILABLE = 0
              +
              +

              Timestamp not available

              + + +

              RD_KAFKA_TIMESTAMP_CREATE_TIME

              +
              public RD_KAFKA_TIMESTAMP_CREATE_TIME = 1
              +
              +

              Message creation time

              + + +

              RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME

              +
              public RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME = 2
              +
              +

              Log append time

              + + +

              RD_KAFKA_RESP_ERR__BEGIN

              +
              public RD_KAFKA_RESP_ERR__BEGIN = -200
              +
              +

              Begin internal error codes

              + + +

              RD_KAFKA_RESP_ERR__BAD_MSG

              +
              public RD_KAFKA_RESP_ERR__BAD_MSG = -199
              +
              +

              Received message is incorrect

              + + +

              RD_KAFKA_RESP_ERR__BAD_COMPRESSION

              +
              public RD_KAFKA_RESP_ERR__BAD_COMPRESSION = -198
              +
              +

              Bad/unknown compression

              + + +

              RD_KAFKA_RESP_ERR__DESTROY

              +
              public RD_KAFKA_RESP_ERR__DESTROY = -197
              +
              +

              Broker is going away

              + + +

              RD_KAFKA_RESP_ERR__FAIL

              +
              public RD_KAFKA_RESP_ERR__FAIL = -196
              +
              +

              Generic failure

              + + +

              RD_KAFKA_RESP_ERR__TRANSPORT

              +
              public RD_KAFKA_RESP_ERR__TRANSPORT = -195
              +
              +

              Broker transport failure

              + + +

              RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE

              +
              public RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = -194
              +
              +

              Critical system resource

              + + +

              RD_KAFKA_RESP_ERR__RESOLVE

              +
              public RD_KAFKA_RESP_ERR__RESOLVE = -193
              +
              +

              Failed to resolve broker

              + + +

              RD_KAFKA_RESP_ERR__MSG_TIMED_OUT

              +
              public RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = -192
              +
              +

              Produced message timed out

              + + +

              RD_KAFKA_RESP_ERR__PARTITION_EOF

              +
              public RD_KAFKA_RESP_ERR__PARTITION_EOF = -191
              +
              +

              Reached the end of the topic+partition queue on the broker. Not really an error. This event is disabled by default, see the enable.partition.eof configuration property.

              + + +

              RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION

              +
              public RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = -190
              +
              +

              Permanent: Partition does not exist in cluster.

              + + +

              RD_KAFKA_RESP_ERR__FS

              +
              public RD_KAFKA_RESP_ERR__FS = -189
              +
              +

              File or filesystem error

              + + +

              RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC

              +
              public RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = -188
              +
              +

              Permanent: Topic does not exist in cluster.

              + + +

              RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN

              +
              public RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = -187
              +
              +

              All broker connections are down.

              + + +

              RD_KAFKA_RESP_ERR__INVALID_ARG

              +
              public RD_KAFKA_RESP_ERR__INVALID_ARG = -186
              +
              +

              Invalid argument, or invalid configuration

              + + +

              RD_KAFKA_RESP_ERR__TIMED_OUT

              +
              public RD_KAFKA_RESP_ERR__TIMED_OUT = -185
              +
              +

              Operation timed out

              + + +

              RD_KAFKA_RESP_ERR__QUEUE_FULL

              +
              public RD_KAFKA_RESP_ERR__QUEUE_FULL = -184
              +
              +

              Queue is full

              + + +

              RD_KAFKA_RESP_ERR__ISR_INSUFF

              +
              public RD_KAFKA_RESP_ERR__ISR_INSUFF = -183
              +
              +

              ISR count < required.acks

              + + +

              RD_KAFKA_RESP_ERR__NODE_UPDATE

              +
              public RD_KAFKA_RESP_ERR__NODE_UPDATE = -182
              +
              +

              Broker node update

              + + +

              RD_KAFKA_RESP_ERR__SSL

              +
              public RD_KAFKA_RESP_ERR__SSL = -181
              +
              +

              SSL error

              + + +

              RD_KAFKA_RESP_ERR__WAIT_COORD

              +
              public RD_KAFKA_RESP_ERR__WAIT_COORD = -180
              +
              +

              Waiting for coordinator to become available.

              + + +

              RD_KAFKA_RESP_ERR__UNKNOWN_GROUP

              +
              public RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = -179
              +
              +

              Unknown client group

              + + +

              RD_KAFKA_RESP_ERR__IN_PROGRESS

              +
              public RD_KAFKA_RESP_ERR__IN_PROGRESS = -178
              +
              +

              Operation in progress

              + + +

              RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS

              +
              public RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = -177
              +
              +

              Previous operation in progress, wait for it to finish.

              + + +

              RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION

              +
              public RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = -176
              +
              +

              This operation would interfere with an existing subscription

              + + +

              RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS

              +
              public RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = -175
              +
              +

              Assigned partitions (rebalance_cb)

              + + +

              RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS

              +
              public RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = -174
              +
              +

              Revoked partitions (rebalance_cb)

              + + +

              RD_KAFKA_RESP_ERR__CONFLICT

              +
              public RD_KAFKA_RESP_ERR__CONFLICT = -173
              +
              +

              Conflicting use

              + + +

              RD_KAFKA_RESP_ERR__STATE

              +
              public RD_KAFKA_RESP_ERR__STATE = -172
              +
              +

              Wrong state

              + + +

              RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL

              +
              public RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = -171
              +
              +

              Unknown protocol

              + + +

              RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED

              +
              public RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = -170
              +
              +

              Not implemented

              + + +

              RD_KAFKA_RESP_ERR__AUTHENTICATION

              +
              public RD_KAFKA_RESP_ERR__AUTHENTICATION = -169
              +
              +

              Authentication failure

              + + +

              RD_KAFKA_RESP_ERR__NO_OFFSET

              +
              public RD_KAFKA_RESP_ERR__NO_OFFSET = -168
              +
              +

              No stored offset

              + + +

              RD_KAFKA_RESP_ERR__OUTDATED

              +
              public RD_KAFKA_RESP_ERR__OUTDATED = -167
              +
              +

              Outdated

              + + +

              RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE

              +
              public RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = -166
              +
              +

              Timed out in queue

              + + +

              RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE

              +
              public RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = -165
              +
              +

              Feature not supported by broker

              + + +

              RD_KAFKA_RESP_ERR__WAIT_CACHE

              +
              public RD_KAFKA_RESP_ERR__WAIT_CACHE = -164
              +
              +

              Awaiting cache update

              + + +

              RD_KAFKA_RESP_ERR__INTR

              +
              public RD_KAFKA_RESP_ERR__INTR = -163
              +
              +

              Operation interrupted (e.g., due to yield)

              + + +

              RD_KAFKA_RESP_ERR__KEY_SERIALIZATION

              +
              public RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = -162
              +
              +

              Key serialization error

              + + +

              RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION

              +
              public RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = -161
              +
              +

              Value serialization error

              + + +

              RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION

              +
              public RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = -160
              +
              +

              Key deserialization error

              + + +

              RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION

              +
              public RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = -159
              +
              +

              Value deserialization error

              + + +

              RD_KAFKA_RESP_ERR__PARTIAL

              +
              public RD_KAFKA_RESP_ERR__PARTIAL = -158
              +
              +

              Partial response

              + + +

              RD_KAFKA_RESP_ERR__READ_ONLY

              +
              public RD_KAFKA_RESP_ERR__READ_ONLY = -157
              +
              +

              Modification attempted on read-only object

              + + +

              RD_KAFKA_RESP_ERR__NOENT

              +
              public RD_KAFKA_RESP_ERR__NOENT = -156
              +
              +

              No such entry / item not found

              + + +

              RD_KAFKA_RESP_ERR__UNDERFLOW

              +
              public RD_KAFKA_RESP_ERR__UNDERFLOW = -155
              +
              +

              Read underflow

              + + +

              RD_KAFKA_RESP_ERR__INVALID_TYPE

              +
              public RD_KAFKA_RESP_ERR__INVALID_TYPE = -154
              +
              +

              Invalid type

              + + +

              RD_KAFKA_RESP_ERR__RETRY

              +
              public RD_KAFKA_RESP_ERR__RETRY = -153
              +
              +

              Retry operation

              + + +

              RD_KAFKA_RESP_ERR__PURGE_QUEUE

              +
              public RD_KAFKA_RESP_ERR__PURGE_QUEUE = -152
              +
              +

              Purged in queue

              + + +

              RD_KAFKA_RESP_ERR__PURGE_INFLIGHT

              +
              public RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = -151
              +
              +

              Purged in flight

              + + +

              RD_KAFKA_RESP_ERR__FATAL

              +
              public RD_KAFKA_RESP_ERR__FATAL = -150
              +
              +

              Fatal error: see rd_kafka_fatal_error()

              + + +

              RD_KAFKA_RESP_ERR__INCONSISTENT

              +
              public RD_KAFKA_RESP_ERR__INCONSISTENT = -149
              +
              +

              Inconsistent state

              + + +

              RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE

              +
              public RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = -148
              +
              +

              Gap-less ordering would not be guaranteed if proceeding

              + + +

              RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED

              +
              public RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = -147
              +
              +

              Maximum poll interval exceeded

              + + +

              RD_KAFKA_RESP_ERR__END

              +
              public RD_KAFKA_RESP_ERR__END = -100
              +
              +

              End internal error codes

              + + +

              RD_KAFKA_RESP_ERR_UNKNOWN

              +
              public RD_KAFKA_RESP_ERR_UNKNOWN = -1
              +
              +

              Unknown broker error

              + + +

              RD_KAFKA_RESP_ERR_NO_ERROR

              +
              public RD_KAFKA_RESP_ERR_NO_ERROR = 0
              +
              +

              Success

              + + +

              RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE

              +
              public RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1
              +
              +

              Offset out of range

              + + +

              RD_KAFKA_RESP_ERR_INVALID_MSG

              +
              public RD_KAFKA_RESP_ERR_INVALID_MSG = 2
              +
              +

              Invalid message

              + + +

              RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART

              +
              public RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3
              +
              +

              Unknown topic or partition

              + + +

              RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE

              +
              public RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4
              +
              +

              Invalid message size

              + + +

              RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE

              +
              public RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5
              +
              +

              Leader not available

              + + +

              RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION

              +
              public RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6
              +
              +

              Not leader for partition

              + + +

              RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT

              +
              public RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7
              +
              +

              Request timed out

              + + +

              RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE

              +
              public RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8
              +
              +

              Broker not available

              + + +

              RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE

              +
              public RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9
              +
              +

              Replica not available

              + + +

              RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE

              +
              public RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10
              +
              +

              Message size too large

              + + +

              RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH

              +
              public RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11
              +
              +

              StaleControllerEpochCode

              + + +

              RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE

              +
              public RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12
              +
              +

              Offset metadata string too large

              + + +

              RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION

              +
              public RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13
              +
              +

              Broker disconnected before response received

              + + +

              RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS

              +
              public RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS = 14
              +
              +

              Group coordinator load in progress

              + + +

              RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE

              +
              public RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE = 15
              +
              +

              Group coordinator not available

              + + +

              RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP

              +
              public RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP = 16
              +
              +

              Not coordinator for group

              + + +

              RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION

              +
              public RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17
              +
              +

              Invalid topic

              + + +

              RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE

              +
              public RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18
              +
              +

              Message batch larger than configured server segment size

              + + +

              RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS

              +
              public RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19
              +
              +

              Not enough in-sync replicas

              + + +

              RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND

              +
              public RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20
              +
              +

              Message(s) written to insufficient number of in-sync replicas

              + + +

              RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS

              +
              public RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21
              +
              +

              Invalid required acks value

              + + +

              RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION

              +
              public RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22
              +
              +

              Specified group generation id is not valid

              + + +

              RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL

              +
              public RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23
              +
              +

              Inconsistent group protocol

              + + +

              RD_KAFKA_RESP_ERR_INVALID_GROUP_ID

              +
              public RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24
              +
              +

              Invalid group.id

              + + +

              RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID

              +
              public RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25
              +
              +

              Unknown member

              + + +

              RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT

              +
              public RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26
              +
              +

              Invalid session timeout

              + + +

              RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS

              +
              public RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27
              +
              +

              Group rebalance in progress

              + + +

              RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE

              +
              public RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28
              +
              +

              Commit offset data size is not valid

              + + +

              RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED

              +
              public RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29
              +
              +

              Topic authorization failed

              + + +

              RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED

              +
              public RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30
              +
              +

              Group authorization failed

              + + +

              RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED

              +
              public RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31
              +
              +

              Cluster authorization failed

              + + +

              RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP

              +
              public RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32
              +
              +

              Invalid timestamp

              + + +

              RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM

              +
              public RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33
              +
              +

              Unsupported SASL mechanism

              + + +

              RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE

              +
              public RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34
              +
              +

              Illegal SASL state

              + + +

              RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION

              +
              public RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35
              +
              +

              Unsupported version

              + + +

              RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS

              +
              public RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36
              +
              +

              Topic already exists

              + + +

              RD_KAFKA_RESP_ERR_INVALID_PARTITIONS

              +
              public RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37
              +
              +

              Invalid number of partitions

              + + +

              RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR

              +
              public RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38
              +
              +

              Invalid replication factor

              + + +

              RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT

              +
              public RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39
              +
              +

              Invalid replica assignment

              + + +

              RD_KAFKA_RESP_ERR_INVALID_CONFIG

              +
              public RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40
              +
              +

              Invalid config

              + + +

              RD_KAFKA_RESP_ERR_NOT_CONTROLLER

              +
              public RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41
              +
              +

              Not controller for cluster

              + + +

              RD_KAFKA_RESP_ERR_INVALID_REQUEST

              +
              public RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42
              +
              +

              Invalid request

              + + +

              RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT

              +
              public RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43
              +
              +

              Message format on broker does not support request

              + + +

              RD_KAFKA_RESP_ERR_POLICY_VIOLATION

              +
              public RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44
              +
              +

              Policy violation

              + + +

              RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER

              +
              public RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45
              +
              +

              Broker received an out of order sequence number

              + + +

              RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER

              +
              public RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46
              +
              +

              Broker received a duplicate sequence number

              + + +

              RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH

              +
              public RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47
              +
              +

              Producer attempted an operation with an old epoch

              + + +

              RD_KAFKA_RESP_ERR_INVALID_TXN_STATE

              +
              public RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48
              +
              +

              Producer attempted a transactional operation in an invalid state

              + + +

              RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING

              +
              public RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49
              +
              +

              Producer attempted to use a producer id which is not currently assigned to its transactional id

              + + +

              RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT

              +
              public RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50
              +
              +

              Transaction timeout is larger than the maximum value allowed by the broker's max.transaction.timeout.ms

              + + +

              RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS

              +
              public RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51
              +
              +

              Producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing

              + + +

              RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED

              +
              public RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52
              +
              +

              Indicates that the transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer

              + + +

              RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED

              +
              public RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53
              +
              +

              Transactional Id authorization failed

              + + +

              RD_KAFKA_RESP_ERR_SECURITY_DISABLED

              +
              public RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54
              +
              +

              Security features are disabled

              + + +

              RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED

              +
              public RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55
              +
              +

              Operation not attempted

              + + +

              RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR

              +
              public RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56
              +
              +

              Disk error when trying to access log file on the disk

              + + +

              RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND

              +
              public RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57
              +
              +

              The user-specified log directory is not found in the broker config

              + + +

              RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED

              +
              public RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58
              +
              +

              SASL Authentication failed

              + + +

              RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID

              +
              public RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59
              +
              +

              Unknown Producer Id

              + + +

              RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS

              +
              public RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60
              +
              +

              Partition reassignment is in progress

              + + +

              RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED

              +
              public RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61
              +
              +

              Delegation Token feature is not enabled

              + + +

              RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND

              +
              public RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62
              +
              +

              Delegation Token is not found on server

              + + +

              RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH

              +
              public RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63
              +
              +

              Specified Principal is not valid Owner/Renewer

              + + +

              RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED

              +
              public RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64
              +
              +

              Delegation Token requests are not allowed on this connection

              + + +

              RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED

              +
              public RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65
              +
              +

              Delegation Token authorization failed

              + + +

              RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED

              +
              public RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66
              +
              +

              Delegation Token is expired

              + + +

              RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE

              +
              public RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67
              +
              +

              Supplied principalType is not supported

              + + +

              RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP

              +
              public RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68
              +
              +

              The group is not empty

              + + +

              RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND

              +
              public RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69
              +
              +

              The group id does not exist

              + + +

              RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND

              +
              public RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70
              +
              +

              The fetch session ID was not found

              + + +

              RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH

              +
              public RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71
              +
              +

              The fetch session epoch is invalid

              + + +

              RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND

              +
              public RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72
              +
              +

              No matching listener

              + + +

              RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED

              +
              public RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73
              +
              +

              Topic deletion is disabled

              + + +

              RD_KAFKA_VTYPE_END

              +
              public RD_KAFKA_VTYPE_END = 0
              +
              +

              va-arg sentinel

              + + +

              RD_KAFKA_VTYPE_TOPIC

              +
              public RD_KAFKA_VTYPE_TOPIC = 1
              +
              +

              (const char *) Topic name

              + + +

              RD_KAFKA_VTYPE_RKT

              +
              public RD_KAFKA_VTYPE_RKT = 2
              +
              +

              (rd_kafka_topic_t *) Topic handle

              + + +

              RD_KAFKA_VTYPE_PARTITION

              +
              public RD_KAFKA_VTYPE_PARTITION = 3
              +
              +

              (int32_t) Partition

              + + +

              RD_KAFKA_VTYPE_VALUE

              +
              public RD_KAFKA_VTYPE_VALUE = 4
              +
              +

              (void *, size_t) Message value (payload)

              + + +

              RD_KAFKA_VTYPE_KEY

              +
              public RD_KAFKA_VTYPE_KEY = 5
              +
              +

              (void *, size_t) Message key

              + + +

              RD_KAFKA_VTYPE_OPAQUE

              +
              public RD_KAFKA_VTYPE_OPAQUE = 6
              +
              +

              (void *) Per-message application opaque value. This is the same as the _private field in rd_kafka_message_t, also known as the msg_opaque.

              + + +

              RD_KAFKA_VTYPE_MSGFLAGS

              +
              public RD_KAFKA_VTYPE_MSGFLAGS = 7
              +
              +

              (int) RD_KAFKA_MSG_F_.. flags

              + + +

              RD_KAFKA_VTYPE_TIMESTAMP

              +
              public RD_KAFKA_VTYPE_TIMESTAMP = 8
              +
              +

              (int64_t) Milliseconds since epoch UTC

              + + +

              RD_KAFKA_VTYPE_HEADER

              +
              public RD_KAFKA_VTYPE_HEADER = 9
              +
              +

              (const char *, const void *, ssize_t) Message Header

              + + +

              RD_KAFKA_VTYPE_HEADERS

              +
              public RD_KAFKA_VTYPE_HEADERS = 10
              +
              +

              (rd_kafka_headers_t *) Headers list

              + + +
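The RD_KAFKA_VTYPE_* values describe the var-arg fields that the C-level producev() call accepts (topic, partition, payload, key, headers, timestamp, and so on). In the object API these fields are usually passed as arguments to the topic's producev() method; the sketch below assumes the php-rdkafka style signature (partition, msgflags, payload, key, headers, timestamp in milliseconds) and uses placeholder names, so treat it as illustrative only.

```php
<?php
// Sketch: ProducerTopic::producev() signature assumed from the php-rdkafka style API.
$conf = new \RdKafka\Conf();
$conf->set('bootstrap.servers', 'localhost:9092'); // placeholder broker

$producer = new \RdKafka\Producer($conf);
$topic    = $producer->newTopic('docs-example-topic');

$topic->producev(
    RD_KAFKA_PARTITION_UA,            // RD_KAFKA_VTYPE_PARTITION (unassigned partition)
    0,                                // RD_KAFKA_VTYPE_MSGFLAGS
    '{"hello":"world"}',              // RD_KAFKA_VTYPE_VALUE
    'order-42',                       // RD_KAFKA_VTYPE_KEY
    ['source' => 'docs-example'],     // RD_KAFKA_VTYPE_HEADERS
    (int) (microtime(true) * 1000)    // RD_KAFKA_VTYPE_TIMESTAMP, ms since epoch
);

$producer->flush(10000); // wait for delivery before the script exits
```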

              RD_KAFKA_MSG_STATUS_NOT_PERSISTED

              +
              public RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0
              +
              +

              Message was never transmitted to the broker, or failed with an error indicating it was not written to the log. Application retry risks ordering, but not duplication.

              + + +

              RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED

              +
              public RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1
              +
              +

              Message was transmitted to broker, but no acknowledgement was received. Application retry risks ordering and duplication.

              + + +

              RD_KAFKA_MSG_STATUS_PERSISTED

              +
              public RD_KAFKA_MSG_STATUS_PERSISTED = 2
              +
              +

              Message was written to the log and acknowledged by the broker. No reason for application to retry. Note: this value should only be trusted with acks=all.

              + + +
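The three persistence statuses matter most in the delivery report: NOT_PERSISTED means a retry cannot duplicate data, POSSIBLY_PERSISTED means it can, and PERSISTED is only trustworthy with acks=all. Below is a sketch of a producer with a delivery-report callback, assuming the php-rdkafka style Conf::setDrMsgCb(); whether the Message object also exposes the raw status value depends on the binding, so the example only inspects err.

```php
<?php
// Sketch: delivery report callback, php-rdkafka style API assumed.
$conf = new \RdKafka\Conf();
$conf->set('bootstrap.servers', 'localhost:9092'); // placeholder broker
$conf->set('acks', 'all');                         // makes PERSISTED trustworthy
$conf->setDrMsgCb(function ($kafka, $message): void {
    if ($message->err === 0) {
        echo "persisted at {$message->topic_name}[{$message->partition}]@{$message->offset}\n";
    } else {
        // Not persisted, or only possibly persisted: decide whether a retry is safe.
        echo "delivery failed with error code {$message->err}\n";
    }
});

$producer = new \RdKafka\Producer($conf);
$producer->newTopic('docs-example-topic')->produce(RD_KAFKA_PARTITION_UA, 0, 'hello');
$producer->flush(10000); // serves the delivery report callbacks
```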

              RD_KAFKA_CONF_UNKNOWN

              +
              public RD_KAFKA_CONF_UNKNOWN = -2
              +
              +

              Unknown configuration name.

              + + +

              RD_KAFKA_CONF_INVALID

              +
              public RD_KAFKA_CONF_INVALID = -1
              +
              +

              Invalid configuration value or property or value not supported in this build.

              + + +

              RD_KAFKA_CONF_OK

              +
              public RD_KAFKA_CONF_OK = 0
              +
              +

              Configuration okay

              + + +
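These three values are what the underlying rd_kafka_conf_set() returns. In the object API a failed set() is generally surfaced as an exception rather than as a return code; a short sketch under that assumption:

```php
<?php
// Sketch: assumes RdKafka\Conf::set() throws when the underlying call returns
// RD_KAFKA_CONF_UNKNOWN or RD_KAFKA_CONF_INVALID.
$conf = new \RdKafka\Conf();

try {
    $conf->set('compression.codec', 'zstd');      // RD_KAFKA_CONF_OK
    $conf->set('no.such.property', 'whatever');   // RD_KAFKA_CONF_UNKNOWN at the C level
} catch (\Exception $e) {
    echo 'configuration rejected: ' . $e->getMessage() . "\n";
}
```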

              RD_KAFKA_ADMIN_OP_ANY

              +
              public RD_KAFKA_ADMIN_OP_ANY = 0
              +
              +

              Default value

              + + +

              RD_KAFKA_ADMIN_OP_CREATETOPICS

              +
              public RD_KAFKA_ADMIN_OP_CREATETOPICS = 1
              +
              +

              CreateTopics

              + + +

              RD_KAFKA_ADMIN_OP_DELETETOPICS

              +
              public RD_KAFKA_ADMIN_OP_DELETETOPICS = 2
              +
              +

              DeleteTopics

              + + +

              RD_KAFKA_ADMIN_OP_CREATEPARTITIONS

              +
              public RD_KAFKA_ADMIN_OP_CREATEPARTITIONS = 3
              +
              +

              CreatePartitions

              + + +

              RD_KAFKA_ADMIN_OP_ALTERCONFIGS

              +
              public RD_KAFKA_ADMIN_OP_ALTERCONFIGS = 4
              +
              +

              AlterConfigs

              + + +

              RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS

              +
              public RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS = 5
              +
              +

              DescribeConfigs

              + + +
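The RD_KAFKA_ADMIN_OP_* constants identify which admin operation a result belongs to. A purely illustrative helper (no library calls beyond the constants documented above, which are assumed to be defined by the extension) that turns the numeric code into a readable name; it uses a PHP 8 match expression.

```php
<?php
// Illustrative only: maps the admin operation constants documented above to names.
function adminOpName(int $op): string
{
    return match ($op) {
        RD_KAFKA_ADMIN_OP_CREATETOPICS     => 'CreateTopics',
        RD_KAFKA_ADMIN_OP_DELETETOPICS     => 'DeleteTopics',
        RD_KAFKA_ADMIN_OP_CREATEPARTITIONS => 'CreatePartitions',
        RD_KAFKA_ADMIN_OP_ALTERCONFIGS     => 'AlterConfigs',
        RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS  => 'DescribeConfigs',
        default                            => 'Any / other',
    };
}

echo adminOpName(RD_KAFKA_ADMIN_OP_CREATETOPICS), "\n"; // CreateTopics
```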

              RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG

              +
              public RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0
              +
              +

              Source unknown, e.g., in the ConfigEntry used for alter requests where source is not set

              + + +

              RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG

              +
              public RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1
              +
              +

              Dynamic topic config that is configured for a specific topic

              + + +

              RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG

              +
              public RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2
              +
              +

              Dynamic broker config that is configured for a specific broker

              + + +

              RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG

              +
              public RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3
              +
              +

              Dynamic broker config that is configured as default for all brokers in the cluster

              + + +

              RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG

              +
              public RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4
              +
              +

              Static broker config provided as broker properties at startup (e.g. from server.properties file)

              + + +

              RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG

              +
              public RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5
              +
              +

              Built-in default configuration for configs that have a default value

              + + +

              RD_KAFKA_CONFIG_SOURCE__CNT

              +
              public RD_KAFKA_CONFIG_SOURCE__CNT = 6
              +
              +

              Number of source types defined

              + + +

              RD_KAFKA_RESOURCE_UNKNOWN

              +
              public RD_KAFKA_RESOURCE_UNKNOWN = 0
              +
              +

              Unknown

              + + +

              RD_KAFKA_RESOURCE_ANY

              +
              public RD_KAFKA_RESOURCE_ANY = 1
              +
              +

              Any (used for lookups)

              + + +

              RD_KAFKA_RESOURCE_TOPIC

              +
              public RD_KAFKA_RESOURCE_TOPIC = 2
              +
              +

              Topic

              + + +

              RD_KAFKA_RESOURCE_GROUP

              +
              public RD_KAFKA_RESOURCE_GROUP = 3
              +
              +

              Group

              + + +

              RD_KAFKA_RESOURCE_BROKER

              +
              public RD_KAFKA_RESOURCE_BROKER = 4
              +
              +

              Broker

              + + +

              RD_KAFKA_RESOURCE__CNT

              +
              public RD_KAFKA_RESOURCE__CNT = 5
              +
              +

              Number of resource types defined

              + + +

              RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH

              +
              public RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74
              +
              +

              Leader epoch is older than broker epoch

              + + +

              RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH

              +
              public RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75
              +
              +

              Leader epoch is newer than broker epoch

              + + +

              RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH

              +
              public RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77
              +
              +

              Broker epoch has changed

              + + +

              RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE

              +
              public RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78
              +
              +

              Leader high watermark is not caught up

              + + +

              RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED

              +
              public RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79
              +
              +

              Group member needs a valid member ID

              + + +

              RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE

              +
              public RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80
              +
              +

              Preferred leader was not available

              + + +

              RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED

              +
              public RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81
              +
              +

              Consumer group has reached maximum size

              + + +

              RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH

              +
              public RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH = 256
              +
              +

              SASL/OAUTHBEARER token needs to be refreshed

              + + +

              RD_KAFKA_CERT_PUBLIC_KEY

              +
              public RD_KAFKA_CERT_PUBLIC_KEY = 0
              +
              +

              Client's public key

              + + +

              RD_KAFKA_CERT_PRIVATE_KEY

              +
              public RD_KAFKA_CERT_PRIVATE_KEY = 1
              +
              +

              Client's private key

              + + +

              RD_KAFKA_CERT_CA

              +
              public RD_KAFKA_CERT_CA = 2
              +
              +

              CA certificate

              + + +

              RD_KAFKA_CERT__CNT

              public RD_KAFKA_CERT__CNT = 3

              enum rd_kafka_cert_type_t

              RD_KAFKA_CERT_ENC_PKCS12

              +
              public RD_KAFKA_CERT_ENC_PKCS12 = 0
              +
              +

              PKCS#12

              + + +

              RD_KAFKA_CERT_ENC_DER

              +
              public RD_KAFKA_CERT_ENC_DER = 1
              +
              +

              DER / binary X.509 ASN1

              + + +

              RD_KAFKA_CERT_ENC_PEM

              +
              public RD_KAFKA_CERT_ENC_PEM = 2
              +
              +

              PEM

              + + +
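The certificate-type and encoding constants above describe what the low-level rd_kafka_conf_set_ssl_cert() accepts (public key, private key or CA, in PKCS#12, DER or PEM form). The simpler and more common route in PHP is to point the configuration at PEM files on disk; the file paths below are placeholders.

```php
<?php
// Sketch: TLS via PEM files on disk; file paths are placeholders.
$conf = new \RdKafka\Conf();
$conf->set('security.protocol', 'ssl');
$conf->set('ssl.ca.location', '/etc/kafka/ca.pem');              // CA certificate
$conf->set('ssl.certificate.location', '/etc/kafka/client.pem'); // client public key
$conf->set('ssl.key.location', '/etc/kafka/client.key');         // client private key

$producer = new \RdKafka\Producer($conf);
```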

              RD_KAFKA_CERT_ENC__CNT

              public RD_KAFKA_CERT_ENC__CNT = 3

              enum rd_kafka_cert_enc_t

              RD_KAFKA_THREAD_MAIN

              +
              public RD_KAFKA_THREAD_MAIN = 0
              +
              +

              librdkafka's internal main thread

              + + +

              RD_KAFKA_THREAD_BACKGROUND

              +
              public RD_KAFKA_THREAD_BACKGROUND = 1
              +
              +

              Background thread (if enabled)

              + + +

              RD_KAFKA_THREAD_BROKER

              +
              public RD_KAFKA_THREAD_BROKER = 2
              +
              +

              Per-broker thread

              + + +

              RD_KAFKA_RESP_ERR__UNKNOWN_BROKER

              +
              public RD_KAFKA_RESP_ERR__UNKNOWN_BROKER = -146
              +
              +

              Unknown broker

              + + +

              RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS

              +
              public RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS = 14
              +
              +

              Coordinator load in progress

              + + +

              RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE

              +
              public RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE = 15
              +
              +

              Coordinator not available

              + + +

              RD_KAFKA_RESP_ERR_NOT_COORDINATOR

              +
              public RD_KAFKA_RESP_ERR_NOT_COORDINATOR = 16
              +
              +

              Not coordinator

              + + +

              RD_KAFKA_RESP_ERR__NOT_CONFIGURED

              +
              public RD_KAFKA_RESP_ERR__NOT_CONFIGURED = -145
              +
              +

              Functionality not configured

              + + +

              RD_KAFKA_RESP_ERR__FENCED

              +
              public RD_KAFKA_RESP_ERR__FENCED = -144
              +
              +

              Instance has been fenced

              + + +

              RD_KAFKA_RESP_ERR__APPLICATION

              +
              public RD_KAFKA_RESP_ERR__APPLICATION = -143
              +
              +

              Application generated error

              + + +

              RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID

              +
              public RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID = 82
              +
              +

              Static consumer fenced by other consumer with same group.instance.id.

              + + +

              RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE

              +
              public RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE = 83
              +
              +

              Eligible partition leaders are not available

              + + +

              RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED

              +
              public RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED = 84
              +
              +

              Leader election not needed for topic partition

              + + +

              RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS

              +
              public RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS = 85
              +
              +

              No partition reassignment is in progress

              + + +

              RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC

              +
              public RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC = 86
              +
              +

              Deleting offsets of a topic while the consumer group is subscribed to it

              + + +

              RD_KAFKA_RESP_ERR_INVALID_RECORD

              +
              public RD_KAFKA_RESP_ERR_INVALID_RECORD = 87
              +
              +

              Broker failed to validate record

              + + +

              RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT

              +
              public RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT = 88
              +
              +

              There are unstable offsets that need to be cleared

              + + +

              RD_KAFKA_EVENT_DELETERECORDS_RESULT

              +
              public RD_KAFKA_EVENT_DELETERECORDS_RESULT = 105
              +
              +

              DeleteRecords_result_t

              + + +

              RD_KAFKA_EVENT_DELETEGROUPS_RESULT

              +
              public RD_KAFKA_EVENT_DELETEGROUPS_RESULT = 106
              +
              +

              DeleteGroups_result_t

              + + +

              RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT

              +
              public RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT = 107
              +
              +

              DeleteConsumerGroupOffsets_result_t

              + + +

              RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST

              +
              public RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST = -142
              +
              +

              Assignment lost

              + + +

              RD_KAFKA_RESP_ERR__NOOP

              +
              public RD_KAFKA_RESP_ERR__NOOP = -141
              +
              +

              No operation performed

              + + +

              RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED

              +
              public RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED = 89
              +
              +

              Throttling quota has been exceeded

              + + +

              RD_KAFKA_RESP_ERR_PRODUCER_FENCED

              +
              public RD_KAFKA_RESP_ERR_PRODUCER_FENCED = 90
              +
              +

              There is a newer producer with the same transactionalId which fences the current one

              + + +
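RD_KAFKA_RESP_ERR_PRODUCER_FENCED (and the local RD_KAFKA_RESP_ERR__FENCED above) means another producer with the same transactional.id has taken over, and the fenced instance must not keep retrying. A sketch of a transactional producer, assuming the php-rdkafka style transaction methods (initTransactions / beginTransaction / commitTransaction); the concrete exception class thrown on failure depends on the binding, so a generic catch is used.

```php
<?php
// Sketch: transactional producer; transaction API assumed from the php-rdkafka style.
$conf = new \RdKafka\Conf();
$conf->set('bootstrap.servers', 'localhost:9092');   // placeholder broker
$conf->set('transactional.id', 'docs-example-tx');   // placeholder transactional id

$producer = new \RdKafka\Producer($conf);
$producer->initTransactions(10000);

$producer->beginTransaction();
$producer->newTopic('docs-example-topic')->produce(RD_KAFKA_PARTITION_UA, 0, 'inside a transaction');

try {
    $producer->commitTransaction(10000);
} catch (\Exception $e) {
    // A fenced producer must be replaced with a new instance; do not retry blindly.
    echo 'commit failed: ' . $e->getMessage() . "\n";
}
```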

              RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND

              +
              public RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND = 91
              +
              +

              Request illegally referred to resource that does not exist

              + + +

              RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE

              +
              public RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE = 92
              +
              +

              Request illegally referred to the same resource twice

              + + +

              RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL

              +
              public RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL = 93
              +
              +

              Requested credential would not meet criteria for acceptability

              + + +

              RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET

              +
              public RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET = 94
              +
              +

              Indicates that either the sender or the recipient of a voter-only request is not one of the expected voters

              + + +

              RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION

              +
              public RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION = 95
              +
              +

              Invalid update version

              + + +

              RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED

              +
              public RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED = 96
              +
              +

              Unable to update finalized features due to server error

              + + +

              RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE

              +
              public RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE = 97
              +
              +

              Request principal deserialization failed during forwarding

              + + +

              RD_KAFKA_ADMIN_OP_DELETERECORDS

              +
              public RD_KAFKA_ADMIN_OP_DELETERECORDS = 6
              +
              +

              DeleteRecords

              + + +

              RD_KAFKA_ADMIN_OP_DELETEGROUPS

              +
              public RD_KAFKA_ADMIN_OP_DELETEGROUPS = 7
              +
              +

              DeleteGroups

              + + +

              RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS

              +
              public RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS = 8
              +
              +

              DeleteConsumerGroupOffsets

              + + +

              RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET

              +
              public RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET = -140
              +
              +

              No offset to automatically reset to

              + + +
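RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET is reported when an automatic offset reset is triggered but cannot be honoured, for example when auto.offset.reset is set to 'error'. The reset policy is controlled by that configuration property; a minimal sketch:

```php
<?php
// Sketch: choose where a consumer starts when no valid committed offset exists.
$conf = new \RdKafka\Conf();
$conf->set('bootstrap.servers', 'localhost:9092'); // placeholder broker
$conf->set('group.id', 'docs-example-group');
$conf->set('auto.offset.reset', 'earliest');       // or 'latest'; 'error' surfaces
                                                   // RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET

$consumer = new \RdKafka\KafkaConsumer($conf);
$consumer->subscribe(['docs-example-topic']);
```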

              RD_KAFKA_EVENT_BACKGROUND

              +
              public RD_KAFKA_EVENT_BACKGROUND = 512
              +
              +

              Enable background thread.

              + + +

              RD_KAFKA_EVENT_CREATEACLS_RESULT

              +
              public RD_KAFKA_EVENT_CREATEACLS_RESULT = 1024
              +
              +

              CreateAcls_result_t

              + + +

              RD_KAFKA_EVENT_DESCRIBEACLS_RESULT

              +
              public RD_KAFKA_EVENT_DESCRIBEACLS_RESULT = 2048
              +
              +

              DescribeAcls_result_t

              + + +

              RD_KAFKA_EVENT_DELETEACLS_RESULT

              +
              public RD_KAFKA_EVENT_DELETEACLS_RESULT = 4096
              +
              +

              DeleteAcls_result_t

              + + +

              RD_KAFKA_ADMIN_OP_CREATEACLS

              +
              public RD_KAFKA_ADMIN_OP_CREATEACLS = 9
              +
              +

              CreateAcls

              + + +

              RD_KAFKA_ADMIN_OP_DESCRIBEACLS

              +
              public RD_KAFKA_ADMIN_OP_DESCRIBEACLS = 10
              +
              +

              DescribeAcls

              + + +

              RD_KAFKA_ADMIN_OP_DELETEACLS

              +
              public RD_KAFKA_ADMIN_OP_DELETEACLS = 11
              +
              +

              DeleteAcls

              + + +

              RD_KAFKA_RESOURCE_PATTERN_UNKNOWN

              +
              public RD_KAFKA_RESOURCE_PATTERN_UNKNOWN = 0
              +
              +

              Unknown

              + + +

              RD_KAFKA_RESOURCE_PATTERN_ANY

              +
              public RD_KAFKA_RESOURCE_PATTERN_ANY = 1
              +
              +

              Any (used for lookups)

              + + +

              RD_KAFKA_RESOURCE_PATTERN_MATCH

              +
              public RD_KAFKA_RESOURCE_PATTERN_MATCH = 2
              +
              +

              Match: will perform pattern matching

              + + +

              RD_KAFKA_RESOURCE_PATTERN_LITERAL

              +
              public RD_KAFKA_RESOURCE_PATTERN_LITERAL = 3
              +
              +

              Literal: A literal resource name

              + + +

              RD_KAFKA_RESOURCE_PATTERN_PREFIXED

              +
              public RD_KAFKA_RESOURCE_PATTERN_PREFIXED = 4
              +
              +

              Prefixed: A prefixed resource name

              + + +

              RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT

              public RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT = 5

              enum rd_kafka_ResourcePatternType_t

              RD_KAFKA_ACL_OPERATION_UNKNOWN

              +
              public RD_KAFKA_ACL_OPERATION_UNKNOWN = 0
              +
              +

              Unknown

              + + +

              RD_KAFKA_ACL_OPERATION_ANY

              +
              public RD_KAFKA_ACL_OPERATION_ANY = 1
              +
              +

              In a filter, matches any AclOperation

              + + +

              RD_KAFKA_ACL_OPERATION_ALL

              +
              public RD_KAFKA_ACL_OPERATION_ALL = 2
              +
              +

              ALL operation

              + + +

              RD_KAFKA_ACL_OPERATION_READ

              +
              public RD_KAFKA_ACL_OPERATION_READ = 3
              +
              +

              READ operation

              + + +

              RD_KAFKA_ACL_OPERATION_WRITE

              +
              public RD_KAFKA_ACL_OPERATION_WRITE = 4
              +
              +

              WRITE operation

              + + +

              RD_KAFKA_ACL_OPERATION_CREATE

              +
              public RD_KAFKA_ACL_OPERATION_CREATE = 5
              +
              +

              CREATE operation

              + + +

              RD_KAFKA_ACL_OPERATION_DELETE

              +
              public RD_KAFKA_ACL_OPERATION_DELETE = 6
              +
              +

              DELETE operation

              + + +

              RD_KAFKA_ACL_OPERATION_ALTER

              +
              public RD_KAFKA_ACL_OPERATION_ALTER = 7
              +
              +

              ALTER operation

              + + +

              RD_KAFKA_ACL_OPERATION_DESCRIBE

              +
              public RD_KAFKA_ACL_OPERATION_DESCRIBE = 8
              +
              +

              DESCRIBE operation

              + + +

              RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION

              +
              public RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION = 9
              +
              +

              CLUSTER_ACTION operation

              + + +

              RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS

              +
              public RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS = 10
              +
              +

              DESCRIBE_CONFIGS operation

              + + +

              RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS

              +
              public RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS = 11
              +
              +

              ALTER_CONFIGS operation

              + + +

              RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE

              +
              public RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE = 12
              +
              +

              IDEMPOTENT_WRITE operation

              + + +

              RD_KAFKA_ACL_OPERATION__CNT

              public RD_KAFKA_ACL_OPERATION__CNT = 13

              enum rd_kafka_AclOperation_t

              RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN

              +
              public RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN = 0
              +
              +

              Unknown

              + + +

              RD_KAFKA_ACL_PERMISSION_TYPE_ANY

              +
              public RD_KAFKA_ACL_PERMISSION_TYPE_ANY = 1
              +
              +

              In a filter, matches any AclPermissionType

              + + +

              RD_KAFKA_ACL_PERMISSION_TYPE_DENY

              +
              public RD_KAFKA_ACL_PERMISSION_TYPE_DENY = 2
              +
              +

              Disallows access

              + + +

              RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW

              +
              public RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW = 3
              +
              +

              Grants access.

              + + +

              RD_KAFKA_ACL_PERMISSION_TYPE__CNT

              public RD_KAFKA_ACL_PERMISSION_TYPE__CNT = 4

              enum rd_kafka_AclPermissionType_t

              RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT

              +
              public RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT = 8192
              +
              +

              ListConsumerGroupsResult_t

              + + +

              RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT

              +
              public RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT = 16384
              +
              +

              DescribeConsumerGroups_result_t

              + + +

              RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT

              +
              public RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT = 32768
              +
              +

              ListConsumerGroupOffsets_result_t

              + + +

              RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT

              +
              public RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT = 65536
              +
              +

              AlterConsumerGroupOffsets_result_t

              + + +

              RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN

              public RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN = 0

              enum rd_kafka_consumer_group_state_t

              RD_KAFKA_CONSUMER_GROUP_STATE_PREPARING_REBALANCE

              public RD_KAFKA_CONSUMER_GROUP_STATE_PREPARING_REBALANCE = 1

              enum rd_kafka_consumer_group_state_t

              RD_KAFKA_CONSUMER_GROUP_STATE_COMPLETING_REBALANCE

              public RD_KAFKA_CONSUMER_GROUP_STATE_COMPLETING_REBALANCE = 2

              enum rd_kafka_consumer_group_state_t

              RD_KAFKA_CONSUMER_GROUP_STATE_STABLE

              public RD_KAFKA_CONSUMER_GROUP_STATE_STABLE = 3

              enum rd_kafka_consumer_group_state_t

              RD_KAFKA_CONSUMER_GROUP_STATE_DEAD

              public RD_KAFKA_CONSUMER_GROUP_STATE_DEAD = 4

              enum rd_kafka_consumer_group_state_t

              RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY

              public RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY = 5

              enum rd_kafka_consumer_group_state_t

              RD_KAFKA_CONSUMER_GROUP_STATE__CNT

              public RD_KAFKA_CONSUMER_GROUP_STATE__CNT = 6

              enum rd_kafka_consumer_group_state_t

              RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS

              +
              public RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS = 12
              +
              +

              ListConsumerGroups

              + + +

              RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS

              +
              public RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS = 13
              +
              +

              DescribeConsumerGroups

              + + +

              RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS

              +
              public RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS = 14
              +
              +

              ListConsumerGroupOffsets

              + + +

              RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS

              +
              public RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS = 15
              +
              +

              AlterConsumerGroupOffsets

              + + +

              RD_KAFKA_RESP_ERR__LOG_TRUNCATION

              +
              public RD_KAFKA_RESP_ERR__LOG_TRUNCATION = -139
              +
              +

              Partition log truncation detected

              + + +

              RD_KAFKA_EVENT_INCREMENTALALTERCONFIGS_RESULT

              +
              public RD_KAFKA_EVENT_INCREMENTALALTERCONFIGS_RESULT = 131072
              +
              +

              IncrementalAlterConfigs_result_t

              + + +

              RD_KAFKA_EVENT_DESCRIBEUSERSCRAMCREDENTIALS_RESULT

              +
              public RD_KAFKA_EVENT_DESCRIBEUSERSCRAMCREDENTIALS_RESULT = 262144
              +
              +

              DescribeUserScramCredentials_result_t

              + + +

              RD_KAFKA_EVENT_ALTERUSERSCRAMCREDENTIALS_RESULT

              +
              public RD_KAFKA_EVENT_ALTERUSERSCRAMCREDENTIALS_RESULT = 524288
              +
              +

              AlterUserScramCredentials_result_t

              + + +

              RD_KAFKA_ADMIN_OP_INCREMENTALALTERCONFIGS

              +
              public RD_KAFKA_ADMIN_OP_INCREMENTALALTERCONFIGS = 16
              +
              +

              IncrementalAlterConfigs

              + + +

              RD_KAFKA_ADMIN_OP_DESCRIBEUSERSCRAMCREDENTIALS

              +
              public RD_KAFKA_ADMIN_OP_DESCRIBEUSERSCRAMCREDENTIALS = 17
              +
              +

              DescribeUserScramCredentials

              + + +

              RD_KAFKA_ADMIN_OP_ALTERUSERSCRAMCREDENTIALS

              +
              public RD_KAFKA_ADMIN_OP_ALTERUSERSCRAMCREDENTIALS = 18
              +
              +

              AlterUserScramCredentials

              + + +

              RD_KAFKA_ALTER_CONFIG_OP_TYPE_SET

              public RD_KAFKA_ALTER_CONFIG_OP_TYPE_SET = 0

              enum rd_kafka_AlterConfigOpType_t

              RD_KAFKA_ALTER_CONFIG_OP_TYPE_DELETE

              public RD_KAFKA_ALTER_CONFIG_OP_TYPE_DELETE = 1

              enum rd_kafka_AlterConfigOpType_t

              RD_KAFKA_ALTER_CONFIG_OP_TYPE_APPEND

              public RD_KAFKA_ALTER_CONFIG_OP_TYPE_APPEND = 2

              enum rd_kafka_AlterConfigOpType_t

              RD_KAFKA_ALTER_CONFIG_OP_TYPE_SUBTRACT

              public RD_KAFKA_ALTER_CONFIG_OP_TYPE_SUBTRACT = 3

              enum rd_kafka_AlterConfigOpType_t

              RD_KAFKA_ALTER_CONFIG_OP_TYPE__CNT

              public RD_KAFKA_ALTER_CONFIG_OP_TYPE__CNT = 4

              enum rd_kafka_AlterConfigOpType_t

              RD_KAFKA_SCRAM_MECHANISM_UNKNOWN

              public RD_KAFKA_SCRAM_MECHANISM_UNKNOWN = 0

              enum rd_kafka_ScramMechanism_t

              RD_KAFKA_SCRAM_MECHANISM_SHA_256

              public RD_KAFKA_SCRAM_MECHANISM_SHA_256 = 1

              enum rd_kafka_ScramMechanism_t

              RD_KAFKA_SCRAM_MECHANISM_SHA_512

              public RD_KAFKA_SCRAM_MECHANISM_SHA_512 = 2

              enum rd_kafka_ScramMechanism_t

              RD_KAFKA_SCRAM_MECHANISM__CNT

              public RD_KAFKA_SCRAM_MECHANISM__CNT = 3

              enum rd_kafka_ScramMechanism_t
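The SCRAM mechanism constants are used by the user-SCRAM-credential admin operations; on the client side the matching setting is the sasl.mechanism configuration property. A minimal sketch of a SCRAM-authenticated client configuration (broker address and credentials are placeholders):

```php
<?php
// Sketch: SASL/SCRAM client configuration; username and password are placeholders.
$conf = new \RdKafka\Conf();
$conf->set('bootstrap.servers', 'broker.example.com:9093');
$conf->set('security.protocol', 'sasl_ssl');
$conf->set('sasl.mechanism', 'SCRAM-SHA-256'); // or SCRAM-SHA-512
$conf->set('sasl.username', 'docs-user');
$conf->set('sasl.password', 'docs-password');

$producer = new \RdKafka\Producer($conf);
```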

              RD_KAFKA_EVENT_DESCRIBETOPICS_RESULT

              public RD_KAFKA_EVENT_DESCRIBETOPICS_RESULT = 1048576

              define

              RD_KAFKA_EVENT_DESCRIBECLUSTER_RESULT

              public RD_KAFKA_EVENT_DESCRIBECLUSTER_RESULT = 2097152

              define

              RD_KAFKA_EVENT_LISTOFFSETS_RESULT

              public RD_KAFKA_EVENT_LISTOFFSETS_RESULT = 4194304

              define

              RD_KAFKA_ADMIN_OP_DESCRIBETOPICS

              public RD_KAFKA_ADMIN_OP_DESCRIBETOPICS = 19

              enum rd_kafka_admin_op_t

              RD_KAFKA_ADMIN_OP_DESCRIBECLUSTER

              public RD_KAFKA_ADMIN_OP_DESCRIBECLUSTER = 20

              enum rd_kafka_admin_op_t

              RD_KAFKA_ADMIN_OP_LISTOFFSETS

              public RD_KAFKA_ADMIN_OP_LISTOFFSETS = 21

              enum rd_kafka_admin_op_t

              RD_KAFKA_ISOLATION_LEVEL_READ_UNCOMMITTED

              public RD_KAFKA_ISOLATION_LEVEL_READ_UNCOMMITTED = 0

              enum rd_kafka_IsolationLevel_t

              RD_KAFKA_ISOLATION_LEVEL_READ_COMMITTED

              public RD_KAFKA_ISOLATION_LEVEL_READ_COMMITTED = 1

              enum rd_kafka_IsolationLevel_t
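The isolation level constants mirror the consumer's isolation.level property: read_committed hides records from aborted transactions, while read_uncommitted returns everything. A minimal sketch, assuming the php-rdkafka style consumer API:

```php
<?php
// Sketch: only read records from committed transactions.
$conf = new \RdKafka\Conf();
$conf->set('bootstrap.servers', 'localhost:9092'); // placeholder broker
$conf->set('group.id', 'docs-example-group');
$conf->set('isolation.level', 'read_committed');   // RD_KAFKA_ISOLATION_LEVEL_READ_COMMITTED

$consumer = new \RdKafka\KafkaConsumer($conf);
$consumer->subscribe(['docs-transactional-topic']);
```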

              RD_KAFKA_OFFSET_SPEC_MAX_TIMESTAMP

              public RD_KAFKA_OFFSET_SPEC_MAX_TIMESTAMP = -3

              enum rd_kafka_OffsetSpec_t

              RD_KAFKA_OFFSET_SPEC_EARLIEST

              public RD_KAFKA_OFFSET_SPEC_EARLIEST = -2

              enum rd_kafka_OffsetSpec_t

              RD_KAFKA_OFFSET_SPEC_LATEST

              public RD_KAFKA_OFFSET_SPEC_LATEST = -1

              enum rd_kafka_OffsetSpec_t
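The offset-spec constants (earliest, latest, max timestamp) are used by the ListOffsets admin operation. On the client side, the earliest and latest positions of a partition can also be read as the low and high watermarks; a sketch assuming the queryWatermarkOffsets() method of the php-rdkafka style API, with placeholder broker and topic names:

```php
<?php
// Sketch: read the earliest (low) and latest (high) offsets of a partition.
$conf = new \RdKafka\Conf();
$conf->set('bootstrap.servers', 'localhost:9092'); // placeholder broker

$producer = new \RdKafka\Producer($conf);

$low = 0;
$high = 0;
$producer->queryWatermarkOffsets('docs-example-topic', 0, $low, $high, 10000);

echo "earliest offset: {$low}, latest offset: {$high}\n";
```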

              RD_KAFKA_SUPPORTED_METHODS

              +
              public RD_KAFKA_SUPPORTED_METHODS = ['rd_kafka_version' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_version_str' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_get_debug_contexts' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_get_err_descs' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_err2str' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_err2name' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_last_error' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_errno2err' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_errno' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_fatal_error' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_test_fatal_error' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_topic_partition_destroy' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_topic_partition_list_new' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_topic_partition_list_destroy' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_topic_partition_list_add' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_topic_partition_list_add_range' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_topic_partition_list_del' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_topic_partition_list_del_by_idx' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_topic_partition_list_copy' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_topic_partition_list_set_offset' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_topic_partition_list_find' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_topic_partition_list_sort' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_headers_new' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_headers_destroy' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_headers_copy' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_header_add' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_header_remove' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_header_get_last' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_header_get' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_header_get_all' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_message_destroy' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_message_timestamp' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_message_latency' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_message_headers' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_message_detach_headers' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_message_set_headers' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_header_cnt' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_message_status' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_conf_new' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_conf_destroy' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_conf_dup' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_conf_dup_filter' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_conf_set' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_conf_set_events' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_conf_set_background_event_cb' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_conf_set_dr_cb' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_conf_set_dr_msg_cb' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_conf_set_consume_cb' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_conf_set_rebalance_cb' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_conf_set_offset_commit_cb' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_conf_set_error_cb' 
=> ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_conf_set_throttle_cb' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_conf_set_log_cb' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_conf_set_stats_cb' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_conf_set_socket_cb' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_conf_set_connect_cb' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_conf_set_closesocket_cb' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_conf_set_opaque' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_opaque' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_conf_set_default_topic_conf' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_conf_get' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_topic_conf_get' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_conf_dump' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_topic_conf_dump' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_conf_dump_free' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_conf_properties_show' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_topic_conf_new' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_topic_conf_dup' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_default_topic_conf_dup' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_topic_conf_destroy' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_topic_conf_set' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_topic_conf_set_opaque' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_topic_conf_set_partitioner_cb' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_topic_conf_set_msg_order_cmp' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_topic_partition_available' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_msg_partitioner_random' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_msg_partitioner_consistent' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_msg_partitioner_consistent_random' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_msg_partitioner_murmur2' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_msg_partitioner_murmur2_random' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_new' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_destroy' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_destroy_flags' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_name' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_type' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_memberid' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_clusterid' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_controllerid' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_topic_new' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_topic_destroy' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_topic_name' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_topic_opaque' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_poll' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_yield' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_pause_partitions' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_resume_partitions' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_query_watermark_offsets' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_get_watermark_offsets' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_offsets_for_times' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_mem_free' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_queue_new' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_queue_destroy' => ['min' => '1.0.0', 'max' => '2.3.0'], 
'rd_kafka_queue_get_main' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_queue_get_consumer' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_queue_get_partition' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_queue_get_background' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_queue_forward' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_set_log_queue' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_queue_length' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_queue_io_event_enable' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_queue_cb_event_enable' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_consume_start' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_consume_start_queue' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_consume_stop' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_seek' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_consume' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_consume_batch' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_consume_callback' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_consume_queue' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_consume_batch_queue' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_consume_callback_queue' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_offset_store' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_offsets_store' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_subscribe' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_unsubscribe' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_subscription' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_consumer_poll' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_consumer_close' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_assign' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_assignment' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_commit' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_commit_message' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_commit_queue' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_committed' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_position' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_produce' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_producev' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_produce_batch' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_flush' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_purge' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_metadata' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_metadata_destroy' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_list_groups' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_group_list_destroy' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_brokers_add' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_set_logger' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_set_log_level' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_log_print' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_log_syslog' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_outq_len' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_dump' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_thread_cnt' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_wait_destroyed' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_unittest' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_poll_set_consumer' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_event_type' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_event_name' => ['min' => 
'1.0.0', 'max' => '2.3.0'], 'rd_kafka_event_destroy' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_event_message_next' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_event_message_array' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_event_message_count' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_event_error' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_event_error_string' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_event_error_is_fatal' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_event_opaque' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_event_log' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_event_stats' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_event_topic_partition_list' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_event_topic_partition' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_event_CreateTopics_result' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_event_DeleteTopics_result' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_event_CreatePartitions_result' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_event_AlterConfigs_result' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_event_DescribeConfigs_result' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_queue_poll' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_queue_poll_callback' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_plugin_f_conf_init_t' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_interceptor_f_on_conf_set_t' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_interceptor_f_on_conf_dup_t' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_interceptor_f_on_conf_destroy_t' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_interceptor_f_on_new_t' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_interceptor_f_on_destroy_t' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_interceptor_f_on_send_t' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_interceptor_f_on_acknowledgement_t' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_interceptor_f_on_consume_t' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_interceptor_f_on_commit_t' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_interceptor_f_on_request_sent_t' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_conf_interceptor_add_on_conf_set' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_conf_interceptor_add_on_conf_dup' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_conf_interceptor_add_on_conf_destroy' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_conf_interceptor_add_on_new' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_interceptor_add_on_destroy' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_interceptor_add_on_send' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_interceptor_add_on_acknowledgement' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_interceptor_add_on_consume' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_interceptor_add_on_commit' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_interceptor_add_on_request_sent' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_topic_result_error' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_topic_result_error_string' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_topic_result_name' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_AdminOptions_new' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_AdminOptions_destroy' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_AdminOptions_set_request_timeout' => ['min' => '1.0.0', 'max' => '2.3.0'], 
'rd_kafka_AdminOptions_set_operation_timeout' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_AdminOptions_set_validate_only' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_AdminOptions_set_broker' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_AdminOptions_set_opaque' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_NewTopic_new' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_NewTopic_destroy' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_NewTopic_destroy_array' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_NewTopic_set_replica_assignment' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_NewTopic_set_config' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_CreateTopics' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_CreateTopics_result_topics' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_DeleteTopic_new' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_DeleteTopic_destroy' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_DeleteTopic_destroy_array' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_DeleteTopics' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_DeleteTopics_result_topics' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_NewPartitions_new' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_NewPartitions_destroy' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_NewPartitions_destroy_array' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_NewPartitions_set_replica_assignment' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_CreatePartitions' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_CreatePartitions_result_topics' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_ConfigSource_name' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_ConfigEntry_name' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_ConfigEntry_value' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_ConfigEntry_source' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_ConfigEntry_is_read_only' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_ConfigEntry_is_default' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_ConfigEntry_is_sensitive' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_ConfigEntry_is_synonym' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_ConfigEntry_synonyms' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_ResourceType_name' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_ConfigResource_new' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_ConfigResource_destroy' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_ConfigResource_destroy_array' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_ConfigResource_set_config' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_ConfigResource_configs' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_ConfigResource_type' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_ConfigResource_name' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_ConfigResource_error' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_ConfigResource_error_string' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_AlterConfigs' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_AlterConfigs_result_resources' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_DescribeConfigs' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_DescribeConfigs_result_resources' => ['min' => '1.0.0', 'max' => '2.3.0'], 'rd_kafka_conf' => ['min' => '1.1.0', 'max' => '2.3.0'], 'rd_kafka_conf_set_oauthbearer_token_refresh_cb' => ['min' => '1.1.0', 'max' => '2.3.0'], 'rd_kafka_conf_set_ssl_cert_verify_cb' => 
['min' => '1.1.0', 'max' => '2.3.0'], 'rd_kafka_conf_set_ssl_cert' => ['min' => '1.1.0', 'max' => '2.3.0'], 'rd_kafka_event_config_string' => ['min' => '1.1.0', 'max' => '2.3.0'], 'rd_kafka_oauthbearer_set_token' => ['min' => '1.1.0', 'max' => '2.3.0'], 'rd_kafka_oauthbearer_set_token_failure' => ['min' => '1.1.0', 'max' => '2.3.0'], 'rd_kafka_interceptor_f_on_thread_start_t' => ['min' => '1.2.0', 'max' => '2.3.0'], 'rd_kafka_interceptor_f_on_thread_exit_t' => ['min' => '1.2.0', 'max' => '2.3.0'], 'rd_kafka_interceptor_add_on_thread_start' => ['min' => '1.2.0', 'max' => '2.3.0'], 'rd_kafka_interceptor_add_on_thread_exit' => ['min' => '1.2.0', 'max' => '2.3.0'], 'rd_kafka_mock_cluster_new' => ['min' => '1.3.0', 'max' => '2.3.0'], 'rd_kafka_mock_cluster_destroy' => ['min' => '1.3.0', 'max' => '2.3.0'], 'rd_kafka_mock_cluster_handle' => ['min' => '1.3.0', 'max' => '2.3.0'], 'rd_kafka_mock_cluster_bootstraps' => ['min' => '1.3.0', 'max' => '2.3.0'], 'rd_kafka_mock_push_request_errors' => ['min' => '1.3.0', 'max' => '2.3.0'], 'rd_kafka_mock_topic_set_error' => ['min' => '1.3.0', 'max' => '2.3.0'], 'rd_kafka_mock_partition_set_leader' => ['min' => '1.3.0', 'max' => '2.3.0'], 'rd_kafka_mock_partition_set_follower' => ['min' => '1.3.0', 'max' => '2.3.0'], 'rd_kafka_mock_partition_set_follower_wmarks' => ['min' => '1.3.0', 'max' => '2.3.0'], 'rd_kafka_mock_broker_set_rack' => ['min' => '1.3.0', 'max' => '2.3.0'], 'rd_kafka_error_code' => ['min' => '1.4.0', 'max' => '2.3.0'], 'rd_kafka_error_name' => ['min' => '1.4.0', 'max' => '2.3.0'], 'rd_kafka_error_string' => ['min' => '1.4.0', 'max' => '2.3.0'], 'rd_kafka_error_is_fatal' => ['min' => '1.4.0', 'max' => '2.3.0'], 'rd_kafka_error_is_retriable' => ['min' => '1.4.0', 'max' => '2.3.0'], 'rd_kafka_error_txn_requires_abort' => ['min' => '1.4.0', 'max' => '2.3.0'], 'rd_kafka_error_destroy' => ['min' => '1.4.0', 'max' => '2.3.0'], 'rd_kafka_error_new' => ['min' => '1.4.0', 'max' => '2.3.0'], 'rd_kafka_msg_partitioner_fnv1a' => ['min' => '1.4.0', 'max' => '2.3.0'], 'rd_kafka_msg_partitioner_fnv1a_random' => ['min' => '1.4.0', 'max' => '2.3.0'], 'rd_kafka_consumer_group_metadata' => ['min' => '1.4.0', 'max' => '2.3.0'], 'rd_kafka_consumer_group_metadata_new' => ['min' => '1.4.0', 'max' => '2.3.0'], 'rd_kafka_consumer_group_metadata_destroy' => ['min' => '1.4.0', 'max' => '2.3.0'], 'rd_kafka_consumer_group_metadata_write' => ['min' => '1.4.0', 'max' => '2.3.0'], 'rd_kafka_consumer_group_metadata_read' => ['min' => '1.4.0', 'max' => '2.3.0'], 'rd_kafka_init_transactions' => ['min' => '1.4.0', 'max' => '2.3.0'], 'rd_kafka_begin_transaction' => ['min' => '1.4.0', 'max' => '2.3.0'], 'rd_kafka_send_offsets_to_transaction' => ['min' => '1.4.0', 'max' => '2.3.0'], 'rd_kafka_commit_transaction' => ['min' => '1.4.0', 'max' => '2.3.0'], 'rd_kafka_abort_transaction' => ['min' => '1.4.0', 'max' => '2.3.0'], 'rd_kafka_handle_mock_cluster' => ['min' => '1.4.0', 'max' => '2.3.0'], 'rd_kafka_mock_topic_create' => ['min' => '1.4.0', 'max' => '2.3.0'], 'rd_kafka_mock_broker_set_down' => ['min' => '1.4.0', 'max' => '2.3.0'], 'rd_kafka_mock_broker_set_up' => ['min' => '1.4.0', 'max' => '2.3.0'], 'rd_kafka_mock_coordinator_set' => ['min' => '1.4.0', 'max' => '2.3.0'], 'rd_kafka_mock_set_apiversion' => ['min' => '1.4.0', 'max' => '2.3.0'], 'rd_kafka_mock_broker_set_rtt' => ['min' => '1.4.4', 'max' => '2.3.0'], 'rd_kafka_message_errstr' => ['min' => '1.5.0', 'max' => '2.3.0'], 'rd_kafka_message_broker_id' => ['min' => '1.5.0', 'max' => '2.3.0'], 'rd_kafka_produceva' => ['min' => 
'1.5.0', 'max' => '2.3.0'], 'rd_kafka_event_debug_contexts' => ['min' => '1.5.0', 'max' => '2.3.0'], 'rd_kafka_mock_broker_push_request_errors' => ['min' => '1.5.0', 'max' => '1.6.2'], 'rd_kafka_conf_get_default_topic_conf' => ['min' => '1.6.0', 'max' => '2.3.0'], 'rd_kafka_queue_yield' => ['min' => '1.6.0', 'max' => '2.3.0'], 'rd_kafka_seek_partitions' => ['min' => '1.6.0', 'max' => '2.3.0'], 'rd_kafka_incremental_assign' => ['min' => '1.6.0', 'max' => '2.3.0'], 'rd_kafka_incremental_unassign' => ['min' => '1.6.0', 'max' => '2.3.0'], 'rd_kafka_rebalance_protocol' => ['min' => '1.6.0', 'max' => '2.3.0'], 'rd_kafka_assignment_lost' => ['min' => '1.6.0', 'max' => '2.3.0'], 'rd_kafka_consumer_group_metadata_new_with_genid' => ['min' => '1.6.0', 'max' => '2.3.0'], 'rd_kafka_event_DeleteRecords_result' => ['min' => '1.6.0', 'max' => '2.3.0'], 'rd_kafka_event_DeleteGroups_result' => ['min' => '1.6.0', 'max' => '2.3.0'], 'rd_kafka_event_DeleteConsumerGroupOffsets_result' => ['min' => '1.6.0', 'max' => '2.3.0'], 'rd_kafka_group_result_error' => ['min' => '1.6.0', 'max' => '2.3.0'], 'rd_kafka_group_result_name' => ['min' => '1.6.0', 'max' => '2.3.0'], 'rd_kafka_group_result_partitions' => ['min' => '1.6.0', 'max' => '2.3.0'], 'rd_kafka_DeleteRecords_new' => ['min' => '1.6.0', 'max' => '2.3.0'], 'rd_kafka_DeleteRecords_destroy' => ['min' => '1.6.0', 'max' => '2.3.0'], 'rd_kafka_DeleteRecords_destroy_array' => ['min' => '1.6.0', 'max' => '2.3.0'], 'rd_kafka_DeleteRecords' => ['min' => '1.6.0', 'max' => '2.3.0'], 'rd_kafka_DeleteRecords_result_offsets' => ['min' => '1.6.0', 'max' => '2.3.0'], 'rd_kafka_DeleteGroup_new' => ['min' => '1.6.0', 'max' => '2.3.0'], 'rd_kafka_DeleteGroup_destroy' => ['min' => '1.6.0', 'max' => '2.3.0'], 'rd_kafka_DeleteGroup_destroy_array' => ['min' => '1.6.0', 'max' => '2.3.0'], 'rd_kafka_DeleteGroups' => ['min' => '1.6.0', 'max' => '2.3.0'], 'rd_kafka_DeleteGroups_result_groups' => ['min' => '1.6.0', 'max' => '2.3.0'], 'rd_kafka_DeleteConsumerGroupOffsets_new' => ['min' => '1.6.0', 'max' => '2.3.0'], 'rd_kafka_DeleteConsumerGroupOffsets_destroy' => ['min' => '1.6.0', 'max' => '2.3.0'], 'rd_kafka_DeleteConsumerGroupOffsets_destroy_array' => ['min' => '1.6.0', 'max' => '2.3.0'], 'rd_kafka_DeleteConsumerGroupOffsets' => ['min' => '1.6.0', 'max' => '2.3.0'], 'rd_kafka_DeleteConsumerGroupOffsets_result_groups' => ['min' => '1.6.0', 'max' => '2.3.0'], 'rd_kafka_mock_clear_request_errors' => ['min' => '1.6.0', 'max' => '2.3.0'], 'rd_kafka_mock_push_request_errors_array' => ['min' => '1.6.0', 'max' => '2.3.0'], 'rd_kafka_interceptor_f_on_response_received_t' => ['min' => '1.6.1', 'max' => '2.3.0'], 'rd_kafka_interceptor_add_on_response_received' => ['min' => '1.6.1', 'max' => '2.3.0'], 'rd_kafka_conf_set_engine_callback_data' => ['min' => '1.7.0', 'max' => '2.3.0'], 'rd_kafka_mem_calloc' => ['min' => '1.7.0', 'max' => '2.3.0'], 'rd_kafka_mem_malloc' => ['min' => '1.7.0', 'max' => '2.3.0'], 'rd_kafka_mock_broker_push_request_error_rtts' => ['min' => '1.7.0', 'max' => '2.3.0'], 'rd_kafka_conf_enable_sasl_queue' => ['min' => '1.9.0', 'max' => '2.3.0'], 'rd_kafka_queue_get_sasl' => ['min' => '1.9.0', 'max' => '2.3.0'], 'rd_kafka_sasl_background_callbacks_enable' => ['min' => '1.9.0', 'max' => '2.3.0'], 'rd_kafka_consumer_close_queue' => ['min' => '1.9.0', 'max' => '2.3.0'], 'rd_kafka_consumer_closed' => ['min' => '1.9.0', 'max' => '2.3.0'], 'rd_kafka_event_CreateAcls_result' => ['min' => '1.9.0', 'max' => '2.3.0'], 'rd_kafka_event_DescribeAcls_result' => ['min' => '1.9.0', 'max' => 
'2.3.0'], 'rd_kafka_event_DeleteAcls_result' => ['min' => '1.9.0', 'max' => '2.3.0'], 'rd_kafka_ResourcePatternType_name' => ['min' => '1.9.0', 'max' => '2.3.0'], 'rd_kafka_acl_result_error' => ['min' => '1.9.0', 'max' => '2.3.0'], 'rd_kafka_AclOperation_name' => ['min' => '1.9.0', 'max' => '2.3.0'], 'rd_kafka_AclPermissionType_name' => ['min' => '1.9.0', 'max' => '2.3.0'], 'rd_kafka_AclBinding_new' => ['min' => '1.9.0', 'max' => '2.3.0'], 'rd_kafka_AclBindingFilter_new' => ['min' => '1.9.0', 'max' => '2.3.0'], 'rd_kafka_AclBinding_restype' => ['min' => '1.9.0', 'max' => '2.3.0'], 'rd_kafka_AclBinding_name' => ['min' => '1.9.0', 'max' => '2.3.0'], 'rd_kafka_AclBinding_principal' => ['min' => '1.9.0', 'max' => '2.3.0'], 'rd_kafka_AclBinding_host' => ['min' => '1.9.0', 'max' => '2.3.0'], 'rd_kafka_AclBinding_operation' => ['min' => '1.9.0', 'max' => '2.3.0'], 'rd_kafka_AclBinding_permission_type' => ['min' => '1.9.0', 'max' => '2.3.0'], 'rd_kafka_AclBinding_resource_pattern_type' => ['min' => '1.9.0', 'max' => '2.3.0'], 'rd_kafka_AclBinding_error' => ['min' => '1.9.0', 'max' => '2.3.0'], 'rd_kafka_AclBinding_destroy' => ['min' => '1.9.0', 'max' => '2.3.0'], 'rd_kafka_AclBinding_destroy_array' => ['min' => '1.9.0', 'max' => '2.3.0'], 'rd_kafka_CreateAcls_result_acls' => ['min' => '1.9.0', 'max' => '2.3.0'], 'rd_kafka_CreateAcls' => ['min' => '1.9.0', 'max' => '2.3.0'], 'rd_kafka_DescribeAcls_result_acls' => ['min' => '1.9.0', 'max' => '2.3.0'], 'rd_kafka_DescribeAcls' => ['min' => '1.9.0', 'max' => '2.3.0'], 'rd_kafka_DeleteAcls_result_responses' => ['min' => '1.9.0', 'max' => '2.3.0'], 'rd_kafka_DeleteAcls_result_response_error' => ['min' => '1.9.0', 'max' => '2.3.0'], 'rd_kafka_DeleteAcls_result_response_matching_acls' => ['min' => '1.9.0', 'max' => '2.3.0'], 'rd_kafka_DeleteAcls' => ['min' => '1.9.0', 'max' => '2.3.0'], 'rd_kafka_conf_set_resolve_cb' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_sasl_set_credentials' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_Node_id' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_Node_host' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_Node_port' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_consumer_group_state_name' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_consumer_group_state_code' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_event_ListConsumerGroups_result' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_event_DescribeConsumerGroups_result' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_event_AlterConsumerGroupOffsets_result' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_event_ListConsumerGroupOffsets_result' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_interceptor_f_on_broker_state_change_t' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_interceptor_add_on_broker_state_change' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_AdminOptions_set_require_stable_offsets' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_AdminOptions_set_match_consumer_group_states' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_ListConsumerGroups' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_ConsumerGroupListing_group_id' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_ConsumerGroupListing_is_simple_consumer_group' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_ConsumerGroupListing_state' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_ListConsumerGroups_result_valid' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_ListConsumerGroups_result_errors' => ['min' => '2.0.0', 'max' => 
'2.3.0'], 'rd_kafka_DescribeConsumerGroups' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_DescribeConsumerGroups_result_groups' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_ConsumerGroupDescription_group_id' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_ConsumerGroupDescription_error' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_ConsumerGroupDescription_is_simple_consumer_group' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_ConsumerGroupDescription_partition_assignor' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_ConsumerGroupDescription_state' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_ConsumerGroupDescription_coordinator' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_ConsumerGroupDescription_member_count' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_ConsumerGroupDescription_member' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_MemberDescription_client_id' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_MemberDescription_group_instance_id' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_MemberDescription_consumer_id' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_MemberDescription_host' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_MemberDescription_assignment' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_MemberAssignment_partitions' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_ListConsumerGroupOffsets_new' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_ListConsumerGroupOffsets_destroy' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_ListConsumerGroupOffsets_destroy_array' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_ListConsumerGroupOffsets' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_ListConsumerGroupOffsets_result_groups' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_AlterConsumerGroupOffsets_new' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_AlterConsumerGroupOffsets_destroy' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_AlterConsumerGroupOffsets_destroy_array' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_AlterConsumerGroupOffsets' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_AlterConsumerGroupOffsets_result_groups' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_mock_broker_error_stack_cnt' => ['min' => '2.0.0', 'max' => '2.3.0'], 'rd_kafka_topic_partition_set_leader_epoch' => ['min' => '2.1.0', 'max' => '2.3.0'], 'rd_kafka_topic_partition_get_leader_epoch' => ['min' => '2.1.0', 'max' => '2.3.0'], 'rd_kafka_message_leader_epoch' => ['min' => '2.1.0', 'max' => '2.3.0'], 'rd_kafka_offset_store_message' => ['min' => '2.1.0', 'max' => '2.3.0'], 'rd_kafka_event_IncrementalAlterConfigs_result' => ['min' => '2.2.0', 'max' => '2.3.0'], 'rd_kafka_event_DescribeUserScramCredentials_result' => ['min' => '2.2.0', 'max' => '2.3.0'], 'rd_kafka_event_AlterUserScramCredentials_result' => ['min' => '2.2.0', 'max' => '2.3.0'], 'rd_kafka_ConfigResource_add_incremental_config' => ['min' => '2.2.0', 'max' => '2.3.0'], 'rd_kafka_IncrementalAlterConfigs' => ['min' => '2.2.0', 'max' => '2.3.0'], 'rd_kafka_IncrementalAlterConfigs_result_resources' => ['min' => '2.2.0', 'max' => '2.3.0'], 'rd_kafka_ScramCredentialInfo_mechanism' => ['min' => '2.2.0', 'max' => '2.3.0'], 'rd_kafka_ScramCredentialInfo_iterations' => ['min' => '2.2.0', 'max' => '2.3.0'], 'rd_kafka_UserScramCredentialsDescription_user' => ['min' => '2.2.0', 'max' => '2.3.0'], 'rd_kafka_UserScramCredentialsDescription_error' => ['min' => '2.2.0', 'max' => '2.3.0'], 
'rd_kafka_UserScramCredentialsDescription_scramcredentialinfo_count' => ['min' => '2.2.0', 'max' => '2.3.0'], 'rd_kafka_UserScramCredentialsDescription_scramcredentialinfo' => ['min' => '2.2.0', 'max' => '2.3.0'], 'rd_kafka_DescribeUserScramCredentials_result_descriptions' => ['min' => '2.2.0', 'max' => '2.3.0'], 'rd_kafka_DescribeUserScramCredentials' => ['min' => '2.2.0', 'max' => '2.3.0'], 'rd_kafka_UserScramCredentialUpsertion_new' => ['min' => '2.2.0', 'max' => '2.3.0'], 'rd_kafka_UserScramCredentialDeletion_new' => ['min' => '2.2.0', 'max' => '2.3.0'], 'rd_kafka_UserScramCredentialAlteration_destroy' => ['min' => '2.2.0', 'max' => '2.3.0'], 'rd_kafka_UserScramCredentialAlteration_destroy_array' => ['min' => '2.2.0', 'max' => '2.3.0'], 'rd_kafka_AlterUserScramCredentials_result_response_user' => ['min' => '2.2.0', 'max' => '2.3.0'], 'rd_kafka_AlterUserScramCredentials_result_response_error' => ['min' => '2.2.0', 'max' => '2.3.0'], 'rd_kafka_AlterUserScramCredentials_result_responses' => ['min' => '2.2.0', 'max' => '2.3.0'], 'rd_kafka_AlterUserScramCredentials' => ['min' => '2.2.0', 'max' => '2.3.0'], 'rd_kafka_Uuid_base64str' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_Uuid_least_significant_bits' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_Uuid_most_significant_bits' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_Uuid_new' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_Uuid_copy' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_Uuid_destroy' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_Node_rack' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_event_DescribeTopics_result' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_event_DescribeCluster_result' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_event_ListOffsets_result' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_AdminOptions_set_include_authorized_operations' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_AdminOptions_set_isolation_level' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_TopicCollection_of_topic_names' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_TopicCollection_destroy' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_DescribeTopics' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_DescribeTopics_result_topics' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_TopicDescription_partitions' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_TopicPartitionInfo_partition' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_TopicPartitionInfo_leader' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_TopicPartitionInfo_isr' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_TopicPartitionInfo_replicas' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_TopicDescription_authorized_operations' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_TopicDescription_name' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_TopicDescription_topic_id' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_TopicDescription_is_internal' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_TopicDescription_error' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_DescribeCluster' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_DescribeCluster_result_nodes' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_DescribeCluster_result_authorized_operations' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_DescribeCluster_result_controller' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_DescribeCluster_result_cluster_id' => ['min' => '2.3.0', 'max' => '2.3.0'], 
'rd_kafka_ConsumerGroupDescription_authorized_operations' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_ListOffsetsResultInfo_topic_partition' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_ListOffsetsResultInfo_timestamp' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_ListOffsets_result_infos' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_ListOffsets' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_mock_start_request_tracking' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_mock_stop_request_tracking' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_mock_request_destroy' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_mock_request_id' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_mock_request_api_key' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_mock_request_timestamp' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_mock_get_requests' => ['min' => '2.3.0', 'max' => '2.3.0'], 'rd_kafka_mock_clear_requests' => ['min' => '2.3.0', 'max' => '2.3.0']]
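
The array above maps each binding-level librdkafka function to the librdkafka version range it is documented against. As a hedged sketch (the helper name and the $supportedMethods variable are illustrative, not part of the documented API), such a map can be consulted with PHP's version_compare():

<?php
// $supportedMethods is assumed to hold the function => ['min' => ..., 'max' => ...]
// map shown above; isMethodSupported() is an illustrative helper, not library API.
function isMethodSupported(array $supportedMethods, string $method, string $librdkafkaVersion): bool
{
    if (!isset($supportedMethods[$method])) {
        return false;
    }

    return version_compare($librdkafkaVersion, $supportedMethods[$method]['min'], '>=')
        && version_compare($librdkafkaVersion, $supportedMethods[$method]['max'], '<=');
}

// Example: rd_kafka_DescribeTopics is listed with min 2.3.0, so
// isMethodSupported($supportedMethods, 'rd_kafka_DescribeTopics', '2.3.0') yields true,
// while isMethodSupported($supportedMethods, 'rd_kafka_DescribeTopics', '2.0.0') yields false.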

RD_KAFKA_VERSION

public RD_KAFKA_VERSION = 16777471

librdkafka version

Interpreted as hex MM.mm.rr.xx:

• MM = Major
• mm = minor
• rr = revision
• xx = pre-release id (0xff is the final release)

E.g.: 0x000801ff = 0.8.1

Remarks
This value should only be used during compile time; for runtime version checks use rd_kafka_version()
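
To illustrate the MM.mm.rr.xx encoding, here is a minimal PHP sketch that unpacks the constant value shown above (16777471 = 0x010000ff); as the remark notes, runtime checks should still go through rd_kafka_version():

<?php
$version = 16777471; // RD_KAFKA_VERSION as documented above, 0x010000ff

$major      = ($version >> 24) & 0xff; // MM
$minor      = ($version >> 16) & 0xff; // mm
$revision   = ($version >> 8) & 0xff;  // rr
$preRelease = $version & 0xff;         // xx, 0xff marks a final release

printf(
    "librdkafka %d.%d.%d%s\n",
    $major,
    $minor,
    $revision,
    $preRelease === 0xff ? '' : sprintf(' (pre-release %d)', $preRelease)
);
// prints: librdkafka 1.0.0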

RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE

public RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 74

Unsupported compression type

RD_KAFKA_RESP_ERR_END_ALL

public RD_KAFKA_RESP_ERR_END_ALL = 75

enum rd_kafka_resp_err_t
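
RD_KAFKA_RESP_ERR_END_ALL is the sentinel closing the rd_kafka_resp_err_t enum rather than a real error code. A small sketch, grounded only in the values shown on this page (and therefore specific to the librdkafka snapshot documented here); the function name is illustrative:

<?php
// Broker-side response codes documented here run from 0 (RD_KAFKA_RESP_ERR_NO_ERROR)
// up to, but not including, 75 (RD_KAFKA_RESP_ERR_END_ALL, the enum sentinel).
function isKnownBrokerErrorCode(int $code): bool
{
    return $code >= 0 && $code < 75;
}

var_dump(isKnownBrokerErrorCode(74)); // true  - RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE
var_dump(isKnownBrokerErrorCode(75)); // false - the sentinel itself is not an error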

RD_KAFKA_ADMIN_OP__CNT

public RD_KAFKA_ADMIN_OP__CNT = 6

Number of ops defined

              RD_KAFKA_CDEF


              public RD_KAFKA_CDEF = 'typedef long int ssize_t;
              +typedef struct _IO_FILE FILE;
              +typedef long int mode_t;
              +typedef signed int int16_t;
              +typedef unsigned int uint16_t;
              +typedef signed int int32_t;
              +typedef signed long int int64_t;
              +int rd_kafka_version(void);
              +const char *rd_kafka_version_str(void);
              +typedef enum rd_kafka_type_t {
              +  RD_KAFKA_PRODUCER,
              +  RD_KAFKA_CONSUMER,
              +} rd_kafka_type_t;
              +typedef enum rd_kafka_timestamp_type_t {
              +  RD_KAFKA_TIMESTAMP_NOT_AVAILABLE,
              +  RD_KAFKA_TIMESTAMP_CREATE_TIME,
              +  RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME,
              +} rd_kafka_timestamp_type_t;
              +const char *rd_kafka_get_debug_contexts(void);
              +typedef struct rd_kafka_s rd_kafka_t;
              +typedef struct rd_kafka_topic_s rd_kafka_topic_t;
              +typedef struct rd_kafka_conf_s rd_kafka_conf_t;
              +typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t;
              +typedef struct rd_kafka_queue_s rd_kafka_queue_t;
              +typedef struct rd_kafka_op_s rd_kafka_event_t;
              +typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t;
              +typedef enum {
              +  RD_KAFKA_RESP_ERR__BEGIN = (- 200),
              +  RD_KAFKA_RESP_ERR__BAD_MSG = (- 199),
              +  RD_KAFKA_RESP_ERR__BAD_COMPRESSION = (- 198),
              +  RD_KAFKA_RESP_ERR__DESTROY = (- 197),
              +  RD_KAFKA_RESP_ERR__FAIL = (- 196),
              +  RD_KAFKA_RESP_ERR__TRANSPORT = (- 195),
              +  RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = (- 194),
              +  RD_KAFKA_RESP_ERR__RESOLVE = (- 193),
              +  RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = (- 192),
              +  RD_KAFKA_RESP_ERR__PARTITION_EOF = (- 191),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = (- 190),
              +  RD_KAFKA_RESP_ERR__FS = (- 189),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = (- 188),
              +  RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = (- 187),
              +  RD_KAFKA_RESP_ERR__INVALID_ARG = (- 186),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT = (- 185),
              +  RD_KAFKA_RESP_ERR__QUEUE_FULL = (- 184),
              +  RD_KAFKA_RESP_ERR__ISR_INSUFF = (- 183),
              +  RD_KAFKA_RESP_ERR__NODE_UPDATE = (- 182),
              +  RD_KAFKA_RESP_ERR__SSL = (- 181),
              +  RD_KAFKA_RESP_ERR__WAIT_COORD = (- 180),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = (- 179),
              +  RD_KAFKA_RESP_ERR__IN_PROGRESS = (- 178),
              +  RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = (- 177),
              +  RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = (- 176),
              +  RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = (- 175),
              +  RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = (- 174),
              +  RD_KAFKA_RESP_ERR__CONFLICT = (- 173),
              +  RD_KAFKA_RESP_ERR__STATE = (- 172),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = (- 171),
              +  RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = (- 170),
              +  RD_KAFKA_RESP_ERR__AUTHENTICATION = (- 169),
              +  RD_KAFKA_RESP_ERR__NO_OFFSET = (- 168),
              +  RD_KAFKA_RESP_ERR__OUTDATED = (- 167),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = (- 166),
              +  RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = (- 165),
              +  RD_KAFKA_RESP_ERR__WAIT_CACHE = (- 164),
              +  RD_KAFKA_RESP_ERR__INTR = (- 163),
              +  RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = (- 162),
              +  RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = (- 161),
              +  RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = (- 160),
              +  RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = (- 159),
              +  RD_KAFKA_RESP_ERR__PARTIAL = (- 158),
              +  RD_KAFKA_RESP_ERR__READ_ONLY = (- 157),
              +  RD_KAFKA_RESP_ERR__NOENT = (- 156),
              +  RD_KAFKA_RESP_ERR__UNDERFLOW = (- 155),
              +  RD_KAFKA_RESP_ERR__INVALID_TYPE = (- 154),
              +  RD_KAFKA_RESP_ERR__RETRY = (- 153),
              +  RD_KAFKA_RESP_ERR__PURGE_QUEUE = (- 152),
              +  RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = (- 151),
              +  RD_KAFKA_RESP_ERR__FATAL = (- 150),
              +  RD_KAFKA_RESP_ERR__INCONSISTENT = (- 149),
              +  RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = (- 148),
              +  RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = (- 147),
              +  RD_KAFKA_RESP_ERR__END = (- 100),
              +  RD_KAFKA_RESP_ERR_UNKNOWN = (- 1),
              +  RD_KAFKA_RESP_ERR_NO_ERROR = 0,
              +  RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG = 2,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4,
              +  RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5,
              +  RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6,
              +  RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7,
              +  RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8,
              +  RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9,
              +  RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10,
              +  RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11,
              +  RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12,
              +  RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13,
              +  RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS = 14,
              +  RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE = 15,
              +  RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP = 16,
              +  RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17,
              +  RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
              +  RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25,
              +  RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26,
              +  RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27,
              +  RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
              +  RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29,
              +  RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30,
              +  RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
              +  RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35,
              +  RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36,
              +  RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39,
              +  RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40,
              +  RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
              +  RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44,
              +  RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47,
              +  RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49,
              +  RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50,
              +  RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51,
              +  RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52,
              +  RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
              +  RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54,
              +  RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55,
              +  RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56,
              +  RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57,
              +  RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59,
              +  RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66,
              +  RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67,
              +  RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68,
              +  RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69,
              +  RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70,
              +  RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71,
              +  RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72,
              +  RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 74,
              +  RD_KAFKA_RESP_ERR_END_ALL,
              +} rd_kafka_resp_err_t;
              +struct rd_kafka_err_desc {
              +  rd_kafka_resp_err_t code;
              +  char *name;
              +  char *desc;
              +};
              +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, size_t *cntp);
              +const char *rd_kafka_err2str(rd_kafka_resp_err_t err);
              +const char *rd_kafka_err2name(rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_last_error(void);
              +rd_kafka_resp_err_t rd_kafka_errno2err(int errnox);
              +int rd_kafka_errno(void);
              +rd_kafka_resp_err_t rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason);
              +typedef struct rd_kafka_topic_partition_s {
              +  char *topic;
              +  int32_t partition;
              +  int64_t offset;
              +  void *metadata;
              +  size_t metadata_size;
              +  void *opaque;
              +  rd_kafka_resp_err_t err;
              +  void *_private;
              +} rd_kafka_topic_partition_t;
              +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar);
              +typedef struct rd_kafka_topic_partition_list_s {
              +  int cnt;
              +  int size;
              +  rd_kafka_topic_partition_t *elems;
              +} rd_kafka_topic_partition_list_t;
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size);
              +void rd_kafka_topic_partition_list_destroy(rd_kafka_topic_partition_list_t *rkparlist);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_add_range(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t start, int32_t stop);
              +int rd_kafka_topic_partition_list_del(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +int rd_kafka_topic_partition_list_del_by_idx(rd_kafka_topic_partition_list_t *rktparlist, int idx);
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src);
              +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition, int64_t offset);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_sort(rd_kafka_topic_partition_list_t *rktparlist, int (*cmp)(const void *a, const void *b, void *opaque), void *opaque);
              +typedef enum rd_kafka_vtype_t {
              +  RD_KAFKA_VTYPE_END,
              +  RD_KAFKA_VTYPE_TOPIC,
              +  RD_KAFKA_VTYPE_RKT,
              +  RD_KAFKA_VTYPE_PARTITION,
              +  RD_KAFKA_VTYPE_VALUE,
              +  RD_KAFKA_VTYPE_KEY,
              +  RD_KAFKA_VTYPE_OPAQUE,
              +  RD_KAFKA_VTYPE_MSGFLAGS,
              +  RD_KAFKA_VTYPE_TIMESTAMP,
              +  RD_KAFKA_VTYPE_HEADER,
              +  RD_KAFKA_VTYPE_HEADERS,
              +} rd_kafka_vtype_t;
              +typedef struct rd_kafka_headers_s rd_kafka_headers_t;
              +rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count);
              +void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs);
              +rd_kafka_headers_t *rd_kafka_headers_copy(const rd_kafka_headers_t *src);
              +rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, const char *name, ssize_t name_size, const void *value, ssize_t value_size);
              +rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, const char *name);
              +rd_kafka_resp_err_t rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get(const rd_kafka_headers_t *hdrs, size_t idx, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, size_t idx, const char **namep, const void **valuep, size_t *sizep);
              +typedef struct rd_kafka_message_s {
              +  rd_kafka_resp_err_t err;
              +  rd_kafka_topic_t *rkt;
              +  int32_t partition;
              +  void *payload;
              +  size_t len;
              +  void *key;
              +  size_t key_len;
              +  int64_t offset;
              +  void *_private;
              +} rd_kafka_message_t;
              +void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);
              +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, rd_kafka_timestamp_type_t *tstype);
              +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage);
              +rd_kafka_resp_err_t rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +rd_kafka_resp_err_t rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t *hdrs);
              +size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs);
              +typedef enum {
              +  RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0,
              +  RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1,
              +  RD_KAFKA_MSG_STATUS_PERSISTED = 2,
              +} rd_kafka_msg_status_t;
              +rd_kafka_msg_status_t rd_kafka_message_status(const rd_kafka_message_t *rkmessage);
              +typedef enum {
              +  RD_KAFKA_CONF_UNKNOWN = (- 2),
              +  RD_KAFKA_CONF_INVALID = (- 1),
              +  RD_KAFKA_CONF_OK = 0,
              +} rd_kafka_conf_res_t;
              +rd_kafka_conf_t *rd_kafka_conf_new(void);
              +void rd_kafka_conf_destroy(rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, size_t filter_cnt, const char **filter);
              +rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events);
              +void rd_kafka_conf_set_background_event_cb(rd_kafka_conf_t *conf, void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque));
              +void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, void (*dr_cb)(rd_kafka_t *rk, void *payload, size_t len, rd_kafka_resp_err_t err, void *opaque, void *msg_opaque));
              +void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, void (*dr_msg_cb)(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_consume_cb(rd_kafka_conf_t *conf, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_rebalance_cb(rd_kafka_conf_t *conf, void (*rebalance_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque));
              +void rd_kafka_conf_set_offset_commit_cb(rd_kafka_conf_t *conf, void (*offset_commit_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque));
              +void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, void (*error_cb)(rd_kafka_t *rk, int err, const char *reason, void *opaque));
              +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, void (*throttle_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque));
              +void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, void (*log_cb)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque));
              +void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, int (*socket_cb)(int domain, int type, int protocol, void *opaque));
              +void rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, int (*connect_cb)(int sockfd, const struct sockaddr *addr, int addrlen, const char *id, void *opaque));
              +void rd_kafka_conf_set_closesocket_cb(rd_kafka_conf_t *conf, int (*closesocket_cb)(int sockfd, void *opaque));
              +void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque);
              +void *rd_kafka_opaque(const rd_kafka_t *rk);
              +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, rd_kafka_topic_conf_t *tconf);
              +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp);
              +const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, size_t *cntp);
              +void rd_kafka_conf_dump_free(const char **arr, size_t cnt);
              +void rd_kafka_conf_properties_show(FILE *fp);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf);
              +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk);
              +void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *opaque);
              +void rd_kafka_topic_conf_set_partitioner_cb(rd_kafka_topic_conf_t *topic_conf, int32_t (*partitioner)(const rd_kafka_topic_t *rkt, const void *keydata, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque));
              +void rd_kafka_topic_conf_set_msg_order_cmp(rd_kafka_topic_conf_t *topic_conf, int (*msg_order_cmp)(const rd_kafka_message_t *a, const rd_kafka_message_t *b));
              +int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, int32_t partition);
              +int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, char *errstr, size_t errstr_size);
              +void rd_kafka_destroy(rd_kafka_t *rk);
              +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags);
              +const char *rd_kafka_name(const rd_kafka_t *rk);
              +rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk);
              +char *rd_kafka_memberid(const rd_kafka_t *rk);
              +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms);
              +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, rd_kafka_topic_conf_t *conf);
              +void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt);
              +const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt);
              +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt);
              +int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms);
              +void rd_kafka_yield(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_pause_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_resume_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high);
              +rd_kafka_resp_err_t rd_kafka_offsets_for_times(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets, int timeout_ms);
              +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr);
              +rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk);
              +void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu);
              +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, const char *topic, int32_t partition);
              +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk);
              +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst);
              +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu);
              +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, int fd, const void *payload, size_t size);
              +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, void (*event_cb)(rd_kafka_t *rk, void *opaque), void *opaque);
              +int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
              +rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, int timeout_ms);
              +rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms);
              +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque), void *opaque);
              +rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, int timeout_ms);
              +ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque), void *opaque);
              +rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +rd_kafka_resp_err_t rd_kafka_offsets_store(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets);
              +rd_kafka_resp_err_t rd_kafka_subscribe(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *topics);
              +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics);
              +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_assignment(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **partitions);
              +rd_kafka_resp_err_t rd_kafka_commit(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_message(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_queue(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_queue_t *rkqu, void (*cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque), void *opaque);
              +rd_kafka_resp_err_t rd_kafka_committed(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, void *payload, size_t len, const void *key, size_t keylen, void *msg_opaque);
              +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...);
              +int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, rd_kafka_message_t *rkmessages, int message_cnt);
              +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags);
              +typedef struct rd_kafka_metadata_broker {
              +  int32_t id;
              +  char *host;
              +  int port;
              +} rd_kafka_metadata_broker_t;
              +typedef struct rd_kafka_metadata_partition {
              +  int32_t id;
              +  rd_kafka_resp_err_t err;
              +  int32_t leader;
              +  int replica_cnt;
              +  int32_t *replicas;
              +  int isr_cnt;
              +  int32_t *isrs;
              +} rd_kafka_metadata_partition_t;
              +typedef struct rd_kafka_metadata_topic {
              +  char *topic;
              +  int partition_cnt;
              +  struct rd_kafka_metadata_partition *partitions;
              +  rd_kafka_resp_err_t err;
              +} rd_kafka_metadata_topic_t;
              +typedef struct rd_kafka_metadata {
              +  int broker_cnt;
              +  struct rd_kafka_metadata_broker *brokers;
              +  int topic_cnt;
              +  struct rd_kafka_metadata_topic *topics;
              +  int32_t orig_broker_id;
              +  char *orig_broker_name;
              +} rd_kafka_metadata_t;
              +rd_kafka_resp_err_t rd_kafka_metadata(rd_kafka_t *rk, int all_topics, rd_kafka_topic_t *only_rkt, const struct rd_kafka_metadata **metadatap, int timeout_ms);
              +void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata);
              +struct rd_kafka_group_member_info {
              +  char *member_id;
              +  char *client_id;
              +  char *client_host;
              +  void *member_metadata;
              +  int member_metadata_size;
              +  void *member_assignment;
              +  int member_assignment_size;
              +};
              +struct rd_kafka_group_info {
              +  struct rd_kafka_metadata_broker broker;
              +  char *group;
              +  rd_kafka_resp_err_t err;
              +  char *state;
              +  char *protocol_type;
              +  char *protocol;
              +  struct rd_kafka_group_member_info *members;
              +  int member_cnt;
              +};
              +struct rd_kafka_group_list {
              +  struct rd_kafka_group_info *groups;
              +  int group_cnt;
              +};
              +rd_kafka_resp_err_t rd_kafka_list_groups(rd_kafka_t *rk, const char *group, const struct rd_kafka_group_list **grplistp, int timeout_ms);
              +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist);
              +int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist);
              +void rd_kafka_set_logger(rd_kafka_t *rk, void (*func)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_set_log_level(rd_kafka_t *rk, int level);
              +void rd_kafka_log_print(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +int rd_kafka_outq_len(rd_kafka_t *rk);
              +void rd_kafka_dump(FILE *fp, rd_kafka_t *rk);
              +int rd_kafka_thread_cnt(void);
              +int rd_kafka_wait_destroyed(int timeout_ms);
              +int rd_kafka_unittest(void);
              +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk);
              +typedef int rd_kafka_event_type_t;
              +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev);
              +void rd_kafka_event_destroy(rd_kafka_event_t *rkev);
              +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev);
              +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, const rd_kafka_message_t **rkmessages, size_t size);
              +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev);
              +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev);
              +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev);
              +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev);
              +int rd_kafka_event_log(rd_kafka_event_t *rkev, const char **fac, const char **str, int *level);
              +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_list_t *rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_t *rd_kafka_event_topic_partition(rd_kafka_event_t *rkev);
              +typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t;
              +const rd_kafka_CreateTopics_result_t *rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteTopics_result_t *rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreatePartitions_result_t *rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConfigs_result_t *rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConfigs_result_t *rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev);
              +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms);
              +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms);
              +typedef rd_kafka_resp_err_t (rd_kafka_plugin_f_conf_init_t)(rd_kafka_conf_t *conf, void **plug_opaquep, char *errstr, size_t errstr_size);
              +typedef rd_kafka_conf_res_t (rd_kafka_interceptor_f_on_conf_set_t)(rd_kafka_conf_t *conf, const char *name, const char *val, char *errstr, size_t errstr_size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_dup_t)(rd_kafka_conf_t *new_conf, const rd_kafka_conf_t *old_conf, size_t filter_cnt, const char **filter, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_destroy_t)(void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_new_t)(rd_kafka_t *rk, const rd_kafka_conf_t *conf, void *ic_opaque, char *errstr, size_t errstr_size);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_send_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_acknowledgement_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_consume_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_commit_t)(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_request_sent_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_new_t *on_new, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_destroy_t *on_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_send_t *on_send, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_consume_t *on_consume, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_commit_t *on_commit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres);
              +typedef enum rd_kafka_admin_op_t {
              +  RD_KAFKA_ADMIN_OP_ANY = 0,
              +  RD_KAFKA_ADMIN_OP_CREATETOPICS,
              +  RD_KAFKA_ADMIN_OP_DELETETOPICS,
              +  RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONFIGS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS,
              +  RD_KAFKA_ADMIN_OP__CNT,
              +} rd_kafka_admin_op_t;
              +typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t;
              +rd_kafka_AdminOptions_t *rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
              +void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, int true_or_false, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, int32_t broker_id, char *errstr, size_t errstr_size);
              +void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, void *opaque);
              +typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t;
              +rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, int num_partitions, int replication_factor, char *errstr, size_t errstr_size);
              +void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic);
              +void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, int32_t partition, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, const char *name, const char *value);
              +void rd_kafka_CreateTopics(rd_kafka_t *rk, rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(const rd_kafka_CreateTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
              +rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic);
              +void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic);
              +void rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt);
              +void rd_kafka_DeleteTopics(rd_kafka_t *rk, rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(const rd_kafka_DeleteTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t;
              +rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new(const char *topic, size_t new_total_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts);
              +void rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment(rd_kafka_NewPartitions_t *new_parts, int32_t new_partition_idx, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_CreatePartitions(rd_kafka_t *rk, rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreatePartitions_result_topics(const rd_kafka_CreatePartitions_result_t *result, size_t *cntp);
              +typedef enum rd_kafka_ConfigSource_t {
              +  RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
              +  RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4,
              +  RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5,
              +  RD_KAFKA_CONFIG_SOURCE__CNT,
              +} rd_kafka_ConfigSource_t;
              +const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource);
              +typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t;
              +const char *rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry);
              +const char *rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry);
              +rd_kafka_ConfigSource_t rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, size_t *cntp);
              +typedef enum rd_kafka_ResourceType_t {
              +  RD_KAFKA_RESOURCE_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_ANY = 1,
              +  RD_KAFKA_RESOURCE_TOPIC = 2,
              +  RD_KAFKA_RESOURCE_GROUP = 3,
              +  RD_KAFKA_RESOURCE_BROKER = 4,
              +  RD_KAFKA_RESOURCE__CNT,
              +} rd_kafka_ResourceType_t;
              +const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype);
              +typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
              +rd_kafka_ConfigResource_t *rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, const char *resname);
              +void rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config);
              +void rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, size_t config_cnt);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, const char *name, const char *value);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, size_t *cntp);
              +rd_kafka_ResourceType_t rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config);
              +void rd_kafka_AlterConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_AlterConfigs_result_resources(const rd_kafka_AlterConfigs_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_DescribeConfigs_result_resources(const rd_kafka_DescribeConfigs_result_t *result, size_t *cntp);
+'
+
+rdkafka.h, rdkafka_mock.h
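The declaration block above is the kind of C definition set that PHP's FFI layer consumes. As a rough, hypothetical sketch (not this package's actual bootstrap code), a trimmed-down cdef string can be bound against an installed librdkafka and its functions called directly; the shared-library name and the tiny subset of declarations below are assumptions for illustration only:

```php
<?php
// Hypothetical sketch: binding a minimal subset of the declarations above
// with PHP's FFI extension. Requires ext-ffi and an installed librdkafka;
// the shared-library name is an assumption and differs per platform.
$cdef = <<<'CDEF'
int rd_kafka_version(void);
const char *rd_kafka_version_str(void);
CDEF;

$ffi = FFI::cdef($cdef, 'librdkafka.so.1');

echo FFI::string($ffi->rd_kafka_version_str()), PHP_EOL; // e.g. "1.0.1"
printf("0x%08x\n", $ffi->rd_kafka_version());            // packed integer form
```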

RD_KAFKA_VERSION

public RD_KAFKA_VERSION = 16777727

librdkafka version

Interpreted as hex MM.mm.rr.xx:

• MM = major
• mm = minor
• rr = revision
• xx = pre-release id (0xff is the final release)

E.g.: 0x000801ff = 0.8.1

Remarks
This value should only be used at compile time; for runtime version checks use rd_kafka_version()
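Because the packed encoding is easy to misread, here is a small plain-PHP sketch that unpacks the integer into the MM.mm.rr.xx parts described above (the helper name is made up for illustration). Applied to this page's value, 16777727 is 0x010001ff, i.e. librdkafka 1.0.1 (final release):

```php
<?php
// Minimal sketch: unpack the hex-encoded MM.mm.rr.xx version integer
// described above. The function name is illustrative, not part of the API.
function formatRdKafkaVersion(int $version): string
{
    $major      = ($version >> 24) & 0xff;
    $minor      = ($version >> 16) & 0xff;
    $revision   = ($version >> 8) & 0xff;
    $prerelease = $version & 0xff; // 0xff marks the final release

    $suffix = $prerelease === 0xff ? '' : sprintf('-pre%d', $prerelease);

    return sprintf('%d.%d.%d%s', $major, $minor, $revision, $suffix);
}

echo formatRdKafkaVersion(16777727), PHP_EOL;   // 0x010001ff -> "1.0.1"
echo formatRdKafkaVersion(0x000801ff), PHP_EOL; // -> "0.8.1" (doc example)
```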

RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE

public RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76

Unsupported compression type

RD_KAFKA_RESP_ERR_END_ALL

public RD_KAFKA_RESP_ERR_END_ALL = 82

enum rd_kafka_resp_err_t

RD_KAFKA_ADMIN_OP__CNT

public RD_KAFKA_ADMIN_OP__CNT = 6

Number of ops defined

RD_KAFKA_CDEF

              public RD_KAFKA_CDEF = 'typedef long int ssize_t;
              +typedef struct _IO_FILE FILE;
              +typedef long int mode_t;
              +typedef signed int int16_t;
              +typedef unsigned int uint16_t;
              +typedef signed int int32_t;
              +typedef signed long int int64_t;
              +int rd_kafka_version(void);
              +const char *rd_kafka_version_str(void);
              +typedef enum rd_kafka_type_t {
              +  RD_KAFKA_PRODUCER,
              +  RD_KAFKA_CONSUMER,
              +} rd_kafka_type_t;
              +typedef enum rd_kafka_timestamp_type_t {
              +  RD_KAFKA_TIMESTAMP_NOT_AVAILABLE,
              +  RD_KAFKA_TIMESTAMP_CREATE_TIME,
              +  RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME,
              +} rd_kafka_timestamp_type_t;
              +const char *rd_kafka_get_debug_contexts(void);
              +typedef struct rd_kafka_s rd_kafka_t;
              +typedef struct rd_kafka_topic_s rd_kafka_topic_t;
              +typedef struct rd_kafka_conf_s rd_kafka_conf_t;
              +typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t;
              +typedef struct rd_kafka_queue_s rd_kafka_queue_t;
              +typedef struct rd_kafka_op_s rd_kafka_event_t;
              +typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t;
              +typedef enum {
              +  RD_KAFKA_RESP_ERR__BEGIN = (- 200),
              +  RD_KAFKA_RESP_ERR__BAD_MSG = (- 199),
              +  RD_KAFKA_RESP_ERR__BAD_COMPRESSION = (- 198),
              +  RD_KAFKA_RESP_ERR__DESTROY = (- 197),
              +  RD_KAFKA_RESP_ERR__FAIL = (- 196),
              +  RD_KAFKA_RESP_ERR__TRANSPORT = (- 195),
              +  RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = (- 194),
              +  RD_KAFKA_RESP_ERR__RESOLVE = (- 193),
              +  RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = (- 192),
              +  RD_KAFKA_RESP_ERR__PARTITION_EOF = (- 191),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = (- 190),
              +  RD_KAFKA_RESP_ERR__FS = (- 189),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = (- 188),
              +  RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = (- 187),
              +  RD_KAFKA_RESP_ERR__INVALID_ARG = (- 186),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT = (- 185),
              +  RD_KAFKA_RESP_ERR__QUEUE_FULL = (- 184),
              +  RD_KAFKA_RESP_ERR__ISR_INSUFF = (- 183),
              +  RD_KAFKA_RESP_ERR__NODE_UPDATE = (- 182),
              +  RD_KAFKA_RESP_ERR__SSL = (- 181),
              +  RD_KAFKA_RESP_ERR__WAIT_COORD = (- 180),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = (- 179),
              +  RD_KAFKA_RESP_ERR__IN_PROGRESS = (- 178),
              +  RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = (- 177),
              +  RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = (- 176),
              +  RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = (- 175),
              +  RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = (- 174),
              +  RD_KAFKA_RESP_ERR__CONFLICT = (- 173),
              +  RD_KAFKA_RESP_ERR__STATE = (- 172),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = (- 171),
              +  RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = (- 170),
              +  RD_KAFKA_RESP_ERR__AUTHENTICATION = (- 169),
              +  RD_KAFKA_RESP_ERR__NO_OFFSET = (- 168),
              +  RD_KAFKA_RESP_ERR__OUTDATED = (- 167),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = (- 166),
              +  RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = (- 165),
              +  RD_KAFKA_RESP_ERR__WAIT_CACHE = (- 164),
              +  RD_KAFKA_RESP_ERR__INTR = (- 163),
              +  RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = (- 162),
              +  RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = (- 161),
              +  RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = (- 160),
              +  RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = (- 159),
              +  RD_KAFKA_RESP_ERR__PARTIAL = (- 158),
              +  RD_KAFKA_RESP_ERR__READ_ONLY = (- 157),
              +  RD_KAFKA_RESP_ERR__NOENT = (- 156),
              +  RD_KAFKA_RESP_ERR__UNDERFLOW = (- 155),
              +  RD_KAFKA_RESP_ERR__INVALID_TYPE = (- 154),
              +  RD_KAFKA_RESP_ERR__RETRY = (- 153),
              +  RD_KAFKA_RESP_ERR__PURGE_QUEUE = (- 152),
              +  RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = (- 151),
              +  RD_KAFKA_RESP_ERR__FATAL = (- 150),
              +  RD_KAFKA_RESP_ERR__INCONSISTENT = (- 149),
              +  RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = (- 148),
              +  RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = (- 147),
              +  RD_KAFKA_RESP_ERR__END = (- 100),
              +  RD_KAFKA_RESP_ERR_UNKNOWN = (- 1),
              +  RD_KAFKA_RESP_ERR_NO_ERROR = 0,
              +  RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG = 2,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4,
              +  RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5,
              +  RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6,
              +  RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7,
              +  RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8,
              +  RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9,
              +  RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10,
              +  RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11,
              +  RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12,
              +  RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13,
              +  RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS = 14,
              +  RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE = 15,
              +  RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP = 16,
              +  RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17,
              +  RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
              +  RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25,
              +  RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26,
              +  RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27,
              +  RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
              +  RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29,
              +  RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30,
              +  RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
              +  RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35,
              +  RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36,
              +  RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39,
              +  RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40,
              +  RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
              +  RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44,
              +  RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47,
              +  RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49,
              +  RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50,
              +  RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51,
              +  RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52,
              +  RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
              +  RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54,
              +  RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55,
              +  RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56,
              +  RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57,
              +  RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59,
              +  RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66,
              +  RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67,
              +  RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68,
              +  RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69,
              +  RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70,
              +  RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71,
              +  RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72,
              +  RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73,
              +  RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76,
              +  RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77,
              +  RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78,
              +  RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79,
              +  RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80,
              +  RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81,
              +  RD_KAFKA_RESP_ERR_END_ALL,
              +} rd_kafka_resp_err_t;
              +struct rd_kafka_err_desc {
              +  rd_kafka_resp_err_t code;
              +  char *name;
              +  char *desc;
              +};
              +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, size_t *cntp);
              +const char *rd_kafka_err2str(rd_kafka_resp_err_t err);
              +const char *rd_kafka_err2name(rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_last_error(void);
              +rd_kafka_resp_err_t rd_kafka_errno2err(int errnox);
              +int rd_kafka_errno(void);
              +rd_kafka_resp_err_t rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason);
              +typedef struct rd_kafka_topic_partition_s {
              +  char *topic;
              +  int32_t partition;
              +  int64_t offset;
              +  void *metadata;
              +  size_t metadata_size;
              +  void *opaque;
              +  rd_kafka_resp_err_t err;
              +  void *_private;
              +} rd_kafka_topic_partition_t;
              +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar);
              +typedef struct rd_kafka_topic_partition_list_s {
              +  int cnt;
              +  int size;
              +  rd_kafka_topic_partition_t *elems;
              +} rd_kafka_topic_partition_list_t;
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size);
              +void rd_kafka_topic_partition_list_destroy(rd_kafka_topic_partition_list_t *rkparlist);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_add_range(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t start, int32_t stop);
              +int rd_kafka_topic_partition_list_del(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +int rd_kafka_topic_partition_list_del_by_idx(rd_kafka_topic_partition_list_t *rktparlist, int idx);
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src);
              +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition, int64_t offset);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_sort(rd_kafka_topic_partition_list_t *rktparlist, int (*cmp)(const void *a, const void *b, void *opaque), void *opaque);
              +typedef enum rd_kafka_vtype_t {
              +  RD_KAFKA_VTYPE_END,
              +  RD_KAFKA_VTYPE_TOPIC,
              +  RD_KAFKA_VTYPE_RKT,
              +  RD_KAFKA_VTYPE_PARTITION,
              +  RD_KAFKA_VTYPE_VALUE,
              +  RD_KAFKA_VTYPE_KEY,
              +  RD_KAFKA_VTYPE_OPAQUE,
              +  RD_KAFKA_VTYPE_MSGFLAGS,
              +  RD_KAFKA_VTYPE_TIMESTAMP,
              +  RD_KAFKA_VTYPE_HEADER,
              +  RD_KAFKA_VTYPE_HEADERS,
              +} rd_kafka_vtype_t;
              +typedef struct rd_kafka_headers_s rd_kafka_headers_t;
              +rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count);
              +void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs);
              +rd_kafka_headers_t *rd_kafka_headers_copy(const rd_kafka_headers_t *src);
              +rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, const char *name, ssize_t name_size, const void *value, ssize_t value_size);
              +rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, const char *name);
              +rd_kafka_resp_err_t rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get(const rd_kafka_headers_t *hdrs, size_t idx, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, size_t idx, const char **namep, const void **valuep, size_t *sizep);
              +typedef struct rd_kafka_message_s {
              +  rd_kafka_resp_err_t err;
              +  rd_kafka_topic_t *rkt;
              +  int32_t partition;
              +  void *payload;
              +  size_t len;
              +  void *key;
              +  size_t key_len;
              +  int64_t offset;
              +  void *_private;
              +} rd_kafka_message_t;
              +void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);
              +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, rd_kafka_timestamp_type_t *tstype);
              +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage);
              +rd_kafka_resp_err_t rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +rd_kafka_resp_err_t rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t *hdrs);
              +size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs);
              +typedef enum {
              +  RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0,
              +  RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1,
              +  RD_KAFKA_MSG_STATUS_PERSISTED = 2,
              +} rd_kafka_msg_status_t;
              +rd_kafka_msg_status_t rd_kafka_message_status(const rd_kafka_message_t *rkmessage);
              +typedef enum {
              +  RD_KAFKA_CONF_UNKNOWN = (- 2),
              +  RD_KAFKA_CONF_INVALID = (- 1),
              +  RD_KAFKA_CONF_OK = 0,
              +} rd_kafka_conf_res_t;
              +rd_kafka_conf_t *rd_kafka_conf_new(void);
              +void rd_kafka_conf_destroy(rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, size_t filter_cnt, const char **filter);
              +rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events);
              +void rd_kafka_conf_set_background_event_cb(rd_kafka_conf_t *conf, void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque));
              +void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, void (*dr_cb)(rd_kafka_t *rk, void *payload, size_t len, rd_kafka_resp_err_t err, void *opaque, void *msg_opaque));
              +void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, void (*dr_msg_cb)(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_consume_cb(rd_kafka_conf_t *conf, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_rebalance_cb(rd_kafka_conf_t *conf, void (*rebalance_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque));
              +void rd_kafka_conf_set_offset_commit_cb(rd_kafka_conf_t *conf, void (*offset_commit_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque));
              +void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, void (*error_cb)(rd_kafka_t *rk, int err, const char *reason, void *opaque));
              +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, void (*throttle_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque));
              +void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, void (*log_cb)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque));
              +void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, int (*socket_cb)(int domain, int type, int protocol, void *opaque));
              +void rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, int (*connect_cb)(int sockfd, const struct sockaddr *addr, int addrlen, const char *id, void *opaque));
              +void rd_kafka_conf_set_closesocket_cb(rd_kafka_conf_t *conf, int (*closesocket_cb)(int sockfd, void *opaque));
              +void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque);
              +void *rd_kafka_opaque(const rd_kafka_t *rk);
              +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, rd_kafka_topic_conf_t *tconf);
              +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp);
              +const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, size_t *cntp);
              +void rd_kafka_conf_dump_free(const char **arr, size_t cnt);
              +void rd_kafka_conf_properties_show(FILE *fp);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf);
              +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk);
              +void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *opaque);
              +void rd_kafka_topic_conf_set_partitioner_cb(rd_kafka_topic_conf_t *topic_conf, int32_t (*partitioner)(const rd_kafka_topic_t *rkt, const void *keydata, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque));
              +void rd_kafka_topic_conf_set_msg_order_cmp(rd_kafka_topic_conf_t *topic_conf, int (*msg_order_cmp)(const rd_kafka_message_t *a, const rd_kafka_message_t *b));
              +int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, int32_t partition);
              +int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, char *errstr, size_t errstr_size);
              +void rd_kafka_destroy(rd_kafka_t *rk);
              +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags);
              +const char *rd_kafka_name(const rd_kafka_t *rk);
              +rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk);
              +char *rd_kafka_memberid(const rd_kafka_t *rk);
              +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms);
              +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, rd_kafka_topic_conf_t *conf);
              +void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt);
              +const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt);
              +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt);
              +int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms);
              +void rd_kafka_yield(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_pause_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_resume_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high);
              +rd_kafka_resp_err_t rd_kafka_offsets_for_times(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets, int timeout_ms);
              +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr);
              +rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk);
              +void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu);
              +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, const char *topic, int32_t partition);
              +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk);
              +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst);
              +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu);
              +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, int fd, const void *payload, size_t size);
              +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, void (*event_cb)(rd_kafka_t *rk, void *opaque), void *opaque);
              +int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
              +rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, int timeout_ms);
              +rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms);
              +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque), void *opaque);
              +rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, int timeout_ms);
              +ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque), void *opaque);
              +rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +rd_kafka_resp_err_t rd_kafka_offsets_store(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets);
              +rd_kafka_resp_err_t rd_kafka_subscribe(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *topics);
              +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics);
              +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_assignment(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **partitions);
              +rd_kafka_resp_err_t rd_kafka_commit(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_message(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_queue(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_queue_t *rkqu, void (*cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque), void *opaque);
              +rd_kafka_resp_err_t rd_kafka_committed(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, void *payload, size_t len, const void *key, size_t keylen, void *msg_opaque);
              +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...);
              +int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, rd_kafka_message_t *rkmessages, int message_cnt);
              +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags);
              +typedef struct rd_kafka_metadata_broker {
              +  int32_t id;
              +  char *host;
              +  int port;
              +} rd_kafka_metadata_broker_t;
              +typedef struct rd_kafka_metadata_partition {
              +  int32_t id;
              +  rd_kafka_resp_err_t err;
              +  int32_t leader;
              +  int replica_cnt;
              +  int32_t *replicas;
              +  int isr_cnt;
              +  int32_t *isrs;
              +} rd_kafka_metadata_partition_t;
              +typedef struct rd_kafka_metadata_topic {
              +  char *topic;
              +  int partition_cnt;
              +  struct rd_kafka_metadata_partition *partitions;
              +  rd_kafka_resp_err_t err;
              +} rd_kafka_metadata_topic_t;
              +typedef struct rd_kafka_metadata {
              +  int broker_cnt;
              +  struct rd_kafka_metadata_broker *brokers;
              +  int topic_cnt;
              +  struct rd_kafka_metadata_topic *topics;
              +  int32_t orig_broker_id;
              +  char *orig_broker_name;
              +} rd_kafka_metadata_t;
              +rd_kafka_resp_err_t rd_kafka_metadata(rd_kafka_t *rk, int all_topics, rd_kafka_topic_t *only_rkt, const struct rd_kafka_metadata **metadatap, int timeout_ms);
              +void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata);
              +struct rd_kafka_group_member_info {
              +  char *member_id;
              +  char *client_id;
              +  char *client_host;
              +  void *member_metadata;
              +  int member_metadata_size;
              +  void *member_assignment;
              +  int member_assignment_size;
              +};
              +struct rd_kafka_group_info {
              +  struct rd_kafka_metadata_broker broker;
              +  char *group;
              +  rd_kafka_resp_err_t err;
              +  char *state;
              +  char *protocol_type;
              +  char *protocol;
              +  struct rd_kafka_group_member_info *members;
              +  int member_cnt;
              +};
              +struct rd_kafka_group_list {
              +  struct rd_kafka_group_info *groups;
              +  int group_cnt;
              +};
              +rd_kafka_resp_err_t rd_kafka_list_groups(rd_kafka_t *rk, const char *group, const struct rd_kafka_group_list **grplistp, int timeout_ms);
              +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist);
              +int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist);
              +void rd_kafka_set_logger(rd_kafka_t *rk, void (*func)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_set_log_level(rd_kafka_t *rk, int level);
              +void rd_kafka_log_print(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +int rd_kafka_outq_len(rd_kafka_t *rk);
              +void rd_kafka_dump(FILE *fp, rd_kafka_t *rk);
              +int rd_kafka_thread_cnt(void);
              +int rd_kafka_wait_destroyed(int timeout_ms);
              +int rd_kafka_unittest(void);
              +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk);
              +typedef int rd_kafka_event_type_t;
              +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev);
              +void rd_kafka_event_destroy(rd_kafka_event_t *rkev);
              +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev);
              +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, const rd_kafka_message_t **rkmessages, size_t size);
              +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev);
              +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev);
              +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev);
              +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev);
              +int rd_kafka_event_log(rd_kafka_event_t *rkev, const char **fac, const char **str, int *level);
              +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_list_t *rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_t *rd_kafka_event_topic_partition(rd_kafka_event_t *rkev);
              +typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t;
              +const rd_kafka_CreateTopics_result_t *rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteTopics_result_t *rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreatePartitions_result_t *rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConfigs_result_t *rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConfigs_result_t *rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev);
              +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms);
              +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms);
              +typedef rd_kafka_resp_err_t (rd_kafka_plugin_f_conf_init_t)(rd_kafka_conf_t *conf, void **plug_opaquep, char *errstr, size_t errstr_size);
              +typedef rd_kafka_conf_res_t (rd_kafka_interceptor_f_on_conf_set_t)(rd_kafka_conf_t *conf, const char *name, const char *val, char *errstr, size_t errstr_size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_dup_t)(rd_kafka_conf_t *new_conf, const rd_kafka_conf_t *old_conf, size_t filter_cnt, const char **filter, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_destroy_t)(void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_new_t)(rd_kafka_t *rk, const rd_kafka_conf_t *conf, void *ic_opaque, char *errstr, size_t errstr_size);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_send_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_acknowledgement_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_consume_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_commit_t)(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_request_sent_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_new_t *on_new, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_destroy_t *on_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_send_t *on_send, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_consume_t *on_consume, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_commit_t *on_commit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres);
              +typedef enum rd_kafka_admin_op_t {
              +  RD_KAFKA_ADMIN_OP_ANY = 0,
              +  RD_KAFKA_ADMIN_OP_CREATETOPICS,
              +  RD_KAFKA_ADMIN_OP_DELETETOPICS,
              +  RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONFIGS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS,
              +  RD_KAFKA_ADMIN_OP__CNT,
              +} rd_kafka_admin_op_t;
              +typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t;
              +rd_kafka_AdminOptions_t *rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
              +void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, int true_or_false, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, int32_t broker_id, char *errstr, size_t errstr_size);
              +void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, void *opaque);
              +typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t;
              +rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, int num_partitions, int replication_factor, char *errstr, size_t errstr_size);
              +void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic);
              +void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, int32_t partition, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, const char *name, const char *value);
              +void rd_kafka_CreateTopics(rd_kafka_t *rk, rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(const rd_kafka_CreateTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
              +rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic);
              +void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic);
              +void rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt);
              +void rd_kafka_DeleteTopics(rd_kafka_t *rk, rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(const rd_kafka_DeleteTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t;
              +rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new(const char *topic, size_t new_total_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts);
              +void rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment(rd_kafka_NewPartitions_t *new_parts, int32_t new_partition_idx, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_CreatePartitions(rd_kafka_t *rk, rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreatePartitions_result_topics(const rd_kafka_CreatePartitions_result_t *result, size_t *cntp);
              +typedef enum rd_kafka_ConfigSource_t {
              +  RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
              +  RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4,
              +  RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5,
              +  RD_KAFKA_CONFIG_SOURCE__CNT,
              +} rd_kafka_ConfigSource_t;
              +const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource);
              +typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t;
              +const char *rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry);
              +const char *rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry);
              +rd_kafka_ConfigSource_t rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, size_t *cntp);
              +typedef enum rd_kafka_ResourceType_t {
              +  RD_KAFKA_RESOURCE_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_ANY = 1,
              +  RD_KAFKA_RESOURCE_TOPIC = 2,
              +  RD_KAFKA_RESOURCE_GROUP = 3,
              +  RD_KAFKA_RESOURCE_BROKER = 4,
              +  RD_KAFKA_RESOURCE__CNT,
              +} rd_kafka_ResourceType_t;
              +const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype);
              +typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
              +rd_kafka_ConfigResource_t *rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, const char *resname);
              +void rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config);
              +void rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, size_t config_cnt);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, const char *name, const char *value);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, size_t *cntp);
              +rd_kafka_ResourceType_t rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config);
              +void rd_kafka_AlterConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_AlterConfigs_result_resources(const rd_kafka_AlterConfigs_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_DescribeConfigs_result_resources(const rd_kafka_DescribeConfigs_result_t *result, size_t *cntp);
              +'
              +
              +rdkafka.h, rdkafka_mock.h

              +
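The quoted declarations above (taken from rdkafka.h and rdkafka_mock.h) are presumably consumed through PHP's FFI extension. As a rough illustration only, and not the client's own bootstrap code, a small subset of such a cdef string can be loaded directly with FFI::cdef(); the example assumes ext-ffi is enabled and librdkafka.so.1 is resolvable on the system:

<?php
// Minimal sketch, not the client's own bootstrap code.
// Assumptions: ext-ffi is enabled and librdkafka.so.1 can be resolved on this system.
$cdef = '
    int rd_kafka_version(void);
    const char *rd_kafka_version_str(void);
';

$kafka = \FFI::cdef($cdef, 'librdkafka.so.1');

printf(
    "librdkafka %s (0x%08x)\n",
    \FFI::string($kafka->rd_kafka_version_str()), // runtime version string, e.g. "1.1.0"
    $kafka->rd_kafka_version()                    // runtime version int in MM.mm.rr.xx layout
);

FFI::cdef() returns an object whose methods mirror the declared functions, which is presumably why the full RD_KAFKA_CDEF string covers every librdkafka symbol the client needs.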

              RD_KAFKA_VERSION

              +
              public RD_KAFKA_VERSION = 16843007
              +
              +

              librdkafka version

              +

              Interpreted as hex MM.mm.rr.xx:

• MM = Major
• mm = minor
• rr = revision
• xx = pre-release id (0xff is the final release)

              E.g.: 0x000801ff = 0.8.1

              +
              Remarks
This value should only be used at compile time; for runtime version checks, use rd_kafka_version().
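To make the MM.mm.rr.xx layout concrete, a small hypothetical helper (not part of the client's API) can unpack such an integer:

<?php
// Hypothetical helper, not part of the client's API: decode a MM.mm.rr.xx version integer.
function formatLibrdkafkaVersion(int $version): string
{
    $major = ($version >> 24) & 0xFF;
    $minor = ($version >> 16) & 0xFF;
    $rev   = ($version >> 8)  & 0xFF;
    $pre   = $version & 0xFF;                       // 0xff marks a final release

    return sprintf('%d.%d.%d%s', $major, $minor, $rev,
        $pre === 0xFF ? '' : sprintf('-pre%d', $pre));
}

echo formatLibrdkafkaVersion(16843007), PHP_EOL;    // 0x010100ff -> "1.1.0"
echo formatLibrdkafkaVersion(0x000801FF), PHP_EOL;  // the 0.8.1 example above -> "0.8.1"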

              RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE

              +
              public RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76
              +
              +

              Unsupported compression type


              RD_KAFKA_RESP_ERR_END_ALL

              +

              public RD_KAFKA_RESP_ERR_END_ALL = 82
              +
              +enum rd_kafka_resp_err_t

              +

              RD_KAFKA_ADMIN_OP__CNT

              +
              public RD_KAFKA_ADMIN_OP__CNT = 6
              +
              +

              Number of ops defined


              RD_KAFKA_CDEF

              +

              public RD_KAFKA_CDEF = 'typedef long int ssize_t;
              +typedef struct _IO_FILE FILE;
              +typedef long int mode_t;
              +typedef signed int int16_t;
              +typedef unsigned int uint16_t;
              +typedef signed int int32_t;
              +typedef signed long int int64_t;
              +int rd_kafka_version(void);
              +const char *rd_kafka_version_str(void);
              +typedef enum rd_kafka_type_t {
              +  RD_KAFKA_PRODUCER,
              +  RD_KAFKA_CONSUMER,
              +} rd_kafka_type_t;
              +typedef enum rd_kafka_timestamp_type_t {
              +  RD_KAFKA_TIMESTAMP_NOT_AVAILABLE,
              +  RD_KAFKA_TIMESTAMP_CREATE_TIME,
              +  RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME,
              +} rd_kafka_timestamp_type_t;
              +const char *rd_kafka_get_debug_contexts(void);
              +typedef struct rd_kafka_s rd_kafka_t;
              +typedef struct rd_kafka_topic_s rd_kafka_topic_t;
              +typedef struct rd_kafka_conf_s rd_kafka_conf_t;
              +typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t;
              +typedef struct rd_kafka_queue_s rd_kafka_queue_t;
              +typedef struct rd_kafka_op_s rd_kafka_event_t;
              +typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t;
              +typedef enum {
              +  RD_KAFKA_RESP_ERR__BEGIN = (- 200),
              +  RD_KAFKA_RESP_ERR__BAD_MSG = (- 199),
              +  RD_KAFKA_RESP_ERR__BAD_COMPRESSION = (- 198),
              +  RD_KAFKA_RESP_ERR__DESTROY = (- 197),
              +  RD_KAFKA_RESP_ERR__FAIL = (- 196),
              +  RD_KAFKA_RESP_ERR__TRANSPORT = (- 195),
              +  RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = (- 194),
              +  RD_KAFKA_RESP_ERR__RESOLVE = (- 193),
              +  RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = (- 192),
              +  RD_KAFKA_RESP_ERR__PARTITION_EOF = (- 191),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = (- 190),
              +  RD_KAFKA_RESP_ERR__FS = (- 189),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = (- 188),
              +  RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = (- 187),
              +  RD_KAFKA_RESP_ERR__INVALID_ARG = (- 186),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT = (- 185),
              +  RD_KAFKA_RESP_ERR__QUEUE_FULL = (- 184),
              +  RD_KAFKA_RESP_ERR__ISR_INSUFF = (- 183),
              +  RD_KAFKA_RESP_ERR__NODE_UPDATE = (- 182),
              +  RD_KAFKA_RESP_ERR__SSL = (- 181),
              +  RD_KAFKA_RESP_ERR__WAIT_COORD = (- 180),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = (- 179),
              +  RD_KAFKA_RESP_ERR__IN_PROGRESS = (- 178),
              +  RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = (- 177),
              +  RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = (- 176),
              +  RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = (- 175),
              +  RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = (- 174),
              +  RD_KAFKA_RESP_ERR__CONFLICT = (- 173),
              +  RD_KAFKA_RESP_ERR__STATE = (- 172),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = (- 171),
              +  RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = (- 170),
              +  RD_KAFKA_RESP_ERR__AUTHENTICATION = (- 169),
              +  RD_KAFKA_RESP_ERR__NO_OFFSET = (- 168),
              +  RD_KAFKA_RESP_ERR__OUTDATED = (- 167),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = (- 166),
              +  RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = (- 165),
              +  RD_KAFKA_RESP_ERR__WAIT_CACHE = (- 164),
              +  RD_KAFKA_RESP_ERR__INTR = (- 163),
              +  RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = (- 162),
              +  RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = (- 161),
              +  RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = (- 160),
              +  RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = (- 159),
              +  RD_KAFKA_RESP_ERR__PARTIAL = (- 158),
              +  RD_KAFKA_RESP_ERR__READ_ONLY = (- 157),
              +  RD_KAFKA_RESP_ERR__NOENT = (- 156),
              +  RD_KAFKA_RESP_ERR__UNDERFLOW = (- 155),
              +  RD_KAFKA_RESP_ERR__INVALID_TYPE = (- 154),
              +  RD_KAFKA_RESP_ERR__RETRY = (- 153),
              +  RD_KAFKA_RESP_ERR__PURGE_QUEUE = (- 152),
              +  RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = (- 151),
              +  RD_KAFKA_RESP_ERR__FATAL = (- 150),
              +  RD_KAFKA_RESP_ERR__INCONSISTENT = (- 149),
              +  RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = (- 148),
              +  RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = (- 147),
              +  RD_KAFKA_RESP_ERR__END = (- 100),
              +  RD_KAFKA_RESP_ERR_UNKNOWN = (- 1),
              +  RD_KAFKA_RESP_ERR_NO_ERROR = 0,
              +  RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG = 2,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4,
              +  RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5,
              +  RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6,
              +  RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7,
              +  RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8,
              +  RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9,
              +  RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10,
              +  RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11,
              +  RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12,
              +  RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13,
              +  RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS = 14,
              +  RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE = 15,
              +  RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP = 16,
              +  RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17,
              +  RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
              +  RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25,
              +  RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26,
              +  RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27,
              +  RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
              +  RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29,
              +  RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30,
              +  RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
              +  RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35,
              +  RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36,
              +  RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39,
              +  RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40,
              +  RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
              +  RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44,
              +  RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47,
              +  RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49,
              +  RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50,
              +  RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51,
              +  RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52,
              +  RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
              +  RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54,
              +  RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55,
              +  RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56,
              +  RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57,
              +  RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59,
              +  RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66,
              +  RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67,
              +  RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68,
              +  RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69,
              +  RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70,
              +  RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71,
              +  RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72,
              +  RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73,
              +  RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76,
              +  RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77,
              +  RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78,
              +  RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79,
              +  RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80,
              +  RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81,
              +  RD_KAFKA_RESP_ERR_END_ALL,
              +} rd_kafka_resp_err_t;
              +struct rd_kafka_err_desc {
              +  rd_kafka_resp_err_t code;
              +  char *name;
              +  char *desc;
              +};
              +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, size_t *cntp);
              +const char *rd_kafka_err2str(rd_kafka_resp_err_t err);
              +const char *rd_kafka_err2name(rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_last_error(void);
              +rd_kafka_resp_err_t rd_kafka_errno2err(int errnox);
              +int rd_kafka_errno(void);
              +rd_kafka_resp_err_t rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason);
              +typedef struct rd_kafka_topic_partition_s {
              +  char *topic;
              +  int32_t partition;
              +  int64_t offset;
              +  void *metadata;
              +  size_t metadata_size;
              +  void *opaque;
              +  rd_kafka_resp_err_t err;
              +  void *_private;
              +} rd_kafka_topic_partition_t;
              +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar);
              +typedef struct rd_kafka_topic_partition_list_s {
              +  int cnt;
              +  int size;
              +  rd_kafka_topic_partition_t *elems;
              +} rd_kafka_topic_partition_list_t;
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size);
              +void rd_kafka_topic_partition_list_destroy(rd_kafka_topic_partition_list_t *rkparlist);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_add_range(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t start, int32_t stop);
              +int rd_kafka_topic_partition_list_del(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +int rd_kafka_topic_partition_list_del_by_idx(rd_kafka_topic_partition_list_t *rktparlist, int idx);
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src);
              +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition, int64_t offset);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_sort(rd_kafka_topic_partition_list_t *rktparlist, int (*cmp)(const void *a, const void *b, void *opaque), void *opaque);
              +typedef enum rd_kafka_vtype_t {
              +  RD_KAFKA_VTYPE_END,
              +  RD_KAFKA_VTYPE_TOPIC,
              +  RD_KAFKA_VTYPE_RKT,
              +  RD_KAFKA_VTYPE_PARTITION,
              +  RD_KAFKA_VTYPE_VALUE,
              +  RD_KAFKA_VTYPE_KEY,
              +  RD_KAFKA_VTYPE_OPAQUE,
              +  RD_KAFKA_VTYPE_MSGFLAGS,
              +  RD_KAFKA_VTYPE_TIMESTAMP,
              +  RD_KAFKA_VTYPE_HEADER,
              +  RD_KAFKA_VTYPE_HEADERS,
              +} rd_kafka_vtype_t;
              +typedef struct rd_kafka_headers_s rd_kafka_headers_t;
              +rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count);
              +void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs);
              +rd_kafka_headers_t *rd_kafka_headers_copy(const rd_kafka_headers_t *src);
              +rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, const char *name, ssize_t name_size, const void *value, ssize_t value_size);
              +rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, const char *name);
              +rd_kafka_resp_err_t rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get(const rd_kafka_headers_t *hdrs, size_t idx, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, size_t idx, const char **namep, const void **valuep, size_t *sizep);
              +typedef struct rd_kafka_message_s {
              +  rd_kafka_resp_err_t err;
              +  rd_kafka_topic_t *rkt;
              +  int32_t partition;
              +  void *payload;
              +  size_t len;
              +  void *key;
              +  size_t key_len;
              +  int64_t offset;
              +  void *_private;
              +} rd_kafka_message_t;
              +void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);
              +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, rd_kafka_timestamp_type_t *tstype);
              +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage);
              +rd_kafka_resp_err_t rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +rd_kafka_resp_err_t rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t *hdrs);
              +size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs);
              +typedef enum {
              +  RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0,
              +  RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1,
              +  RD_KAFKA_MSG_STATUS_PERSISTED = 2,
              +} rd_kafka_msg_status_t;
              +rd_kafka_msg_status_t rd_kafka_message_status(const rd_kafka_message_t *rkmessage);
              +typedef enum {
              +  RD_KAFKA_CONF_UNKNOWN = (- 2),
              +  RD_KAFKA_CONF_INVALID = (- 1),
              +  RD_KAFKA_CONF_OK = 0,
              +} rd_kafka_conf_res_t;
              +rd_kafka_conf_t *rd_kafka_conf_new(void);
              +void rd_kafka_conf_destroy(rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, size_t filter_cnt, const char **filter);
              +const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk);
              +rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events);
              +void rd_kafka_conf_set_background_event_cb(rd_kafka_conf_t *conf, void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque));
              +void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, void (*dr_cb)(rd_kafka_t *rk, void *payload, size_t len, rd_kafka_resp_err_t err, void *opaque, void *msg_opaque));
              +void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, void (*dr_msg_cb)(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_consume_cb(rd_kafka_conf_t *conf, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_rebalance_cb(rd_kafka_conf_t *conf, void (*rebalance_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque));
              +void rd_kafka_conf_set_offset_commit_cb(rd_kafka_conf_t *conf, void (*offset_commit_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque));
              +void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, void (*error_cb)(rd_kafka_t *rk, int err, const char *reason, void *opaque));
              +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, void (*throttle_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque));
              +void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, void (*log_cb)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque));
              +void rd_kafka_conf_set_oauthbearer_token_refresh_cb(rd_kafka_conf_t *conf, void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, const char *oauthbearer_config, void *opaque));
              +void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, int (*socket_cb)(int domain, int type, int protocol, void *opaque));
              +void rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, int (*connect_cb)(int sockfd, const struct sockaddr *addr, int addrlen, const char *id, void *opaque));
              +void rd_kafka_conf_set_closesocket_cb(rd_kafka_conf_t *conf, int (*closesocket_cb)(int sockfd, void *opaque));
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb(rd_kafka_conf_t *conf, int (*ssl_cert_verify_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int *x509_error, int depth, const char *buf, size_t size, char *errstr, size_t errstr_size, void *opaque));
              +typedef enum rd_kafka_cert_type_t {
              +  RD_KAFKA_CERT_PUBLIC_KEY,
              +  RD_KAFKA_CERT_PRIVATE_KEY,
              +  RD_KAFKA_CERT_CA,
              +  RD_KAFKA_CERT__CNT,
              +} rd_kafka_cert_type_t;
              +typedef enum rd_kafka_cert_enc_t {
              +  RD_KAFKA_CERT_ENC_PKCS12,
              +  RD_KAFKA_CERT_ENC_DER,
              +  RD_KAFKA_CERT_ENC_PEM,
              +  RD_KAFKA_CERT_ENC__CNT,
              +} rd_kafka_cert_enc_t;
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, rd_kafka_cert_type_t cert_type, rd_kafka_cert_enc_t cert_enc, const void *buffer, size_t size, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque);
              +void *rd_kafka_opaque(const rd_kafka_t *rk);
              +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, rd_kafka_topic_conf_t *tconf);
              +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp);
              +const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, size_t *cntp);
              +void rd_kafka_conf_dump_free(const char **arr, size_t cnt);
              +void rd_kafka_conf_properties_show(FILE *fp);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf);
              +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk);
              +void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *opaque);
              +void rd_kafka_topic_conf_set_partitioner_cb(rd_kafka_topic_conf_t *topic_conf, int32_t (*partitioner)(const rd_kafka_topic_t *rkt, const void *keydata, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque));
              +void rd_kafka_topic_conf_set_msg_order_cmp(rd_kafka_topic_conf_t *topic_conf, int (*msg_order_cmp)(const rd_kafka_message_t *a, const rd_kafka_message_t *b));
              +int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, int32_t partition);
              +int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, char *errstr, size_t errstr_size);
              +void rd_kafka_destroy(rd_kafka_t *rk);
              +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags);
              +const char *rd_kafka_name(const rd_kafka_t *rk);
              +rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk);
              +char *rd_kafka_memberid(const rd_kafka_t *rk);
              +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms);
              +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, rd_kafka_topic_conf_t *conf);
              +void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt);
              +const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt);
              +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt);
              +int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms);
              +void rd_kafka_yield(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_pause_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_resume_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high);
              +rd_kafka_resp_err_t rd_kafka_offsets_for_times(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets, int timeout_ms);
              +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr);
              +rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk);
              +void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu);
              +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, const char *topic, int32_t partition);
              +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk);
              +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst);
              +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu);
              +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, int fd, const void *payload, size_t size);
              +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, void (*event_cb)(rd_kafka_t *rk, void *opaque), void *opaque);
              +int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
              +rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, int timeout_ms);
              +rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms);
              +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque), void *opaque);
              +rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, int timeout_ms);
              +ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque), void *opaque);
              +rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +rd_kafka_resp_err_t rd_kafka_offsets_store(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets);
              +rd_kafka_resp_err_t rd_kafka_subscribe(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *topics);
              +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics);
              +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_assignment(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **partitions);
              +rd_kafka_resp_err_t rd_kafka_commit(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_message(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_queue(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_queue_t *rkqu, void (*cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque), void *opaque);
              +rd_kafka_resp_err_t rd_kafka_committed(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, void *payload, size_t len, const void *key, size_t keylen, void *msg_opaque);
              +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...);
              +int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, rd_kafka_message_t *rkmessages, int message_cnt);
              +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags);
              +typedef struct rd_kafka_metadata_broker {
              +  int32_t id;
              +  char *host;
              +  int port;
              +} rd_kafka_metadata_broker_t;
              +typedef struct rd_kafka_metadata_partition {
              +  int32_t id;
              +  rd_kafka_resp_err_t err;
              +  int32_t leader;
              +  int replica_cnt;
              +  int32_t *replicas;
              +  int isr_cnt;
              +  int32_t *isrs;
              +} rd_kafka_metadata_partition_t;
              +typedef struct rd_kafka_metadata_topic {
              +  char *topic;
              +  int partition_cnt;
              +  struct rd_kafka_metadata_partition *partitions;
              +  rd_kafka_resp_err_t err;
              +} rd_kafka_metadata_topic_t;
              +typedef struct rd_kafka_metadata {
              +  int broker_cnt;
              +  struct rd_kafka_metadata_broker *brokers;
              +  int topic_cnt;
              +  struct rd_kafka_metadata_topic *topics;
              +  int32_t orig_broker_id;
              +  char *orig_broker_name;
              +} rd_kafka_metadata_t;
              +rd_kafka_resp_err_t rd_kafka_metadata(rd_kafka_t *rk, int all_topics, rd_kafka_topic_t *only_rkt, const struct rd_kafka_metadata **metadatap, int timeout_ms);
              +void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata);
              +struct rd_kafka_group_member_info {
              +  char *member_id;
              +  char *client_id;
              +  char *client_host;
              +  void *member_metadata;
              +  int member_metadata_size;
              +  void *member_assignment;
              +  int member_assignment_size;
              +};
              +struct rd_kafka_group_info {
              +  struct rd_kafka_metadata_broker broker;
              +  char *group;
              +  rd_kafka_resp_err_t err;
              +  char *state;
              +  char *protocol_type;
              +  char *protocol;
              +  struct rd_kafka_group_member_info *members;
              +  int member_cnt;
              +};
              +struct rd_kafka_group_list {
              +  struct rd_kafka_group_info *groups;
              +  int group_cnt;
              +};
              +rd_kafka_resp_err_t rd_kafka_list_groups(rd_kafka_t *rk, const char *group, const struct rd_kafka_group_list **grplistp, int timeout_ms);
              +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist);
              +int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist);
              +void rd_kafka_set_logger(rd_kafka_t *rk, void (*func)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_set_log_level(rd_kafka_t *rk, int level);
              +void rd_kafka_log_print(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +int rd_kafka_outq_len(rd_kafka_t *rk);
              +void rd_kafka_dump(FILE *fp, rd_kafka_t *rk);
              +int rd_kafka_thread_cnt(void);
              +int rd_kafka_wait_destroyed(int timeout_ms);
              +int rd_kafka_unittest(void);
              +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk);
              +typedef int rd_kafka_event_type_t;
              +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev);
              +void rd_kafka_event_destroy(rd_kafka_event_t *rkev);
              +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev);
              +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, const rd_kafka_message_t **rkmessages, size_t size);
              +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev);
              +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev);
              +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev);
              +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev);
              +int rd_kafka_event_log(rd_kafka_event_t *rkev, const char **fac, const char **str, int *level);
              +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_list_t *rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_t *rd_kafka_event_topic_partition(rd_kafka_event_t *rkev);
              +typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t;
              +const rd_kafka_CreateTopics_result_t *rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteTopics_result_t *rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreatePartitions_result_t *rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConfigs_result_t *rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConfigs_result_t *rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev);
              +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms);
              +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms);
              +typedef rd_kafka_resp_err_t (rd_kafka_plugin_f_conf_init_t)(rd_kafka_conf_t *conf, void **plug_opaquep, char *errstr, size_t errstr_size);
              +typedef rd_kafka_conf_res_t (rd_kafka_interceptor_f_on_conf_set_t)(rd_kafka_conf_t *conf, const char *name, const char *val, char *errstr, size_t errstr_size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_dup_t)(rd_kafka_conf_t *new_conf, const rd_kafka_conf_t *old_conf, size_t filter_cnt, const char **filter, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_destroy_t)(void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_new_t)(rd_kafka_t *rk, const rd_kafka_conf_t *conf, void *ic_opaque, char *errstr, size_t errstr_size);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_send_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_acknowledgement_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_consume_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_commit_t)(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_request_sent_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_new_t *on_new, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_destroy_t *on_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_send_t *on_send, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_consume_t *on_consume, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_commit_t *on_commit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres);
              +typedef enum rd_kafka_admin_op_t {
              +  RD_KAFKA_ADMIN_OP_ANY = 0,
              +  RD_KAFKA_ADMIN_OP_CREATETOPICS,
              +  RD_KAFKA_ADMIN_OP_DELETETOPICS,
              +  RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONFIGS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS,
              +  RD_KAFKA_ADMIN_OP__CNT,
              +} rd_kafka_admin_op_t;
              +typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t;
              +rd_kafka_AdminOptions_t *rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
              +void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, int true_or_false, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, int32_t broker_id, char *errstr, size_t errstr_size);
              +void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, void *opaque);
              +typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t;
              +rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, int num_partitions, int replication_factor, char *errstr, size_t errstr_size);
              +void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic);
              +void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, int32_t partition, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, const char *name, const char *value);
              +void rd_kafka_CreateTopics(rd_kafka_t *rk, rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(const rd_kafka_CreateTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
              +rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic);
              +void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic);
              +void rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt);
              +void rd_kafka_DeleteTopics(rd_kafka_t *rk, rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(const rd_kafka_DeleteTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t;
              +rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new(const char *topic, size_t new_total_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts);
              +void rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment(rd_kafka_NewPartitions_t *new_parts, int32_t new_partition_idx, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_CreatePartitions(rd_kafka_t *rk, rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreatePartitions_result_topics(const rd_kafka_CreatePartitions_result_t *result, size_t *cntp);
              +typedef enum rd_kafka_ConfigSource_t {
              +  RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
              +  RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4,
              +  RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5,
              +  RD_KAFKA_CONFIG_SOURCE__CNT,
              +} rd_kafka_ConfigSource_t;
              +const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource);
              +typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t;
              +const char *rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry);
              +const char *rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry);
              +rd_kafka_ConfigSource_t rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, size_t *cntp);
              +typedef enum rd_kafka_ResourceType_t {
              +  RD_KAFKA_RESOURCE_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_ANY = 1,
              +  RD_KAFKA_RESOURCE_TOPIC = 2,
              +  RD_KAFKA_RESOURCE_GROUP = 3,
              +  RD_KAFKA_RESOURCE_BROKER = 4,
              +  RD_KAFKA_RESOURCE__CNT,
              +} rd_kafka_ResourceType_t;
              +const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype);
              +typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
              +rd_kafka_ConfigResource_t *rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, const char *resname);
              +void rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config);
              +void rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, size_t config_cnt);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, const char *name, const char *value);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, size_t *cntp);
              +rd_kafka_ResourceType_t rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config);
              +void rd_kafka_AlterConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_AlterConfigs_result_resources(const rd_kafka_AlterConfigs_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_DescribeConfigs_result_resources(const rd_kafka_DescribeConfigs_result_t *result, size_t *cntp);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token(rd_kafka_t *rk, const char *token_value, int64_t md_lifetime_ms, const char *md_principal_name, const char **extensions, size_t extension_size, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk, const char *errstr);
              +'
              +
              +rdkafka.h, rdkafka_mock.h

              +

              RD_KAFKA_VERSION

              +
              public RD_KAFKA_VERSION = 16908543
              +
              +

              librdkafka version

              +

              Interpreted as hex MM.mm.rr.xx:

• MM = Major
• mm = minor
• rr = revision
• xx = pre-release id (0xff is the final release)

              E.g.: 0x000801ff = 0.8.1

              +
              Remarks
This value should only be used at compile time; for runtime version checks, use rd_kafka_version().
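Because the shared library loaded at runtime may be newer or older than the one these bindings were generated against, rd_kafka_version() (declared in the cdef above) can be compared with this compile-time constant. A sketch under the same ext-ffi/librdkafka.so.1 assumptions as above:

<?php
// Sketch only; assumes ext-ffi and librdkafka.so.1.
$kafka = \FFI::cdef('int rd_kafka_version(void);', 'librdkafka.so.1');

$documented = 16908543;                 // RD_KAFKA_VERSION from this page, 0x010200ff = 1.2.0
$runtime    = $kafka->rd_kafka_version();

if ($runtime < $documented) {
    fwrite(STDERR, sprintf(
        "loaded librdkafka 0x%08x is older than the documented 0x%08x\n",
        $runtime,
        $documented
    ));
}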

              RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE

              +
              public RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76
              +
              +

              Unsupported compression type


              RD_KAFKA_RESP_ERR_END_ALL

              +

              public RD_KAFKA_RESP_ERR_END_ALL = 82
              +
              +enum rd_kafka_resp_err_t

              +

              RD_KAFKA_ADMIN_OP__CNT

              +
              public RD_KAFKA_ADMIN_OP__CNT = 6
              +
              +

              Number of ops defined


              RD_KAFKA_CDEF

              +

              public RD_KAFKA_CDEF = 'typedef long int ssize_t;
              +typedef struct _IO_FILE FILE;
              +typedef long int mode_t;
              +typedef signed int int16_t;
              +typedef unsigned int uint16_t;
              +typedef signed int int32_t;
              +typedef signed long int int64_t;
              +int rd_kafka_version(void);
              +const char *rd_kafka_version_str(void);
              +typedef enum rd_kafka_type_t {
              +  RD_KAFKA_PRODUCER,
              +  RD_KAFKA_CONSUMER,
              +} rd_kafka_type_t;
              +typedef enum rd_kafka_timestamp_type_t {
              +  RD_KAFKA_TIMESTAMP_NOT_AVAILABLE,
              +  RD_KAFKA_TIMESTAMP_CREATE_TIME,
              +  RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME,
              +} rd_kafka_timestamp_type_t;
              +const char *rd_kafka_get_debug_contexts(void);
              +typedef struct rd_kafka_s rd_kafka_t;
              +typedef struct rd_kafka_topic_s rd_kafka_topic_t;
              +typedef struct rd_kafka_conf_s rd_kafka_conf_t;
              +typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t;
              +typedef struct rd_kafka_queue_s rd_kafka_queue_t;
              +typedef struct rd_kafka_op_s rd_kafka_event_t;
              +typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t;
              +typedef enum {
              +  RD_KAFKA_RESP_ERR__BEGIN = (- 200),
              +  RD_KAFKA_RESP_ERR__BAD_MSG = (- 199),
              +  RD_KAFKA_RESP_ERR__BAD_COMPRESSION = (- 198),
              +  RD_KAFKA_RESP_ERR__DESTROY = (- 197),
              +  RD_KAFKA_RESP_ERR__FAIL = (- 196),
              +  RD_KAFKA_RESP_ERR__TRANSPORT = (- 195),
              +  RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = (- 194),
              +  RD_KAFKA_RESP_ERR__RESOLVE = (- 193),
              +  RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = (- 192),
              +  RD_KAFKA_RESP_ERR__PARTITION_EOF = (- 191),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = (- 190),
              +  RD_KAFKA_RESP_ERR__FS = (- 189),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = (- 188),
              +  RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = (- 187),
              +  RD_KAFKA_RESP_ERR__INVALID_ARG = (- 186),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT = (- 185),
              +  RD_KAFKA_RESP_ERR__QUEUE_FULL = (- 184),
              +  RD_KAFKA_RESP_ERR__ISR_INSUFF = (- 183),
              +  RD_KAFKA_RESP_ERR__NODE_UPDATE = (- 182),
              +  RD_KAFKA_RESP_ERR__SSL = (- 181),
              +  RD_KAFKA_RESP_ERR__WAIT_COORD = (- 180),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = (- 179),
              +  RD_KAFKA_RESP_ERR__IN_PROGRESS = (- 178),
              +  RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = (- 177),
              +  RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = (- 176),
              +  RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = (- 175),
              +  RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = (- 174),
              +  RD_KAFKA_RESP_ERR__CONFLICT = (- 173),
              +  RD_KAFKA_RESP_ERR__STATE = (- 172),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = (- 171),
              +  RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = (- 170),
              +  RD_KAFKA_RESP_ERR__AUTHENTICATION = (- 169),
              +  RD_KAFKA_RESP_ERR__NO_OFFSET = (- 168),
              +  RD_KAFKA_RESP_ERR__OUTDATED = (- 167),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = (- 166),
              +  RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = (- 165),
              +  RD_KAFKA_RESP_ERR__WAIT_CACHE = (- 164),
              +  RD_KAFKA_RESP_ERR__INTR = (- 163),
              +  RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = (- 162),
              +  RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = (- 161),
              +  RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = (- 160),
              +  RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = (- 159),
              +  RD_KAFKA_RESP_ERR__PARTIAL = (- 158),
              +  RD_KAFKA_RESP_ERR__READ_ONLY = (- 157),
              +  RD_KAFKA_RESP_ERR__NOENT = (- 156),
              +  RD_KAFKA_RESP_ERR__UNDERFLOW = (- 155),
              +  RD_KAFKA_RESP_ERR__INVALID_TYPE = (- 154),
              +  RD_KAFKA_RESP_ERR__RETRY = (- 153),
              +  RD_KAFKA_RESP_ERR__PURGE_QUEUE = (- 152),
              +  RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = (- 151),
              +  RD_KAFKA_RESP_ERR__FATAL = (- 150),
              +  RD_KAFKA_RESP_ERR__INCONSISTENT = (- 149),
              +  RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = (- 148),
              +  RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = (- 147),
              +  RD_KAFKA_RESP_ERR__END = (- 100),
              +  RD_KAFKA_RESP_ERR_UNKNOWN = (- 1),
              +  RD_KAFKA_RESP_ERR_NO_ERROR = 0,
              +  RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG = 2,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4,
              +  RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5,
              +  RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6,
              +  RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7,
              +  RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8,
              +  RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9,
              +  RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10,
              +  RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11,
              +  RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12,
              +  RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13,
              +  RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS = 14,
              +  RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE = 15,
              +  RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP = 16,
              +  RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17,
              +  RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
              +  RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25,
              +  RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26,
              +  RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27,
              +  RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
              +  RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29,
              +  RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30,
              +  RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
              +  RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35,
              +  RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36,
              +  RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39,
              +  RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40,
              +  RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
              +  RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44,
              +  RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47,
              +  RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49,
              +  RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50,
              +  RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51,
              +  RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52,
              +  RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
              +  RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54,
              +  RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55,
              +  RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56,
              +  RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57,
              +  RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59,
              +  RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66,
              +  RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67,
              +  RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68,
              +  RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69,
              +  RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70,
              +  RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71,
              +  RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72,
              +  RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73,
              +  RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76,
              +  RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77,
              +  RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78,
              +  RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79,
              +  RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80,
              +  RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81,
              +  RD_KAFKA_RESP_ERR_END_ALL,
              +} rd_kafka_resp_err_t;
              +struct rd_kafka_err_desc {
              +  rd_kafka_resp_err_t code;
              +  char *name;
              +  char *desc;
              +};
              +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, size_t *cntp);
              +const char *rd_kafka_err2str(rd_kafka_resp_err_t err);
              +const char *rd_kafka_err2name(rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_last_error(void);
              +rd_kafka_resp_err_t rd_kafka_errno2err(int errnox);
              +int rd_kafka_errno(void);
              +rd_kafka_resp_err_t rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason);
              +typedef struct rd_kafka_topic_partition_s {
              +  char *topic;
              +  int32_t partition;
              +  int64_t offset;
              +  void *metadata;
              +  size_t metadata_size;
              +  void *opaque;
              +  rd_kafka_resp_err_t err;
              +  void *_private;
              +} rd_kafka_topic_partition_t;
              +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar);
              +typedef struct rd_kafka_topic_partition_list_s {
              +  int cnt;
              +  int size;
              +  rd_kafka_topic_partition_t *elems;
              +} rd_kafka_topic_partition_list_t;
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size);
              +void rd_kafka_topic_partition_list_destroy(rd_kafka_topic_partition_list_t *rkparlist);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_add_range(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t start, int32_t stop);
              +int rd_kafka_topic_partition_list_del(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +int rd_kafka_topic_partition_list_del_by_idx(rd_kafka_topic_partition_list_t *rktparlist, int idx);
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src);
              +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition, int64_t offset);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_sort(rd_kafka_topic_partition_list_t *rktparlist, int (*cmp)(const void *a, const void *b, void *cmp_opaque), void *cmp_opaque);
              +typedef enum rd_kafka_vtype_t {
              +  RD_KAFKA_VTYPE_END,
              +  RD_KAFKA_VTYPE_TOPIC,
              +  RD_KAFKA_VTYPE_RKT,
              +  RD_KAFKA_VTYPE_PARTITION,
              +  RD_KAFKA_VTYPE_VALUE,
              +  RD_KAFKA_VTYPE_KEY,
              +  RD_KAFKA_VTYPE_OPAQUE,
              +  RD_KAFKA_VTYPE_MSGFLAGS,
              +  RD_KAFKA_VTYPE_TIMESTAMP,
              +  RD_KAFKA_VTYPE_HEADER,
              +  RD_KAFKA_VTYPE_HEADERS,
              +} rd_kafka_vtype_t;
              +typedef struct rd_kafka_headers_s rd_kafka_headers_t;
              +rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count);
              +void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs);
              +rd_kafka_headers_t *rd_kafka_headers_copy(const rd_kafka_headers_t *src);
              +rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, const char *name, ssize_t name_size, const void *value, ssize_t value_size);
              +rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, const char *name);
              +rd_kafka_resp_err_t rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get(const rd_kafka_headers_t *hdrs, size_t idx, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, size_t idx, const char **namep, const void **valuep, size_t *sizep);
              +typedef struct rd_kafka_message_s {
              +  rd_kafka_resp_err_t err;
              +  rd_kafka_topic_t *rkt;
              +  int32_t partition;
              +  void *payload;
              +  size_t len;
              +  void *key;
              +  size_t key_len;
              +  int64_t offset;
              +  void *_private;
              +} rd_kafka_message_t;
              +void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);
              +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, rd_kafka_timestamp_type_t *tstype);
              +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage);
              +rd_kafka_resp_err_t rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +rd_kafka_resp_err_t rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t *hdrs);
              +size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs);
              +typedef enum {
              +  RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0,
              +  RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1,
              +  RD_KAFKA_MSG_STATUS_PERSISTED = 2,
              +} rd_kafka_msg_status_t;
              +rd_kafka_msg_status_t rd_kafka_message_status(const rd_kafka_message_t *rkmessage);
              +typedef enum {
              +  RD_KAFKA_CONF_UNKNOWN = (- 2),
              +  RD_KAFKA_CONF_INVALID = (- 1),
              +  RD_KAFKA_CONF_OK = 0,
              +} rd_kafka_conf_res_t;
              +rd_kafka_conf_t *rd_kafka_conf_new(void);
              +void rd_kafka_conf_destroy(rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, size_t filter_cnt, const char **filter);
              +const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk);
              +rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events);
              +void rd_kafka_conf_set_background_event_cb(rd_kafka_conf_t *conf, void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque));
              +void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, void (*dr_cb)(rd_kafka_t *rk, void *payload, size_t len, rd_kafka_resp_err_t err, void *opaque, void *msg_opaque));
              +void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, void (*dr_msg_cb)(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_consume_cb(rd_kafka_conf_t *conf, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_rebalance_cb(rd_kafka_conf_t *conf, void (*rebalance_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque));
              +void rd_kafka_conf_set_offset_commit_cb(rd_kafka_conf_t *conf, void (*offset_commit_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque));
              +void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, void (*error_cb)(rd_kafka_t *rk, int err, const char *reason, void *opaque));
              +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, void (*throttle_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque));
              +void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, void (*log_cb)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque));
              +void rd_kafka_conf_set_oauthbearer_token_refresh_cb(rd_kafka_conf_t *conf, void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, const char *oauthbearer_config, void *opaque));
              +void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, int (*socket_cb)(int domain, int type, int protocol, void *opaque));
              +void rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, int (*connect_cb)(int sockfd, const struct sockaddr *addr, int addrlen, const char *id, void *opaque));
              +void rd_kafka_conf_set_closesocket_cb(rd_kafka_conf_t *conf, int (*closesocket_cb)(int sockfd, void *opaque));
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb(rd_kafka_conf_t *conf, int (*ssl_cert_verify_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int *x509_error, int depth, const char *buf, size_t size, char *errstr, size_t errstr_size, void *opaque));
              +typedef enum rd_kafka_cert_type_t {
              +  RD_KAFKA_CERT_PUBLIC_KEY,
              +  RD_KAFKA_CERT_PRIVATE_KEY,
              +  RD_KAFKA_CERT_CA,
              +  RD_KAFKA_CERT__CNT,
              +} rd_kafka_cert_type_t;
              +typedef enum rd_kafka_cert_enc_t {
              +  RD_KAFKA_CERT_ENC_PKCS12,
              +  RD_KAFKA_CERT_ENC_DER,
              +  RD_KAFKA_CERT_ENC_PEM,
              +  RD_KAFKA_CERT_ENC__CNT,
              +} rd_kafka_cert_enc_t;
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, rd_kafka_cert_type_t cert_type, rd_kafka_cert_enc_t cert_enc, const void *buffer, size_t size, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque);
              +void *rd_kafka_opaque(const rd_kafka_t *rk);
              +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, rd_kafka_topic_conf_t *tconf);
              +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp);
              +const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, size_t *cntp);
              +void rd_kafka_conf_dump_free(const char **arr, size_t cnt);
              +void rd_kafka_conf_properties_show(FILE *fp);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf);
              +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk);
              +void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *rkt_opaque);
              +void rd_kafka_topic_conf_set_partitioner_cb(rd_kafka_topic_conf_t *topic_conf, int32_t (*partitioner)(const rd_kafka_topic_t *rkt, const void *keydata, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque));
              +void rd_kafka_topic_conf_set_msg_order_cmp(rd_kafka_topic_conf_t *topic_conf, int (*msg_order_cmp)(const rd_kafka_message_t *a, const rd_kafka_message_t *b));
              +int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, int32_t partition);
              +int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, char *errstr, size_t errstr_size);
              +void rd_kafka_destroy(rd_kafka_t *rk);
              +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags);
              +const char *rd_kafka_name(const rd_kafka_t *rk);
              +rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk);
              +char *rd_kafka_memberid(const rd_kafka_t *rk);
              +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms);
              +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, rd_kafka_topic_conf_t *conf);
              +void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt);
              +const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt);
              +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt);
              +int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms);
              +void rd_kafka_yield(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_pause_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_resume_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high);
              +rd_kafka_resp_err_t rd_kafka_offsets_for_times(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets, int timeout_ms);
              +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr);
              +rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk);
              +void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu);
              +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, const char *topic, int32_t partition);
              +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk);
              +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst);
              +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu);
              +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, int fd, const void *payload, size_t size);
              +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, void (*event_cb)(rd_kafka_t *rk, void *qev_opaque), void *qev_opaque);
              +int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
              +rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, int timeout_ms);
              +rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms);
              +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, int timeout_ms);
              +ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +rd_kafka_resp_err_t rd_kafka_offsets_store(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets);
              +rd_kafka_resp_err_t rd_kafka_subscribe(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *topics);
              +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics);
              +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_assignment(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **partitions);
              +rd_kafka_resp_err_t rd_kafka_commit(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_message(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_queue(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_queue_t *rkqu, void (*cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_committed(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, void *payload, size_t len, const void *key, size_t keylen, void *msg_opaque);
              +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...);
              +int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, rd_kafka_message_t *rkmessages, int message_cnt);
              +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags);
              +typedef struct rd_kafka_metadata_broker {
              +  int32_t id;
              +  char *host;
              +  int port;
              +} rd_kafka_metadata_broker_t;
              +typedef struct rd_kafka_metadata_partition {
              +  int32_t id;
              +  rd_kafka_resp_err_t err;
              +  int32_t leader;
              +  int replica_cnt;
              +  int32_t *replicas;
              +  int isr_cnt;
              +  int32_t *isrs;
              +} rd_kafka_metadata_partition_t;
              +typedef struct rd_kafka_metadata_topic {
              +  char *topic;
              +  int partition_cnt;
              +  struct rd_kafka_metadata_partition *partitions;
              +  rd_kafka_resp_err_t err;
              +} rd_kafka_metadata_topic_t;
              +typedef struct rd_kafka_metadata {
              +  int broker_cnt;
              +  struct rd_kafka_metadata_broker *brokers;
              +  int topic_cnt;
              +  struct rd_kafka_metadata_topic *topics;
              +  int32_t orig_broker_id;
              +  char *orig_broker_name;
              +} rd_kafka_metadata_t;
              +rd_kafka_resp_err_t rd_kafka_metadata(rd_kafka_t *rk, int all_topics, rd_kafka_topic_t *only_rkt, const struct rd_kafka_metadata **metadatap, int timeout_ms);
              +void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata);
              +struct rd_kafka_group_member_info {
              +  char *member_id;
              +  char *client_id;
              +  char *client_host;
              +  void *member_metadata;
              +  int member_metadata_size;
              +  void *member_assignment;
              +  int member_assignment_size;
              +};
              +struct rd_kafka_group_info {
              +  struct rd_kafka_metadata_broker broker;
              +  char *group;
              +  rd_kafka_resp_err_t err;
              +  char *state;
              +  char *protocol_type;
              +  char *protocol;
              +  struct rd_kafka_group_member_info *members;
              +  int member_cnt;
              +};
              +struct rd_kafka_group_list {
              +  struct rd_kafka_group_info *groups;
              +  int group_cnt;
              +};
              +rd_kafka_resp_err_t rd_kafka_list_groups(rd_kafka_t *rk, const char *group, const struct rd_kafka_group_list **grplistp, int timeout_ms);
              +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist);
              +int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist);
              +void rd_kafka_set_logger(rd_kafka_t *rk, void (*func)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_set_log_level(rd_kafka_t *rk, int level);
              +void rd_kafka_log_print(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +int rd_kafka_outq_len(rd_kafka_t *rk);
              +void rd_kafka_dump(FILE *fp, rd_kafka_t *rk);
              +int rd_kafka_thread_cnt(void);
              +typedef enum rd_kafka_thread_type_t {
              +  RD_KAFKA_THREAD_MAIN,
              +  RD_KAFKA_THREAD_BACKGROUND,
              +  RD_KAFKA_THREAD_BROKER,
              +} rd_kafka_thread_type_t;
              +int rd_kafka_wait_destroyed(int timeout_ms);
              +int rd_kafka_unittest(void);
              +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk);
              +typedef int rd_kafka_event_type_t;
              +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev);
              +void rd_kafka_event_destroy(rd_kafka_event_t *rkev);
              +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev);
              +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, const rd_kafka_message_t **rkmessages, size_t size);
              +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev);
              +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev);
              +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev);
              +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev);
              +int rd_kafka_event_log(rd_kafka_event_t *rkev, const char **fac, const char **str, int *level);
              +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_list_t *rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_t *rd_kafka_event_topic_partition(rd_kafka_event_t *rkev);
              +typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t;
              +const rd_kafka_CreateTopics_result_t *rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteTopics_result_t *rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreatePartitions_result_t *rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConfigs_result_t *rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConfigs_result_t *rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev);
              +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms);
              +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms);
              +typedef rd_kafka_resp_err_t (rd_kafka_plugin_f_conf_init_t)(rd_kafka_conf_t *conf, void **plug_opaquep, char *errstr, size_t errstr_size);
              +typedef rd_kafka_conf_res_t (rd_kafka_interceptor_f_on_conf_set_t)(rd_kafka_conf_t *conf, const char *name, const char *val, char *errstr, size_t errstr_size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_dup_t)(rd_kafka_conf_t *new_conf, const rd_kafka_conf_t *old_conf, size_t filter_cnt, const char **filter, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_destroy_t)(void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_new_t)(rd_kafka_t *rk, const rd_kafka_conf_t *conf, void *ic_opaque, char *errstr, size_t errstr_size);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_send_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_acknowledgement_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_consume_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_commit_t)(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_request_sent_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_start_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_exit_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_new_t *on_new, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_destroy_t *on_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_send_t *on_send, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_consume_t *on_consume, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_commit_t *on_commit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres);
              +typedef enum rd_kafka_admin_op_t {
              +  RD_KAFKA_ADMIN_OP_ANY = 0,
              +  RD_KAFKA_ADMIN_OP_CREATETOPICS,
              +  RD_KAFKA_ADMIN_OP_DELETETOPICS,
              +  RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONFIGS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS,
              +  RD_KAFKA_ADMIN_OP__CNT,
              +} rd_kafka_admin_op_t;
              +typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t;
              +rd_kafka_AdminOptions_t *rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
              +void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, int true_or_false, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, int32_t broker_id, char *errstr, size_t errstr_size);
              +void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, void *ev_opaque);
              +typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t;
              +rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, int num_partitions, int replication_factor, char *errstr, size_t errstr_size);
              +void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic);
              +void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, int32_t partition, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, const char *name, const char *value);
              +void rd_kafka_CreateTopics(rd_kafka_t *rk, rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(const rd_kafka_CreateTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
              +rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic);
              +void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic);
              +void rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt);
              +void rd_kafka_DeleteTopics(rd_kafka_t *rk, rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(const rd_kafka_DeleteTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t;
              +rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new(const char *topic, size_t new_total_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts);
              +void rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment(rd_kafka_NewPartitions_t *new_parts, int32_t new_partition_idx, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_CreatePartitions(rd_kafka_t *rk, rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreatePartitions_result_topics(const rd_kafka_CreatePartitions_result_t *result, size_t *cntp);
              +typedef enum rd_kafka_ConfigSource_t {
              +  RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
              +  RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4,
              +  RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5,
              +  RD_KAFKA_CONFIG_SOURCE__CNT,
              +} rd_kafka_ConfigSource_t;
              +const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource);
              +typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t;
              +const char *rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry);
              +const char *rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry);
              +rd_kafka_ConfigSource_t rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, size_t *cntp);
              +typedef enum rd_kafka_ResourceType_t {
              +  RD_KAFKA_RESOURCE_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_ANY = 1,
              +  RD_KAFKA_RESOURCE_TOPIC = 2,
              +  RD_KAFKA_RESOURCE_GROUP = 3,
              +  RD_KAFKA_RESOURCE_BROKER = 4,
              +  RD_KAFKA_RESOURCE__CNT,
              +} rd_kafka_ResourceType_t;
              +const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype);
              +typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
              +rd_kafka_ConfigResource_t *rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, const char *resname);
              +void rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config);
              +void rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, size_t config_cnt);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, const char *name, const char *value);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, size_t *cntp);
              +rd_kafka_ResourceType_t rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config);
              +void rd_kafka_AlterConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_AlterConfigs_result_resources(const rd_kafka_AlterConfigs_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_DescribeConfigs_result_resources(const rd_kafka_DescribeConfigs_result_t *result, size_t *cntp);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token(rd_kafka_t *rk, const char *token_value, int64_t md_lifetime_ms, const char *md_principal_name, const char **extensions, size_t extension_size, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk, const char *errstr);
              +'
              +
              +rdkafka.h, rdkafka_mock.h

              +
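The block above is the tail of the RD_KAFKA_CDEF declaration set. As a rough illustration of what such a cdef string is for, here is a minimal, hypothetical sketch that binds it with PHP's FFI; the shared-library file name, the stand-in file `rdkafka.cdef.h`, and the way the constant is actually referenced are assumptions, not part of this documentation:

<?php
// Hypothetical sketch only: bind a cdef string shaped like RD_KAFKA_CDEF with PHP FFI.
// How RD_KAFKA_CDEF is referenced (class constant, global, ...) and the library
// file name depend on your installation.
$cdef = file_get_contents('rdkafka.cdef.h'); // stand-in for the RD_KAFKA_CDEF value

$rd = FFI::cdef($cdef, 'librdkafka.so.1');

// Every function declared in the cdef becomes callable on the FFI handle,
// e.g. querying the runtime library version:
printf(
    "librdkafka %s (0x%08x)\n",
    FFI::string($rd->rd_kafka_version_str()),
    $rd->rd_kafka_version()
);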

              RD_KAFKA_VERSION

              +
              public RD_KAFKA_VERSION = 16908799
              +
              +

              librdkafka version

              +

              Interpreted as hex MM.mm.rr.xx:

              +
              • MM = major
              • mm = minor
              • rr = revision
              • xx = pre-release id (0xff is the final release)
              +

              E.g.: 0x000801ff = 0.8.1

              +
              Remarks
              This value should only be used at compile time; for runtime version checks, use rd_kafka_version()
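              A worked decoding of this packed value may help. The following is a minimal sketch in plain PHP (no library calls; the variable names are illustrative only) that splits 16908799 into the MM.mm.rr.xx fields described above:

              <?php
              // 16908799 == 0x010201ff, i.e. librdkafka 1.2.1 with the final-release marker 0xff.
              $version = 16908799;

              $major      = ($version >> 24) & 0xff; // MM
              $minor      = ($version >> 16) & 0xff; // mm
              $revision   = ($version >> 8)  & 0xff; // rr
              $prerelease = $version & 0xff;         // xx (0xff = final release)

              printf(
                  "librdkafka %d.%d.%d (%s)\n",
                  $major,
                  $minor,
                  $revision,
                  $prerelease === 0xff ? 'final release' : sprintf('pre-release 0x%02x', $prerelease)
              );
              // Output: librdkafka 1.2.1 (final release)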

              RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE

              +
              public RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76
              +
              +

              Unsupported compression type


              RD_KAFKA_RESP_ERR_END_ALL

              +

              public RD_KAFKA_RESP_ERR_END_ALL = 82
              +
              +Member of enum rd_kafka_resp_err_t; this is the last value in the error-code enum.

              +
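              The RD_KAFKA_RESP_ERR_* constants above are what client code compares message and operation error codes against. Below is a small, hypothetical consumer-loop fragment; it assumes the constants are exposed as globals, as in the php-rdkafka extension API, that $consumer is an already-configured \RdKafka\KafkaConsumer, and that handleRecord() is an application-side stub:

              <?php
              // Hypothetical fragment: $consumer is assumed to be a configured \RdKafka\KafkaConsumer
              // and the RD_KAFKA_RESP_ERR_* constants are assumed to be defined globally.
              $message = $consumer->consume(1000); // wait up to 1000 ms for a message

              switch ($message->err) {
                  case RD_KAFKA_RESP_ERR_NO_ERROR:        // 0: a record was delivered
                      handleRecord($message->payload);    // handleRecord() is an application stub
                      break;
                  case RD_KAFKA_RESP_ERR__PARTITION_EOF:  // -191: reached the end of a partition
                  case RD_KAFKA_RESP_ERR__TIMED_OUT:      // -185: nothing arrived within the timeout
                      break;                              // benign; just poll again
                  default:
                      throw new \RuntimeException($message->errstr(), $message->err);
              }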

              RD_KAFKA_ADMIN_OP__CNT

              +
              public RD_KAFKA_ADMIN_OP__CNT = 6
              +
              +

              Number of admin operations defined (count sentinel at the end of enum rd_kafka_admin_op_t)


              RD_KAFKA_CDEF

              +

              public RD_KAFKA_CDEF = 'typedef long int ssize_t;
              +typedef struct _IO_FILE FILE;
              +typedef long int mode_t;
              +typedef signed int int16_t;
              +typedef unsigned int uint16_t;
              +typedef signed int int32_t;
              +typedef signed long int int64_t;
              +int rd_kafka_version(void);
              +const char *rd_kafka_version_str(void);
              +typedef enum rd_kafka_type_t {
              +  RD_KAFKA_PRODUCER,
              +  RD_KAFKA_CONSUMER,
              +} rd_kafka_type_t;
              +typedef enum rd_kafka_timestamp_type_t {
              +  RD_KAFKA_TIMESTAMP_NOT_AVAILABLE,
              +  RD_KAFKA_TIMESTAMP_CREATE_TIME,
              +  RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME,
              +} rd_kafka_timestamp_type_t;
              +const char *rd_kafka_get_debug_contexts(void);
              +typedef struct rd_kafka_s rd_kafka_t;
              +typedef struct rd_kafka_topic_s rd_kafka_topic_t;
              +typedef struct rd_kafka_conf_s rd_kafka_conf_t;
              +typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t;
              +typedef struct rd_kafka_queue_s rd_kafka_queue_t;
              +typedef struct rd_kafka_op_s rd_kafka_event_t;
              +typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t;
              +typedef enum {
              +  RD_KAFKA_RESP_ERR__BEGIN = (- 200),
              +  RD_KAFKA_RESP_ERR__BAD_MSG = (- 199),
              +  RD_KAFKA_RESP_ERR__BAD_COMPRESSION = (- 198),
              +  RD_KAFKA_RESP_ERR__DESTROY = (- 197),
              +  RD_KAFKA_RESP_ERR__FAIL = (- 196),
              +  RD_KAFKA_RESP_ERR__TRANSPORT = (- 195),
              +  RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = (- 194),
              +  RD_KAFKA_RESP_ERR__RESOLVE = (- 193),
              +  RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = (- 192),
              +  RD_KAFKA_RESP_ERR__PARTITION_EOF = (- 191),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = (- 190),
              +  RD_KAFKA_RESP_ERR__FS = (- 189),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = (- 188),
              +  RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = (- 187),
              +  RD_KAFKA_RESP_ERR__INVALID_ARG = (- 186),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT = (- 185),
              +  RD_KAFKA_RESP_ERR__QUEUE_FULL = (- 184),
              +  RD_KAFKA_RESP_ERR__ISR_INSUFF = (- 183),
              +  RD_KAFKA_RESP_ERR__NODE_UPDATE = (- 182),
              +  RD_KAFKA_RESP_ERR__SSL = (- 181),
              +  RD_KAFKA_RESP_ERR__WAIT_COORD = (- 180),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = (- 179),
              +  RD_KAFKA_RESP_ERR__IN_PROGRESS = (- 178),
              +  RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = (- 177),
              +  RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = (- 176),
              +  RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = (- 175),
              +  RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = (- 174),
              +  RD_KAFKA_RESP_ERR__CONFLICT = (- 173),
              +  RD_KAFKA_RESP_ERR__STATE = (- 172),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = (- 171),
              +  RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = (- 170),
              +  RD_KAFKA_RESP_ERR__AUTHENTICATION = (- 169),
              +  RD_KAFKA_RESP_ERR__NO_OFFSET = (- 168),
              +  RD_KAFKA_RESP_ERR__OUTDATED = (- 167),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = (- 166),
              +  RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = (- 165),
              +  RD_KAFKA_RESP_ERR__WAIT_CACHE = (- 164),
              +  RD_KAFKA_RESP_ERR__INTR = (- 163),
              +  RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = (- 162),
              +  RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = (- 161),
              +  RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = (- 160),
              +  RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = (- 159),
              +  RD_KAFKA_RESP_ERR__PARTIAL = (- 158),
              +  RD_KAFKA_RESP_ERR__READ_ONLY = (- 157),
              +  RD_KAFKA_RESP_ERR__NOENT = (- 156),
              +  RD_KAFKA_RESP_ERR__UNDERFLOW = (- 155),
              +  RD_KAFKA_RESP_ERR__INVALID_TYPE = (- 154),
              +  RD_KAFKA_RESP_ERR__RETRY = (- 153),
              +  RD_KAFKA_RESP_ERR__PURGE_QUEUE = (- 152),
              +  RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = (- 151),
              +  RD_KAFKA_RESP_ERR__FATAL = (- 150),
              +  RD_KAFKA_RESP_ERR__INCONSISTENT = (- 149),
              +  RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = (- 148),
              +  RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = (- 147),
              +  RD_KAFKA_RESP_ERR__END = (- 100),
              +  RD_KAFKA_RESP_ERR_UNKNOWN = (- 1),
              +  RD_KAFKA_RESP_ERR_NO_ERROR = 0,
              +  RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG = 2,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4,
              +  RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5,
              +  RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6,
              +  RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7,
              +  RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8,
              +  RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9,
              +  RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10,
              +  RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11,
              +  RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12,
              +  RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13,
              +  RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS = 14,
              +  RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE = 15,
              +  RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP = 16,
              +  RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17,
              +  RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
              +  RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25,
              +  RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26,
              +  RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27,
              +  RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
              +  RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29,
              +  RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30,
              +  RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
              +  RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35,
              +  RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36,
              +  RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39,
              +  RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40,
              +  RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
              +  RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44,
              +  RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47,
              +  RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49,
              +  RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50,
              +  RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51,
              +  RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52,
              +  RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
              +  RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54,
              +  RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55,
              +  RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56,
              +  RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57,
              +  RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59,
              +  RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66,
              +  RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67,
              +  RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68,
              +  RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69,
              +  RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70,
              +  RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71,
              +  RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72,
              +  RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73,
              +  RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76,
              +  RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77,
              +  RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78,
              +  RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79,
              +  RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80,
              +  RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81,
              +  RD_KAFKA_RESP_ERR_END_ALL,
              +} rd_kafka_resp_err_t;
              +struct rd_kafka_err_desc {
              +  rd_kafka_resp_err_t code;
              +  char *name;
              +  char *desc;
              +};
              +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, size_t *cntp);
              +const char *rd_kafka_err2str(rd_kafka_resp_err_t err);
              +const char *rd_kafka_err2name(rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_last_error(void);
              +rd_kafka_resp_err_t rd_kafka_errno2err(int errnox);
              +int rd_kafka_errno(void);
              +rd_kafka_resp_err_t rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason);
              +typedef struct rd_kafka_topic_partition_s {
              +  char *topic;
              +  int32_t partition;
              +  int64_t offset;
              +  void *metadata;
              +  size_t metadata_size;
              +  void *opaque;
              +  rd_kafka_resp_err_t err;
              +  void *_private;
              +} rd_kafka_topic_partition_t;
              +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar);
              +typedef struct rd_kafka_topic_partition_list_s {
              +  int cnt;
              +  int size;
              +  rd_kafka_topic_partition_t *elems;
              +} rd_kafka_topic_partition_list_t;
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size);
              +void rd_kafka_topic_partition_list_destroy(rd_kafka_topic_partition_list_t *rkparlist);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_add_range(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t start, int32_t stop);
              +int rd_kafka_topic_partition_list_del(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +int rd_kafka_topic_partition_list_del_by_idx(rd_kafka_topic_partition_list_t *rktparlist, int idx);
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src);
              +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition, int64_t offset);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_sort(rd_kafka_topic_partition_list_t *rktparlist, int (*cmp)(const void *a, const void *b, void *cmp_opaque), void *cmp_opaque);
              +typedef enum rd_kafka_vtype_t {
              +  RD_KAFKA_VTYPE_END,
              +  RD_KAFKA_VTYPE_TOPIC,
              +  RD_KAFKA_VTYPE_RKT,
              +  RD_KAFKA_VTYPE_PARTITION,
              +  RD_KAFKA_VTYPE_VALUE,
              +  RD_KAFKA_VTYPE_KEY,
              +  RD_KAFKA_VTYPE_OPAQUE,
              +  RD_KAFKA_VTYPE_MSGFLAGS,
              +  RD_KAFKA_VTYPE_TIMESTAMP,
              +  RD_KAFKA_VTYPE_HEADER,
              +  RD_KAFKA_VTYPE_HEADERS,
              +} rd_kafka_vtype_t;
              +typedef struct rd_kafka_headers_s rd_kafka_headers_t;
              +rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count);
              +void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs);
              +rd_kafka_headers_t *rd_kafka_headers_copy(const rd_kafka_headers_t *src);
              +rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, const char *name, ssize_t name_size, const void *value, ssize_t value_size);
              +rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, const char *name);
              +rd_kafka_resp_err_t rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get(const rd_kafka_headers_t *hdrs, size_t idx, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, size_t idx, const char **namep, const void **valuep, size_t *sizep);
              +typedef struct rd_kafka_message_s {
              +  rd_kafka_resp_err_t err;
              +  rd_kafka_topic_t *rkt;
              +  int32_t partition;
              +  void *payload;
              +  size_t len;
              +  void *key;
              +  size_t key_len;
              +  int64_t offset;
              +  void *_private;
              +} rd_kafka_message_t;
              +void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);
              +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, rd_kafka_timestamp_type_t *tstype);
              +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage);
              +rd_kafka_resp_err_t rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +rd_kafka_resp_err_t rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t *hdrs);
              +size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs);
              +typedef enum {
              +  RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0,
              +  RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1,
              +  RD_KAFKA_MSG_STATUS_PERSISTED = 2,
              +} rd_kafka_msg_status_t;
              +rd_kafka_msg_status_t rd_kafka_message_status(const rd_kafka_message_t *rkmessage);
              +typedef enum {
              +  RD_KAFKA_CONF_UNKNOWN = (- 2),
              +  RD_KAFKA_CONF_INVALID = (- 1),
              +  RD_KAFKA_CONF_OK = 0,
              +} rd_kafka_conf_res_t;
              +rd_kafka_conf_t *rd_kafka_conf_new(void);
              +void rd_kafka_conf_destroy(rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, size_t filter_cnt, const char **filter);
              +const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk);
              +rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events);
              +void rd_kafka_conf_set_background_event_cb(rd_kafka_conf_t *conf, void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque));
              +void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, void (*dr_cb)(rd_kafka_t *rk, void *payload, size_t len, rd_kafka_resp_err_t err, void *opaque, void *msg_opaque));
              +void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, void (*dr_msg_cb)(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_consume_cb(rd_kafka_conf_t *conf, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_rebalance_cb(rd_kafka_conf_t *conf, void (*rebalance_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque));
              +void rd_kafka_conf_set_offset_commit_cb(rd_kafka_conf_t *conf, void (*offset_commit_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque));
              +void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, void (*error_cb)(rd_kafka_t *rk, int err, const char *reason, void *opaque));
              +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, void (*throttle_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque));
              +void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, void (*log_cb)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque));
              +void rd_kafka_conf_set_oauthbearer_token_refresh_cb(rd_kafka_conf_t *conf, void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, const char *oauthbearer_config, void *opaque));
              +void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, int (*socket_cb)(int domain, int type, int protocol, void *opaque));
              +void rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, int (*connect_cb)(int sockfd, const struct sockaddr *addr, int addrlen, const char *id, void *opaque));
              +void rd_kafka_conf_set_closesocket_cb(rd_kafka_conf_t *conf, int (*closesocket_cb)(int sockfd, void *opaque));
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb(rd_kafka_conf_t *conf, int (*ssl_cert_verify_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int *x509_error, int depth, const char *buf, size_t size, char *errstr, size_t errstr_size, void *opaque));
              +typedef enum rd_kafka_cert_type_t {
              +  RD_KAFKA_CERT_PUBLIC_KEY,
              +  RD_KAFKA_CERT_PRIVATE_KEY,
              +  RD_KAFKA_CERT_CA,
              +  RD_KAFKA_CERT__CNT,
              +} rd_kafka_cert_type_t;
              +typedef enum rd_kafka_cert_enc_t {
              +  RD_KAFKA_CERT_ENC_PKCS12,
              +  RD_KAFKA_CERT_ENC_DER,
              +  RD_KAFKA_CERT_ENC_PEM,
              +  RD_KAFKA_CERT_ENC__CNT,
              +} rd_kafka_cert_enc_t;
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, rd_kafka_cert_type_t cert_type, rd_kafka_cert_enc_t cert_enc, const void *buffer, size_t size, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque);
              +void *rd_kafka_opaque(const rd_kafka_t *rk);
              +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, rd_kafka_topic_conf_t *tconf);
              +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp);
              +const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, size_t *cntp);
              +void rd_kafka_conf_dump_free(const char **arr, size_t cnt);
              +void rd_kafka_conf_properties_show(FILE *fp);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf);
              +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk);
              +void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *rkt_opaque);
              +void rd_kafka_topic_conf_set_partitioner_cb(rd_kafka_topic_conf_t *topic_conf, int32_t (*partitioner)(const rd_kafka_topic_t *rkt, const void *keydata, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque));
              +void rd_kafka_topic_conf_set_msg_order_cmp(rd_kafka_topic_conf_t *topic_conf, int (*msg_order_cmp)(const rd_kafka_message_t *a, const rd_kafka_message_t *b));
              +int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, int32_t partition);
              +int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, char *errstr, size_t errstr_size);
              +void rd_kafka_destroy(rd_kafka_t *rk);
              +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags);
              +const char *rd_kafka_name(const rd_kafka_t *rk);
              +rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk);
              +char *rd_kafka_memberid(const rd_kafka_t *rk);
              +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms);
              +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, rd_kafka_topic_conf_t *conf);
              +void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt);
              +const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt);
              +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt);
              +int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms);
              +void rd_kafka_yield(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_pause_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_resume_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high);
              +rd_kafka_resp_err_t rd_kafka_offsets_for_times(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets, int timeout_ms);
              +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr);
              +rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk);
              +void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu);
              +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, const char *topic, int32_t partition);
              +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk);
              +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst);
              +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu);
              +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, int fd, const void *payload, size_t size);
              +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, void (*event_cb)(rd_kafka_t *rk, void *qev_opaque), void *qev_opaque);
              +int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
              +rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, int timeout_ms);
              +rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms);
              +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, int timeout_ms);
              +ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +rd_kafka_resp_err_t rd_kafka_offsets_store(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets);
              +rd_kafka_resp_err_t rd_kafka_subscribe(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *topics);
              +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics);
              +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_assignment(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **partitions);
              +rd_kafka_resp_err_t rd_kafka_commit(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_message(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_queue(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_queue_t *rkqu, void (*cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_committed(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, void *payload, size_t len, const void *key, size_t keylen, void *msg_opaque);
              +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...);
              +int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, rd_kafka_message_t *rkmessages, int message_cnt);
              +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags);
              +typedef struct rd_kafka_metadata_broker {
              +  int32_t id;
              +  char *host;
              +  int port;
              +} rd_kafka_metadata_broker_t;
              +typedef struct rd_kafka_metadata_partition {
              +  int32_t id;
              +  rd_kafka_resp_err_t err;
              +  int32_t leader;
              +  int replica_cnt;
              +  int32_t *replicas;
              +  int isr_cnt;
              +  int32_t *isrs;
              +} rd_kafka_metadata_partition_t;
              +typedef struct rd_kafka_metadata_topic {
              +  char *topic;
              +  int partition_cnt;
              +  struct rd_kafka_metadata_partition *partitions;
              +  rd_kafka_resp_err_t err;
              +} rd_kafka_metadata_topic_t;
              +typedef struct rd_kafka_metadata {
              +  int broker_cnt;
              +  struct rd_kafka_metadata_broker *brokers;
              +  int topic_cnt;
              +  struct rd_kafka_metadata_topic *topics;
              +  int32_t orig_broker_id;
              +  char *orig_broker_name;
              +} rd_kafka_metadata_t;
              +rd_kafka_resp_err_t rd_kafka_metadata(rd_kafka_t *rk, int all_topics, rd_kafka_topic_t *only_rkt, const struct rd_kafka_metadata **metadatap, int timeout_ms);
              +void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata);
              +struct rd_kafka_group_member_info {
              +  char *member_id;
              +  char *client_id;
              +  char *client_host;
              +  void *member_metadata;
              +  int member_metadata_size;
              +  void *member_assignment;
              +  int member_assignment_size;
              +};
              +struct rd_kafka_group_info {
              +  struct rd_kafka_metadata_broker broker;
              +  char *group;
              +  rd_kafka_resp_err_t err;
              +  char *state;
              +  char *protocol_type;
              +  char *protocol;
              +  struct rd_kafka_group_member_info *members;
              +  int member_cnt;
              +};
              +struct rd_kafka_group_list {
              +  struct rd_kafka_group_info *groups;
              +  int group_cnt;
              +};
              +rd_kafka_resp_err_t rd_kafka_list_groups(rd_kafka_t *rk, const char *group, const struct rd_kafka_group_list **grplistp, int timeout_ms);
              +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist);
              +int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist);
              +void rd_kafka_set_logger(rd_kafka_t *rk, void (*func)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_set_log_level(rd_kafka_t *rk, int level);
              +void rd_kafka_log_print(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +int rd_kafka_outq_len(rd_kafka_t *rk);
              +void rd_kafka_dump(FILE *fp, rd_kafka_t *rk);
              +int rd_kafka_thread_cnt(void);
              +typedef enum rd_kafka_thread_type_t {
              +  RD_KAFKA_THREAD_MAIN,
              +  RD_KAFKA_THREAD_BACKGROUND,
              +  RD_KAFKA_THREAD_BROKER,
              +} rd_kafka_thread_type_t;
              +int rd_kafka_wait_destroyed(int timeout_ms);
              +int rd_kafka_unittest(void);
              +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk);
              +typedef int rd_kafka_event_type_t;
              +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev);
              +void rd_kafka_event_destroy(rd_kafka_event_t *rkev);
              +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev);
              +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, const rd_kafka_message_t **rkmessages, size_t size);
              +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev);
              +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev);
              +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev);
              +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev);
              +int rd_kafka_event_log(rd_kafka_event_t *rkev, const char **fac, const char **str, int *level);
              +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_list_t *rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_t *rd_kafka_event_topic_partition(rd_kafka_event_t *rkev);
              +typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t;
              +const rd_kafka_CreateTopics_result_t *rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteTopics_result_t *rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreatePartitions_result_t *rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConfigs_result_t *rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConfigs_result_t *rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev);
              +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms);
              +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms);
              +typedef rd_kafka_resp_err_t (rd_kafka_plugin_f_conf_init_t)(rd_kafka_conf_t *conf, void **plug_opaquep, char *errstr, size_t errstr_size);
              +typedef rd_kafka_conf_res_t (rd_kafka_interceptor_f_on_conf_set_t)(rd_kafka_conf_t *conf, const char *name, const char *val, char *errstr, size_t errstr_size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_dup_t)(rd_kafka_conf_t *new_conf, const rd_kafka_conf_t *old_conf, size_t filter_cnt, const char **filter, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_destroy_t)(void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_new_t)(rd_kafka_t *rk, const rd_kafka_conf_t *conf, void *ic_opaque, char *errstr, size_t errstr_size);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_send_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_acknowledgement_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_consume_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_commit_t)(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_request_sent_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_start_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_exit_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_new_t *on_new, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_destroy_t *on_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_send_t *on_send, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_consume_t *on_consume, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_commit_t *on_commit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres);
              +typedef enum rd_kafka_admin_op_t {
              +  RD_KAFKA_ADMIN_OP_ANY = 0,
              +  RD_KAFKA_ADMIN_OP_CREATETOPICS,
              +  RD_KAFKA_ADMIN_OP_DELETETOPICS,
              +  RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONFIGS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS,
              +  RD_KAFKA_ADMIN_OP__CNT,
              +} rd_kafka_admin_op_t;
              +typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t;
              +rd_kafka_AdminOptions_t *rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
              +void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, int true_or_false, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, int32_t broker_id, char *errstr, size_t errstr_size);
              +void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, void *ev_opaque);
              +typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t;
              +rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, int num_partitions, int replication_factor, char *errstr, size_t errstr_size);
              +void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic);
              +void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, int32_t partition, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, const char *name, const char *value);
              +void rd_kafka_CreateTopics(rd_kafka_t *rk, rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(const rd_kafka_CreateTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
              +rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic);
              +void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic);
              +void rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt);
              +void rd_kafka_DeleteTopics(rd_kafka_t *rk, rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(const rd_kafka_DeleteTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t;
              +rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new(const char *topic, size_t new_total_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts);
              +void rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment(rd_kafka_NewPartitions_t *new_parts, int32_t new_partition_idx, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_CreatePartitions(rd_kafka_t *rk, rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreatePartitions_result_topics(const rd_kafka_CreatePartitions_result_t *result, size_t *cntp);
              +typedef enum rd_kafka_ConfigSource_t {
              +  RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
              +  RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4,
              +  RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5,
              +  RD_KAFKA_CONFIG_SOURCE__CNT,
              +} rd_kafka_ConfigSource_t;
              +const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource);
              +typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t;
              +const char *rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry);
              +const char *rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry);
              +rd_kafka_ConfigSource_t rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, size_t *cntp);
              +typedef enum rd_kafka_ResourceType_t {
              +  RD_KAFKA_RESOURCE_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_ANY = 1,
              +  RD_KAFKA_RESOURCE_TOPIC = 2,
              +  RD_KAFKA_RESOURCE_GROUP = 3,
              +  RD_KAFKA_RESOURCE_BROKER = 4,
              +  RD_KAFKA_RESOURCE__CNT,
              +} rd_kafka_ResourceType_t;
              +const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype);
              +typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
              +rd_kafka_ConfigResource_t *rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, const char *resname);
              +void rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config);
              +void rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, size_t config_cnt);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, const char *name, const char *value);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, size_t *cntp);
              +rd_kafka_ResourceType_t rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config);
              +void rd_kafka_AlterConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_AlterConfigs_result_resources(const rd_kafka_AlterConfigs_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_DescribeConfigs_result_resources(const rd_kafka_DescribeConfigs_result_t *result, size_t *cntp);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token(rd_kafka_t *rk, const char *token_value, int64_t md_lifetime_ms, const char *md_principal_name, const char **extensions, size_t extension_size, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk, const char *errstr);
              +'
              +
              +rdkafka.h, rdkafka_mock.h
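+
+RD_KAFKA_CDEF is a string of C declarations in the form consumed by PHP's FFI extension, covering the librdkafka API listed above. A minimal sketch of how such a cdef string can be loaded follows; the shared-library name "librdkafka.so.1" and the reduced two-function cdef are illustrative assumptions, not the binding's actual bootstrap code.
+
+<?php
+// Illustrative sketch: bind a tiny subset of the librdkafka API via PHP FFI
+// (assumes the librdkafka shared library is installed on the system).
+$cdef = 'int rd_kafka_version(void); const char *rd_kafka_version_str(void);';
+$ffi  = \FFI::cdef($cdef, 'librdkafka.so.1');
+
+$version    = $ffi->rd_kafka_version();                   // e.g. 16909055
+$versionStr = \FFI::string($ffi->rd_kafka_version_str()); // e.g. "1.2.2"
+printf("librdkafka %s (0x%08x)\n", $versionStr, $version);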

              +

+RD_KAFKA_VERSION
+
+public RD_KAFKA_VERSION = 16909055
+
+librdkafka version
+
+Interpreted as hex MM.mm.rr.xx:
+
+• MM = Major
+• mm = minor
+• rr = revision
+• xx = pre-release id (0xff is the final release)
+
+E.g.: 0x000801ff = 0.8.1
+
+Remarks
+This value should only be used at compile time; for runtime version checks use rd_kafka_version().
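+
+To make the MM.mm.rr.xx layout concrete, here is a small sketch that decodes the documented value 16909055 (0x010202ff) into its components; the helper name decodeRdKafkaVersion is hypothetical and only illustrates the bit layout described above.
+
+<?php
+// Hypothetical helper: split an RD_KAFKA_VERSION-style integer into MM.mm.rr.xx.
+function decodeRdKafkaVersion(int $version): string
+{
+    $major      = ($version >> 24) & 0xff; // MM
+    $minor      = ($version >> 16) & 0xff; // mm
+    $revision   = ($version >> 8) & 0xff;  // rr
+    $preRelease = $version & 0xff;         // xx, 0xff marks a final release
+
+    $suffix = $preRelease === 0xff ? '' : sprintf('-pre%d', $preRelease);
+    return sprintf('%d.%d.%d%s', $major, $minor, $revision, $suffix);
+}
+
+echo decodeRdKafkaVersion(16909055); // "1.2.2" (0x010202ff, final release)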

+RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE
+
+public RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76
+
+Unsupported compression type
+
+RD_KAFKA_RESP_ERR_END_ALL
+
+public RD_KAFKA_RESP_ERR_END_ALL = 82
+
+enum rd_kafka_resp_err_t
+
+RD_KAFKA_ADMIN_OP__CNT
+
+public RD_KAFKA_ADMIN_OP__CNT = 6
+
+Number of ops defined
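+
+The numeric constants above mirror librdkafka's rd_kafka_resp_err_t values: negative codes are client-internal, 0 means success, and positive broker codes run up to RD_KAFKA_RESP_ERR_END_ALL. A hedged sketch of how application code might branch on such a code follows; handleDeliveryError is a hypothetical helper, and the two constants are redeclared locally with the documented values purely so the snippet is self-contained.
+
+<?php
+// Values as documented above; the extension predefines these constants.
+const RD_KAFKA_RESP_ERR_NO_ERROR = 0;
+const RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76;
+
+// Hypothetical helper: classify a librdkafka response error code.
+function handleDeliveryError(int $err): string
+{
+    if ($err === RD_KAFKA_RESP_ERR_NO_ERROR) {
+        return 'delivered';
+    }
+    if ($err === RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE) {
+        return 'broker rejected the configured compression.type';
+    }
+    // Negative codes are librdkafka-internal, positive codes come from the broker.
+    return $err < 0 ? sprintf('client-side error %d', $err)
+                    : sprintf('broker error %d', $err);
+}
+
+echo handleDeliveryError(76); // "broker rejected the configured compression.type"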

+RD_KAFKA_CDEF
+
              public RD_KAFKA_CDEF = 'typedef long int ssize_t;
              +typedef struct _IO_FILE FILE;
              +typedef long int mode_t;
              +typedef signed int int16_t;
              +typedef unsigned int uint16_t;
              +typedef signed int int32_t;
              +typedef signed long int int64_t;
              +int rd_kafka_version(void);
              +const char *rd_kafka_version_str(void);
              +typedef enum rd_kafka_type_t {
              +  RD_KAFKA_PRODUCER,
              +  RD_KAFKA_CONSUMER,
              +} rd_kafka_type_t;
              +typedef enum rd_kafka_timestamp_type_t {
              +  RD_KAFKA_TIMESTAMP_NOT_AVAILABLE,
              +  RD_KAFKA_TIMESTAMP_CREATE_TIME,
              +  RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME,
              +} rd_kafka_timestamp_type_t;
              +const char *rd_kafka_get_debug_contexts(void);
              +typedef struct rd_kafka_s rd_kafka_t;
              +typedef struct rd_kafka_topic_s rd_kafka_topic_t;
              +typedef struct rd_kafka_conf_s rd_kafka_conf_t;
              +typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t;
              +typedef struct rd_kafka_queue_s rd_kafka_queue_t;
              +typedef struct rd_kafka_op_s rd_kafka_event_t;
              +typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t;
              +typedef enum {
              +  RD_KAFKA_RESP_ERR__BEGIN = (- 200),
              +  RD_KAFKA_RESP_ERR__BAD_MSG = (- 199),
              +  RD_KAFKA_RESP_ERR__BAD_COMPRESSION = (- 198),
              +  RD_KAFKA_RESP_ERR__DESTROY = (- 197),
              +  RD_KAFKA_RESP_ERR__FAIL = (- 196),
              +  RD_KAFKA_RESP_ERR__TRANSPORT = (- 195),
              +  RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = (- 194),
              +  RD_KAFKA_RESP_ERR__RESOLVE = (- 193),
              +  RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = (- 192),
              +  RD_KAFKA_RESP_ERR__PARTITION_EOF = (- 191),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = (- 190),
              +  RD_KAFKA_RESP_ERR__FS = (- 189),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = (- 188),
              +  RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = (- 187),
              +  RD_KAFKA_RESP_ERR__INVALID_ARG = (- 186),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT = (- 185),
              +  RD_KAFKA_RESP_ERR__QUEUE_FULL = (- 184),
              +  RD_KAFKA_RESP_ERR__ISR_INSUFF = (- 183),
              +  RD_KAFKA_RESP_ERR__NODE_UPDATE = (- 182),
              +  RD_KAFKA_RESP_ERR__SSL = (- 181),
              +  RD_KAFKA_RESP_ERR__WAIT_COORD = (- 180),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = (- 179),
              +  RD_KAFKA_RESP_ERR__IN_PROGRESS = (- 178),
              +  RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = (- 177),
              +  RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = (- 176),
              +  RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = (- 175),
              +  RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = (- 174),
              +  RD_KAFKA_RESP_ERR__CONFLICT = (- 173),
              +  RD_KAFKA_RESP_ERR__STATE = (- 172),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = (- 171),
              +  RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = (- 170),
              +  RD_KAFKA_RESP_ERR__AUTHENTICATION = (- 169),
              +  RD_KAFKA_RESP_ERR__NO_OFFSET = (- 168),
              +  RD_KAFKA_RESP_ERR__OUTDATED = (- 167),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = (- 166),
              +  RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = (- 165),
              +  RD_KAFKA_RESP_ERR__WAIT_CACHE = (- 164),
              +  RD_KAFKA_RESP_ERR__INTR = (- 163),
              +  RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = (- 162),
              +  RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = (- 161),
              +  RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = (- 160),
              +  RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = (- 159),
              +  RD_KAFKA_RESP_ERR__PARTIAL = (- 158),
              +  RD_KAFKA_RESP_ERR__READ_ONLY = (- 157),
              +  RD_KAFKA_RESP_ERR__NOENT = (- 156),
              +  RD_KAFKA_RESP_ERR__UNDERFLOW = (- 155),
              +  RD_KAFKA_RESP_ERR__INVALID_TYPE = (- 154),
              +  RD_KAFKA_RESP_ERR__RETRY = (- 153),
              +  RD_KAFKA_RESP_ERR__PURGE_QUEUE = (- 152),
              +  RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = (- 151),
              +  RD_KAFKA_RESP_ERR__FATAL = (- 150),
              +  RD_KAFKA_RESP_ERR__INCONSISTENT = (- 149),
              +  RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = (- 148),
              +  RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = (- 147),
              +  RD_KAFKA_RESP_ERR__END = (- 100),
              +  RD_KAFKA_RESP_ERR_UNKNOWN = (- 1),
              +  RD_KAFKA_RESP_ERR_NO_ERROR = 0,
              +  RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG = 2,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4,
              +  RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5,
              +  RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6,
              +  RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7,
              +  RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8,
              +  RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9,
              +  RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10,
              +  RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11,
              +  RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12,
              +  RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13,
              +  RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS = 14,
              +  RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE = 15,
              +  RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP = 16,
              +  RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17,
              +  RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
              +  RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25,
              +  RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26,
              +  RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27,
              +  RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
              +  RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29,
              +  RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30,
              +  RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
              +  RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35,
              +  RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36,
              +  RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39,
              +  RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40,
              +  RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
              +  RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44,
              +  RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47,
              +  RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49,
              +  RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50,
              +  RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51,
              +  RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52,
              +  RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
              +  RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54,
              +  RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55,
              +  RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56,
              +  RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57,
              +  RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59,
              +  RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66,
              +  RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67,
              +  RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68,
              +  RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69,
              +  RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70,
              +  RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71,
              +  RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72,
              +  RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73,
              +  RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76,
              +  RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77,
              +  RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78,
              +  RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79,
              +  RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80,
              +  RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81,
              +  RD_KAFKA_RESP_ERR_END_ALL,
              +} rd_kafka_resp_err_t;
              +struct rd_kafka_err_desc {
              +  rd_kafka_resp_err_t code;
              +  char *name;
              +  char *desc;
              +};
              +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, size_t *cntp);
              +const char *rd_kafka_err2str(rd_kafka_resp_err_t err);
              +const char *rd_kafka_err2name(rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_last_error(void);
              +rd_kafka_resp_err_t rd_kafka_errno2err(int errnox);
              +int rd_kafka_errno(void);
              +rd_kafka_resp_err_t rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason);
              +typedef struct rd_kafka_topic_partition_s {
              +  char *topic;
              +  int32_t partition;
              +  int64_t offset;
              +  void *metadata;
              +  size_t metadata_size;
              +  void *opaque;
              +  rd_kafka_resp_err_t err;
              +  void *_private;
              +} rd_kafka_topic_partition_t;
              +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar);
              +typedef struct rd_kafka_topic_partition_list_s {
              +  int cnt;
              +  int size;
              +  rd_kafka_topic_partition_t *elems;
              +} rd_kafka_topic_partition_list_t;
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size);
              +void rd_kafka_topic_partition_list_destroy(rd_kafka_topic_partition_list_t *rkparlist);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_add_range(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t start, int32_t stop);
              +int rd_kafka_topic_partition_list_del(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +int rd_kafka_topic_partition_list_del_by_idx(rd_kafka_topic_partition_list_t *rktparlist, int idx);
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src);
              +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition, int64_t offset);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_sort(rd_kafka_topic_partition_list_t *rktparlist, int (*cmp)(const void *a, const void *b, void *cmp_opaque), void *cmp_opaque);
              +typedef enum rd_kafka_vtype_t {
              +  RD_KAFKA_VTYPE_END,
              +  RD_KAFKA_VTYPE_TOPIC,
              +  RD_KAFKA_VTYPE_RKT,
              +  RD_KAFKA_VTYPE_PARTITION,
              +  RD_KAFKA_VTYPE_VALUE,
              +  RD_KAFKA_VTYPE_KEY,
              +  RD_KAFKA_VTYPE_OPAQUE,
              +  RD_KAFKA_VTYPE_MSGFLAGS,
              +  RD_KAFKA_VTYPE_TIMESTAMP,
              +  RD_KAFKA_VTYPE_HEADER,
              +  RD_KAFKA_VTYPE_HEADERS,
              +} rd_kafka_vtype_t;
              +typedef struct rd_kafka_headers_s rd_kafka_headers_t;
              +rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count);
              +void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs);
              +rd_kafka_headers_t *rd_kafka_headers_copy(const rd_kafka_headers_t *src);
              +rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, const char *name, ssize_t name_size, const void *value, ssize_t value_size);
              +rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, const char *name);
              +rd_kafka_resp_err_t rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get(const rd_kafka_headers_t *hdrs, size_t idx, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, size_t idx, const char **namep, const void **valuep, size_t *sizep);
              +typedef struct rd_kafka_message_s {
              +  rd_kafka_resp_err_t err;
              +  rd_kafka_topic_t *rkt;
              +  int32_t partition;
              +  void *payload;
              +  size_t len;
              +  void *key;
              +  size_t key_len;
              +  int64_t offset;
              +  void *_private;
              +} rd_kafka_message_t;
              +void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);
              +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, rd_kafka_timestamp_type_t *tstype);
              +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage);
              +rd_kafka_resp_err_t rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +rd_kafka_resp_err_t rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t *hdrs);
              +size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs);
              +typedef enum {
              +  RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0,
              +  RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1,
              +  RD_KAFKA_MSG_STATUS_PERSISTED = 2,
              +} rd_kafka_msg_status_t;
              +rd_kafka_msg_status_t rd_kafka_message_status(const rd_kafka_message_t *rkmessage);
              +typedef enum {
              +  RD_KAFKA_CONF_UNKNOWN = (- 2),
              +  RD_KAFKA_CONF_INVALID = (- 1),
              +  RD_KAFKA_CONF_OK = 0,
              +} rd_kafka_conf_res_t;
              +rd_kafka_conf_t *rd_kafka_conf_new(void);
              +void rd_kafka_conf_destroy(rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, size_t filter_cnt, const char **filter);
              +const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk);
              +rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events);
              +void rd_kafka_conf_set_background_event_cb(rd_kafka_conf_t *conf, void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque));
              +void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, void (*dr_cb)(rd_kafka_t *rk, void *payload, size_t len, rd_kafka_resp_err_t err, void *opaque, void *msg_opaque));
              +void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, void (*dr_msg_cb)(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_consume_cb(rd_kafka_conf_t *conf, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_rebalance_cb(rd_kafka_conf_t *conf, void (*rebalance_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque));
              +void rd_kafka_conf_set_offset_commit_cb(rd_kafka_conf_t *conf, void (*offset_commit_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque));
              +void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, void (*error_cb)(rd_kafka_t *rk, int err, const char *reason, void *opaque));
              +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, void (*throttle_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque));
              +void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, void (*log_cb)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque));
              +void rd_kafka_conf_set_oauthbearer_token_refresh_cb(rd_kafka_conf_t *conf, void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, const char *oauthbearer_config, void *opaque));
              +void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, int (*socket_cb)(int domain, int type, int protocol, void *opaque));
              +void rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, int (*connect_cb)(int sockfd, const struct sockaddr *addr, int addrlen, const char *id, void *opaque));
              +void rd_kafka_conf_set_closesocket_cb(rd_kafka_conf_t *conf, int (*closesocket_cb)(int sockfd, void *opaque));
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb(rd_kafka_conf_t *conf, int (*ssl_cert_verify_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int *x509_error, int depth, const char *buf, size_t size, char *errstr, size_t errstr_size, void *opaque));
              +typedef enum rd_kafka_cert_type_t {
              +  RD_KAFKA_CERT_PUBLIC_KEY,
              +  RD_KAFKA_CERT_PRIVATE_KEY,
              +  RD_KAFKA_CERT_CA,
              +  RD_KAFKA_CERT__CNT,
              +} rd_kafka_cert_type_t;
              +typedef enum rd_kafka_cert_enc_t {
              +  RD_KAFKA_CERT_ENC_PKCS12,
              +  RD_KAFKA_CERT_ENC_DER,
              +  RD_KAFKA_CERT_ENC_PEM,
              +  RD_KAFKA_CERT_ENC__CNT,
              +} rd_kafka_cert_enc_t;
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, rd_kafka_cert_type_t cert_type, rd_kafka_cert_enc_t cert_enc, const void *buffer, size_t size, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque);
              +void *rd_kafka_opaque(const rd_kafka_t *rk);
              +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, rd_kafka_topic_conf_t *tconf);
              +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp);
              +const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, size_t *cntp);
              +void rd_kafka_conf_dump_free(const char **arr, size_t cnt);
              +void rd_kafka_conf_properties_show(FILE *fp);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf);
              +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk);
              +void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *rkt_opaque);
              +void rd_kafka_topic_conf_set_partitioner_cb(rd_kafka_topic_conf_t *topic_conf, int32_t (*partitioner)(const rd_kafka_topic_t *rkt, const void *keydata, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque));
              +void rd_kafka_topic_conf_set_msg_order_cmp(rd_kafka_topic_conf_t *topic_conf, int (*msg_order_cmp)(const rd_kafka_message_t *a, const rd_kafka_message_t *b));
              +int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, int32_t partition);
              +int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, char *errstr, size_t errstr_size);
              +void rd_kafka_destroy(rd_kafka_t *rk);
              +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags);
              +const char *rd_kafka_name(const rd_kafka_t *rk);
              +rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk);
              +char *rd_kafka_memberid(const rd_kafka_t *rk);
              +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms);
              +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, rd_kafka_topic_conf_t *conf);
              +void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt);
              +const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt);
              +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt);
              +int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms);
              +void rd_kafka_yield(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_pause_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_resume_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high);
              +rd_kafka_resp_err_t rd_kafka_offsets_for_times(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets, int timeout_ms);
              +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr);
              +rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk);
              +void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu);
              +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, const char *topic, int32_t partition);
              +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk);
              +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst);
              +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu);
              +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, int fd, const void *payload, size_t size);
              +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, void (*event_cb)(rd_kafka_t *rk, void *qev_opaque), void *qev_opaque);
              +int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
              +rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, int timeout_ms);
              +rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms);
              +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, int timeout_ms);
              +ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +rd_kafka_resp_err_t rd_kafka_offsets_store(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets);
              +rd_kafka_resp_err_t rd_kafka_subscribe(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *topics);
              +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics);
              +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_assignment(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **partitions);
              +rd_kafka_resp_err_t rd_kafka_commit(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_message(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_queue(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_queue_t *rkqu, void (*cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_committed(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, void *payload, size_t len, const void *key, size_t keylen, void *msg_opaque);
              +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...);
              +int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, rd_kafka_message_t *rkmessages, int message_cnt);
              +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags);
              +typedef struct rd_kafka_metadata_broker {
              +  int32_t id;
              +  char *host;
              +  int port;
              +} rd_kafka_metadata_broker_t;
              +typedef struct rd_kafka_metadata_partition {
              +  int32_t id;
              +  rd_kafka_resp_err_t err;
              +  int32_t leader;
              +  int replica_cnt;
              +  int32_t *replicas;
              +  int isr_cnt;
              +  int32_t *isrs;
              +} rd_kafka_metadata_partition_t;
              +typedef struct rd_kafka_metadata_topic {
              +  char *topic;
              +  int partition_cnt;
              +  struct rd_kafka_metadata_partition *partitions;
              +  rd_kafka_resp_err_t err;
              +} rd_kafka_metadata_topic_t;
              +typedef struct rd_kafka_metadata {
              +  int broker_cnt;
              +  struct rd_kafka_metadata_broker *brokers;
              +  int topic_cnt;
              +  struct rd_kafka_metadata_topic *topics;
              +  int32_t orig_broker_id;
              +  char *orig_broker_name;
              +} rd_kafka_metadata_t;
              +rd_kafka_resp_err_t rd_kafka_metadata(rd_kafka_t *rk, int all_topics, rd_kafka_topic_t *only_rkt, const struct rd_kafka_metadata **metadatap, int timeout_ms);
              +void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata);
              +struct rd_kafka_group_member_info {
              +  char *member_id;
              +  char *client_id;
              +  char *client_host;
              +  void *member_metadata;
              +  int member_metadata_size;
              +  void *member_assignment;
              +  int member_assignment_size;
              +};
              +struct rd_kafka_group_info {
              +  struct rd_kafka_metadata_broker broker;
              +  char *group;
              +  rd_kafka_resp_err_t err;
              +  char *state;
              +  char *protocol_type;
              +  char *protocol;
              +  struct rd_kafka_group_member_info *members;
              +  int member_cnt;
              +};
              +struct rd_kafka_group_list {
              +  struct rd_kafka_group_info *groups;
              +  int group_cnt;
              +};
              +rd_kafka_resp_err_t rd_kafka_list_groups(rd_kafka_t *rk, const char *group, const struct rd_kafka_group_list **grplistp, int timeout_ms);
              +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist);
              +int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist);
              +void rd_kafka_set_logger(rd_kafka_t *rk, void (*func)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_set_log_level(rd_kafka_t *rk, int level);
              +void rd_kafka_log_print(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +int rd_kafka_outq_len(rd_kafka_t *rk);
              +void rd_kafka_dump(FILE *fp, rd_kafka_t *rk);
              +int rd_kafka_thread_cnt(void);
              +typedef enum rd_kafka_thread_type_t {
              +  RD_KAFKA_THREAD_MAIN,
              +  RD_KAFKA_THREAD_BACKGROUND,
              +  RD_KAFKA_THREAD_BROKER,
              +} rd_kafka_thread_type_t;
              +int rd_kafka_wait_destroyed(int timeout_ms);
              +int rd_kafka_unittest(void);
              +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk);
              +typedef int rd_kafka_event_type_t;
              +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev);
              +void rd_kafka_event_destroy(rd_kafka_event_t *rkev);
              +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev);
              +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, const rd_kafka_message_t **rkmessages, size_t size);
              +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev);
              +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev);
              +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev);
              +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev);
              +int rd_kafka_event_log(rd_kafka_event_t *rkev, const char **fac, const char **str, int *level);
              +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_list_t *rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_t *rd_kafka_event_topic_partition(rd_kafka_event_t *rkev);
              +typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t;
              +const rd_kafka_CreateTopics_result_t *rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteTopics_result_t *rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreatePartitions_result_t *rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConfigs_result_t *rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConfigs_result_t *rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev);
              +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms);
              +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms);
              +typedef rd_kafka_resp_err_t (rd_kafka_plugin_f_conf_init_t)(rd_kafka_conf_t *conf, void **plug_opaquep, char *errstr, size_t errstr_size);
              +typedef rd_kafka_conf_res_t (rd_kafka_interceptor_f_on_conf_set_t)(rd_kafka_conf_t *conf, const char *name, const char *val, char *errstr, size_t errstr_size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_dup_t)(rd_kafka_conf_t *new_conf, const rd_kafka_conf_t *old_conf, size_t filter_cnt, const char **filter, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_destroy_t)(void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_new_t)(rd_kafka_t *rk, const rd_kafka_conf_t *conf, void *ic_opaque, char *errstr, size_t errstr_size);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_send_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_acknowledgement_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_consume_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_commit_t)(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_request_sent_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_start_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_exit_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_new_t *on_new, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_destroy_t *on_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_send_t *on_send, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_consume_t *on_consume, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_commit_t *on_commit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres);
              +typedef enum rd_kafka_admin_op_t {
              +  RD_KAFKA_ADMIN_OP_ANY = 0,
              +  RD_KAFKA_ADMIN_OP_CREATETOPICS,
              +  RD_KAFKA_ADMIN_OP_DELETETOPICS,
              +  RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONFIGS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS,
              +  RD_KAFKA_ADMIN_OP__CNT,
              +} rd_kafka_admin_op_t;
              +typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t;
              +rd_kafka_AdminOptions_t *rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
              +void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, int true_or_false, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, int32_t broker_id, char *errstr, size_t errstr_size);
              +void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, void *ev_opaque);
              +typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t;
              +rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, int num_partitions, int replication_factor, char *errstr, size_t errstr_size);
              +void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic);
              +void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, int32_t partition, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, const char *name, const char *value);
              +void rd_kafka_CreateTopics(rd_kafka_t *rk, rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(const rd_kafka_CreateTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
              +rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic);
              +void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic);
              +void rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt);
              +void rd_kafka_DeleteTopics(rd_kafka_t *rk, rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(const rd_kafka_DeleteTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t;
              +rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new(const char *topic, size_t new_total_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts);
              +void rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment(rd_kafka_NewPartitions_t *new_parts, int32_t new_partition_idx, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_CreatePartitions(rd_kafka_t *rk, rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreatePartitions_result_topics(const rd_kafka_CreatePartitions_result_t *result, size_t *cntp);
              +typedef enum rd_kafka_ConfigSource_t {
              +  RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
              +  RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4,
              +  RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5,
              +  RD_KAFKA_CONFIG_SOURCE__CNT,
              +} rd_kafka_ConfigSource_t;
              +const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource);
              +typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t;
              +const char *rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry);
              +const char *rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry);
              +rd_kafka_ConfigSource_t rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, size_t *cntp);
              +typedef enum rd_kafka_ResourceType_t {
              +  RD_KAFKA_RESOURCE_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_ANY = 1,
              +  RD_KAFKA_RESOURCE_TOPIC = 2,
              +  RD_KAFKA_RESOURCE_GROUP = 3,
              +  RD_KAFKA_RESOURCE_BROKER = 4,
              +  RD_KAFKA_RESOURCE__CNT,
              +} rd_kafka_ResourceType_t;
              +const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype);
              +typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
              +rd_kafka_ConfigResource_t *rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, const char *resname);
              +void rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config);
              +void rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, size_t config_cnt);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, const char *name, const char *value);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, size_t *cntp);
              +rd_kafka_ResourceType_t rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config);
              +void rd_kafka_AlterConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_AlterConfigs_result_resources(const rd_kafka_AlterConfigs_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_DescribeConfigs_result_resources(const rd_kafka_DescribeConfigs_result_t *result, size_t *cntp);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token(rd_kafka_t *rk, const char *token_value, int64_t md_lifetime_ms, const char *md_principal_name, const char **extensions, size_t extension_size, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk, const char *errstr);
              +'
              +
              +rdkafka.h, rdkafka_mock.h
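As a rough sketch of how such a cdef string is typically consumed (assumed setup, not this library's own bootstrap code: the shared-library name and the inline declaration subset are placeholders), PHP's FFI::cdef() can bind declarations like these against an installed librdkafka:

<?php
// Assumption: librdkafka is installed and loadable as "librdkafka.so.1".
// Only a tiny subset of the declarations documented here is bound below.
$cdef = <<<'CDEF'
int rd_kafka_version(void);
const char *rd_kafka_version_str(void);
CDEF;

$ffi = \FFI::cdef($cdef, 'librdkafka.so.1');

// Runtime version check through the bound C API.
echo \FFI::string($ffi->rd_kafka_version_str()), PHP_EOL; // e.g. "1.3.0"
printf("0x%08x\n", $ffi->rd_kafka_version());             // e.g. 0x010300ff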

              +

              RD_KAFKA_VERSION

              +
              public RD_KAFKA_VERSION = 16974079
              +
              +

              librdkafka version

              +

              Interpreted as hex MM.mm.rr.xx:

• MM = Major
• mm = minor
• rr = revision
• xx = pre-release id (0xff is the final release)

              E.g.: 0x000801ff = 0.8.1

              +
              Remarks
This value should only be used at compile time; for runtime version checks, use rd_kafka_version().
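For illustration, a minimal sketch in plain PHP (the helper name is hypothetical, not part of this library) of how the MM.mm.rr.xx layout above can be unpacked from the integer value:

<?php
// Hypothetical helper, not part of the library API: decodes the
// MM.mm.rr.xx hex layout described above.
function decodeRdKafkaVersion(int $version): string
{
    $major      = ($version >> 24) & 0xff;
    $minor      = ($version >> 16) & 0xff;
    $revision   = ($version >> 8) & 0xff;
    $preRelease = $version & 0xff; // 0xff marks a final release

    $suffix = $preRelease === 0xff ? '' : sprintf('-pre%d', $preRelease);

    return sprintf('%d.%d.%d%s', $major, $minor, $revision, $suffix);
}

// 16974079 === 0x010300ff, i.e. librdkafka 1.3.0 (final release)
echo decodeRdKafkaVersion(16974079), PHP_EOL; // "1.3.0"

The same decoding applied to the example above gives 0x000801ff → 0.8.1.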

              RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE

              +
              public RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76
              +
              +

              Unsupported compression type


              RD_KAFKA_RESP_ERR_END_ALL

              +

              public RD_KAFKA_RESP_ERR_END_ALL = 82
              +
              +enum rd_kafka_resp_err_t

              +

              RD_KAFKA_ADMIN_OP__CNT

              +
              public RD_KAFKA_ADMIN_OP__CNT = 6
              +
              +

              Number of ops defined


              RD_KAFKA_CDEF

              +

              public RD_KAFKA_CDEF = 'typedef long int ssize_t;
              +typedef struct _IO_FILE FILE;
              +typedef long int mode_t;
              +typedef signed int int16_t;
              +typedef unsigned int uint16_t;
              +typedef signed int int32_t;
              +typedef signed long int int64_t;
              +int rd_kafka_version(void);
              +const char *rd_kafka_version_str(void);
              +typedef enum rd_kafka_type_t {
              +  RD_KAFKA_PRODUCER,
              +  RD_KAFKA_CONSUMER,
              +} rd_kafka_type_t;
              +typedef enum rd_kafka_timestamp_type_t {
              +  RD_KAFKA_TIMESTAMP_NOT_AVAILABLE,
              +  RD_KAFKA_TIMESTAMP_CREATE_TIME,
              +  RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME,
              +} rd_kafka_timestamp_type_t;
              +const char *rd_kafka_get_debug_contexts(void);
              +typedef struct rd_kafka_s rd_kafka_t;
              +typedef struct rd_kafka_topic_s rd_kafka_topic_t;
              +typedef struct rd_kafka_conf_s rd_kafka_conf_t;
              +typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t;
              +typedef struct rd_kafka_queue_s rd_kafka_queue_t;
              +typedef struct rd_kafka_op_s rd_kafka_event_t;
              +typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t;
              +typedef enum {
              +  RD_KAFKA_RESP_ERR__BEGIN = (- 200),
              +  RD_KAFKA_RESP_ERR__BAD_MSG = (- 199),
              +  RD_KAFKA_RESP_ERR__BAD_COMPRESSION = (- 198),
              +  RD_KAFKA_RESP_ERR__DESTROY = (- 197),
              +  RD_KAFKA_RESP_ERR__FAIL = (- 196),
              +  RD_KAFKA_RESP_ERR__TRANSPORT = (- 195),
              +  RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = (- 194),
              +  RD_KAFKA_RESP_ERR__RESOLVE = (- 193),
              +  RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = (- 192),
              +  RD_KAFKA_RESP_ERR__PARTITION_EOF = (- 191),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = (- 190),
              +  RD_KAFKA_RESP_ERR__FS = (- 189),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = (- 188),
              +  RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = (- 187),
              +  RD_KAFKA_RESP_ERR__INVALID_ARG = (- 186),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT = (- 185),
              +  RD_KAFKA_RESP_ERR__QUEUE_FULL = (- 184),
              +  RD_KAFKA_RESP_ERR__ISR_INSUFF = (- 183),
              +  RD_KAFKA_RESP_ERR__NODE_UPDATE = (- 182),
              +  RD_KAFKA_RESP_ERR__SSL = (- 181),
              +  RD_KAFKA_RESP_ERR__WAIT_COORD = (- 180),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = (- 179),
              +  RD_KAFKA_RESP_ERR__IN_PROGRESS = (- 178),
              +  RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = (- 177),
              +  RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = (- 176),
              +  RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = (- 175),
              +  RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = (- 174),
              +  RD_KAFKA_RESP_ERR__CONFLICT = (- 173),
              +  RD_KAFKA_RESP_ERR__STATE = (- 172),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = (- 171),
              +  RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = (- 170),
              +  RD_KAFKA_RESP_ERR__AUTHENTICATION = (- 169),
              +  RD_KAFKA_RESP_ERR__NO_OFFSET = (- 168),
              +  RD_KAFKA_RESP_ERR__OUTDATED = (- 167),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = (- 166),
              +  RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = (- 165),
              +  RD_KAFKA_RESP_ERR__WAIT_CACHE = (- 164),
              +  RD_KAFKA_RESP_ERR__INTR = (- 163),
              +  RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = (- 162),
              +  RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = (- 161),
              +  RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = (- 160),
              +  RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = (- 159),
              +  RD_KAFKA_RESP_ERR__PARTIAL = (- 158),
              +  RD_KAFKA_RESP_ERR__READ_ONLY = (- 157),
              +  RD_KAFKA_RESP_ERR__NOENT = (- 156),
              +  RD_KAFKA_RESP_ERR__UNDERFLOW = (- 155),
              +  RD_KAFKA_RESP_ERR__INVALID_TYPE = (- 154),
              +  RD_KAFKA_RESP_ERR__RETRY = (- 153),
              +  RD_KAFKA_RESP_ERR__PURGE_QUEUE = (- 152),
              +  RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = (- 151),
              +  RD_KAFKA_RESP_ERR__FATAL = (- 150),
              +  RD_KAFKA_RESP_ERR__INCONSISTENT = (- 149),
              +  RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = (- 148),
              +  RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = (- 147),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_BROKER = (- 146),
              +  RD_KAFKA_RESP_ERR__END = (- 100),
              +  RD_KAFKA_RESP_ERR_UNKNOWN = (- 1),
              +  RD_KAFKA_RESP_ERR_NO_ERROR = 0,
              +  RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG = 2,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4,
              +  RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5,
              +  RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6,
              +  RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7,
              +  RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8,
              +  RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9,
              +  RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10,
              +  RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11,
              +  RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12,
              +  RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS = 14,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE = 15,
              +  RD_KAFKA_RESP_ERR_NOT_COORDINATOR = 16,
              +  RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17,
              +  RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
              +  RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25,
              +  RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26,
              +  RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27,
              +  RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
              +  RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29,
              +  RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30,
              +  RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
              +  RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35,
              +  RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36,
              +  RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39,
              +  RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40,
              +  RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
              +  RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44,
              +  RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47,
              +  RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49,
              +  RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50,
              +  RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51,
              +  RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52,
              +  RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
              +  RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54,
              +  RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55,
              +  RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56,
              +  RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57,
              +  RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59,
              +  RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66,
              +  RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67,
              +  RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68,
              +  RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69,
              +  RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70,
              +  RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71,
              +  RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72,
              +  RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73,
              +  RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76,
              +  RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77,
              +  RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78,
              +  RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79,
              +  RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80,
              +  RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81,
              +  RD_KAFKA_RESP_ERR_END_ALL,
              +} rd_kafka_resp_err_t;
              +struct rd_kafka_err_desc {
              +  rd_kafka_resp_err_t code;
              +  char *name;
              +  char *desc;
              +};
              +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, size_t *cntp);
              +const char *rd_kafka_err2str(rd_kafka_resp_err_t err);
              +const char *rd_kafka_err2name(rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_last_error(void);
              +rd_kafka_resp_err_t rd_kafka_errno2err(int errnox);
              +int rd_kafka_errno(void);
              +rd_kafka_resp_err_t rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason);
              +typedef struct rd_kafka_topic_partition_s {
              +  char *topic;
              +  int32_t partition;
              +  int64_t offset;
              +  void *metadata;
              +  size_t metadata_size;
              +  void *opaque;
              +  rd_kafka_resp_err_t err;
              +  void *_private;
              +} rd_kafka_topic_partition_t;
              +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar);
              +typedef struct rd_kafka_topic_partition_list_s {
              +  int cnt;
              +  int size;
              +  rd_kafka_topic_partition_t *elems;
              +} rd_kafka_topic_partition_list_t;
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size);
              +void rd_kafka_topic_partition_list_destroy(rd_kafka_topic_partition_list_t *rkparlist);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_add_range(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t start, int32_t stop);
              +int rd_kafka_topic_partition_list_del(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +int rd_kafka_topic_partition_list_del_by_idx(rd_kafka_topic_partition_list_t *rktparlist, int idx);
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src);
              +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition, int64_t offset);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_sort(rd_kafka_topic_partition_list_t *rktparlist, int (*cmp)(const void *a, const void *b, void *cmp_opaque), void *cmp_opaque);
              +typedef enum rd_kafka_vtype_t {
              +  RD_KAFKA_VTYPE_END,
              +  RD_KAFKA_VTYPE_TOPIC,
              +  RD_KAFKA_VTYPE_RKT,
              +  RD_KAFKA_VTYPE_PARTITION,
              +  RD_KAFKA_VTYPE_VALUE,
              +  RD_KAFKA_VTYPE_KEY,
              +  RD_KAFKA_VTYPE_OPAQUE,
              +  RD_KAFKA_VTYPE_MSGFLAGS,
              +  RD_KAFKA_VTYPE_TIMESTAMP,
              +  RD_KAFKA_VTYPE_HEADER,
              +  RD_KAFKA_VTYPE_HEADERS,
              +} rd_kafka_vtype_t;
              +typedef struct rd_kafka_headers_s rd_kafka_headers_t;
              +rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count);
              +void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs);
              +rd_kafka_headers_t *rd_kafka_headers_copy(const rd_kafka_headers_t *src);
              +rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, const char *name, ssize_t name_size, const void *value, ssize_t value_size);
              +rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, const char *name);
              +rd_kafka_resp_err_t rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get(const rd_kafka_headers_t *hdrs, size_t idx, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, size_t idx, const char **namep, const void **valuep, size_t *sizep);
              +typedef struct rd_kafka_message_s {
              +  rd_kafka_resp_err_t err;
              +  rd_kafka_topic_t *rkt;
              +  int32_t partition;
              +  void *payload;
              +  size_t len;
              +  void *key;
              +  size_t key_len;
              +  int64_t offset;
              +  void *_private;
              +} rd_kafka_message_t;
              +void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);
              +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, rd_kafka_timestamp_type_t *tstype);
              +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage);
              +rd_kafka_resp_err_t rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +rd_kafka_resp_err_t rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t *hdrs);
              +size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs);
              +typedef enum {
              +  RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0,
              +  RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1,
              +  RD_KAFKA_MSG_STATUS_PERSISTED = 2,
              +} rd_kafka_msg_status_t;
              +rd_kafka_msg_status_t rd_kafka_message_status(const rd_kafka_message_t *rkmessage);
              +typedef enum {
              +  RD_KAFKA_CONF_UNKNOWN = (- 2),
              +  RD_KAFKA_CONF_INVALID = (- 1),
              +  RD_KAFKA_CONF_OK = 0,
              +} rd_kafka_conf_res_t;
              +rd_kafka_conf_t *rd_kafka_conf_new(void);
              +void rd_kafka_conf_destroy(rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, size_t filter_cnt, const char **filter);
              +const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk);
              +rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events);
              +void rd_kafka_conf_set_background_event_cb(rd_kafka_conf_t *conf, void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque));
              +void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, void (*dr_cb)(rd_kafka_t *rk, void *payload, size_t len, rd_kafka_resp_err_t err, void *opaque, void *msg_opaque));
              +void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, void (*dr_msg_cb)(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_consume_cb(rd_kafka_conf_t *conf, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_rebalance_cb(rd_kafka_conf_t *conf, void (*rebalance_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque));
              +void rd_kafka_conf_set_offset_commit_cb(rd_kafka_conf_t *conf, void (*offset_commit_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque));
              +void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, void (*error_cb)(rd_kafka_t *rk, int err, const char *reason, void *opaque));
              +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, void (*throttle_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque));
              +void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, void (*log_cb)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque));
              +void rd_kafka_conf_set_oauthbearer_token_refresh_cb(rd_kafka_conf_t *conf, void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, const char *oauthbearer_config, void *opaque));
              +void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, int (*socket_cb)(int domain, int type, int protocol, void *opaque));
              +void rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, int (*connect_cb)(int sockfd, const struct sockaddr *addr, int addrlen, const char *id, void *opaque));
              +void rd_kafka_conf_set_closesocket_cb(rd_kafka_conf_t *conf, int (*closesocket_cb)(int sockfd, void *opaque));
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb(rd_kafka_conf_t *conf, int (*ssl_cert_verify_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int *x509_error, int depth, const char *buf, size_t size, char *errstr, size_t errstr_size, void *opaque));
              +typedef enum rd_kafka_cert_type_t {
              +  RD_KAFKA_CERT_PUBLIC_KEY,
              +  RD_KAFKA_CERT_PRIVATE_KEY,
              +  RD_KAFKA_CERT_CA,
              +  RD_KAFKA_CERT__CNT,
              +} rd_kafka_cert_type_t;
              +typedef enum rd_kafka_cert_enc_t {
              +  RD_KAFKA_CERT_ENC_PKCS12,
              +  RD_KAFKA_CERT_ENC_DER,
              +  RD_KAFKA_CERT_ENC_PEM,
              +  RD_KAFKA_CERT_ENC__CNT,
              +} rd_kafka_cert_enc_t;
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, rd_kafka_cert_type_t cert_type, rd_kafka_cert_enc_t cert_enc, const void *buffer, size_t size, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque);
              +void *rd_kafka_opaque(const rd_kafka_t *rk);
              +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, rd_kafka_topic_conf_t *tconf);
              +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp);
              +const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, size_t *cntp);
              +void rd_kafka_conf_dump_free(const char **arr, size_t cnt);
              +void rd_kafka_conf_properties_show(FILE *fp);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf);
              +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk);
              +void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *rkt_opaque);
              +void rd_kafka_topic_conf_set_partitioner_cb(rd_kafka_topic_conf_t *topic_conf, int32_t (*partitioner)(const rd_kafka_topic_t *rkt, const void *keydata, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque));
              +void rd_kafka_topic_conf_set_msg_order_cmp(rd_kafka_topic_conf_t *topic_conf, int (*msg_order_cmp)(const rd_kafka_message_t *a, const rd_kafka_message_t *b));
              +int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, int32_t partition);
              +int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, char *errstr, size_t errstr_size);
              +void rd_kafka_destroy(rd_kafka_t *rk);
              +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags);
              +const char *rd_kafka_name(const rd_kafka_t *rk);
              +rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk);
              +char *rd_kafka_memberid(const rd_kafka_t *rk);
              +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms);
              +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, rd_kafka_topic_conf_t *conf);
              +void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt);
              +const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt);
              +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt);
              +int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms);
              +void rd_kafka_yield(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_pause_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_resume_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high);
              +rd_kafka_resp_err_t rd_kafka_offsets_for_times(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets, int timeout_ms);
              +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr);
              +rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk);
              +void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu);
              +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, const char *topic, int32_t partition);
              +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk);
              +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst);
              +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu);
              +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, int fd, const void *payload, size_t size);
              +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, void (*event_cb)(rd_kafka_t *rk, void *qev_opaque), void *qev_opaque);
              +int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
              +rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, int timeout_ms);
              +rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms);
              +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, int timeout_ms);
              +ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +rd_kafka_resp_err_t rd_kafka_offsets_store(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets);
              +rd_kafka_resp_err_t rd_kafka_subscribe(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *topics);
              +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics);
              +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_assignment(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **partitions);
              +rd_kafka_resp_err_t rd_kafka_commit(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_message(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_queue(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_queue_t *rkqu, void (*cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_committed(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, void *payload, size_t len, const void *key, size_t keylen, void *msg_opaque);
              +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...);
              +int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, rd_kafka_message_t *rkmessages, int message_cnt);
              +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags);
              +typedef struct rd_kafka_metadata_broker {
              +  int32_t id;
              +  char *host;
              +  int port;
              +} rd_kafka_metadata_broker_t;
              +typedef struct rd_kafka_metadata_partition {
              +  int32_t id;
              +  rd_kafka_resp_err_t err;
              +  int32_t leader;
              +  int replica_cnt;
              +  int32_t *replicas;
              +  int isr_cnt;
              +  int32_t *isrs;
              +} rd_kafka_metadata_partition_t;
              +typedef struct rd_kafka_metadata_topic {
              +  char *topic;
              +  int partition_cnt;
              +  struct rd_kafka_metadata_partition *partitions;
              +  rd_kafka_resp_err_t err;
              +} rd_kafka_metadata_topic_t;
              +typedef struct rd_kafka_metadata {
              +  int broker_cnt;
              +  struct rd_kafka_metadata_broker *brokers;
              +  int topic_cnt;
              +  struct rd_kafka_metadata_topic *topics;
              +  int32_t orig_broker_id;
              +  char *orig_broker_name;
              +} rd_kafka_metadata_t;
              +rd_kafka_resp_err_t rd_kafka_metadata(rd_kafka_t *rk, int all_topics, rd_kafka_topic_t *only_rkt, const struct rd_kafka_metadata **metadatap, int timeout_ms);
              +void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata);
              +struct rd_kafka_group_member_info {
              +  char *member_id;
              +  char *client_id;
              +  char *client_host;
              +  void *member_metadata;
              +  int member_metadata_size;
              +  void *member_assignment;
              +  int member_assignment_size;
              +};
              +struct rd_kafka_group_info {
              +  struct rd_kafka_metadata_broker broker;
              +  char *group;
              +  rd_kafka_resp_err_t err;
              +  char *state;
              +  char *protocol_type;
              +  char *protocol;
              +  struct rd_kafka_group_member_info *members;
              +  int member_cnt;
              +};
              +struct rd_kafka_group_list {
              +  struct rd_kafka_group_info *groups;
              +  int group_cnt;
              +};
              +rd_kafka_resp_err_t rd_kafka_list_groups(rd_kafka_t *rk, const char *group, const struct rd_kafka_group_list **grplistp, int timeout_ms);
              +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist);
              +int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist);
              +void rd_kafka_set_logger(rd_kafka_t *rk, void (*func)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_set_log_level(rd_kafka_t *rk, int level);
              +void rd_kafka_log_print(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +int rd_kafka_outq_len(rd_kafka_t *rk);
              +void rd_kafka_dump(FILE *fp, rd_kafka_t *rk);
              +int rd_kafka_thread_cnt(void);
              +typedef enum rd_kafka_thread_type_t {
              +  RD_KAFKA_THREAD_MAIN,
              +  RD_KAFKA_THREAD_BACKGROUND,
              +  RD_KAFKA_THREAD_BROKER,
              +} rd_kafka_thread_type_t;
              +int rd_kafka_wait_destroyed(int timeout_ms);
              +int rd_kafka_unittest(void);
              +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk);
              +typedef int rd_kafka_event_type_t;
              +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev);
              +void rd_kafka_event_destroy(rd_kafka_event_t *rkev);
              +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev);
              +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, const rd_kafka_message_t **rkmessages, size_t size);
              +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev);
              +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev);
              +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev);
              +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev);
              +int rd_kafka_event_log(rd_kafka_event_t *rkev, const char **fac, const char **str, int *level);
              +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_list_t *rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_t *rd_kafka_event_topic_partition(rd_kafka_event_t *rkev);
              +typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t;
              +const rd_kafka_CreateTopics_result_t *rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteTopics_result_t *rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreatePartitions_result_t *rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConfigs_result_t *rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConfigs_result_t *rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev);
              +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms);
              +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms);
              +typedef rd_kafka_resp_err_t (rd_kafka_plugin_f_conf_init_t)(rd_kafka_conf_t *conf, void **plug_opaquep, char *errstr, size_t errstr_size);
              +typedef rd_kafka_conf_res_t (rd_kafka_interceptor_f_on_conf_set_t)(rd_kafka_conf_t *conf, const char *name, const char *val, char *errstr, size_t errstr_size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_dup_t)(rd_kafka_conf_t *new_conf, const rd_kafka_conf_t *old_conf, size_t filter_cnt, const char **filter, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_destroy_t)(void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_new_t)(rd_kafka_t *rk, const rd_kafka_conf_t *conf, void *ic_opaque, char *errstr, size_t errstr_size);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_send_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_acknowledgement_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_consume_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_commit_t)(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_request_sent_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_start_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_exit_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_new_t *on_new, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_destroy_t *on_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_send_t *on_send, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_consume_t *on_consume, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_commit_t *on_commit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres);
              +typedef enum rd_kafka_admin_op_t {
              +  RD_KAFKA_ADMIN_OP_ANY = 0,
              +  RD_KAFKA_ADMIN_OP_CREATETOPICS,
              +  RD_KAFKA_ADMIN_OP_DELETETOPICS,
              +  RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONFIGS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS,
              +  RD_KAFKA_ADMIN_OP__CNT,
              +} rd_kafka_admin_op_t;
              +typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t;
              +rd_kafka_AdminOptions_t *rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
              +void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, int true_or_false, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, int32_t broker_id, char *errstr, size_t errstr_size);
              +void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, void *ev_opaque);
              +typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t;
              +rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, int num_partitions, int replication_factor, char *errstr, size_t errstr_size);
              +void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic);
              +void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, int32_t partition, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, const char *name, const char *value);
              +void rd_kafka_CreateTopics(rd_kafka_t *rk, rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(const rd_kafka_CreateTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
              +rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic);
              +void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic);
              +void rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt);
              +void rd_kafka_DeleteTopics(rd_kafka_t *rk, rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(const rd_kafka_DeleteTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t;
              +rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new(const char *topic, size_t new_total_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts);
              +void rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment(rd_kafka_NewPartitions_t *new_parts, int32_t new_partition_idx, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_CreatePartitions(rd_kafka_t *rk, rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreatePartitions_result_topics(const rd_kafka_CreatePartitions_result_t *result, size_t *cntp);
              +typedef enum rd_kafka_ConfigSource_t {
              +  RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
              +  RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4,
              +  RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5,
              +  RD_KAFKA_CONFIG_SOURCE__CNT,
              +} rd_kafka_ConfigSource_t;
              +const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource);
              +typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t;
              +const char *rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry);
              +const char *rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry);
              +rd_kafka_ConfigSource_t rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, size_t *cntp);
              +typedef enum rd_kafka_ResourceType_t {
              +  RD_KAFKA_RESOURCE_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_ANY = 1,
              +  RD_KAFKA_RESOURCE_TOPIC = 2,
              +  RD_KAFKA_RESOURCE_GROUP = 3,
              +  RD_KAFKA_RESOURCE_BROKER = 4,
              +  RD_KAFKA_RESOURCE__CNT,
              +} rd_kafka_ResourceType_t;
              +const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype);
              +typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
              +rd_kafka_ConfigResource_t *rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, const char *resname);
              +void rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config);
              +void rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, size_t config_cnt);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, const char *name, const char *value);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, size_t *cntp);
              +rd_kafka_ResourceType_t rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config);
              +void rd_kafka_AlterConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_AlterConfigs_result_resources(const rd_kafka_AlterConfigs_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_DescribeConfigs_result_resources(const rd_kafka_DescribeConfigs_result_t *result, size_t *cntp);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token(rd_kafka_t *rk, const char *token_value, int64_t md_lifetime_ms, const char *md_principal_name, const char **extensions, size_t extension_size, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk, const char *errstr);
              +typedef struct rd_kafka_mock_cluster_s rd_kafka_mock_cluster_t;
              +rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new(rd_kafka_t *rk, int broker_cnt);
              +void rd_kafka_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_t *rd_kafka_mock_cluster_handle(const rd_kafka_mock_cluster_t *mcluster);
              +const char *rd_kafka_mock_cluster_bootstraps(const rd_kafka_mock_cluster_t *mcluster);
              +void rd_kafka_mock_push_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, ...);
              +void rd_kafka_mock_topic_set_error(rd_kafka_mock_cluster_t *mcluster, const char *topic, rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_leader(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int64_t lo, int64_t hi);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rack(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, const char *rack);
              +'
              +
              +rdkafka.h, rdkafka_mock.h

              +
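As a rough, hedged sketch of how a cdef string like the one above can be consumed (this is not the library's own bootstrap code, and the shared-library file name below is an assumption), PHP's FFI extension accepts such declarations directly:

```php
<?php
// Minimal sketch: bind a shortened set of declarations against librdkafka
// via PHP FFI. The shared-library file name is platform dependent and assumed.
$cdef = <<<'CDEF'
int rd_kafka_version(void);
const char *rd_kafka_version_str(void);
CDEF;

$kafka = \FFI::cdef($cdef, 'librdkafka.so.1');

printf(
    "librdkafka %s (0x%08x)\n",
    \FFI::string($kafka->rd_kafka_version_str()),
    $kafka->rd_kafka_version()
);
```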

RD_KAFKA_VERSION

public RD_KAFKA_VERSION = 17039615

librdkafka version

Interpreted as hex MM.mm.rr.xx:

• MM = Major
• mm = minor
• rr = revision
• xx = pre-release id (0xff is the final release)

E.g.: 0x000801ff = 0.8.1
Remarks
This value should only be used at compile time; for runtime version checks, use rd_kafka_version().
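To make the hex layout concrete, here is a minimal PHP sketch (not part of the library) that decodes the documented value 17039615 into its components:

```php
<?php
// Decode the documented value 17039615 (0x010400ff) per the MM.mm.rr.xx layout.
$version = 17039615;

$major      = ($version >> 24) & 0xff;
$minor      = ($version >> 16) & 0xff;
$revision   = ($version >> 8)  & 0xff;
$preRelease = $version & 0xff; // 0xff means final release

printf(
    "librdkafka %d.%d.%d%s\n",
    $major,
    $minor,
    $revision,
    $preRelease === 0xff ? '' : sprintf('-pre%d', $preRelease)
);
// Prints: librdkafka 1.4.0
```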

RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE

public RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76

Unsupported compression type
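For illustration only (the constant value comes from the entry above; the helper function is hypothetical), a numeric error code returned by librdkafka could be matched against this constant like so:

```php
<?php
// Hypothetical helper: map a numeric librdkafka error code to a message,
// using the documented value for UNSUPPORTED_COMPRESSION_TYPE.
const RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76;

function describeRespErr(int $err): string
{
    return $err === RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE
        ? 'Unsupported compression type'
        : sprintf('librdkafka error code %d', $err);
}

echo describeRespErr(76), PHP_EOL; // Unsupported compression type
```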

RD_KAFKA_RESP_ERR_END_ALL

public RD_KAFKA_RESP_ERR_END_ALL = 83

enum rd_kafka_resp_err_t

RD_KAFKA_ADMIN_OP__CNT

public RD_KAFKA_ADMIN_OP__CNT = 6

Number of ops defined

RD_KAFKA_CDEF

              public RD_KAFKA_CDEF = 'typedef long int ssize_t;
              +typedef struct _IO_FILE FILE;
              +typedef long int mode_t;
              +typedef signed int int16_t;
              +typedef unsigned int uint16_t;
              +typedef signed int int32_t;
              +typedef signed long int int64_t;
              +int rd_kafka_version(void);
              +const char *rd_kafka_version_str(void);
              +typedef enum rd_kafka_type_t {
              +  RD_KAFKA_PRODUCER,
              +  RD_KAFKA_CONSUMER,
              +} rd_kafka_type_t;
              +typedef enum rd_kafka_timestamp_type_t {
              +  RD_KAFKA_TIMESTAMP_NOT_AVAILABLE,
              +  RD_KAFKA_TIMESTAMP_CREATE_TIME,
              +  RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME,
              +} rd_kafka_timestamp_type_t;
              +const char *rd_kafka_get_debug_contexts(void);
              +typedef struct rd_kafka_s rd_kafka_t;
              +typedef struct rd_kafka_topic_s rd_kafka_topic_t;
              +typedef struct rd_kafka_conf_s rd_kafka_conf_t;
              +typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t;
              +typedef struct rd_kafka_queue_s rd_kafka_queue_t;
              +typedef struct rd_kafka_op_s rd_kafka_event_t;
              +typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t;
              +typedef struct rd_kafka_consumer_group_metadata_s rd_kafka_consumer_group_metadata_t;
              +typedef struct rd_kafka_error_s {
              +  unsigned int code;
              +  char *errstr;
              +  unsigned char fatal;
              +  unsigned char retriable;
              +  unsigned char txn_requires_abort;
              +} rd_kafka_error_t;
              +typedef enum {
              +  RD_KAFKA_RESP_ERR__BEGIN = (- 200),
              +  RD_KAFKA_RESP_ERR__BAD_MSG = (- 199),
              +  RD_KAFKA_RESP_ERR__BAD_COMPRESSION = (- 198),
              +  RD_KAFKA_RESP_ERR__DESTROY = (- 197),
              +  RD_KAFKA_RESP_ERR__FAIL = (- 196),
              +  RD_KAFKA_RESP_ERR__TRANSPORT = (- 195),
              +  RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = (- 194),
              +  RD_KAFKA_RESP_ERR__RESOLVE = (- 193),
              +  RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = (- 192),
              +  RD_KAFKA_RESP_ERR__PARTITION_EOF = (- 191),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = (- 190),
              +  RD_KAFKA_RESP_ERR__FS = (- 189),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = (- 188),
              +  RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = (- 187),
              +  RD_KAFKA_RESP_ERR__INVALID_ARG = (- 186),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT = (- 185),
              +  RD_KAFKA_RESP_ERR__QUEUE_FULL = (- 184),
              +  RD_KAFKA_RESP_ERR__ISR_INSUFF = (- 183),
              +  RD_KAFKA_RESP_ERR__NODE_UPDATE = (- 182),
              +  RD_KAFKA_RESP_ERR__SSL = (- 181),
              +  RD_KAFKA_RESP_ERR__WAIT_COORD = (- 180),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = (- 179),
              +  RD_KAFKA_RESP_ERR__IN_PROGRESS = (- 178),
              +  RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = (- 177),
              +  RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = (- 176),
              +  RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = (- 175),
              +  RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = (- 174),
              +  RD_KAFKA_RESP_ERR__CONFLICT = (- 173),
              +  RD_KAFKA_RESP_ERR__STATE = (- 172),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = (- 171),
              +  RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = (- 170),
              +  RD_KAFKA_RESP_ERR__AUTHENTICATION = (- 169),
              +  RD_KAFKA_RESP_ERR__NO_OFFSET = (- 168),
              +  RD_KAFKA_RESP_ERR__OUTDATED = (- 167),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = (- 166),
              +  RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = (- 165),
              +  RD_KAFKA_RESP_ERR__WAIT_CACHE = (- 164),
              +  RD_KAFKA_RESP_ERR__INTR = (- 163),
              +  RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = (- 162),
              +  RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = (- 161),
              +  RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = (- 160),
              +  RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = (- 159),
              +  RD_KAFKA_RESP_ERR__PARTIAL = (- 158),
              +  RD_KAFKA_RESP_ERR__READ_ONLY = (- 157),
              +  RD_KAFKA_RESP_ERR__NOENT = (- 156),
              +  RD_KAFKA_RESP_ERR__UNDERFLOW = (- 155),
              +  RD_KAFKA_RESP_ERR__INVALID_TYPE = (- 154),
              +  RD_KAFKA_RESP_ERR__RETRY = (- 153),
              +  RD_KAFKA_RESP_ERR__PURGE_QUEUE = (- 152),
              +  RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = (- 151),
              +  RD_KAFKA_RESP_ERR__FATAL = (- 150),
              +  RD_KAFKA_RESP_ERR__INCONSISTENT = (- 149),
              +  RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = (- 148),
              +  RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = (- 147),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_BROKER = (- 146),
              +  RD_KAFKA_RESP_ERR__NOT_CONFIGURED = (- 145),
              +  RD_KAFKA_RESP_ERR__FENCED = (- 144),
              +  RD_KAFKA_RESP_ERR__APPLICATION = (- 143),
              +  RD_KAFKA_RESP_ERR__END = (- 100),
              +  RD_KAFKA_RESP_ERR_UNKNOWN = (- 1),
              +  RD_KAFKA_RESP_ERR_NO_ERROR = 0,
              +  RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG = 2,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4,
              +  RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5,
              +  RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6,
              +  RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7,
              +  RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8,
              +  RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9,
              +  RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10,
              +  RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11,
              +  RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12,
              +  RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS = 14,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE = 15,
              +  RD_KAFKA_RESP_ERR_NOT_COORDINATOR = 16,
              +  RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17,
              +  RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
              +  RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25,
              +  RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26,
              +  RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27,
              +  RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
              +  RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29,
              +  RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30,
              +  RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
              +  RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35,
              +  RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36,
              +  RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39,
              +  RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40,
              +  RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
              +  RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44,
              +  RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47,
              +  RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49,
              +  RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50,
              +  RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51,
              +  RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52,
              +  RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
              +  RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54,
              +  RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55,
              +  RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56,
              +  RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57,
              +  RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59,
              +  RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66,
              +  RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67,
              +  RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68,
              +  RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69,
              +  RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70,
              +  RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71,
              +  RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72,
              +  RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73,
              +  RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76,
              +  RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77,
              +  RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78,
              +  RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79,
              +  RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80,
              +  RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81,
              +  RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID = 82,
              +  RD_KAFKA_RESP_ERR_END_ALL,
              +} rd_kafka_resp_err_t;
              +struct rd_kafka_err_desc {
              +  rd_kafka_resp_err_t code;
              +  char *name;
              +  char *desc;
              +};
              +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, size_t *cntp);
              +const char *rd_kafka_err2str(rd_kafka_resp_err_t err);
              +const char *rd_kafka_err2name(rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_last_error(void);
              +rd_kafka_resp_err_t rd_kafka_errno2err(int errnox);
              +int rd_kafka_errno(void);
              +rd_kafka_resp_err_t rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason);
              +rd_kafka_resp_err_t rd_kafka_error_code(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_name(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_string(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_fatal(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_retriable(const rd_kafka_error_t *error);
              +int rd_kafka_error_txn_requires_abort(const rd_kafka_error_t *error);
              +void rd_kafka_error_destroy(rd_kafka_error_t *error);
              +rd_kafka_error_t *rd_kafka_error_new(rd_kafka_resp_err_t code, const char *fmt, ...);
              +typedef struct rd_kafka_topic_partition_s {
              +  char *topic;
              +  int32_t partition;
              +  int64_t offset;
              +  void *metadata;
              +  size_t metadata_size;
              +  void *opaque;
              +  rd_kafka_resp_err_t err;
              +  void *_private;
              +} rd_kafka_topic_partition_t;
              +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar);
              +typedef struct rd_kafka_topic_partition_list_s {
              +  int cnt;
              +  int size;
              +  rd_kafka_topic_partition_t *elems;
              +} rd_kafka_topic_partition_list_t;
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size);
              +void rd_kafka_topic_partition_list_destroy(rd_kafka_topic_partition_list_t *rkparlist);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_add_range(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t start, int32_t stop);
              +int rd_kafka_topic_partition_list_del(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +int rd_kafka_topic_partition_list_del_by_idx(rd_kafka_topic_partition_list_t *rktparlist, int idx);
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src);
              +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition, int64_t offset);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_sort(rd_kafka_topic_partition_list_t *rktparlist, int (*cmp)(const void *a, const void *b, void *cmp_opaque), void *cmp_opaque);
              +typedef enum rd_kafka_vtype_t {
              +  RD_KAFKA_VTYPE_END,
              +  RD_KAFKA_VTYPE_TOPIC,
              +  RD_KAFKA_VTYPE_RKT,
              +  RD_KAFKA_VTYPE_PARTITION,
              +  RD_KAFKA_VTYPE_VALUE,
              +  RD_KAFKA_VTYPE_KEY,
              +  RD_KAFKA_VTYPE_OPAQUE,
              +  RD_KAFKA_VTYPE_MSGFLAGS,
              +  RD_KAFKA_VTYPE_TIMESTAMP,
              +  RD_KAFKA_VTYPE_HEADER,
              +  RD_KAFKA_VTYPE_HEADERS,
              +} rd_kafka_vtype_t;
              +typedef struct rd_kafka_headers_s rd_kafka_headers_t;
              +rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count);
              +void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs);
              +rd_kafka_headers_t *rd_kafka_headers_copy(const rd_kafka_headers_t *src);
              +rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, const char *name, ssize_t name_size, const void *value, ssize_t value_size);
              +rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, const char *name);
              +rd_kafka_resp_err_t rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get(const rd_kafka_headers_t *hdrs, size_t idx, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, size_t idx, const char **namep, const void **valuep, size_t *sizep);
              +typedef struct rd_kafka_message_s {
              +  rd_kafka_resp_err_t err;
              +  rd_kafka_topic_t *rkt;
              +  int32_t partition;
              +  void *payload;
              +  size_t len;
              +  void *key;
              +  size_t key_len;
              +  int64_t offset;
              +  void *_private;
              +} rd_kafka_message_t;
              +void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);
              +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, rd_kafka_timestamp_type_t *tstype);
              +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage);
              +rd_kafka_resp_err_t rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +rd_kafka_resp_err_t rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t *hdrs);
              +size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs);
              +typedef enum {
              +  RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0,
              +  RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1,
              +  RD_KAFKA_MSG_STATUS_PERSISTED = 2,
              +} rd_kafka_msg_status_t;
              +rd_kafka_msg_status_t rd_kafka_message_status(const rd_kafka_message_t *rkmessage);
              +typedef enum {
              +  RD_KAFKA_CONF_UNKNOWN = (- 2),
              +  RD_KAFKA_CONF_INVALID = (- 1),
              +  RD_KAFKA_CONF_OK = 0,
              +} rd_kafka_conf_res_t;
              +rd_kafka_conf_t *rd_kafka_conf_new(void);
              +void rd_kafka_conf_destroy(rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, size_t filter_cnt, const char **filter);
              +const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk);
              +rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events);
              +void rd_kafka_conf_set_background_event_cb(rd_kafka_conf_t *conf, void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque));
              +void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, void (*dr_cb)(rd_kafka_t *rk, void *payload, size_t len, rd_kafka_resp_err_t err, void *opaque, void *msg_opaque));
              +void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, void (*dr_msg_cb)(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_consume_cb(rd_kafka_conf_t *conf, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_rebalance_cb(rd_kafka_conf_t *conf, void (*rebalance_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque));
              +void rd_kafka_conf_set_offset_commit_cb(rd_kafka_conf_t *conf, void (*offset_commit_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque));
              +void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, void (*error_cb)(rd_kafka_t *rk, int err, const char *reason, void *opaque));
              +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, void (*throttle_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque));
              +void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, void (*log_cb)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque));
              +void rd_kafka_conf_set_oauthbearer_token_refresh_cb(rd_kafka_conf_t *conf, void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, const char *oauthbearer_config, void *opaque));
              +void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, int (*socket_cb)(int domain, int type, int protocol, void *opaque));
              +void rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, int (*connect_cb)(int sockfd, const struct sockaddr *addr, int addrlen, const char *id, void *opaque));
              +void rd_kafka_conf_set_closesocket_cb(rd_kafka_conf_t *conf, int (*closesocket_cb)(int sockfd, void *opaque));
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb(rd_kafka_conf_t *conf, int (*ssl_cert_verify_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int *x509_error, int depth, const char *buf, size_t size, char *errstr, size_t errstr_size, void *opaque));
              +typedef enum rd_kafka_cert_type_t {
              +  RD_KAFKA_CERT_PUBLIC_KEY,
              +  RD_KAFKA_CERT_PRIVATE_KEY,
              +  RD_KAFKA_CERT_CA,
              +  RD_KAFKA_CERT__CNT,
              +} rd_kafka_cert_type_t;
              +typedef enum rd_kafka_cert_enc_t {
              +  RD_KAFKA_CERT_ENC_PKCS12,
              +  RD_KAFKA_CERT_ENC_DER,
              +  RD_KAFKA_CERT_ENC_PEM,
              +  RD_KAFKA_CERT_ENC__CNT,
              +} rd_kafka_cert_enc_t;
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, rd_kafka_cert_type_t cert_type, rd_kafka_cert_enc_t cert_enc, const void *buffer, size_t size, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque);
              +void *rd_kafka_opaque(const rd_kafka_t *rk);
              +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, rd_kafka_topic_conf_t *tconf);
              +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp);
              +const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, size_t *cntp);
              +void rd_kafka_conf_dump_free(const char **arr, size_t cnt);
              +void rd_kafka_conf_properties_show(FILE *fp);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf);
              +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk);
              +void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *rkt_opaque);
              +void rd_kafka_topic_conf_set_partitioner_cb(rd_kafka_topic_conf_t *topic_conf, int32_t (*partitioner)(const rd_kafka_topic_t *rkt, const void *keydata, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque));
              +void rd_kafka_topic_conf_set_msg_order_cmp(rd_kafka_topic_conf_t *topic_conf, int (*msg_order_cmp)(const rd_kafka_message_t *a, const rd_kafka_message_t *b));
              +int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, int32_t partition);
              +int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, char *errstr, size_t errstr_size);
              +void rd_kafka_destroy(rd_kafka_t *rk);
              +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags);
              +const char *rd_kafka_name(const rd_kafka_t *rk);
              +rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk);
              +char *rd_kafka_memberid(const rd_kafka_t *rk);
              +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms);
              +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, rd_kafka_topic_conf_t *conf);
              +void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt);
              +const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt);
              +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt);
              +int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms);
              +void rd_kafka_yield(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_pause_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_resume_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high);
              +rd_kafka_resp_err_t rd_kafka_offsets_for_times(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets, int timeout_ms);
              +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr);
              +rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk);
              +void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu);
              +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, const char *topic, int32_t partition);
              +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk);
              +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst);
              +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu);
              +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, int fd, const void *payload, size_t size);
              +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, void (*event_cb)(rd_kafka_t *rk, void *qev_opaque), void *qev_opaque);
              +int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
              +rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, int timeout_ms);
              +rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms);
              +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, int timeout_ms);
              +ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +rd_kafka_resp_err_t rd_kafka_offsets_store(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets);
              +rd_kafka_resp_err_t rd_kafka_subscribe(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *topics);
              +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics);
              +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_assignment(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **partitions);
              +rd_kafka_resp_err_t rd_kafka_commit(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_message(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_queue(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_queue_t *rkqu, void (*cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_committed(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata(rd_kafka_t *rk);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new(const char *group_id);
              +void rd_kafka_consumer_group_metadata_destroy(rd_kafka_consumer_group_metadata_t *);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_write(const rd_kafka_consumer_group_metadata_t *cgmd, void **bufferp, size_t *sizep);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_read(rd_kafka_consumer_group_metadata_t **cgmdp, const void *buffer, size_t size);
              +int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, void *payload, size_t len, const void *key, size_t keylen, void *msg_opaque);
              +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...);
              +int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, rd_kafka_message_t *rkmessages, int message_cnt);
              +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags);
              +typedef struct rd_kafka_metadata_broker {
              +  int32_t id;
              +  char *host;
              +  int port;
              +} rd_kafka_metadata_broker_t;
              +typedef struct rd_kafka_metadata_partition {
              +  int32_t id;
              +  rd_kafka_resp_err_t err;
              +  int32_t leader;
              +  int replica_cnt;
              +  int32_t *replicas;
              +  int isr_cnt;
              +  int32_t *isrs;
              +} rd_kafka_metadata_partition_t;
              +typedef struct rd_kafka_metadata_topic {
              +  char *topic;
              +  int partition_cnt;
              +  struct rd_kafka_metadata_partition *partitions;
              +  rd_kafka_resp_err_t err;
              +} rd_kafka_metadata_topic_t;
              +typedef struct rd_kafka_metadata {
              +  int broker_cnt;
              +  struct rd_kafka_metadata_broker *brokers;
              +  int topic_cnt;
              +  struct rd_kafka_metadata_topic *topics;
              +  int32_t orig_broker_id;
              +  char *orig_broker_name;
              +} rd_kafka_metadata_t;
              +rd_kafka_resp_err_t rd_kafka_metadata(rd_kafka_t *rk, int all_topics, rd_kafka_topic_t *only_rkt, const struct rd_kafka_metadata **metadatap, int timeout_ms);
              +void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata);
              +struct rd_kafka_group_member_info {
              +  char *member_id;
              +  char *client_id;
              +  char *client_host;
              +  void *member_metadata;
              +  int member_metadata_size;
              +  void *member_assignment;
              +  int member_assignment_size;
              +};
              +struct rd_kafka_group_info {
              +  struct rd_kafka_metadata_broker broker;
              +  char *group;
              +  rd_kafka_resp_err_t err;
              +  char *state;
              +  char *protocol_type;
              +  char *protocol;
              +  struct rd_kafka_group_member_info *members;
              +  int member_cnt;
              +};
              +struct rd_kafka_group_list {
              +  struct rd_kafka_group_info *groups;
              +  int group_cnt;
              +};
              +rd_kafka_resp_err_t rd_kafka_list_groups(rd_kafka_t *rk, const char *group, const struct rd_kafka_group_list **grplistp, int timeout_ms);
              +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist);
              +int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist);
              +void rd_kafka_set_logger(rd_kafka_t *rk, void (*func)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_set_log_level(rd_kafka_t *rk, int level);
              +void rd_kafka_log_print(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +int rd_kafka_outq_len(rd_kafka_t *rk);
              +void rd_kafka_dump(FILE *fp, rd_kafka_t *rk);
              +int rd_kafka_thread_cnt(void);
              +typedef enum rd_kafka_thread_type_t {
              +  RD_KAFKA_THREAD_MAIN,
              +  RD_KAFKA_THREAD_BACKGROUND,
              +  RD_KAFKA_THREAD_BROKER,
              +} rd_kafka_thread_type_t;
              +int rd_kafka_wait_destroyed(int timeout_ms);
              +int rd_kafka_unittest(void);
              +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk);
              +typedef int rd_kafka_event_type_t;
              +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev);
              +void rd_kafka_event_destroy(rd_kafka_event_t *rkev);
              +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev);
              +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, const rd_kafka_message_t **rkmessages, size_t size);
              +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev);
              +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev);
              +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev);
              +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev);
              +int rd_kafka_event_log(rd_kafka_event_t *rkev, const char **fac, const char **str, int *level);
              +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_list_t *rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_t *rd_kafka_event_topic_partition(rd_kafka_event_t *rkev);
              +typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t;
              +const rd_kafka_CreateTopics_result_t *rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteTopics_result_t *rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreatePartitions_result_t *rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConfigs_result_t *rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConfigs_result_t *rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev);
              +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms);
              +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms);
              +typedef rd_kafka_resp_err_t (rd_kafka_plugin_f_conf_init_t)(rd_kafka_conf_t *conf, void **plug_opaquep, char *errstr, size_t errstr_size);
              +typedef rd_kafka_conf_res_t (rd_kafka_interceptor_f_on_conf_set_t)(rd_kafka_conf_t *conf, const char *name, const char *val, char *errstr, size_t errstr_size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_dup_t)(rd_kafka_conf_t *new_conf, const rd_kafka_conf_t *old_conf, size_t filter_cnt, const char **filter, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_destroy_t)(void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_new_t)(rd_kafka_t *rk, const rd_kafka_conf_t *conf, void *ic_opaque, char *errstr, size_t errstr_size);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_send_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_acknowledgement_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_consume_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_commit_t)(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_request_sent_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_start_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_exit_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_new_t *on_new, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_destroy_t *on_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_send_t *on_send, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_consume_t *on_consume, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_commit_t *on_commit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres);
              +typedef enum rd_kafka_admin_op_t {
              +  RD_KAFKA_ADMIN_OP_ANY = 0,
              +  RD_KAFKA_ADMIN_OP_CREATETOPICS,
              +  RD_KAFKA_ADMIN_OP_DELETETOPICS,
              +  RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONFIGS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS,
              +  RD_KAFKA_ADMIN_OP__CNT,
              +} rd_kafka_admin_op_t;
              +typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t;
              +rd_kafka_AdminOptions_t *rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
              +void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, int true_or_false, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, int32_t broker_id, char *errstr, size_t errstr_size);
              +void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, void *ev_opaque);
              +typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t;
              +rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, int num_partitions, int replication_factor, char *errstr, size_t errstr_size);
              +void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic);
              +void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, int32_t partition, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, const char *name, const char *value);
              +void rd_kafka_CreateTopics(rd_kafka_t *rk, rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(const rd_kafka_CreateTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
              +rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic);
              +void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic);
              +void rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt);
              +void rd_kafka_DeleteTopics(rd_kafka_t *rk, rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(const rd_kafka_DeleteTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t;
              +rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new(const char *topic, size_t new_total_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts);
              +void rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment(rd_kafka_NewPartitions_t *new_parts, int32_t new_partition_idx, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_CreatePartitions(rd_kafka_t *rk, rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreatePartitions_result_topics(const rd_kafka_CreatePartitions_result_t *result, size_t *cntp);
              +typedef enum rd_kafka_ConfigSource_t {
              +  RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
              +  RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4,
              +  RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5,
              +  RD_KAFKA_CONFIG_SOURCE__CNT,
              +} rd_kafka_ConfigSource_t;
              +const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource);
              +typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t;
              +const char *rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry);
              +const char *rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry);
              +rd_kafka_ConfigSource_t rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, size_t *cntp);
              +typedef enum rd_kafka_ResourceType_t {
              +  RD_KAFKA_RESOURCE_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_ANY = 1,
              +  RD_KAFKA_RESOURCE_TOPIC = 2,
              +  RD_KAFKA_RESOURCE_GROUP = 3,
              +  RD_KAFKA_RESOURCE_BROKER = 4,
              +  RD_KAFKA_RESOURCE__CNT,
              +} rd_kafka_ResourceType_t;
              +const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype);
              +typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
              +rd_kafka_ConfigResource_t *rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, const char *resname);
              +void rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config);
              +void rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, size_t config_cnt);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, const char *name, const char *value);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, size_t *cntp);
              +rd_kafka_ResourceType_t rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config);
              +void rd_kafka_AlterConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_AlterConfigs_result_resources(const rd_kafka_AlterConfigs_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_DescribeConfigs_result_resources(const rd_kafka_DescribeConfigs_result_t *result, size_t *cntp);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token(rd_kafka_t *rk, const char *token_value, int64_t md_lifetime_ms, const char *md_principal_name, const char **extensions, size_t extension_size, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk, const char *errstr);
              +rd_kafka_error_t *rd_kafka_init_transactions(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_send_offsets_to_transaction(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, const rd_kafka_consumer_group_metadata_t *cgmetadata, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_commit_transaction(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_abort_transaction(rd_kafka_t *rk, int timeout_ms);
              +typedef struct rd_kafka_mock_cluster_s rd_kafka_mock_cluster_t;
              +rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new(rd_kafka_t *rk, int broker_cnt);
              +void rd_kafka_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_t *rd_kafka_mock_cluster_handle(const rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_mock_cluster_t *rd_kafka_handle_mock_cluster(const rd_kafka_t *rk);
              +const char *rd_kafka_mock_cluster_bootstraps(const rd_kafka_mock_cluster_t *mcluster);
              +void rd_kafka_mock_push_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, ...);
              +void rd_kafka_mock_topic_set_error(rd_kafka_mock_cluster_t *mcluster, const char *topic, rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_mock_topic_create(rd_kafka_mock_cluster_t *mcluster, const char *topic, int partition_cnt, int replication_factor);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_leader(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int64_t lo, int64_t hi);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_down(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_up(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rack(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, const char *rack);
              +rd_kafka_resp_err_t rd_kafka_mock_coordinator_set(rd_kafka_mock_cluster_t *mcluster, const char *key_type, const char *key, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_set_apiversion(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, int16_t MinVersion, int16_t MaxVersion);
              +'
              +
              +rdkafka.h, rdkafka_mock.h
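
The block above is the C definition string generated from rdkafka.h and rdkafka_mock.h that the client feeds to PHP's FFI layer. As a hedged illustration only (the shared-library name is an assumption, and the client normally performs this binding for you), such a definition string can be bound like this:

    <?php
    // Sketch: bind the declarations to librdkafka via PHP FFI.
    // Assumptions: RD_KAFKA_CDEF is reachable as documented on this page (adjust the
    // reference if it is exposed as a class constant) and the shared library is
    // installed as "librdkafka.so.1" -- the actual file name differs per platform.
    $cdef  = RD_KAFKA_CDEF;
    $kafka = \FFI::cdef($cdef, 'librdkafka.so.1');

    // Any function declared in the definition string can then be called directly,
    // e.g. the runtime version helper declared above:
    printf("librdkafka version: 0x%08x\n", $kafka->rd_kafka_version());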

              +

              RD_KAFKA_VERSION

              +
              public RD_KAFKA_VERSION = 17040127
              +
              +

              librdkafka version

              +

              Interpreted as hex MM.mm.rr.xx:

• MM = Major
• mm = minor
• rr = revision
• xx = pre-release id (0xff is the final release)

              E.g.: 0x000801ff = 0.8.1

              +
              Remarks
This value should only be used at compile time; for runtime checks of the version, use rd_kafka_version().
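
For illustration, a plain-PHP sketch (no library calls; the variable names and printed format are ours) that decodes the MM.mm.rr.xx layout described above:

    <?php
    // Decode the hex-encoded version integer into its MM.mm.rr.xx components.
    $version    = 17040127;                 // value of RD_KAFKA_VERSION, i.e. 0x010402ff
    $major      = ($version >> 24) & 0xff;  // MM
    $minor      = ($version >> 16) & 0xff;  // mm
    $revision   = ($version >> 8)  & 0xff;  // rr
    $prerelease = $version & 0xff;          // xx; 0xff marks a final release

    printf("librdkafka %d.%d.%d%s\n", $major, $minor, $revision,
        $prerelease === 0xff ? '' : sprintf(' (pre-release %d)', $prerelease));
    // Prints "librdkafka 1.4.2" for the value documented above.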

              RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE

              +
              public RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76
              +
              +

              Unsupported compression type


              RD_KAFKA_RESP_ERR_END_ALL

              +

              public RD_KAFKA_RESP_ERR_END_ALL = 83
              +
              +enum rd_kafka_resp_err_t
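
RD_KAFKA_RESP_ERR_END_ALL is the closing sentinel of the rd_kafka_resp_err_t enum: broker-reported codes run from RD_KAFKA_RESP_ERR_UNKNOWN (-1) up to, but not including, this value, while client-internal codes use the negative __BEGIN..__END range shown in RD_KAFKA_CDEF below. A hedged sketch (the helper is hypothetical, not part of the client):

    <?php
    // Hypothetical helper: distinguish broker-reported codes from client-internal ones.
    function isBrokerReportedCode(int $err): bool
    {
        // -1 = UNKNOWN, 0 = NO_ERROR, 1..RD_KAFKA_RESP_ERR_END_ALL-1 = broker protocol errors.
        return $err >= -1 && $err < RD_KAFKA_RESP_ERR_END_ALL;
    }

    var_dump(isBrokerReportedCode(76));    // true  (RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE)
    var_dump(isBrokerReportedCode(-185));  // false (RD_KAFKA_RESP_ERR__TIMED_OUT, client-internal)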

              +

              RD_KAFKA_ADMIN_OP__CNT

              +
              public RD_KAFKA_ADMIN_OP__CNT = 6
              +
              +

              Number of ops defined


              RD_KAFKA_CDEF

              +

              public RD_KAFKA_CDEF = 'typedef long int ssize_t;
              +typedef struct _IO_FILE FILE;
              +typedef long int mode_t;
              +typedef signed int int16_t;
              +typedef unsigned int uint16_t;
              +typedef signed int int32_t;
              +typedef signed long int int64_t;
              +int rd_kafka_version(void);
              +const char *rd_kafka_version_str(void);
              +typedef enum rd_kafka_type_t {
              +  RD_KAFKA_PRODUCER,
              +  RD_KAFKA_CONSUMER,
              +} rd_kafka_type_t;
              +typedef enum rd_kafka_timestamp_type_t {
              +  RD_KAFKA_TIMESTAMP_NOT_AVAILABLE,
              +  RD_KAFKA_TIMESTAMP_CREATE_TIME,
              +  RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME,
              +} rd_kafka_timestamp_type_t;
              +const char *rd_kafka_get_debug_contexts(void);
              +typedef struct rd_kafka_s rd_kafka_t;
              +typedef struct rd_kafka_topic_s rd_kafka_topic_t;
              +typedef struct rd_kafka_conf_s rd_kafka_conf_t;
              +typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t;
              +typedef struct rd_kafka_queue_s rd_kafka_queue_t;
              +typedef struct rd_kafka_op_s rd_kafka_event_t;
              +typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t;
              +typedef struct rd_kafka_consumer_group_metadata_s rd_kafka_consumer_group_metadata_t;
              +typedef struct rd_kafka_error_s {
              +  unsigned int code;
              +  char *errstr;
              +  unsigned char fatal;
              +  unsigned char retriable;
              +  unsigned char txn_requires_abort;
              +} rd_kafka_error_t;
              +typedef enum {
              +  RD_KAFKA_RESP_ERR__BEGIN = (- 200),
              +  RD_KAFKA_RESP_ERR__BAD_MSG = (- 199),
              +  RD_KAFKA_RESP_ERR__BAD_COMPRESSION = (- 198),
              +  RD_KAFKA_RESP_ERR__DESTROY = (- 197),
              +  RD_KAFKA_RESP_ERR__FAIL = (- 196),
              +  RD_KAFKA_RESP_ERR__TRANSPORT = (- 195),
              +  RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = (- 194),
              +  RD_KAFKA_RESP_ERR__RESOLVE = (- 193),
              +  RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = (- 192),
              +  RD_KAFKA_RESP_ERR__PARTITION_EOF = (- 191),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = (- 190),
              +  RD_KAFKA_RESP_ERR__FS = (- 189),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = (- 188),
              +  RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = (- 187),
              +  RD_KAFKA_RESP_ERR__INVALID_ARG = (- 186),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT = (- 185),
              +  RD_KAFKA_RESP_ERR__QUEUE_FULL = (- 184),
              +  RD_KAFKA_RESP_ERR__ISR_INSUFF = (- 183),
              +  RD_KAFKA_RESP_ERR__NODE_UPDATE = (- 182),
              +  RD_KAFKA_RESP_ERR__SSL = (- 181),
              +  RD_KAFKA_RESP_ERR__WAIT_COORD = (- 180),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = (- 179),
              +  RD_KAFKA_RESP_ERR__IN_PROGRESS = (- 178),
              +  RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = (- 177),
              +  RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = (- 176),
              +  RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = (- 175),
              +  RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = (- 174),
              +  RD_KAFKA_RESP_ERR__CONFLICT = (- 173),
              +  RD_KAFKA_RESP_ERR__STATE = (- 172),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = (- 171),
              +  RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = (- 170),
              +  RD_KAFKA_RESP_ERR__AUTHENTICATION = (- 169),
              +  RD_KAFKA_RESP_ERR__NO_OFFSET = (- 168),
              +  RD_KAFKA_RESP_ERR__OUTDATED = (- 167),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = (- 166),
              +  RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = (- 165),
              +  RD_KAFKA_RESP_ERR__WAIT_CACHE = (- 164),
              +  RD_KAFKA_RESP_ERR__INTR = (- 163),
              +  RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = (- 162),
              +  RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = (- 161),
              +  RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = (- 160),
              +  RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = (- 159),
              +  RD_KAFKA_RESP_ERR__PARTIAL = (- 158),
              +  RD_KAFKA_RESP_ERR__READ_ONLY = (- 157),
              +  RD_KAFKA_RESP_ERR__NOENT = (- 156),
              +  RD_KAFKA_RESP_ERR__UNDERFLOW = (- 155),
              +  RD_KAFKA_RESP_ERR__INVALID_TYPE = (- 154),
              +  RD_KAFKA_RESP_ERR__RETRY = (- 153),
              +  RD_KAFKA_RESP_ERR__PURGE_QUEUE = (- 152),
              +  RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = (- 151),
              +  RD_KAFKA_RESP_ERR__FATAL = (- 150),
              +  RD_KAFKA_RESP_ERR__INCONSISTENT = (- 149),
              +  RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = (- 148),
              +  RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = (- 147),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_BROKER = (- 146),
              +  RD_KAFKA_RESP_ERR__NOT_CONFIGURED = (- 145),
              +  RD_KAFKA_RESP_ERR__FENCED = (- 144),
              +  RD_KAFKA_RESP_ERR__APPLICATION = (- 143),
              +  RD_KAFKA_RESP_ERR__END = (- 100),
              +  RD_KAFKA_RESP_ERR_UNKNOWN = (- 1),
              +  RD_KAFKA_RESP_ERR_NO_ERROR = 0,
              +  RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG = 2,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4,
              +  RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5,
              +  RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6,
              +  RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7,
              +  RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8,
              +  RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9,
              +  RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10,
              +  RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11,
              +  RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12,
              +  RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS = 14,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE = 15,
              +  RD_KAFKA_RESP_ERR_NOT_COORDINATOR = 16,
              +  RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17,
              +  RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
              +  RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25,
              +  RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26,
              +  RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27,
              +  RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
              +  RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29,
              +  RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30,
              +  RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
              +  RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35,
              +  RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36,
              +  RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39,
              +  RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40,
              +  RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
              +  RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44,
              +  RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47,
              +  RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49,
              +  RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50,
              +  RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51,
              +  RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52,
              +  RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
              +  RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54,
              +  RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55,
              +  RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56,
              +  RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57,
              +  RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59,
              +  RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66,
              +  RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67,
              +  RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68,
              +  RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69,
              +  RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70,
              +  RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71,
              +  RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72,
              +  RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73,
              +  RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76,
              +  RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77,
              +  RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78,
              +  RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79,
              +  RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80,
              +  RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81,
              +  RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID = 82,
              +  RD_KAFKA_RESP_ERR_END_ALL,
              +} rd_kafka_resp_err_t;
              +struct rd_kafka_err_desc {
              +  rd_kafka_resp_err_t code;
              +  char *name;
              +  char *desc;
              +};
              +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, size_t *cntp);
              +const char *rd_kafka_err2str(rd_kafka_resp_err_t err);
              +const char *rd_kafka_err2name(rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_last_error(void);
              +rd_kafka_resp_err_t rd_kafka_errno2err(int errnox);
              +int rd_kafka_errno(void);
              +rd_kafka_resp_err_t rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason);
              +rd_kafka_resp_err_t rd_kafka_error_code(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_name(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_string(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_fatal(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_retriable(const rd_kafka_error_t *error);
              +int rd_kafka_error_txn_requires_abort(const rd_kafka_error_t *error);
              +void rd_kafka_error_destroy(rd_kafka_error_t *error);
              +rd_kafka_error_t *rd_kafka_error_new(rd_kafka_resp_err_t code, const char *fmt, ...);
              +typedef struct rd_kafka_topic_partition_s {
              +  char *topic;
              +  int32_t partition;
              +  int64_t offset;
              +  void *metadata;
              +  size_t metadata_size;
              +  void *opaque;
              +  rd_kafka_resp_err_t err;
              +  void *_private;
              +} rd_kafka_topic_partition_t;
              +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar);
              +typedef struct rd_kafka_topic_partition_list_s {
              +  int cnt;
              +  int size;
              +  rd_kafka_topic_partition_t *elems;
              +} rd_kafka_topic_partition_list_t;
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size);
              +void rd_kafka_topic_partition_list_destroy(rd_kafka_topic_partition_list_t *rkparlist);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_add_range(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t start, int32_t stop);
              +int rd_kafka_topic_partition_list_del(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +int rd_kafka_topic_partition_list_del_by_idx(rd_kafka_topic_partition_list_t *rktparlist, int idx);
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src);
              +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition, int64_t offset);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_sort(rd_kafka_topic_partition_list_t *rktparlist, int (*cmp)(const void *a, const void *b, void *cmp_opaque), void *cmp_opaque);
              +typedef enum rd_kafka_vtype_t {
              +  RD_KAFKA_VTYPE_END,
              +  RD_KAFKA_VTYPE_TOPIC,
              +  RD_KAFKA_VTYPE_RKT,
              +  RD_KAFKA_VTYPE_PARTITION,
              +  RD_KAFKA_VTYPE_VALUE,
              +  RD_KAFKA_VTYPE_KEY,
              +  RD_KAFKA_VTYPE_OPAQUE,
              +  RD_KAFKA_VTYPE_MSGFLAGS,
              +  RD_KAFKA_VTYPE_TIMESTAMP,
              +  RD_KAFKA_VTYPE_HEADER,
              +  RD_KAFKA_VTYPE_HEADERS,
              +} rd_kafka_vtype_t;
              +typedef struct rd_kafka_headers_s rd_kafka_headers_t;
              +rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count);
              +void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs);
              +rd_kafka_headers_t *rd_kafka_headers_copy(const rd_kafka_headers_t *src);
              +rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, const char *name, ssize_t name_size, const void *value, ssize_t value_size);
              +rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, const char *name);
              +rd_kafka_resp_err_t rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get(const rd_kafka_headers_t *hdrs, size_t idx, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, size_t idx, const char **namep, const void **valuep, size_t *sizep);
              +typedef struct rd_kafka_message_s {
              +  rd_kafka_resp_err_t err;
              +  rd_kafka_topic_t *rkt;
              +  int32_t partition;
              +  void *payload;
              +  size_t len;
              +  void *key;
              +  size_t key_len;
              +  int64_t offset;
              +  void *_private;
              +} rd_kafka_message_t;
              +void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);
              +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, rd_kafka_timestamp_type_t *tstype);
              +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage);
              +rd_kafka_resp_err_t rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +rd_kafka_resp_err_t rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t *hdrs);
              +size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs);
              +typedef enum {
              +  RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0,
              +  RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1,
              +  RD_KAFKA_MSG_STATUS_PERSISTED = 2,
              +} rd_kafka_msg_status_t;
              +rd_kafka_msg_status_t rd_kafka_message_status(const rd_kafka_message_t *rkmessage);
              +typedef enum {
              +  RD_KAFKA_CONF_UNKNOWN = (- 2),
              +  RD_KAFKA_CONF_INVALID = (- 1),
              +  RD_KAFKA_CONF_OK = 0,
              +} rd_kafka_conf_res_t;
              +rd_kafka_conf_t *rd_kafka_conf_new(void);
              +void rd_kafka_conf_destroy(rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, size_t filter_cnt, const char **filter);
              +const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk);
              +rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events);
              +void rd_kafka_conf_set_background_event_cb(rd_kafka_conf_t *conf, void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque));
              +void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, void (*dr_cb)(rd_kafka_t *rk, void *payload, size_t len, rd_kafka_resp_err_t err, void *opaque, void *msg_opaque));
              +void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, void (*dr_msg_cb)(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_consume_cb(rd_kafka_conf_t *conf, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_rebalance_cb(rd_kafka_conf_t *conf, void (*rebalance_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque));
              +void rd_kafka_conf_set_offset_commit_cb(rd_kafka_conf_t *conf, void (*offset_commit_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque));
              +void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, void (*error_cb)(rd_kafka_t *rk, int err, const char *reason, void *opaque));
              +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, void (*throttle_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque));
              +void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, void (*log_cb)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque));
              +void rd_kafka_conf_set_oauthbearer_token_refresh_cb(rd_kafka_conf_t *conf, void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, const char *oauthbearer_config, void *opaque));
              +void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, int (*socket_cb)(int domain, int type, int protocol, void *opaque));
              +void rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, int (*connect_cb)(int sockfd, const struct sockaddr *addr, int addrlen, const char *id, void *opaque));
              +void rd_kafka_conf_set_closesocket_cb(rd_kafka_conf_t *conf, int (*closesocket_cb)(int sockfd, void *opaque));
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb(rd_kafka_conf_t *conf, int (*ssl_cert_verify_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int *x509_error, int depth, const char *buf, size_t size, char *errstr, size_t errstr_size, void *opaque));
              +typedef enum rd_kafka_cert_type_t {
              +  RD_KAFKA_CERT_PUBLIC_KEY,
              +  RD_KAFKA_CERT_PRIVATE_KEY,
              +  RD_KAFKA_CERT_CA,
              +  RD_KAFKA_CERT__CNT,
              +} rd_kafka_cert_type_t;
              +typedef enum rd_kafka_cert_enc_t {
              +  RD_KAFKA_CERT_ENC_PKCS12,
              +  RD_KAFKA_CERT_ENC_DER,
              +  RD_KAFKA_CERT_ENC_PEM,
              +  RD_KAFKA_CERT_ENC__CNT,
              +} rd_kafka_cert_enc_t;
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, rd_kafka_cert_type_t cert_type, rd_kafka_cert_enc_t cert_enc, const void *buffer, size_t size, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque);
              +void *rd_kafka_opaque(const rd_kafka_t *rk);
              +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, rd_kafka_topic_conf_t *tconf);
              +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp);
              +const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, size_t *cntp);
              +void rd_kafka_conf_dump_free(const char **arr, size_t cnt);
              +void rd_kafka_conf_properties_show(FILE *fp);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf);
              +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk);
              +void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *rkt_opaque);
              +void rd_kafka_topic_conf_set_partitioner_cb(rd_kafka_topic_conf_t *topic_conf, int32_t (*partitioner)(const rd_kafka_topic_t *rkt, const void *keydata, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque));
              +void rd_kafka_topic_conf_set_msg_order_cmp(rd_kafka_topic_conf_t *topic_conf, int (*msg_order_cmp)(const rd_kafka_message_t *a, const rd_kafka_message_t *b));
              +int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, int32_t partition);
              +int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, char *errstr, size_t errstr_size);
              +void rd_kafka_destroy(rd_kafka_t *rk);
              +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags);
              +const char *rd_kafka_name(const rd_kafka_t *rk);
              +rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk);
              +char *rd_kafka_memberid(const rd_kafka_t *rk);
              +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms);
              +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, rd_kafka_topic_conf_t *conf);
              +void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt);
              +const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt);
              +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt);
              +int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms);
              +void rd_kafka_yield(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_pause_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_resume_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high);
              +rd_kafka_resp_err_t rd_kafka_offsets_for_times(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets, int timeout_ms);
              +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr);
              +rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk);
              +void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu);
              +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, const char *topic, int32_t partition);
              +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk);
              +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst);
              +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu);
              +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, int fd, const void *payload, size_t size);
              +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, void (*event_cb)(rd_kafka_t *rk, void *qev_opaque), void *qev_opaque);
              +int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
              +rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, int timeout_ms);
              +rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms);
              +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, int timeout_ms);
              +ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +rd_kafka_resp_err_t rd_kafka_offsets_store(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets);
              +rd_kafka_resp_err_t rd_kafka_subscribe(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *topics);
              +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics);
              +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_assignment(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **partitions);
              +rd_kafka_resp_err_t rd_kafka_commit(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_message(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_queue(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_queue_t *rkqu, void (*cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_committed(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata(rd_kafka_t *rk);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new(const char *group_id);
              +void rd_kafka_consumer_group_metadata_destroy(rd_kafka_consumer_group_metadata_t *);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_write(const rd_kafka_consumer_group_metadata_t *cgmd, void **bufferp, size_t *sizep);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_read(rd_kafka_consumer_group_metadata_t **cgmdp, const void *buffer, size_t size);
              +int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, void *payload, size_t len, const void *key, size_t keylen, void *msg_opaque);
              +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...);
              +int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, rd_kafka_message_t *rkmessages, int message_cnt);
              +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags);
              +typedef struct rd_kafka_metadata_broker {
              +  int32_t id;
              +  char *host;
              +  int port;
              +} rd_kafka_metadata_broker_t;
              +typedef struct rd_kafka_metadata_partition {
              +  int32_t id;
              +  rd_kafka_resp_err_t err;
              +  int32_t leader;
              +  int replica_cnt;
              +  int32_t *replicas;
              +  int isr_cnt;
              +  int32_t *isrs;
              +} rd_kafka_metadata_partition_t;
              +typedef struct rd_kafka_metadata_topic {
              +  char *topic;
              +  int partition_cnt;
              +  struct rd_kafka_metadata_partition *partitions;
              +  rd_kafka_resp_err_t err;
              +} rd_kafka_metadata_topic_t;
              +typedef struct rd_kafka_metadata {
              +  int broker_cnt;
              +  struct rd_kafka_metadata_broker *brokers;
              +  int topic_cnt;
              +  struct rd_kafka_metadata_topic *topics;
              +  int32_t orig_broker_id;
              +  char *orig_broker_name;
              +} rd_kafka_metadata_t;
              +rd_kafka_resp_err_t rd_kafka_metadata(rd_kafka_t *rk, int all_topics, rd_kafka_topic_t *only_rkt, const struct rd_kafka_metadata **metadatap, int timeout_ms);
              +void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata);
              +struct rd_kafka_group_member_info {
              +  char *member_id;
              +  char *client_id;
              +  char *client_host;
              +  void *member_metadata;
              +  int member_metadata_size;
              +  void *member_assignment;
              +  int member_assignment_size;
              +};
              +struct rd_kafka_group_info {
              +  struct rd_kafka_metadata_broker broker;
              +  char *group;
              +  rd_kafka_resp_err_t err;
              +  char *state;
              +  char *protocol_type;
              +  char *protocol;
              +  struct rd_kafka_group_member_info *members;
              +  int member_cnt;
              +};
              +struct rd_kafka_group_list {
              +  struct rd_kafka_group_info *groups;
              +  int group_cnt;
              +};
              +rd_kafka_resp_err_t rd_kafka_list_groups(rd_kafka_t *rk, const char *group, const struct rd_kafka_group_list **grplistp, int timeout_ms);
              +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist);
              +int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist);
              +void rd_kafka_set_logger(rd_kafka_t *rk, void (*func)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_set_log_level(rd_kafka_t *rk, int level);
              +void rd_kafka_log_print(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +int rd_kafka_outq_len(rd_kafka_t *rk);
              +void rd_kafka_dump(FILE *fp, rd_kafka_t *rk);
              +int rd_kafka_thread_cnt(void);
              +typedef enum rd_kafka_thread_type_t {
              +  RD_KAFKA_THREAD_MAIN,
              +  RD_KAFKA_THREAD_BACKGROUND,
              +  RD_KAFKA_THREAD_BROKER,
              +} rd_kafka_thread_type_t;
              +int rd_kafka_wait_destroyed(int timeout_ms);
              +int rd_kafka_unittest(void);
              +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk);
              +typedef int rd_kafka_event_type_t;
              +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev);
              +void rd_kafka_event_destroy(rd_kafka_event_t *rkev);
              +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev);
              +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, const rd_kafka_message_t **rkmessages, size_t size);
              +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev);
              +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev);
              +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev);
              +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev);
              +int rd_kafka_event_log(rd_kafka_event_t *rkev, const char **fac, const char **str, int *level);
              +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_list_t *rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_t *rd_kafka_event_topic_partition(rd_kafka_event_t *rkev);
              +typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t;
              +const rd_kafka_CreateTopics_result_t *rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteTopics_result_t *rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreatePartitions_result_t *rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConfigs_result_t *rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConfigs_result_t *rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev);
              +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms);
              +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms);
              +typedef rd_kafka_resp_err_t (rd_kafka_plugin_f_conf_init_t)(rd_kafka_conf_t *conf, void **plug_opaquep, char *errstr, size_t errstr_size);
              +typedef rd_kafka_conf_res_t (rd_kafka_interceptor_f_on_conf_set_t)(rd_kafka_conf_t *conf, const char *name, const char *val, char *errstr, size_t errstr_size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_dup_t)(rd_kafka_conf_t *new_conf, const rd_kafka_conf_t *old_conf, size_t filter_cnt, const char **filter, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_destroy_t)(void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_new_t)(rd_kafka_t *rk, const rd_kafka_conf_t *conf, void *ic_opaque, char *errstr, size_t errstr_size);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_send_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_acknowledgement_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_consume_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_commit_t)(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_request_sent_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_start_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_exit_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_new_t *on_new, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_destroy_t *on_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_send_t *on_send, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_consume_t *on_consume, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_commit_t *on_commit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres);
              +typedef enum rd_kafka_admin_op_t {
              +  RD_KAFKA_ADMIN_OP_ANY = 0,
              +  RD_KAFKA_ADMIN_OP_CREATETOPICS,
              +  RD_KAFKA_ADMIN_OP_DELETETOPICS,
              +  RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONFIGS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS,
              +  RD_KAFKA_ADMIN_OP__CNT,
              +} rd_kafka_admin_op_t;
              +typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t;
              +rd_kafka_AdminOptions_t *rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
              +void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, int true_or_false, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, int32_t broker_id, char *errstr, size_t errstr_size);
              +void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, void *ev_opaque);
              +typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t;
              +rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, int num_partitions, int replication_factor, char *errstr, size_t errstr_size);
              +void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic);
              +void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, int32_t partition, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, const char *name, const char *value);
              +void rd_kafka_CreateTopics(rd_kafka_t *rk, rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(const rd_kafka_CreateTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
              +rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic);
              +void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic);
              +void rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt);
              +void rd_kafka_DeleteTopics(rd_kafka_t *rk, rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(const rd_kafka_DeleteTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t;
              +rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new(const char *topic, size_t new_total_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts);
              +void rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment(rd_kafka_NewPartitions_t *new_parts, int32_t new_partition_idx, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_CreatePartitions(rd_kafka_t *rk, rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreatePartitions_result_topics(const rd_kafka_CreatePartitions_result_t *result, size_t *cntp);
              +typedef enum rd_kafka_ConfigSource_t {
              +  RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
              +  RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4,
              +  RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5,
              +  RD_KAFKA_CONFIG_SOURCE__CNT,
              +} rd_kafka_ConfigSource_t;
              +const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource);
              +typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t;
              +const char *rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry);
              +const char *rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry);
              +rd_kafka_ConfigSource_t rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, size_t *cntp);
              +typedef enum rd_kafka_ResourceType_t {
              +  RD_KAFKA_RESOURCE_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_ANY = 1,
              +  RD_KAFKA_RESOURCE_TOPIC = 2,
              +  RD_KAFKA_RESOURCE_GROUP = 3,
              +  RD_KAFKA_RESOURCE_BROKER = 4,
              +  RD_KAFKA_RESOURCE__CNT,
              +} rd_kafka_ResourceType_t;
              +const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype);
              +typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
              +rd_kafka_ConfigResource_t *rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, const char *resname);
              +void rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config);
              +void rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, size_t config_cnt);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, const char *name, const char *value);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, size_t *cntp);
              +rd_kafka_ResourceType_t rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config);
              +void rd_kafka_AlterConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_AlterConfigs_result_resources(const rd_kafka_AlterConfigs_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_DescribeConfigs_result_resources(const rd_kafka_DescribeConfigs_result_t *result, size_t *cntp);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token(rd_kafka_t *rk, const char *token_value, int64_t md_lifetime_ms, const char *md_principal_name, const char **extensions, size_t extension_size, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk, const char *errstr);
              +rd_kafka_error_t *rd_kafka_init_transactions(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_send_offsets_to_transaction(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, const rd_kafka_consumer_group_metadata_t *cgmetadata, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_commit_transaction(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_abort_transaction(rd_kafka_t *rk, int timeout_ms);
              +typedef struct rd_kafka_mock_cluster_s rd_kafka_mock_cluster_t;
              +rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new(rd_kafka_t *rk, int broker_cnt);
              +void rd_kafka_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_t *rd_kafka_mock_cluster_handle(const rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_mock_cluster_t *rd_kafka_handle_mock_cluster(const rd_kafka_t *rk);
              +const char *rd_kafka_mock_cluster_bootstraps(const rd_kafka_mock_cluster_t *mcluster);
              +void rd_kafka_mock_push_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, ...);
              +void rd_kafka_mock_topic_set_error(rd_kafka_mock_cluster_t *mcluster, const char *topic, rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_mock_topic_create(rd_kafka_mock_cluster_t *mcluster, const char *topic, int partition_cnt, int replication_factor);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_leader(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int64_t lo, int64_t hi);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_down(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_up(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rack(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, const char *rack);
              +rd_kafka_resp_err_t rd_kafka_mock_coordinator_set(rd_kafka_mock_cluster_t *mcluster, const char *key_type, const char *key, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_set_apiversion(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, int16_t MinVersion, int16_t MaxVersion);
              +'
              +
              +rdkafka.h, rdkafka_mock.h
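The declarations above are the kind of C definitions that PHP's FFI layer consumes. The snippet below is a minimal, self-contained sketch of binding such a cdef string against librdkafka; the shared library name and the reduced two-function subset are assumptions for illustration only, and a plausible real-world use would pass the full RD_KAFKA_CDEF string instead of the shortened one shown here:

<?php
// Minimal sketch, assuming librdkafka is installed as librdkafka.so.1 and the
// FFI extension is enabled. Only two declarations from the cdef string above
// are repeated so the example stays self-contained.
$cdef = <<<'CDEF'
int rd_kafka_version(void);
const char *rd_kafka_version_str(void);
CDEF;

$ffi = \FFI::cdef($cdef, 'librdkafka.so.1');

printf(
    "librdkafka %s (0x%08x)\n",
    \FFI::string($ffi->rd_kafka_version_str()),
    $ffi->rd_kafka_version()
);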

              +

              RD_KAFKA_VERSION

              +
              public RD_KAFKA_VERSION = 17040639
              +
              +

              librdkafka version

              +

              Interpreted as hex MM.mm.rr.xx:

              +
• MM = Major
• mm = minor
• rr = revision
• xx = pre-release id (0xff is the final release)
              +

              E.g.: 0x000801ff = 0.8.1

              +
              Remarks
This value should only be used at compile time; for runtime version checks, use rd_kafka_version().
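Since the value is just a packed integer, the individual parts can be recovered with a few bit shifts. The helper below is a minimal sketch (not part of the library; the function name is made up for illustration):

<?php
// Hypothetical helper: decode the packed MM.mm.rr.xx version integer.
function formatRdKafkaVersion(int $version): string
{
    $major    = ($version >> 24) & 0xff;
    $minor    = ($version >> 16) & 0xff;
    $revision = ($version >> 8) & 0xff;
    $pre      = $version & 0xff; // 0xff marks a final release

    $suffix = $pre === 0xff ? '' : sprintf('~pre%d', $pre); // display choice, not librdkafka's

    return sprintf('%d.%d.%d%s', $major, $minor, $revision, $suffix);
}

echo formatRdKafkaVersion(17040639), PHP_EOL; // prints 1.4.4 for the value shown above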

              RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE

              +
              public RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76
              +
              +

              Unsupported compression type


              RD_KAFKA_RESP_ERR_END_ALL

              +

              public RD_KAFKA_RESP_ERR_END_ALL = 83
              +
              +enum rd_kafka_resp_err_t

              +

              RD_KAFKA_ADMIN_OP__CNT

              +
              public RD_KAFKA_ADMIN_OP__CNT = 6
              +
              +

              Number of ops defined


              RD_KAFKA_CDEF

              +

              public RD_KAFKA_CDEF = 'typedef long int ssize_t;
              +typedef struct _IO_FILE FILE;
              +typedef long int mode_t;
              +typedef signed int int16_t;
              +typedef unsigned int uint16_t;
              +typedef signed int int32_t;
              +typedef signed long int int64_t;
              +int rd_kafka_version(void);
              +const char *rd_kafka_version_str(void);
              +typedef enum rd_kafka_type_t {
              +  RD_KAFKA_PRODUCER,
              +  RD_KAFKA_CONSUMER,
              +} rd_kafka_type_t;
              +typedef enum rd_kafka_timestamp_type_t {
              +  RD_KAFKA_TIMESTAMP_NOT_AVAILABLE,
              +  RD_KAFKA_TIMESTAMP_CREATE_TIME,
              +  RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME,
              +} rd_kafka_timestamp_type_t;
              +const char *rd_kafka_get_debug_contexts(void);
              +typedef struct rd_kafka_s rd_kafka_t;
              +typedef struct rd_kafka_topic_s rd_kafka_topic_t;
              +typedef struct rd_kafka_conf_s rd_kafka_conf_t;
              +typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t;
              +typedef struct rd_kafka_queue_s rd_kafka_queue_t;
              +typedef struct rd_kafka_op_s rd_kafka_event_t;
              +typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t;
              +typedef struct rd_kafka_consumer_group_metadata_s rd_kafka_consumer_group_metadata_t;
              +typedef struct rd_kafka_error_s {
              +  unsigned int code;
              +  char *errstr;
              +  unsigned char fatal;
              +  unsigned char retriable;
              +  unsigned char txn_requires_abort;
              +} rd_kafka_error_t;
              +typedef enum {
              +  RD_KAFKA_RESP_ERR__BEGIN = (- 200),
              +  RD_KAFKA_RESP_ERR__BAD_MSG = (- 199),
              +  RD_KAFKA_RESP_ERR__BAD_COMPRESSION = (- 198),
              +  RD_KAFKA_RESP_ERR__DESTROY = (- 197),
              +  RD_KAFKA_RESP_ERR__FAIL = (- 196),
              +  RD_KAFKA_RESP_ERR__TRANSPORT = (- 195),
              +  RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = (- 194),
              +  RD_KAFKA_RESP_ERR__RESOLVE = (- 193),
              +  RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = (- 192),
              +  RD_KAFKA_RESP_ERR__PARTITION_EOF = (- 191),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = (- 190),
              +  RD_KAFKA_RESP_ERR__FS = (- 189),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = (- 188),
              +  RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = (- 187),
              +  RD_KAFKA_RESP_ERR__INVALID_ARG = (- 186),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT = (- 185),
              +  RD_KAFKA_RESP_ERR__QUEUE_FULL = (- 184),
              +  RD_KAFKA_RESP_ERR__ISR_INSUFF = (- 183),
              +  RD_KAFKA_RESP_ERR__NODE_UPDATE = (- 182),
              +  RD_KAFKA_RESP_ERR__SSL = (- 181),
              +  RD_KAFKA_RESP_ERR__WAIT_COORD = (- 180),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = (- 179),
              +  RD_KAFKA_RESP_ERR__IN_PROGRESS = (- 178),
              +  RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = (- 177),
              +  RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = (- 176),
              +  RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = (- 175),
              +  RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = (- 174),
              +  RD_KAFKA_RESP_ERR__CONFLICT = (- 173),
              +  RD_KAFKA_RESP_ERR__STATE = (- 172),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = (- 171),
              +  RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = (- 170),
              +  RD_KAFKA_RESP_ERR__AUTHENTICATION = (- 169),
              +  RD_KAFKA_RESP_ERR__NO_OFFSET = (- 168),
              +  RD_KAFKA_RESP_ERR__OUTDATED = (- 167),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = (- 166),
              +  RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = (- 165),
              +  RD_KAFKA_RESP_ERR__WAIT_CACHE = (- 164),
              +  RD_KAFKA_RESP_ERR__INTR = (- 163),
              +  RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = (- 162),
              +  RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = (- 161),
              +  RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = (- 160),
              +  RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = (- 159),
              +  RD_KAFKA_RESP_ERR__PARTIAL = (- 158),
              +  RD_KAFKA_RESP_ERR__READ_ONLY = (- 157),
              +  RD_KAFKA_RESP_ERR__NOENT = (- 156),
              +  RD_KAFKA_RESP_ERR__UNDERFLOW = (- 155),
              +  RD_KAFKA_RESP_ERR__INVALID_TYPE = (- 154),
              +  RD_KAFKA_RESP_ERR__RETRY = (- 153),
              +  RD_KAFKA_RESP_ERR__PURGE_QUEUE = (- 152),
              +  RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = (- 151),
              +  RD_KAFKA_RESP_ERR__FATAL = (- 150),
              +  RD_KAFKA_RESP_ERR__INCONSISTENT = (- 149),
              +  RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = (- 148),
              +  RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = (- 147),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_BROKER = (- 146),
              +  RD_KAFKA_RESP_ERR__NOT_CONFIGURED = (- 145),
              +  RD_KAFKA_RESP_ERR__FENCED = (- 144),
              +  RD_KAFKA_RESP_ERR__APPLICATION = (- 143),
              +  RD_KAFKA_RESP_ERR__END = (- 100),
              +  RD_KAFKA_RESP_ERR_UNKNOWN = (- 1),
              +  RD_KAFKA_RESP_ERR_NO_ERROR = 0,
              +  RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG = 2,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4,
              +  RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5,
              +  RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6,
              +  RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7,
              +  RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8,
              +  RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9,
              +  RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10,
              +  RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11,
              +  RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12,
              +  RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS = 14,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE = 15,
              +  RD_KAFKA_RESP_ERR_NOT_COORDINATOR = 16,
              +  RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17,
              +  RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
              +  RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25,
              +  RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26,
              +  RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27,
              +  RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
              +  RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29,
              +  RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30,
              +  RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
              +  RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35,
              +  RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36,
              +  RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39,
              +  RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40,
              +  RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
              +  RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44,
              +  RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47,
              +  RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49,
              +  RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50,
              +  RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51,
              +  RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52,
              +  RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
              +  RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54,
              +  RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55,
              +  RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56,
              +  RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57,
              +  RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59,
              +  RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66,
              +  RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67,
              +  RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68,
              +  RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69,
              +  RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70,
              +  RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71,
              +  RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72,
              +  RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73,
              +  RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76,
              +  RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77,
              +  RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78,
              +  RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79,
              +  RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80,
              +  RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81,
              +  RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID = 82,
              +  RD_KAFKA_RESP_ERR_END_ALL,
              +} rd_kafka_resp_err_t;
              +struct rd_kafka_err_desc {
              +  rd_kafka_resp_err_t code;
              +  char *name;
              +  char *desc;
              +};
              +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, size_t *cntp);
              +const char *rd_kafka_err2str(rd_kafka_resp_err_t err);
              +const char *rd_kafka_err2name(rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_last_error(void);
              +rd_kafka_resp_err_t rd_kafka_errno2err(int errnox);
              +int rd_kafka_errno(void);
              +rd_kafka_resp_err_t rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason);
              +rd_kafka_resp_err_t rd_kafka_error_code(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_name(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_string(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_fatal(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_retriable(const rd_kafka_error_t *error);
              +int rd_kafka_error_txn_requires_abort(const rd_kafka_error_t *error);
              +void rd_kafka_error_destroy(rd_kafka_error_t *error);
              +rd_kafka_error_t *rd_kafka_error_new(rd_kafka_resp_err_t code, const char *fmt, ...);
              +typedef struct rd_kafka_topic_partition_s {
              +  char *topic;
              +  int32_t partition;
              +  int64_t offset;
              +  void *metadata;
              +  size_t metadata_size;
              +  void *opaque;
              +  rd_kafka_resp_err_t err;
              +  void *_private;
              +} rd_kafka_topic_partition_t;
              +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar);
              +typedef struct rd_kafka_topic_partition_list_s {
              +  int cnt;
              +  int size;
              +  rd_kafka_topic_partition_t *elems;
              +} rd_kafka_topic_partition_list_t;
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size);
              +void rd_kafka_topic_partition_list_destroy(rd_kafka_topic_partition_list_t *rkparlist);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_add_range(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t start, int32_t stop);
              +int rd_kafka_topic_partition_list_del(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +int rd_kafka_topic_partition_list_del_by_idx(rd_kafka_topic_partition_list_t *rktparlist, int idx);
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src);
              +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition, int64_t offset);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_sort(rd_kafka_topic_partition_list_t *rktparlist, int (*cmp)(const void *a, const void *b, void *cmp_opaque), void *cmp_opaque);
              +typedef enum rd_kafka_vtype_t {
              +  RD_KAFKA_VTYPE_END,
              +  RD_KAFKA_VTYPE_TOPIC,
              +  RD_KAFKA_VTYPE_RKT,
              +  RD_KAFKA_VTYPE_PARTITION,
              +  RD_KAFKA_VTYPE_VALUE,
              +  RD_KAFKA_VTYPE_KEY,
              +  RD_KAFKA_VTYPE_OPAQUE,
              +  RD_KAFKA_VTYPE_MSGFLAGS,
              +  RD_KAFKA_VTYPE_TIMESTAMP,
              +  RD_KAFKA_VTYPE_HEADER,
              +  RD_KAFKA_VTYPE_HEADERS,
              +} rd_kafka_vtype_t;
              +typedef struct rd_kafka_headers_s rd_kafka_headers_t;
              +rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count);
              +void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs);
              +rd_kafka_headers_t *rd_kafka_headers_copy(const rd_kafka_headers_t *src);
              +rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, const char *name, ssize_t name_size, const void *value, ssize_t value_size);
              +rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, const char *name);
              +rd_kafka_resp_err_t rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get(const rd_kafka_headers_t *hdrs, size_t idx, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, size_t idx, const char **namep, const void **valuep, size_t *sizep);
              +typedef struct rd_kafka_message_s {
              +  rd_kafka_resp_err_t err;
              +  rd_kafka_topic_t *rkt;
              +  int32_t partition;
              +  void *payload;
              +  size_t len;
              +  void *key;
              +  size_t key_len;
              +  int64_t offset;
              +  void *_private;
              +} rd_kafka_message_t;
              +void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);
              +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, rd_kafka_timestamp_type_t *tstype);
              +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage);
              +rd_kafka_resp_err_t rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +rd_kafka_resp_err_t rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t *hdrs);
              +size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs);
              +typedef enum {
              +  RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0,
              +  RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1,
              +  RD_KAFKA_MSG_STATUS_PERSISTED = 2,
              +} rd_kafka_msg_status_t;
              +rd_kafka_msg_status_t rd_kafka_message_status(const rd_kafka_message_t *rkmessage);
              +typedef enum {
              +  RD_KAFKA_CONF_UNKNOWN = (- 2),
              +  RD_KAFKA_CONF_INVALID = (- 1),
              +  RD_KAFKA_CONF_OK = 0,
              +} rd_kafka_conf_res_t;
              +rd_kafka_conf_t *rd_kafka_conf_new(void);
              +void rd_kafka_conf_destroy(rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, size_t filter_cnt, const char **filter);
              +const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk);
              +rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events);
              +void rd_kafka_conf_set_background_event_cb(rd_kafka_conf_t *conf, void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque));
              +void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, void (*dr_cb)(rd_kafka_t *rk, void *payload, size_t len, rd_kafka_resp_err_t err, void *opaque, void *msg_opaque));
              +void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, void (*dr_msg_cb)(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_consume_cb(rd_kafka_conf_t *conf, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_rebalance_cb(rd_kafka_conf_t *conf, void (*rebalance_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque));
              +void rd_kafka_conf_set_offset_commit_cb(rd_kafka_conf_t *conf, void (*offset_commit_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque));
              +void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, void (*error_cb)(rd_kafka_t *rk, int err, const char *reason, void *opaque));
              +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, void (*throttle_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque));
              +void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, void (*log_cb)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque));
              +void rd_kafka_conf_set_oauthbearer_token_refresh_cb(rd_kafka_conf_t *conf, void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, const char *oauthbearer_config, void *opaque));
              +void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, int (*socket_cb)(int domain, int type, int protocol, void *opaque));
              +void rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, int (*connect_cb)(int sockfd, const struct sockaddr *addr, int addrlen, const char *id, void *opaque));
              +void rd_kafka_conf_set_closesocket_cb(rd_kafka_conf_t *conf, int (*closesocket_cb)(int sockfd, void *opaque));
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb(rd_kafka_conf_t *conf, int (*ssl_cert_verify_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int *x509_error, int depth, const char *buf, size_t size, char *errstr, size_t errstr_size, void *opaque));
              +typedef enum rd_kafka_cert_type_t {
              +  RD_KAFKA_CERT_PUBLIC_KEY,
              +  RD_KAFKA_CERT_PRIVATE_KEY,
              +  RD_KAFKA_CERT_CA,
              +  RD_KAFKA_CERT__CNT,
              +} rd_kafka_cert_type_t;
              +typedef enum rd_kafka_cert_enc_t {
              +  RD_KAFKA_CERT_ENC_PKCS12,
              +  RD_KAFKA_CERT_ENC_DER,
              +  RD_KAFKA_CERT_ENC_PEM,
              +  RD_KAFKA_CERT_ENC__CNT,
              +} rd_kafka_cert_enc_t;
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, rd_kafka_cert_type_t cert_type, rd_kafka_cert_enc_t cert_enc, const void *buffer, size_t size, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque);
              +void *rd_kafka_opaque(const rd_kafka_t *rk);
              +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, rd_kafka_topic_conf_t *tconf);
              +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp);
              +const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, size_t *cntp);
              +void rd_kafka_conf_dump_free(const char **arr, size_t cnt);
              +void rd_kafka_conf_properties_show(FILE *fp);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf);
              +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk);
              +void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *rkt_opaque);
              +void rd_kafka_topic_conf_set_partitioner_cb(rd_kafka_topic_conf_t *topic_conf, int32_t (*partitioner)(const rd_kafka_topic_t *rkt, const void *keydata, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque));
              +void rd_kafka_topic_conf_set_msg_order_cmp(rd_kafka_topic_conf_t *topic_conf, int (*msg_order_cmp)(const rd_kafka_message_t *a, const rd_kafka_message_t *b));
              +int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, int32_t partition);
              +int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, char *errstr, size_t errstr_size);
              +void rd_kafka_destroy(rd_kafka_t *rk);
              +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags);
              +const char *rd_kafka_name(const rd_kafka_t *rk);
              +rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk);
              +char *rd_kafka_memberid(const rd_kafka_t *rk);
              +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms);
              +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, rd_kafka_topic_conf_t *conf);
              +void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt);
              +const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt);
              +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt);
              +int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms);
              +void rd_kafka_yield(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_pause_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_resume_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high);
              +rd_kafka_resp_err_t rd_kafka_offsets_for_times(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets, int timeout_ms);
              +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr);
              +rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk);
              +void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu);
              +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, const char *topic, int32_t partition);
              +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk);
              +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst);
              +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu);
              +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, int fd, const void *payload, size_t size);
              +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, void (*event_cb)(rd_kafka_t *rk, void *qev_opaque), void *qev_opaque);
              +int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
              +rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, int timeout_ms);
              +rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms);
              +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, int timeout_ms);
              +ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +rd_kafka_resp_err_t rd_kafka_offsets_store(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets);
              +rd_kafka_resp_err_t rd_kafka_subscribe(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *topics);
              +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics);
              +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_assignment(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **partitions);
              +rd_kafka_resp_err_t rd_kafka_commit(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_message(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_queue(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_queue_t *rkqu, void (*cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_committed(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata(rd_kafka_t *rk);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new(const char *group_id);
              +void rd_kafka_consumer_group_metadata_destroy(rd_kafka_consumer_group_metadata_t *);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_write(const rd_kafka_consumer_group_metadata_t *cgmd, void **bufferp, size_t *sizep);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_read(rd_kafka_consumer_group_metadata_t **cgmdp, const void *buffer, size_t size);
              +int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, void *payload, size_t len, const void *key, size_t keylen, void *msg_opaque);
              +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...);
              +int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, rd_kafka_message_t *rkmessages, int message_cnt);
              +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags);
              +typedef struct rd_kafka_metadata_broker {
              +  int32_t id;
              +  char *host;
              +  int port;
              +} rd_kafka_metadata_broker_t;
              +typedef struct rd_kafka_metadata_partition {
              +  int32_t id;
              +  rd_kafka_resp_err_t err;
              +  int32_t leader;
              +  int replica_cnt;
              +  int32_t *replicas;
              +  int isr_cnt;
              +  int32_t *isrs;
              +} rd_kafka_metadata_partition_t;
              +typedef struct rd_kafka_metadata_topic {
              +  char *topic;
              +  int partition_cnt;
              +  struct rd_kafka_metadata_partition *partitions;
              +  rd_kafka_resp_err_t err;
              +} rd_kafka_metadata_topic_t;
              +typedef struct rd_kafka_metadata {
              +  int broker_cnt;
              +  struct rd_kafka_metadata_broker *brokers;
              +  int topic_cnt;
              +  struct rd_kafka_metadata_topic *topics;
              +  int32_t orig_broker_id;
              +  char *orig_broker_name;
              +} rd_kafka_metadata_t;
              +rd_kafka_resp_err_t rd_kafka_metadata(rd_kafka_t *rk, int all_topics, rd_kafka_topic_t *only_rkt, const struct rd_kafka_metadata **metadatap, int timeout_ms);
              +void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata);
              +struct rd_kafka_group_member_info {
              +  char *member_id;
              +  char *client_id;
              +  char *client_host;
              +  void *member_metadata;
              +  int member_metadata_size;
              +  void *member_assignment;
              +  int member_assignment_size;
              +};
              +struct rd_kafka_group_info {
              +  struct rd_kafka_metadata_broker broker;
              +  char *group;
              +  rd_kafka_resp_err_t err;
              +  char *state;
              +  char *protocol_type;
              +  char *protocol;
              +  struct rd_kafka_group_member_info *members;
              +  int member_cnt;
              +};
              +struct rd_kafka_group_list {
              +  struct rd_kafka_group_info *groups;
              +  int group_cnt;
              +};
              +rd_kafka_resp_err_t rd_kafka_list_groups(rd_kafka_t *rk, const char *group, const struct rd_kafka_group_list **grplistp, int timeout_ms);
              +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist);
              +int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist);
              +void rd_kafka_set_logger(rd_kafka_t *rk, void (*func)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_set_log_level(rd_kafka_t *rk, int level);
              +void rd_kafka_log_print(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +int rd_kafka_outq_len(rd_kafka_t *rk);
              +void rd_kafka_dump(FILE *fp, rd_kafka_t *rk);
              +int rd_kafka_thread_cnt(void);
              +typedef enum rd_kafka_thread_type_t {
              +  RD_KAFKA_THREAD_MAIN,
              +  RD_KAFKA_THREAD_BACKGROUND,
              +  RD_KAFKA_THREAD_BROKER,
              +} rd_kafka_thread_type_t;
              +int rd_kafka_wait_destroyed(int timeout_ms);
              +int rd_kafka_unittest(void);
              +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk);
              +typedef int rd_kafka_event_type_t;
              +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev);
              +void rd_kafka_event_destroy(rd_kafka_event_t *rkev);
              +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev);
              +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, const rd_kafka_message_t **rkmessages, size_t size);
              +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev);
              +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev);
              +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev);
              +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev);
              +int rd_kafka_event_log(rd_kafka_event_t *rkev, const char **fac, const char **str, int *level);
              +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_list_t *rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_t *rd_kafka_event_topic_partition(rd_kafka_event_t *rkev);
              +typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t;
              +const rd_kafka_CreateTopics_result_t *rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteTopics_result_t *rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreatePartitions_result_t *rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConfigs_result_t *rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConfigs_result_t *rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev);
              +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms);
              +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms);
              +typedef rd_kafka_resp_err_t (rd_kafka_plugin_f_conf_init_t)(rd_kafka_conf_t *conf, void **plug_opaquep, char *errstr, size_t errstr_size);
              +typedef rd_kafka_conf_res_t (rd_kafka_interceptor_f_on_conf_set_t)(rd_kafka_conf_t *conf, const char *name, const char *val, char *errstr, size_t errstr_size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_dup_t)(rd_kafka_conf_t *new_conf, const rd_kafka_conf_t *old_conf, size_t filter_cnt, const char **filter, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_destroy_t)(void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_new_t)(rd_kafka_t *rk, const rd_kafka_conf_t *conf, void *ic_opaque, char *errstr, size_t errstr_size);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_send_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_acknowledgement_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_consume_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_commit_t)(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_request_sent_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_start_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_exit_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_new_t *on_new, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_destroy_t *on_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_send_t *on_send, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_consume_t *on_consume, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_commit_t *on_commit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres);
              +typedef enum rd_kafka_admin_op_t {
              +  RD_KAFKA_ADMIN_OP_ANY = 0,
              +  RD_KAFKA_ADMIN_OP_CREATETOPICS,
              +  RD_KAFKA_ADMIN_OP_DELETETOPICS,
              +  RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONFIGS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS,
              +  RD_KAFKA_ADMIN_OP__CNT,
              +} rd_kafka_admin_op_t;
              +typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t;
              +rd_kafka_AdminOptions_t *rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
              +void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, int true_or_false, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, int32_t broker_id, char *errstr, size_t errstr_size);
              +void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, void *ev_opaque);
              +typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t;
              +rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, int num_partitions, int replication_factor, char *errstr, size_t errstr_size);
              +void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic);
              +void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, int32_t partition, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, const char *name, const char *value);
              +void rd_kafka_CreateTopics(rd_kafka_t *rk, rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(const rd_kafka_CreateTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
              +rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic);
              +void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic);
              +void rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt);
              +void rd_kafka_DeleteTopics(rd_kafka_t *rk, rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(const rd_kafka_DeleteTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t;
              +rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new(const char *topic, size_t new_total_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts);
              +void rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment(rd_kafka_NewPartitions_t *new_parts, int32_t new_partition_idx, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_CreatePartitions(rd_kafka_t *rk, rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreatePartitions_result_topics(const rd_kafka_CreatePartitions_result_t *result, size_t *cntp);
              +typedef enum rd_kafka_ConfigSource_t {
              +  RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
              +  RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4,
              +  RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5,
              +  RD_KAFKA_CONFIG_SOURCE__CNT,
              +} rd_kafka_ConfigSource_t;
              +const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource);
              +typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t;
              +const char *rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry);
              +const char *rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry);
              +rd_kafka_ConfigSource_t rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, size_t *cntp);
              +typedef enum rd_kafka_ResourceType_t {
              +  RD_KAFKA_RESOURCE_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_ANY = 1,
              +  RD_KAFKA_RESOURCE_TOPIC = 2,
              +  RD_KAFKA_RESOURCE_GROUP = 3,
              +  RD_KAFKA_RESOURCE_BROKER = 4,
              +  RD_KAFKA_RESOURCE__CNT,
              +} rd_kafka_ResourceType_t;
              +const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype);
              +typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
              +rd_kafka_ConfigResource_t *rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, const char *resname);
              +void rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config);
              +void rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, size_t config_cnt);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, const char *name, const char *value);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, size_t *cntp);
              +rd_kafka_ResourceType_t rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config);
              +void rd_kafka_AlterConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_AlterConfigs_result_resources(const rd_kafka_AlterConfigs_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_DescribeConfigs_result_resources(const rd_kafka_DescribeConfigs_result_t *result, size_t *cntp);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token(rd_kafka_t *rk, const char *token_value, int64_t md_lifetime_ms, const char *md_principal_name, const char **extensions, size_t extension_size, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk, const char *errstr);
              +rd_kafka_error_t *rd_kafka_init_transactions(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_send_offsets_to_transaction(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, const rd_kafka_consumer_group_metadata_t *cgmetadata, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_commit_transaction(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_abort_transaction(rd_kafka_t *rk, int timeout_ms);
              +typedef struct rd_kafka_mock_cluster_s rd_kafka_mock_cluster_t;
              +rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new(rd_kafka_t *rk, int broker_cnt);
              +void rd_kafka_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_t *rd_kafka_mock_cluster_handle(const rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_mock_cluster_t *rd_kafka_handle_mock_cluster(const rd_kafka_t *rk);
              +const char *rd_kafka_mock_cluster_bootstraps(const rd_kafka_mock_cluster_t *mcluster);
              +void rd_kafka_mock_push_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, ...);
              +void rd_kafka_mock_topic_set_error(rd_kafka_mock_cluster_t *mcluster, const char *topic, rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_mock_topic_create(rd_kafka_mock_cluster_t *mcluster, const char *topic, int partition_cnt, int replication_factor);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_leader(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int64_t lo, int64_t hi);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_down(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_up(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rtt(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int rtt_ms);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rack(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, const char *rack);
              +rd_kafka_resp_err_t rd_kafka_mock_coordinator_set(rd_kafka_mock_cluster_t *mcluster, const char *key_type, const char *key, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_set_apiversion(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, int16_t MinVersion, int16_t MaxVersion);
              +'
              +
              +rdkafka.h, rdkafka_mock.h
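+
The C declarations above, drawn from rdkafka.h and rdkafka_mock.h, make up the RD_KAFKA_CDEF string in exactly the shape PHP's FFI extension consumes. As a rough, hedged sketch of how such a declaration string can be bound against librdkafka with ext-ffi (this is not this library's actual bootstrap code, and the shared-library file name below is an assumption that may differ on your system):

<?php
// Minimal sketch only: bind a small cdef block against the installed librdkafka.
// RD_KAFKA_CDEF works the same way, just with far more declarations.
$cdef = 'int rd_kafka_version(void); const char *rd_kafka_version_str(void);';

// 'librdkafka.so.1' is an assumed file name - adjust it for your platform.
$ffi = \FFI::cdef($cdef, 'librdkafka.so.1');

// Every function listed in the cdef becomes callable through the FFI handle.
printf(
    "librdkafka %s (0x%08x)\n",
    \FFI::string($ffi->rd_kafka_version_str()),
    $ffi->rd_kafka_version()
);

Once the handle exists, the producer, consumer, admin and mock-cluster functions declared in the string are reachable in the same way.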

              +

              RD_KAFKA_VERSION

              +
              public RD_KAFKA_VERSION = 17105151
              +
              +

              librdkafka version

              +

              Interpreted as hex MM.mm.rr.xx:

              +
• MM = Major
• mm = minor
• rr = revision
• xx = pre-release id (0xff is the final release)
              +

              E.g.: 0x000801ff = 0.8.1

              +
              Remarks
This value should only be used at compile time; for runtime version checks, use rd_kafka_version().
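
To make the hex layout concrete, the value shown above decodes as 17105151 = 0x010500ff, i.e. librdkafka 1.5.0 (pre-release id 0xff = final release). A minimal sketch of that decoding in plain PHP (illustrative only; as the remark says, prefer rd_kafka_version() for runtime checks):

<?php
// Illustrative decoding of the packed MM.mm.rr.xx version value.
$version = 17105151;                    // 0x010500ff, the RD_KAFKA_VERSION above

$major      = ($version >> 24) & 0xff;  // MM -> 1
$minor      = ($version >> 16) & 0xff;  // mm -> 5
$revision   = ($version >> 8)  & 0xff;  // rr -> 0
$prerelease = $version & 0xff;          // xx -> 0xff (final release)

printf("librdkafka %d.%d.%d%s\n", $major, $minor, $revision,
    $prerelease === 0xff ? '' : sprintf('-pre%d', $prerelease));
// prints: librdkafka 1.5.0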

              RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE

              +
              public RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76
              +
              +

              Unsupported compression type


              RD_KAFKA_RESP_ERR_END_ALL

              +

              public RD_KAFKA_RESP_ERR_END_ALL = 83
              +
              +enum rd_kafka_resp_err_t

              +

              RD_KAFKA_ADMIN_OP__CNT

              +
              public RD_KAFKA_ADMIN_OP__CNT = 6
              +
              +

              Number of ops defined


              RD_KAFKA_CDEF

              +

              public RD_KAFKA_CDEF = 'typedef long int ssize_t;
              +typedef struct _IO_FILE FILE;
              +typedef long int mode_t;
              +typedef signed int int16_t;
              +typedef unsigned int uint16_t;
              +typedef signed int int32_t;
              +typedef signed long int int64_t;
              +int rd_kafka_version(void);
              +const char *rd_kafka_version_str(void);
              +typedef enum rd_kafka_type_t {
              +  RD_KAFKA_PRODUCER,
              +  RD_KAFKA_CONSUMER,
              +} rd_kafka_type_t;
              +typedef enum rd_kafka_timestamp_type_t {
              +  RD_KAFKA_TIMESTAMP_NOT_AVAILABLE,
              +  RD_KAFKA_TIMESTAMP_CREATE_TIME,
              +  RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME,
              +} rd_kafka_timestamp_type_t;
              +const char *rd_kafka_get_debug_contexts(void);
              +typedef struct rd_kafka_s rd_kafka_t;
              +typedef struct rd_kafka_topic_s rd_kafka_topic_t;
              +typedef struct rd_kafka_conf_s rd_kafka_conf_t;
              +typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t;
              +typedef struct rd_kafka_queue_s rd_kafka_queue_t;
              +typedef struct rd_kafka_op_s rd_kafka_event_t;
              +typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t;
              +typedef struct rd_kafka_consumer_group_metadata_s rd_kafka_consumer_group_metadata_t;
              +typedef struct rd_kafka_error_s {
              +  unsigned int code;
              +  char *errstr;
              +  unsigned char fatal;
              +  unsigned char retriable;
              +  unsigned char txn_requires_abort;
              +} rd_kafka_error_t;
              +typedef struct rd_kafka_headers_s rd_kafka_headers_t;
              +typedef enum {
              +  RD_KAFKA_RESP_ERR__BEGIN = (- 200),
              +  RD_KAFKA_RESP_ERR__BAD_MSG = (- 199),
              +  RD_KAFKA_RESP_ERR__BAD_COMPRESSION = (- 198),
              +  RD_KAFKA_RESP_ERR__DESTROY = (- 197),
              +  RD_KAFKA_RESP_ERR__FAIL = (- 196),
              +  RD_KAFKA_RESP_ERR__TRANSPORT = (- 195),
              +  RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = (- 194),
              +  RD_KAFKA_RESP_ERR__RESOLVE = (- 193),
              +  RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = (- 192),
              +  RD_KAFKA_RESP_ERR__PARTITION_EOF = (- 191),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = (- 190),
              +  RD_KAFKA_RESP_ERR__FS = (- 189),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = (- 188),
              +  RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = (- 187),
              +  RD_KAFKA_RESP_ERR__INVALID_ARG = (- 186),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT = (- 185),
              +  RD_KAFKA_RESP_ERR__QUEUE_FULL = (- 184),
              +  RD_KAFKA_RESP_ERR__ISR_INSUFF = (- 183),
              +  RD_KAFKA_RESP_ERR__NODE_UPDATE = (- 182),
              +  RD_KAFKA_RESP_ERR__SSL = (- 181),
              +  RD_KAFKA_RESP_ERR__WAIT_COORD = (- 180),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = (- 179),
              +  RD_KAFKA_RESP_ERR__IN_PROGRESS = (- 178),
              +  RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = (- 177),
              +  RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = (- 176),
              +  RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = (- 175),
              +  RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = (- 174),
              +  RD_KAFKA_RESP_ERR__CONFLICT = (- 173),
              +  RD_KAFKA_RESP_ERR__STATE = (- 172),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = (- 171),
              +  RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = (- 170),
              +  RD_KAFKA_RESP_ERR__AUTHENTICATION = (- 169),
              +  RD_KAFKA_RESP_ERR__NO_OFFSET = (- 168),
              +  RD_KAFKA_RESP_ERR__OUTDATED = (- 167),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = (- 166),
              +  RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = (- 165),
              +  RD_KAFKA_RESP_ERR__WAIT_CACHE = (- 164),
              +  RD_KAFKA_RESP_ERR__INTR = (- 163),
              +  RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = (- 162),
              +  RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = (- 161),
              +  RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = (- 160),
              +  RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = (- 159),
              +  RD_KAFKA_RESP_ERR__PARTIAL = (- 158),
              +  RD_KAFKA_RESP_ERR__READ_ONLY = (- 157),
              +  RD_KAFKA_RESP_ERR__NOENT = (- 156),
              +  RD_KAFKA_RESP_ERR__UNDERFLOW = (- 155),
              +  RD_KAFKA_RESP_ERR__INVALID_TYPE = (- 154),
              +  RD_KAFKA_RESP_ERR__RETRY = (- 153),
              +  RD_KAFKA_RESP_ERR__PURGE_QUEUE = (- 152),
              +  RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = (- 151),
              +  RD_KAFKA_RESP_ERR__FATAL = (- 150),
              +  RD_KAFKA_RESP_ERR__INCONSISTENT = (- 149),
              +  RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = (- 148),
              +  RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = (- 147),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_BROKER = (- 146),
              +  RD_KAFKA_RESP_ERR__NOT_CONFIGURED = (- 145),
              +  RD_KAFKA_RESP_ERR__FENCED = (- 144),
              +  RD_KAFKA_RESP_ERR__APPLICATION = (- 143),
              +  RD_KAFKA_RESP_ERR__END = (- 100),
              +  RD_KAFKA_RESP_ERR_UNKNOWN = (- 1),
              +  RD_KAFKA_RESP_ERR_NO_ERROR = 0,
              +  RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG = 2,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4,
              +  RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5,
              +  RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6,
              +  RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7,
              +  RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8,
              +  RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9,
              +  RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10,
              +  RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11,
              +  RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12,
              +  RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS = 14,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE = 15,
              +  RD_KAFKA_RESP_ERR_NOT_COORDINATOR = 16,
              +  RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17,
              +  RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
              +  RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25,
              +  RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26,
              +  RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27,
              +  RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
              +  RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29,
              +  RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30,
              +  RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
              +  RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35,
              +  RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36,
              +  RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39,
              +  RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40,
              +  RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
              +  RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44,
              +  RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47,
              +  RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49,
              +  RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50,
              +  RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51,
              +  RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52,
              +  RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
              +  RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54,
              +  RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55,
              +  RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56,
              +  RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57,
              +  RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59,
              +  RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66,
              +  RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67,
              +  RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68,
              +  RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69,
              +  RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70,
              +  RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71,
              +  RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72,
              +  RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73,
              +  RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76,
              +  RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77,
              +  RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78,
              +  RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79,
              +  RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80,
              +  RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81,
              +  RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID = 82,
              +  RD_KAFKA_RESP_ERR_END_ALL,
              +} rd_kafka_resp_err_t;
              +struct rd_kafka_err_desc {
              +  rd_kafka_resp_err_t code;
              +  char *name;
              +  char *desc;
              +};
              +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, size_t *cntp);
              +const char *rd_kafka_err2str(rd_kafka_resp_err_t err);
              +const char *rd_kafka_err2name(rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_last_error(void);
              +rd_kafka_resp_err_t rd_kafka_errno2err(int errnox);
              +int rd_kafka_errno(void);
              +rd_kafka_resp_err_t rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason);
              +rd_kafka_resp_err_t rd_kafka_error_code(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_name(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_string(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_fatal(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_retriable(const rd_kafka_error_t *error);
              +int rd_kafka_error_txn_requires_abort(const rd_kafka_error_t *error);
              +void rd_kafka_error_destroy(rd_kafka_error_t *error);
              +rd_kafka_error_t *rd_kafka_error_new(rd_kafka_resp_err_t code, const char *fmt, ...);
              +typedef struct rd_kafka_topic_partition_s {
              +  char *topic;
              +  int32_t partition;
              +  int64_t offset;
              +  void *metadata;
              +  size_t metadata_size;
              +  void *opaque;
              +  rd_kafka_resp_err_t err;
              +  void *_private;
              +} rd_kafka_topic_partition_t;
              +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar);
              +typedef struct rd_kafka_topic_partition_list_s {
              +  int cnt;
              +  int size;
              +  rd_kafka_topic_partition_t *elems;
              +} rd_kafka_topic_partition_list_t;
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size);
              +void rd_kafka_topic_partition_list_destroy(rd_kafka_topic_partition_list_t *rkparlist);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_add_range(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t start, int32_t stop);
              +int rd_kafka_topic_partition_list_del(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +int rd_kafka_topic_partition_list_del_by_idx(rd_kafka_topic_partition_list_t *rktparlist, int idx);
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src);
              +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition, int64_t offset);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_sort(rd_kafka_topic_partition_list_t *rktparlist, int (*cmp)(const void *a, const void *b, void *cmp_opaque), void *cmp_opaque);
              +typedef enum rd_kafka_vtype_t {
              +  RD_KAFKA_VTYPE_END,
              +  RD_KAFKA_VTYPE_TOPIC,
              +  RD_KAFKA_VTYPE_RKT,
              +  RD_KAFKA_VTYPE_PARTITION,
              +  RD_KAFKA_VTYPE_VALUE,
              +  RD_KAFKA_VTYPE_KEY,
              +  RD_KAFKA_VTYPE_OPAQUE,
              +  RD_KAFKA_VTYPE_MSGFLAGS,
              +  RD_KAFKA_VTYPE_TIMESTAMP,
              +  RD_KAFKA_VTYPE_HEADER,
              +  RD_KAFKA_VTYPE_HEADERS,
              +} rd_kafka_vtype_t;
              +typedef struct rd_kafka_vu_s {
              +  rd_kafka_vtype_t vtype;
              +  union {
              +    char *cstr;
              +    rd_kafka_topic_t *rkt;
              +    int i;
              +    int32_t i32;
              +    int64_t i64;
              +    struct {
              +      void *ptr;
              +      size_t size;
              +    } mem;
              +    struct {
              +      char *name;
              +      void *val;
              +      ssize_t size;
              +    } header;
              +    rd_kafka_headers_t *headers;
              +    void *ptr;
              +    char _pad[64];
              +  } u;
              +} rd_kafka_vu_t;
              +rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count);
              +void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs);
              +rd_kafka_headers_t *rd_kafka_headers_copy(const rd_kafka_headers_t *src);
              +rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, const char *name, ssize_t name_size, const void *value, ssize_t value_size);
              +rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, const char *name);
              +rd_kafka_resp_err_t rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get(const rd_kafka_headers_t *hdrs, size_t idx, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, size_t idx, const char **namep, const void **valuep, size_t *sizep);
              +typedef struct rd_kafka_message_s {
              +  rd_kafka_resp_err_t err;
              +  rd_kafka_topic_t *rkt;
              +  int32_t partition;
              +  void *payload;
              +  size_t len;
              +  void *key;
              +  size_t key_len;
              +  int64_t offset;
              +  void *_private;
              +} rd_kafka_message_t;
              +void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);
              +const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage);
              +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, rd_kafka_timestamp_type_t *tstype);
              +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage);
              +int32_t rd_kafka_message_broker_id(const rd_kafka_message_t *rkmessage);
              +rd_kafka_resp_err_t rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +rd_kafka_resp_err_t rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t *hdrs);
              +size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs);
              +typedef enum {
              +  RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0,
              +  RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1,
              +  RD_KAFKA_MSG_STATUS_PERSISTED = 2,
              +} rd_kafka_msg_status_t;
              +rd_kafka_msg_status_t rd_kafka_message_status(const rd_kafka_message_t *rkmessage);
              +typedef enum {
              +  RD_KAFKA_CONF_UNKNOWN = (- 2),
              +  RD_KAFKA_CONF_INVALID = (- 1),
              +  RD_KAFKA_CONF_OK = 0,
              +} rd_kafka_conf_res_t;
              +rd_kafka_conf_t *rd_kafka_conf_new(void);
              +void rd_kafka_conf_destroy(rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, size_t filter_cnt, const char **filter);
              +const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk);
              +rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events);
              +void rd_kafka_conf_set_background_event_cb(rd_kafka_conf_t *conf, void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque));
              +void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, void (*dr_cb)(rd_kafka_t *rk, void *payload, size_t len, rd_kafka_resp_err_t err, void *opaque, void *msg_opaque));
              +void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, void (*dr_msg_cb)(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_consume_cb(rd_kafka_conf_t *conf, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_rebalance_cb(rd_kafka_conf_t *conf, void (*rebalance_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque));
              +void rd_kafka_conf_set_offset_commit_cb(rd_kafka_conf_t *conf, void (*offset_commit_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque));
              +void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, void (*error_cb)(rd_kafka_t *rk, int err, const char *reason, void *opaque));
              +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, void (*throttle_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque));
              +void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, void (*log_cb)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque));
              +void rd_kafka_conf_set_oauthbearer_token_refresh_cb(rd_kafka_conf_t *conf, void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, const char *oauthbearer_config, void *opaque));
              +void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, int (*socket_cb)(int domain, int type, int protocol, void *opaque));
              +void rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, int (*connect_cb)(int sockfd, const struct sockaddr *addr, int addrlen, const char *id, void *opaque));
              +void rd_kafka_conf_set_closesocket_cb(rd_kafka_conf_t *conf, int (*closesocket_cb)(int sockfd, void *opaque));
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb(rd_kafka_conf_t *conf, int (*ssl_cert_verify_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int *x509_error, int depth, const char *buf, size_t size, char *errstr, size_t errstr_size, void *opaque));
              +typedef enum rd_kafka_cert_type_t {
              +  RD_KAFKA_CERT_PUBLIC_KEY,
              +  RD_KAFKA_CERT_PRIVATE_KEY,
              +  RD_KAFKA_CERT_CA,
              +  RD_KAFKA_CERT__CNT,
              +} rd_kafka_cert_type_t;
              +typedef enum rd_kafka_cert_enc_t {
              +  RD_KAFKA_CERT_ENC_PKCS12,
              +  RD_KAFKA_CERT_ENC_DER,
              +  RD_KAFKA_CERT_ENC_PEM,
              +  RD_KAFKA_CERT_ENC__CNT,
              +} rd_kafka_cert_enc_t;
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, rd_kafka_cert_type_t cert_type, rd_kafka_cert_enc_t cert_enc, const void *buffer, size_t size, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque);
              +void *rd_kafka_opaque(const rd_kafka_t *rk);
              +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, rd_kafka_topic_conf_t *tconf);
              +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp);
              +const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, size_t *cntp);
              +void rd_kafka_conf_dump_free(const char **arr, size_t cnt);
              +void rd_kafka_conf_properties_show(FILE *fp);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf);
              +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk);
              +void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *rkt_opaque);
              +void rd_kafka_topic_conf_set_partitioner_cb(rd_kafka_topic_conf_t *topic_conf, int32_t (*partitioner)(const rd_kafka_topic_t *rkt, const void *keydata, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque));
              +void rd_kafka_topic_conf_set_msg_order_cmp(rd_kafka_topic_conf_t *topic_conf, int (*msg_order_cmp)(const rd_kafka_message_t *a, const rd_kafka_message_t *b));
              +int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, int32_t partition);
              +int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, char *errstr, size_t errstr_size);
              +void rd_kafka_destroy(rd_kafka_t *rk);
              +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags);
              +const char *rd_kafka_name(const rd_kafka_t *rk);
              +rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk);
              +char *rd_kafka_memberid(const rd_kafka_t *rk);
              +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms);
              +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, rd_kafka_topic_conf_t *conf);
              +void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt);
              +const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt);
              +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt);
              +int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms);
              +void rd_kafka_yield(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_pause_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_resume_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high);
              +rd_kafka_resp_err_t rd_kafka_offsets_for_times(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets, int timeout_ms);
              +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr);
              +rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk);
              +void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu);
              +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, const char *topic, int32_t partition);
              +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk);
              +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst);
              +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu);
              +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, int fd, const void *payload, size_t size);
              +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, void (*event_cb)(rd_kafka_t *rk, void *qev_opaque), void *qev_opaque);
              +int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
              +rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, int timeout_ms);
              +rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms);
              +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, int timeout_ms);
              +ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +rd_kafka_resp_err_t rd_kafka_offsets_store(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets);
              +rd_kafka_resp_err_t rd_kafka_subscribe(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *topics);
              +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics);
              +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_assignment(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **partitions);
              +rd_kafka_resp_err_t rd_kafka_commit(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_message(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_queue(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_queue_t *rkqu, void (*cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_committed(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata(rd_kafka_t *rk);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new(const char *group_id);
              +void rd_kafka_consumer_group_metadata_destroy(rd_kafka_consumer_group_metadata_t *);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_write(const rd_kafka_consumer_group_metadata_t *cgmd, void **bufferp, size_t *sizep);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_read(rd_kafka_consumer_group_metadata_t **cgmdp, const void *buffer, size_t size);
              +int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, void *payload, size_t len, const void *key, size_t keylen, void *msg_opaque);
              +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...);
              +rd_kafka_error_t *rd_kafka_produceva(rd_kafka_t *rk, const rd_kafka_vu_t *vus, size_t cnt);
              +int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, rd_kafka_message_t *rkmessages, int message_cnt);
              +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags);
              +typedef struct rd_kafka_metadata_broker {
              +  int32_t id;
              +  char *host;
              +  int port;
              +} rd_kafka_metadata_broker_t;
              +typedef struct rd_kafka_metadata_partition {
              +  int32_t id;
              +  rd_kafka_resp_err_t err;
              +  int32_t leader;
              +  int replica_cnt;
              +  int32_t *replicas;
              +  int isr_cnt;
              +  int32_t *isrs;
              +} rd_kafka_metadata_partition_t;
              +typedef struct rd_kafka_metadata_topic {
              +  char *topic;
              +  int partition_cnt;
              +  struct rd_kafka_metadata_partition *partitions;
              +  rd_kafka_resp_err_t err;
              +} rd_kafka_metadata_topic_t;
              +typedef struct rd_kafka_metadata {
              +  int broker_cnt;
              +  struct rd_kafka_metadata_broker *brokers;
              +  int topic_cnt;
              +  struct rd_kafka_metadata_topic *topics;
              +  int32_t orig_broker_id;
              +  char *orig_broker_name;
              +} rd_kafka_metadata_t;
              +rd_kafka_resp_err_t rd_kafka_metadata(rd_kafka_t *rk, int all_topics, rd_kafka_topic_t *only_rkt, const struct rd_kafka_metadata **metadatap, int timeout_ms);
              +void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata);
              +struct rd_kafka_group_member_info {
              +  char *member_id;
              +  char *client_id;
              +  char *client_host;
              +  void *member_metadata;
              +  int member_metadata_size;
              +  void *member_assignment;
              +  int member_assignment_size;
              +};
              +struct rd_kafka_group_info {
              +  struct rd_kafka_metadata_broker broker;
              +  char *group;
              +  rd_kafka_resp_err_t err;
              +  char *state;
              +  char *protocol_type;
              +  char *protocol;
              +  struct rd_kafka_group_member_info *members;
              +  int member_cnt;
              +};
              +struct rd_kafka_group_list {
              +  struct rd_kafka_group_info *groups;
              +  int group_cnt;
              +};
              +rd_kafka_resp_err_t rd_kafka_list_groups(rd_kafka_t *rk, const char *group, const struct rd_kafka_group_list **grplistp, int timeout_ms);
              +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist);
              +int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist);
              +void rd_kafka_set_logger(rd_kafka_t *rk, void (*func)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_set_log_level(rd_kafka_t *rk, int level);
              +void rd_kafka_log_print(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +int rd_kafka_outq_len(rd_kafka_t *rk);
              +void rd_kafka_dump(FILE *fp, rd_kafka_t *rk);
              +int rd_kafka_thread_cnt(void);
              +typedef enum rd_kafka_thread_type_t {
              +  RD_KAFKA_THREAD_MAIN,
              +  RD_KAFKA_THREAD_BACKGROUND,
              +  RD_KAFKA_THREAD_BROKER,
              +} rd_kafka_thread_type_t;
              +int rd_kafka_wait_destroyed(int timeout_ms);
              +int rd_kafka_unittest(void);
              +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk);
              +typedef int rd_kafka_event_type_t;
              +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev);
              +void rd_kafka_event_destroy(rd_kafka_event_t *rkev);
              +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev);
              +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, const rd_kafka_message_t **rkmessages, size_t size);
              +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev);
              +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev);
              +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev);
              +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev);
              +int rd_kafka_event_log(rd_kafka_event_t *rkev, const char **fac, const char **str, int *level);
              +int rd_kafka_event_debug_contexts(rd_kafka_event_t *rkev, char *dst, size_t dstsize);
              +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_list_t *rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_t *rd_kafka_event_topic_partition(rd_kafka_event_t *rkev);
              +typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t;
              +const rd_kafka_CreateTopics_result_t *rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteTopics_result_t *rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreatePartitions_result_t *rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConfigs_result_t *rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConfigs_result_t *rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev);
              +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms);
              +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms);
              +typedef rd_kafka_resp_err_t (rd_kafka_plugin_f_conf_init_t)(rd_kafka_conf_t *conf, void **plug_opaquep, char *errstr, size_t errstr_size);
              +typedef rd_kafka_conf_res_t (rd_kafka_interceptor_f_on_conf_set_t)(rd_kafka_conf_t *conf, const char *name, const char *val, char *errstr, size_t errstr_size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_dup_t)(rd_kafka_conf_t *new_conf, const rd_kafka_conf_t *old_conf, size_t filter_cnt, const char **filter, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_destroy_t)(void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_new_t)(rd_kafka_t *rk, const rd_kafka_conf_t *conf, void *ic_opaque, char *errstr, size_t errstr_size);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_send_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_acknowledgement_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_consume_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_commit_t)(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_request_sent_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_start_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_exit_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_new_t *on_new, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_destroy_t *on_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_send_t *on_send, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_consume_t *on_consume, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_commit_t *on_commit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres);
              +typedef enum rd_kafka_admin_op_t {
              +  RD_KAFKA_ADMIN_OP_ANY = 0,
              +  RD_KAFKA_ADMIN_OP_CREATETOPICS,
              +  RD_KAFKA_ADMIN_OP_DELETETOPICS,
              +  RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONFIGS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS,
              +  RD_KAFKA_ADMIN_OP__CNT,
              +} rd_kafka_admin_op_t;
              +typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t;
              +rd_kafka_AdminOptions_t *rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
              +void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, int true_or_false, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, int32_t broker_id, char *errstr, size_t errstr_size);
              +void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, void *ev_opaque);
              +typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t;
              +rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, int num_partitions, int replication_factor, char *errstr, size_t errstr_size);
              +void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic);
              +void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, int32_t partition, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, const char *name, const char *value);
              +void rd_kafka_CreateTopics(rd_kafka_t *rk, rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(const rd_kafka_CreateTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
              +rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic);
              +void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic);
              +void rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt);
              +void rd_kafka_DeleteTopics(rd_kafka_t *rk, rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(const rd_kafka_DeleteTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t;
              +rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new(const char *topic, size_t new_total_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts);
              +void rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment(rd_kafka_NewPartitions_t *new_parts, int32_t new_partition_idx, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_CreatePartitions(rd_kafka_t *rk, rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreatePartitions_result_topics(const rd_kafka_CreatePartitions_result_t *result, size_t *cntp);
              +typedef enum rd_kafka_ConfigSource_t {
              +  RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
              +  RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4,
              +  RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5,
              +  RD_KAFKA_CONFIG_SOURCE__CNT,
              +} rd_kafka_ConfigSource_t;
              +const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource);
              +typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t;
              +const char *rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry);
              +const char *rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry);
              +rd_kafka_ConfigSource_t rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, size_t *cntp);
              +typedef enum rd_kafka_ResourceType_t {
              +  RD_KAFKA_RESOURCE_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_ANY = 1,
              +  RD_KAFKA_RESOURCE_TOPIC = 2,
              +  RD_KAFKA_RESOURCE_GROUP = 3,
              +  RD_KAFKA_RESOURCE_BROKER = 4,
              +  RD_KAFKA_RESOURCE__CNT,
              +} rd_kafka_ResourceType_t;
              +const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype);
              +typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
              +rd_kafka_ConfigResource_t *rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, const char *resname);
              +void rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config);
              +void rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, size_t config_cnt);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, const char *name, const char *value);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, size_t *cntp);
              +rd_kafka_ResourceType_t rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config);
              +void rd_kafka_AlterConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_AlterConfigs_result_resources(const rd_kafka_AlterConfigs_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_DescribeConfigs_result_resources(const rd_kafka_DescribeConfigs_result_t *result, size_t *cntp);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token(rd_kafka_t *rk, const char *token_value, int64_t md_lifetime_ms, const char *md_principal_name, const char **extensions, size_t extension_size, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk, const char *errstr);
              +rd_kafka_error_t *rd_kafka_init_transactions(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_send_offsets_to_transaction(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, const rd_kafka_consumer_group_metadata_t *cgmetadata, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_commit_transaction(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_abort_transaction(rd_kafka_t *rk, int timeout_ms);
              +typedef struct rd_kafka_mock_cluster_s rd_kafka_mock_cluster_t;
              +rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new(rd_kafka_t *rk, int broker_cnt);
              +void rd_kafka_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_t *rd_kafka_mock_cluster_handle(const rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_mock_cluster_t *rd_kafka_handle_mock_cluster(const rd_kafka_t *rk);
              +const char *rd_kafka_mock_cluster_bootstraps(const rd_kafka_mock_cluster_t *mcluster);
              +void rd_kafka_mock_push_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, ...);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_push_request_errors(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int16_t ApiKey, size_t cnt, ...);
              +void rd_kafka_mock_topic_set_error(rd_kafka_mock_cluster_t *mcluster, const char *topic, rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_mock_topic_create(rd_kafka_mock_cluster_t *mcluster, const char *topic, int partition_cnt, int replication_factor);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_leader(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int64_t lo, int64_t hi);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_down(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_up(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rtt(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int rtt_ms);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rack(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, const char *rack);
              +rd_kafka_resp_err_t rd_kafka_mock_coordinator_set(rd_kafka_mock_cluster_t *mcluster, const char *key_type, const char *key, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_set_apiversion(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, int16_t MinVersion, int16_t MaxVersion);
              +'
              +
              +rdkafka.h, rdkafka_mock.h
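+
The cdef string that ends just above (covering declarations from rdkafka.h and rdkafka_mock.h) is the kind of definition block PHP's FFI extension can bind against. A minimal, hypothetical sketch of that wiring, assuming the constant is exposed on a Library class and that librdkafka is installed as librdkafka.so.1 (both names are assumptions, not taken from this page):
+
<?php
// Hypothetical sketch, not the library's actual bootstrap code.
// The constant location (\RdKafka\FFI\Library::RD_KAFKA_CDEF) and the shared
// library name are assumptions; adjust both to your installation.
$cdef  = \RdKafka\FFI\Library::RD_KAFKA_CDEF;
$kafka = \FFI::cdef($cdef, 'librdkafka.so.1');

// Once bound, the plain C functions declared in the cdef become callable:
printf("librdkafka %s (0x%08x)\n",
    \FFI::string($kafka->rd_kafka_version_str()),
    $kafka->rd_kafka_version()
);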

              +

              RD_KAFKA_VERSION

              +
              public RD_KAFKA_VERSION = 17105663
              +
              +

              librdkafka version

              +

              Interpreted as hex MM.mm.rr.xx:

• MM = Major
• mm = minor
• rr = revision
• xx = pre-release id (0xff is the final release)

              E.g.: 0x000801ff = 0.8.1

              +
              Remarks
This value should only be used at compile time; for runtime version checks, use rd_kafka_version().
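+
As a hedged illustration (not part of the generated API), the MM.mm.rr.xx layout described above can be unpacked with plain bit shifts; the value 17105663 documented here corresponds to 0x010502ff, i.e. librdkafka 1.5.2 (final release):
+
<?php
// Sketch: decoding the hex-encoded librdkafka version number.
$version = 17105663;                     // == 0x010502ff, the value shown above

$major      = ($version >> 24) & 0xff;   // MM
$minor      = ($version >> 16) & 0xff;   // mm
$revision   = ($version >> 8)  & 0xff;   // rr
$preRelease = $version & 0xff;           // xx, 0xff means final release

printf("librdkafka %d.%d.%d%s\n",
    $major, $minor, $revision,
    $preRelease === 0xff ? '' : "~pre{$preRelease}"
);
// Output: librdkafka 1.5.2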

              RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE

              +
              public RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76
              +
              +

              Unsupported compression type
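+
For completeness, a hedged sketch of how a raw error code such as this one can be resolved to librdkafka's description text through an FFI binding; the one-line cdef and the shared library name below are illustrative only, not this library's own API:
+
<?php
// Illustrative only: a tiny cdef subset instead of the full RD_KAFKA_CDEF string.
// Declaring the enum parameter as a plain int is a simplification for this sketch.
$ffi = \FFI::cdef(
    'const char *rd_kafka_err2str(int err);',
    'librdkafka.so.1'                          // adjust to your platform
);

echo \FFI::string($ffi->rd_kafka_err2str(76)), PHP_EOL;
// prints the human-readable description, e.g. "Broker: Unsupported compression type"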


              RD_KAFKA_RESP_ERR_END_ALL

              +

              public RD_KAFKA_RESP_ERR_END_ALL = 89
              +
              +enum rd_kafka_resp_err_t

              +

              RD_KAFKA_ADMIN_OP__CNT

              +
              public RD_KAFKA_ADMIN_OP__CNT = 6
              +
              +

              Number of ops defined


              RD_KAFKA_CDEF

              +

              public RD_KAFKA_CDEF = 'typedef long int ssize_t;
              +typedef struct _IO_FILE FILE;
              +typedef long int mode_t;
              +typedef signed int int16_t;
              +typedef unsigned int uint16_t;
              +typedef signed int int32_t;
              +typedef signed long int int64_t;
              +int rd_kafka_version(void);
              +const char *rd_kafka_version_str(void);
              +typedef enum rd_kafka_type_t {
              +  RD_KAFKA_PRODUCER,
              +  RD_KAFKA_CONSUMER,
              +} rd_kafka_type_t;
              +typedef enum rd_kafka_timestamp_type_t {
              +  RD_KAFKA_TIMESTAMP_NOT_AVAILABLE,
              +  RD_KAFKA_TIMESTAMP_CREATE_TIME,
              +  RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME,
              +} rd_kafka_timestamp_type_t;
              +const char *rd_kafka_get_debug_contexts(void);
              +typedef struct rd_kafka_s rd_kafka_t;
              +typedef struct rd_kafka_topic_s rd_kafka_topic_t;
              +typedef struct rd_kafka_conf_s rd_kafka_conf_t;
              +typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t;
              +typedef struct rd_kafka_queue_s rd_kafka_queue_t;
              +typedef struct rd_kafka_op_s rd_kafka_event_t;
              +typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t;
              +typedef struct rd_kafka_consumer_group_metadata_s rd_kafka_consumer_group_metadata_t;
              +typedef struct rd_kafka_error_s {
              +  unsigned int code;
              +  char *errstr;
              +  unsigned char fatal;
              +  unsigned char retriable;
              +  unsigned char txn_requires_abort;
              +} rd_kafka_error_t;
              +typedef struct rd_kafka_headers_s rd_kafka_headers_t;
              +typedef enum {
              +  RD_KAFKA_RESP_ERR__BEGIN = (- 200),
              +  RD_KAFKA_RESP_ERR__BAD_MSG = (- 199),
              +  RD_KAFKA_RESP_ERR__BAD_COMPRESSION = (- 198),
              +  RD_KAFKA_RESP_ERR__DESTROY = (- 197),
              +  RD_KAFKA_RESP_ERR__FAIL = (- 196),
              +  RD_KAFKA_RESP_ERR__TRANSPORT = (- 195),
              +  RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = (- 194),
              +  RD_KAFKA_RESP_ERR__RESOLVE = (- 193),
              +  RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = (- 192),
              +  RD_KAFKA_RESP_ERR__PARTITION_EOF = (- 191),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = (- 190),
              +  RD_KAFKA_RESP_ERR__FS = (- 189),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = (- 188),
              +  RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = (- 187),
              +  RD_KAFKA_RESP_ERR__INVALID_ARG = (- 186),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT = (- 185),
              +  RD_KAFKA_RESP_ERR__QUEUE_FULL = (- 184),
              +  RD_KAFKA_RESP_ERR__ISR_INSUFF = (- 183),
              +  RD_KAFKA_RESP_ERR__NODE_UPDATE = (- 182),
              +  RD_KAFKA_RESP_ERR__SSL = (- 181),
              +  RD_KAFKA_RESP_ERR__WAIT_COORD = (- 180),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = (- 179),
              +  RD_KAFKA_RESP_ERR__IN_PROGRESS = (- 178),
              +  RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = (- 177),
              +  RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = (- 176),
              +  RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = (- 175),
              +  RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = (- 174),
              +  RD_KAFKA_RESP_ERR__CONFLICT = (- 173),
              +  RD_KAFKA_RESP_ERR__STATE = (- 172),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = (- 171),
              +  RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = (- 170),
              +  RD_KAFKA_RESP_ERR__AUTHENTICATION = (- 169),
              +  RD_KAFKA_RESP_ERR__NO_OFFSET = (- 168),
              +  RD_KAFKA_RESP_ERR__OUTDATED = (- 167),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = (- 166),
              +  RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = (- 165),
              +  RD_KAFKA_RESP_ERR__WAIT_CACHE = (- 164),
              +  RD_KAFKA_RESP_ERR__INTR = (- 163),
              +  RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = (- 162),
              +  RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = (- 161),
              +  RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = (- 160),
              +  RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = (- 159),
              +  RD_KAFKA_RESP_ERR__PARTIAL = (- 158),
              +  RD_KAFKA_RESP_ERR__READ_ONLY = (- 157),
              +  RD_KAFKA_RESP_ERR__NOENT = (- 156),
              +  RD_KAFKA_RESP_ERR__UNDERFLOW = (- 155),
              +  RD_KAFKA_RESP_ERR__INVALID_TYPE = (- 154),
              +  RD_KAFKA_RESP_ERR__RETRY = (- 153),
              +  RD_KAFKA_RESP_ERR__PURGE_QUEUE = (- 152),
              +  RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = (- 151),
              +  RD_KAFKA_RESP_ERR__FATAL = (- 150),
              +  RD_KAFKA_RESP_ERR__INCONSISTENT = (- 149),
              +  RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = (- 148),
              +  RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = (- 147),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_BROKER = (- 146),
              +  RD_KAFKA_RESP_ERR__NOT_CONFIGURED = (- 145),
              +  RD_KAFKA_RESP_ERR__FENCED = (- 144),
              +  RD_KAFKA_RESP_ERR__APPLICATION = (- 143),
              +  RD_KAFKA_RESP_ERR__END = (- 100),
              +  RD_KAFKA_RESP_ERR_UNKNOWN = (- 1),
              +  RD_KAFKA_RESP_ERR_NO_ERROR = 0,
              +  RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG = 2,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4,
              +  RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5,
              +  RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6,
              +  RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7,
              +  RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8,
              +  RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9,
              +  RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10,
              +  RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11,
              +  RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12,
              +  RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS = 14,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE = 15,
              +  RD_KAFKA_RESP_ERR_NOT_COORDINATOR = 16,
              +  RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17,
              +  RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
              +  RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25,
              +  RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26,
              +  RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27,
              +  RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
              +  RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29,
              +  RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30,
              +  RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
              +  RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35,
              +  RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36,
              +  RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39,
              +  RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40,
              +  RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
              +  RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44,
              +  RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47,
              +  RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49,
              +  RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50,
              +  RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51,
              +  RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52,
              +  RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
              +  RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54,
              +  RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55,
              +  RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56,
              +  RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57,
              +  RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59,
              +  RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66,
              +  RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67,
              +  RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68,
              +  RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69,
              +  RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70,
              +  RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71,
              +  RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72,
              +  RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73,
              +  RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76,
              +  RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77,
              +  RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78,
              +  RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79,
              +  RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80,
              +  RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81,
              +  RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID = 82,
              +  RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE = 83,
              +  RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED = 84,
              +  RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS = 85,
              +  RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC = 86,
              +  RD_KAFKA_RESP_ERR_INVALID_RECORD = 87,
              +  RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT = 88,
              +  RD_KAFKA_RESP_ERR_END_ALL,
              +} rd_kafka_resp_err_t;
              +struct rd_kafka_err_desc {
              +  rd_kafka_resp_err_t code;
              +  char *name;
              +  char *desc;
              +};
              +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, size_t *cntp);
              +const char *rd_kafka_err2str(rd_kafka_resp_err_t err);
              +const char *rd_kafka_err2name(rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_last_error(void);
              +rd_kafka_resp_err_t rd_kafka_errno2err(int errnox);
              +int rd_kafka_errno(void);
              +rd_kafka_resp_err_t rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason);
              +rd_kafka_resp_err_t rd_kafka_error_code(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_name(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_string(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_fatal(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_retriable(const rd_kafka_error_t *error);
              +int rd_kafka_error_txn_requires_abort(const rd_kafka_error_t *error);
              +void rd_kafka_error_destroy(rd_kafka_error_t *error);
              +rd_kafka_error_t *rd_kafka_error_new(rd_kafka_resp_err_t code, const char *fmt, ...);
              +typedef struct rd_kafka_topic_partition_s {
              +  char *topic;
              +  int32_t partition;
              +  int64_t offset;
              +  void *metadata;
              +  size_t metadata_size;
              +  void *opaque;
              +  rd_kafka_resp_err_t err;
              +  void *_private;
              +} rd_kafka_topic_partition_t;
              +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar);
              +typedef struct rd_kafka_topic_partition_list_s {
              +  int cnt;
              +  int size;
              +  rd_kafka_topic_partition_t *elems;
              +} rd_kafka_topic_partition_list_t;
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size);
              +void rd_kafka_topic_partition_list_destroy(rd_kafka_topic_partition_list_t *rkparlist);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_add_range(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t start, int32_t stop);
              +int rd_kafka_topic_partition_list_del(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +int rd_kafka_topic_partition_list_del_by_idx(rd_kafka_topic_partition_list_t *rktparlist, int idx);
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src);
              +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition, int64_t offset);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_sort(rd_kafka_topic_partition_list_t *rktparlist, int (*cmp)(const void *a, const void *b, void *cmp_opaque), void *cmp_opaque);
              +typedef enum rd_kafka_vtype_t {
              +  RD_KAFKA_VTYPE_END,
              +  RD_KAFKA_VTYPE_TOPIC,
              +  RD_KAFKA_VTYPE_RKT,
              +  RD_KAFKA_VTYPE_PARTITION,
              +  RD_KAFKA_VTYPE_VALUE,
              +  RD_KAFKA_VTYPE_KEY,
              +  RD_KAFKA_VTYPE_OPAQUE,
              +  RD_KAFKA_VTYPE_MSGFLAGS,
              +  RD_KAFKA_VTYPE_TIMESTAMP,
              +  RD_KAFKA_VTYPE_HEADER,
              +  RD_KAFKA_VTYPE_HEADERS,
              +} rd_kafka_vtype_t;
              +typedef struct rd_kafka_vu_s {
              +  rd_kafka_vtype_t vtype;
              +  union {
              +    char *cstr;
              +    rd_kafka_topic_t *rkt;
              +    int i;
              +    int32_t i32;
              +    int64_t i64;
              +    struct {
              +      void *ptr;
              +      size_t size;
              +    } mem;
              +    struct {
              +      char *name;
              +      void *val;
              +      ssize_t size;
              +    } header;
              +    rd_kafka_headers_t *headers;
              +    void *ptr;
              +    char _pad[64];
              +  } u;
              +} rd_kafka_vu_t;
              +rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count);
              +void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs);
              +rd_kafka_headers_t *rd_kafka_headers_copy(const rd_kafka_headers_t *src);
              +rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, const char *name, ssize_t name_size, const void *value, ssize_t value_size);
              +rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, const char *name);
              +rd_kafka_resp_err_t rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get(const rd_kafka_headers_t *hdrs, size_t idx, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, size_t idx, const char **namep, const void **valuep, size_t *sizep);
              +typedef struct rd_kafka_message_s {
              +  rd_kafka_resp_err_t err;
              +  rd_kafka_topic_t *rkt;
              +  int32_t partition;
              +  void *payload;
              +  size_t len;
              +  void *key;
              +  size_t key_len;
              +  int64_t offset;
              +  void *_private;
              +} rd_kafka_message_t;
              +void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);
              +const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage);
              +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, rd_kafka_timestamp_type_t *tstype);
              +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage);
              +int32_t rd_kafka_message_broker_id(const rd_kafka_message_t *rkmessage);
              +rd_kafka_resp_err_t rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +rd_kafka_resp_err_t rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t *hdrs);
              +size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs);
              +typedef enum {
              +  RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0,
              +  RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1,
              +  RD_KAFKA_MSG_STATUS_PERSISTED = 2,
              +} rd_kafka_msg_status_t;
              +rd_kafka_msg_status_t rd_kafka_message_status(const rd_kafka_message_t *rkmessage);
              +typedef enum {
              +  RD_KAFKA_CONF_UNKNOWN = (- 2),
              +  RD_KAFKA_CONF_INVALID = (- 1),
              +  RD_KAFKA_CONF_OK = 0,
              +} rd_kafka_conf_res_t;
              +rd_kafka_conf_t *rd_kafka_conf_new(void);
              +void rd_kafka_conf_destroy(rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, size_t filter_cnt, const char **filter);
              +const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk);
              +rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events);
              +void rd_kafka_conf_set_background_event_cb(rd_kafka_conf_t *conf, void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque));
              +void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, void (*dr_cb)(rd_kafka_t *rk, void *payload, size_t len, rd_kafka_resp_err_t err, void *opaque, void *msg_opaque));
              +void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, void (*dr_msg_cb)(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_consume_cb(rd_kafka_conf_t *conf, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_rebalance_cb(rd_kafka_conf_t *conf, void (*rebalance_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque));
              +void rd_kafka_conf_set_offset_commit_cb(rd_kafka_conf_t *conf, void (*offset_commit_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque));
              +void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, void (*error_cb)(rd_kafka_t *rk, int err, const char *reason, void *opaque));
              +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, void (*throttle_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque));
              +void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, void (*log_cb)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque));
              +void rd_kafka_conf_set_oauthbearer_token_refresh_cb(rd_kafka_conf_t *conf, void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, const char *oauthbearer_config, void *opaque));
              +void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, int (*socket_cb)(int domain, int type, int protocol, void *opaque));
              +void rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, int (*connect_cb)(int sockfd, const struct sockaddr *addr, int addrlen, const char *id, void *opaque));
              +void rd_kafka_conf_set_closesocket_cb(rd_kafka_conf_t *conf, int (*closesocket_cb)(int sockfd, void *opaque));
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb(rd_kafka_conf_t *conf, int (*ssl_cert_verify_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int *x509_error, int depth, const char *buf, size_t size, char *errstr, size_t errstr_size, void *opaque));
              +typedef enum rd_kafka_cert_type_t {
              +  RD_KAFKA_CERT_PUBLIC_KEY,
              +  RD_KAFKA_CERT_PRIVATE_KEY,
              +  RD_KAFKA_CERT_CA,
              +  RD_KAFKA_CERT__CNT,
              +} rd_kafka_cert_type_t;
              +typedef enum rd_kafka_cert_enc_t {
              +  RD_KAFKA_CERT_ENC_PKCS12,
              +  RD_KAFKA_CERT_ENC_DER,
              +  RD_KAFKA_CERT_ENC_PEM,
              +  RD_KAFKA_CERT_ENC__CNT,
              +} rd_kafka_cert_enc_t;
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, rd_kafka_cert_type_t cert_type, rd_kafka_cert_enc_t cert_enc, const void *buffer, size_t size, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque);
              +void *rd_kafka_opaque(const rd_kafka_t *rk);
              +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, rd_kafka_topic_conf_t *tconf);
              +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp);
              +const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, size_t *cntp);
              +void rd_kafka_conf_dump_free(const char **arr, size_t cnt);
              +void rd_kafka_conf_properties_show(FILE *fp);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf);
              +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk);
              +void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *rkt_opaque);
              +void rd_kafka_topic_conf_set_partitioner_cb(rd_kafka_topic_conf_t *topic_conf, int32_t (*partitioner)(const rd_kafka_topic_t *rkt, const void *keydata, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque));
              +void rd_kafka_topic_conf_set_msg_order_cmp(rd_kafka_topic_conf_t *topic_conf, int (*msg_order_cmp)(const rd_kafka_message_t *a, const rd_kafka_message_t *b));
              +int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, int32_t partition);
              +int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, char *errstr, size_t errstr_size);
              +void rd_kafka_destroy(rd_kafka_t *rk);
              +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags);
              +const char *rd_kafka_name(const rd_kafka_t *rk);
              +rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk);
              +char *rd_kafka_memberid(const rd_kafka_t *rk);
              +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms);
              +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, rd_kafka_topic_conf_t *conf);
              +void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt);
              +const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt);
              +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt);
              +int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms);
              +void rd_kafka_yield(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_pause_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_resume_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high);
              +rd_kafka_resp_err_t rd_kafka_offsets_for_times(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets, int timeout_ms);
              +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr);
              +rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk);
              +void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu);
              +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, const char *topic, int32_t partition);
              +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk);
              +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst);
              +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu);
              +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, int fd, const void *payload, size_t size);
              +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, void (*event_cb)(rd_kafka_t *rk, void *qev_opaque), void *qev_opaque);
              +int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
              +rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, int timeout_ms);
              +rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms);
              +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, int timeout_ms);
              +ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +rd_kafka_resp_err_t rd_kafka_offsets_store(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets);
              +rd_kafka_resp_err_t rd_kafka_subscribe(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *topics);
              +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics);
              +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_assignment(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **partitions);
              +rd_kafka_resp_err_t rd_kafka_commit(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_message(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_queue(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_queue_t *rkqu, void (*cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_committed(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata(rd_kafka_t *rk);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new(const char *group_id);
              +void rd_kafka_consumer_group_metadata_destroy(rd_kafka_consumer_group_metadata_t *);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_write(const rd_kafka_consumer_group_metadata_t *cgmd, void **bufferp, size_t *sizep);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_read(rd_kafka_consumer_group_metadata_t **cgmdp, const void *buffer, size_t size);
              +int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, void *payload, size_t len, const void *key, size_t keylen, void *msg_opaque);
              +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...);
              +rd_kafka_error_t *rd_kafka_produceva(rd_kafka_t *rk, const rd_kafka_vu_t *vus, size_t cnt);
              +int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, rd_kafka_message_t *rkmessages, int message_cnt);
              +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags);
              +typedef struct rd_kafka_metadata_broker {
              +  int32_t id;
              +  char *host;
              +  int port;
              +} rd_kafka_metadata_broker_t;
              +typedef struct rd_kafka_metadata_partition {
              +  int32_t id;
              +  rd_kafka_resp_err_t err;
              +  int32_t leader;
              +  int replica_cnt;
              +  int32_t *replicas;
              +  int isr_cnt;
              +  int32_t *isrs;
              +} rd_kafka_metadata_partition_t;
              +typedef struct rd_kafka_metadata_topic {
              +  char *topic;
              +  int partition_cnt;
              +  struct rd_kafka_metadata_partition *partitions;
              +  rd_kafka_resp_err_t err;
              +} rd_kafka_metadata_topic_t;
              +typedef struct rd_kafka_metadata {
              +  int broker_cnt;
              +  struct rd_kafka_metadata_broker *brokers;
              +  int topic_cnt;
              +  struct rd_kafka_metadata_topic *topics;
              +  int32_t orig_broker_id;
              +  char *orig_broker_name;
              +} rd_kafka_metadata_t;
              +rd_kafka_resp_err_t rd_kafka_metadata(rd_kafka_t *rk, int all_topics, rd_kafka_topic_t *only_rkt, const struct rd_kafka_metadata **metadatap, int timeout_ms);
              +void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata);
              +struct rd_kafka_group_member_info {
              +  char *member_id;
              +  char *client_id;
              +  char *client_host;
              +  void *member_metadata;
              +  int member_metadata_size;
              +  void *member_assignment;
              +  int member_assignment_size;
              +};
              +struct rd_kafka_group_info {
              +  struct rd_kafka_metadata_broker broker;
              +  char *group;
              +  rd_kafka_resp_err_t err;
              +  char *state;
              +  char *protocol_type;
              +  char *protocol;
              +  struct rd_kafka_group_member_info *members;
              +  int member_cnt;
              +};
              +struct rd_kafka_group_list {
              +  struct rd_kafka_group_info *groups;
              +  int group_cnt;
              +};
              +rd_kafka_resp_err_t rd_kafka_list_groups(rd_kafka_t *rk, const char *group, const struct rd_kafka_group_list **grplistp, int timeout_ms);
              +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist);
              +int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist);
              +void rd_kafka_set_logger(rd_kafka_t *rk, void (*func)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_set_log_level(rd_kafka_t *rk, int level);
              +void rd_kafka_log_print(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +int rd_kafka_outq_len(rd_kafka_t *rk);
              +void rd_kafka_dump(FILE *fp, rd_kafka_t *rk);
              +int rd_kafka_thread_cnt(void);
              +typedef enum rd_kafka_thread_type_t {
              +  RD_KAFKA_THREAD_MAIN,
              +  RD_KAFKA_THREAD_BACKGROUND,
              +  RD_KAFKA_THREAD_BROKER,
              +} rd_kafka_thread_type_t;
              +int rd_kafka_wait_destroyed(int timeout_ms);
              +int rd_kafka_unittest(void);
              +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk);
              +typedef int rd_kafka_event_type_t;
              +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev);
              +void rd_kafka_event_destroy(rd_kafka_event_t *rkev);
              +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev);
              +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, const rd_kafka_message_t **rkmessages, size_t size);
              +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev);
              +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev);
              +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev);
              +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev);
              +int rd_kafka_event_log(rd_kafka_event_t *rkev, const char **fac, const char **str, int *level);
              +int rd_kafka_event_debug_contexts(rd_kafka_event_t *rkev, char *dst, size_t dstsize);
              +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_list_t *rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_t *rd_kafka_event_topic_partition(rd_kafka_event_t *rkev);
              +typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t;
              +const rd_kafka_CreateTopics_result_t *rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteTopics_result_t *rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreatePartitions_result_t *rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConfigs_result_t *rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConfigs_result_t *rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev);
              +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms);
              +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms);
              +typedef rd_kafka_resp_err_t (rd_kafka_plugin_f_conf_init_t)(rd_kafka_conf_t *conf, void **plug_opaquep, char *errstr, size_t errstr_size);
              +typedef rd_kafka_conf_res_t (rd_kafka_interceptor_f_on_conf_set_t)(rd_kafka_conf_t *conf, const char *name, const char *val, char *errstr, size_t errstr_size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_dup_t)(rd_kafka_conf_t *new_conf, const rd_kafka_conf_t *old_conf, size_t filter_cnt, const char **filter, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_destroy_t)(void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_new_t)(rd_kafka_t *rk, const rd_kafka_conf_t *conf, void *ic_opaque, char *errstr, size_t errstr_size);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_send_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_acknowledgement_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_consume_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_commit_t)(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_request_sent_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_start_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_exit_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_new_t *on_new, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_destroy_t *on_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_send_t *on_send, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_consume_t *on_consume, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_commit_t *on_commit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres);
              +typedef enum rd_kafka_admin_op_t {
              +  RD_KAFKA_ADMIN_OP_ANY = 0,
              +  RD_KAFKA_ADMIN_OP_CREATETOPICS,
              +  RD_KAFKA_ADMIN_OP_DELETETOPICS,
              +  RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONFIGS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS,
              +  RD_KAFKA_ADMIN_OP__CNT,
              +} rd_kafka_admin_op_t;
              +typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t;
              +rd_kafka_AdminOptions_t *rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
              +void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, int true_or_false, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, int32_t broker_id, char *errstr, size_t errstr_size);
              +void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, void *ev_opaque);
              +typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t;
              +rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, int num_partitions, int replication_factor, char *errstr, size_t errstr_size);
              +void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic);
              +void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, int32_t partition, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, const char *name, const char *value);
              +void rd_kafka_CreateTopics(rd_kafka_t *rk, rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(const rd_kafka_CreateTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
              +rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic);
              +void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic);
              +void rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt);
              +void rd_kafka_DeleteTopics(rd_kafka_t *rk, rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(const rd_kafka_DeleteTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t;
              +rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new(const char *topic, size_t new_total_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts);
              +void rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment(rd_kafka_NewPartitions_t *new_parts, int32_t new_partition_idx, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_CreatePartitions(rd_kafka_t *rk, rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreatePartitions_result_topics(const rd_kafka_CreatePartitions_result_t *result, size_t *cntp);
              +typedef enum rd_kafka_ConfigSource_t {
              +  RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
              +  RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4,
              +  RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5,
              +  RD_KAFKA_CONFIG_SOURCE__CNT,
              +} rd_kafka_ConfigSource_t;
              +const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource);
              +typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t;
              +const char *rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry);
              +const char *rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry);
              +rd_kafka_ConfigSource_t rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, size_t *cntp);
              +typedef enum rd_kafka_ResourceType_t {
              +  RD_KAFKA_RESOURCE_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_ANY = 1,
              +  RD_KAFKA_RESOURCE_TOPIC = 2,
              +  RD_KAFKA_RESOURCE_GROUP = 3,
              +  RD_KAFKA_RESOURCE_BROKER = 4,
              +  RD_KAFKA_RESOURCE__CNT,
              +} rd_kafka_ResourceType_t;
              +const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype);
              +typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
              +rd_kafka_ConfigResource_t *rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, const char *resname);
              +void rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config);
              +void rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, size_t config_cnt);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, const char *name, const char *value);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, size_t *cntp);
              +rd_kafka_ResourceType_t rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config);
              +void rd_kafka_AlterConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_AlterConfigs_result_resources(const rd_kafka_AlterConfigs_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_DescribeConfigs_result_resources(const rd_kafka_DescribeConfigs_result_t *result, size_t *cntp);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token(rd_kafka_t *rk, const char *token_value, int64_t md_lifetime_ms, const char *md_principal_name, const char **extensions, size_t extension_size, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk, const char *errstr);
              +rd_kafka_error_t *rd_kafka_init_transactions(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_send_offsets_to_transaction(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, const rd_kafka_consumer_group_metadata_t *cgmetadata, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_commit_transaction(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_abort_transaction(rd_kafka_t *rk, int timeout_ms);
              +typedef struct rd_kafka_mock_cluster_s rd_kafka_mock_cluster_t;
              +rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new(rd_kafka_t *rk, int broker_cnt);
              +void rd_kafka_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_t *rd_kafka_mock_cluster_handle(const rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_mock_cluster_t *rd_kafka_handle_mock_cluster(const rd_kafka_t *rk);
              +const char *rd_kafka_mock_cluster_bootstraps(const rd_kafka_mock_cluster_t *mcluster);
              +void rd_kafka_mock_push_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, ...);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_push_request_errors(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int16_t ApiKey, size_t cnt, ...);
              +void rd_kafka_mock_topic_set_error(rd_kafka_mock_cluster_t *mcluster, const char *topic, rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_mock_topic_create(rd_kafka_mock_cluster_t *mcluster, const char *topic, int partition_cnt, int replication_factor);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_leader(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int64_t lo, int64_t hi);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_down(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_up(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rtt(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int rtt_ms);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rack(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, const char *rack);
              +rd_kafka_resp_err_t rd_kafka_mock_coordinator_set(rd_kafka_mock_cluster_t *mcluster, const char *key_type, const char *key, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_set_apiversion(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, int16_t MinVersion, int16_t MaxVersion);
              +'

rdkafka.h, rdkafka_mock.h
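Since the value above is a plain cdef string of C prototypes, it is in the form accepted by PHP's FFI::cdef(). The following is a minimal sketch, not the library's own bootstrap code: it hand-declares a single function (RD_KAFKA_CDEF covers the full API in the same format), and the shared-library name librdkafka.so.1 is an assumption that depends on the system.

<?php
// Minimal sketch: bind one librdkafka function via PHP FFI.
// Assumptions: librdkafka is installed and resolvable as "librdkafka.so.1";
// in practice the full RD_KAFKA_CDEF string would be passed instead of this
// one-line cdef to expose the whole API.
$ffi = \FFI::cdef('int rd_kafka_version(void);', 'librdkafka.so.1');

// rd_kafka_version() returns the runtime version as a hex-encoded integer,
// using the same MM.mm.rr.xx layout as the RD_KAFKA_VERSION constant below.
printf("runtime librdkafka version: 0x%08x\n", $ffi->rd_kafka_version());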

RD_KAFKA_VERSION

public RD_KAFKA_VERSION = 17105919

librdkafka version

Interpreted as hex MM.mm.rr.xx:

• MM = Major
• mm = minor
• rr = revision
• xx = pre-release id (0xff is the final release)

E.g.: 0x000801ff = 0.8.1

Remarks
This value should only be used at compile time; for runtime version checks use rd_kafka_version().
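A short sketch of how the hex layout can be unpacked in PHP; the arithmetic follows the MM.mm.rr.xx description above, and 17105919 is simply the constant's documented value (0x010503ff, i.e. librdkafka 1.5.3).

<?php
// Minimal sketch: decode a librdkafka version integer (hex MM.mm.rr.xx).
// 17105919 is the documented RD_KAFKA_VERSION value, i.e. 0x010503ff.
$version = 17105919;

$major      = ($version >> 24) & 0xff; // MM -> 1
$minor      = ($version >> 16) & 0xff; // mm -> 5
$revision   = ($version >> 8) & 0xff;  // rr -> 3
$prerelease = $version & 0xff;         // xx -> 0xff (final release)

printf(
    "librdkafka %d.%d.%d%s\n",
    $major,
    $minor,
    $revision,
    $prerelease === 0xff ? '' : sprintf('-pre%d', $prerelease)
);

As the remark notes, this constant reflects the headers the binding was built against, while rd_kafka_version() (see the FFI sketch above) reports the version actually loaded at runtime.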

RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE

public RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76

Unsupported compression type

RD_KAFKA_RESP_ERR_END_ALL

public RD_KAFKA_RESP_ERR_END_ALL = 89

enum rd_kafka_resp_err_t

RD_KAFKA_ADMIN_OP__CNT

public RD_KAFKA_ADMIN_OP__CNT = 6

Number of ops defined

RD_KAFKA_CDEF

public RD_KAFKA_CDEF = 'typedef long int ssize_t;
              +typedef struct _IO_FILE FILE;
              +typedef long int mode_t;
              +typedef signed int int16_t;
              +typedef unsigned int uint16_t;
              +typedef signed int int32_t;
              +typedef signed long int int64_t;
              +int rd_kafka_version(void);
              +const char *rd_kafka_version_str(void);
              +typedef enum rd_kafka_type_t {
              +  RD_KAFKA_PRODUCER,
              +  RD_KAFKA_CONSUMER,
              +} rd_kafka_type_t;
              +typedef enum rd_kafka_timestamp_type_t {
              +  RD_KAFKA_TIMESTAMP_NOT_AVAILABLE,
              +  RD_KAFKA_TIMESTAMP_CREATE_TIME,
              +  RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME,
              +} rd_kafka_timestamp_type_t;
              +const char *rd_kafka_get_debug_contexts(void);
              +typedef struct rd_kafka_s rd_kafka_t;
              +typedef struct rd_kafka_topic_s rd_kafka_topic_t;
              +typedef struct rd_kafka_conf_s rd_kafka_conf_t;
              +typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t;
              +typedef struct rd_kafka_queue_s rd_kafka_queue_t;
              +typedef struct rd_kafka_op_s rd_kafka_event_t;
              +typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t;
              +typedef struct rd_kafka_consumer_group_metadata_s rd_kafka_consumer_group_metadata_t;
              +typedef struct rd_kafka_error_s {
              +  unsigned int code;
              +  char *errstr;
              +  unsigned char fatal;
              +  unsigned char retriable;
              +  unsigned char txn_requires_abort;
              +} rd_kafka_error_t;
              +typedef struct rd_kafka_headers_s rd_kafka_headers_t;
              +typedef enum {
              +  RD_KAFKA_RESP_ERR__BEGIN = (- 200),
              +  RD_KAFKA_RESP_ERR__BAD_MSG = (- 199),
              +  RD_KAFKA_RESP_ERR__BAD_COMPRESSION = (- 198),
              +  RD_KAFKA_RESP_ERR__DESTROY = (- 197),
              +  RD_KAFKA_RESP_ERR__FAIL = (- 196),
              +  RD_KAFKA_RESP_ERR__TRANSPORT = (- 195),
              +  RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = (- 194),
              +  RD_KAFKA_RESP_ERR__RESOLVE = (- 193),
              +  RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = (- 192),
              +  RD_KAFKA_RESP_ERR__PARTITION_EOF = (- 191),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = (- 190),
              +  RD_KAFKA_RESP_ERR__FS = (- 189),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = (- 188),
              +  RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = (- 187),
              +  RD_KAFKA_RESP_ERR__INVALID_ARG = (- 186),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT = (- 185),
              +  RD_KAFKA_RESP_ERR__QUEUE_FULL = (- 184),
              +  RD_KAFKA_RESP_ERR__ISR_INSUFF = (- 183),
              +  RD_KAFKA_RESP_ERR__NODE_UPDATE = (- 182),
              +  RD_KAFKA_RESP_ERR__SSL = (- 181),
              +  RD_KAFKA_RESP_ERR__WAIT_COORD = (- 180),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = (- 179),
              +  RD_KAFKA_RESP_ERR__IN_PROGRESS = (- 178),
              +  RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = (- 177),
              +  RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = (- 176),
              +  RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = (- 175),
              +  RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = (- 174),
              +  RD_KAFKA_RESP_ERR__CONFLICT = (- 173),
              +  RD_KAFKA_RESP_ERR__STATE = (- 172),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = (- 171),
              +  RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = (- 170),
              +  RD_KAFKA_RESP_ERR__AUTHENTICATION = (- 169),
              +  RD_KAFKA_RESP_ERR__NO_OFFSET = (- 168),
              +  RD_KAFKA_RESP_ERR__OUTDATED = (- 167),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = (- 166),
              +  RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = (- 165),
              +  RD_KAFKA_RESP_ERR__WAIT_CACHE = (- 164),
              +  RD_KAFKA_RESP_ERR__INTR = (- 163),
              +  RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = (- 162),
              +  RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = (- 161),
              +  RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = (- 160),
              +  RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = (- 159),
              +  RD_KAFKA_RESP_ERR__PARTIAL = (- 158),
              +  RD_KAFKA_RESP_ERR__READ_ONLY = (- 157),
              +  RD_KAFKA_RESP_ERR__NOENT = (- 156),
              +  RD_KAFKA_RESP_ERR__UNDERFLOW = (- 155),
              +  RD_KAFKA_RESP_ERR__INVALID_TYPE = (- 154),
              +  RD_KAFKA_RESP_ERR__RETRY = (- 153),
              +  RD_KAFKA_RESP_ERR__PURGE_QUEUE = (- 152),
              +  RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = (- 151),
              +  RD_KAFKA_RESP_ERR__FATAL = (- 150),
              +  RD_KAFKA_RESP_ERR__INCONSISTENT = (- 149),
              +  RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = (- 148),
              +  RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = (- 147),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_BROKER = (- 146),
              +  RD_KAFKA_RESP_ERR__NOT_CONFIGURED = (- 145),
              +  RD_KAFKA_RESP_ERR__FENCED = (- 144),
              +  RD_KAFKA_RESP_ERR__APPLICATION = (- 143),
              +  RD_KAFKA_RESP_ERR__END = (- 100),
              +  RD_KAFKA_RESP_ERR_UNKNOWN = (- 1),
              +  RD_KAFKA_RESP_ERR_NO_ERROR = 0,
              +  RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG = 2,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4,
              +  RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5,
              +  RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6,
              +  RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7,
              +  RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8,
              +  RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9,
              +  RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10,
              +  RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11,
              +  RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12,
              +  RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS = 14,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE = 15,
              +  RD_KAFKA_RESP_ERR_NOT_COORDINATOR = 16,
              +  RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17,
              +  RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
              +  RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25,
              +  RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26,
              +  RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27,
              +  RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
              +  RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29,
              +  RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30,
              +  RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
              +  RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35,
              +  RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36,
              +  RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39,
              +  RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40,
              +  RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
              +  RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44,
              +  RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47,
              +  RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49,
              +  RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50,
              +  RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51,
              +  RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52,
              +  RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
              +  RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54,
              +  RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55,
              +  RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56,
              +  RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57,
              +  RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59,
              +  RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66,
              +  RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67,
              +  RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68,
              +  RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69,
              +  RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70,
              +  RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71,
              +  RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72,
              +  RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73,
              +  RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76,
              +  RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77,
              +  RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78,
              +  RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79,
              +  RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80,
              +  RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81,
              +  RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID = 82,
              +  RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE = 83,
              +  RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED = 84,
              +  RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS = 85,
              +  RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC = 86,
              +  RD_KAFKA_RESP_ERR_INVALID_RECORD = 87,
              +  RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT = 88,
              +  RD_KAFKA_RESP_ERR_END_ALL,
              +} rd_kafka_resp_err_t;
              +struct rd_kafka_err_desc {
              +  rd_kafka_resp_err_t code;
              +  char *name;
              +  char *desc;
              +};
              +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, size_t *cntp);
              +const char *rd_kafka_err2str(rd_kafka_resp_err_t err);
              +const char *rd_kafka_err2name(rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_last_error(void);
              +rd_kafka_resp_err_t rd_kafka_errno2err(int errnox);
              +int rd_kafka_errno(void);
              +rd_kafka_resp_err_t rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason);
              +rd_kafka_resp_err_t rd_kafka_error_code(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_name(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_string(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_fatal(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_retriable(const rd_kafka_error_t *error);
              +int rd_kafka_error_txn_requires_abort(const rd_kafka_error_t *error);
              +void rd_kafka_error_destroy(rd_kafka_error_t *error);
              +rd_kafka_error_t *rd_kafka_error_new(rd_kafka_resp_err_t code, const char *fmt, ...);
              +typedef struct rd_kafka_topic_partition_s {
              +  char *topic;
              +  int32_t partition;
              +  int64_t offset;
              +  void *metadata;
              +  size_t metadata_size;
              +  void *opaque;
              +  rd_kafka_resp_err_t err;
              +  void *_private;
              +} rd_kafka_topic_partition_t;
              +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar);
              +typedef struct rd_kafka_topic_partition_list_s {
              +  int cnt;
              +  int size;
              +  rd_kafka_topic_partition_t *elems;
              +} rd_kafka_topic_partition_list_t;
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size);
              +void rd_kafka_topic_partition_list_destroy(rd_kafka_topic_partition_list_t *rkparlist);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_add_range(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t start, int32_t stop);
              +int rd_kafka_topic_partition_list_del(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +int rd_kafka_topic_partition_list_del_by_idx(rd_kafka_topic_partition_list_t *rktparlist, int idx);
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src);
              +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition, int64_t offset);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_sort(rd_kafka_topic_partition_list_t *rktparlist, int (*cmp)(const void *a, const void *b, void *cmp_opaque), void *cmp_opaque);
              +typedef enum rd_kafka_vtype_t {
              +  RD_KAFKA_VTYPE_END,
              +  RD_KAFKA_VTYPE_TOPIC,
              +  RD_KAFKA_VTYPE_RKT,
              +  RD_KAFKA_VTYPE_PARTITION,
              +  RD_KAFKA_VTYPE_VALUE,
              +  RD_KAFKA_VTYPE_KEY,
              +  RD_KAFKA_VTYPE_OPAQUE,
              +  RD_KAFKA_VTYPE_MSGFLAGS,
              +  RD_KAFKA_VTYPE_TIMESTAMP,
              +  RD_KAFKA_VTYPE_HEADER,
              +  RD_KAFKA_VTYPE_HEADERS,
              +} rd_kafka_vtype_t;
              +typedef struct rd_kafka_vu_s {
              +  rd_kafka_vtype_t vtype;
              +  union {
              +    char *cstr;
              +    rd_kafka_topic_t *rkt;
              +    int i;
              +    int32_t i32;
              +    int64_t i64;
              +    struct {
              +      void *ptr;
              +      size_t size;
              +    } mem;
              +    struct {
              +      char *name;
              +      void *val;
              +      ssize_t size;
              +    } header;
              +    rd_kafka_headers_t *headers;
              +    void *ptr;
              +    char _pad[64];
              +  } u;
              +} rd_kafka_vu_t;
              +rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count);
              +void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs);
              +rd_kafka_headers_t *rd_kafka_headers_copy(const rd_kafka_headers_t *src);
              +rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, const char *name, ssize_t name_size, const void *value, ssize_t value_size);
              +rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, const char *name);
              +rd_kafka_resp_err_t rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get(const rd_kafka_headers_t *hdrs, size_t idx, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, size_t idx, const char **namep, const void **valuep, size_t *sizep);
              +typedef struct rd_kafka_message_s {
              +  rd_kafka_resp_err_t err;
              +  rd_kafka_topic_t *rkt;
              +  int32_t partition;
              +  void *payload;
              +  size_t len;
              +  void *key;
              +  size_t key_len;
              +  int64_t offset;
              +  void *_private;
              +} rd_kafka_message_t;
              +void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);
              +const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage);
              +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, rd_kafka_timestamp_type_t *tstype);
              +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage);
              +int32_t rd_kafka_message_broker_id(const rd_kafka_message_t *rkmessage);
              +rd_kafka_resp_err_t rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +rd_kafka_resp_err_t rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t *hdrs);
              +size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs);
              +typedef enum {
              +  RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0,
              +  RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1,
              +  RD_KAFKA_MSG_STATUS_PERSISTED = 2,
              +} rd_kafka_msg_status_t;
              +rd_kafka_msg_status_t rd_kafka_message_status(const rd_kafka_message_t *rkmessage);
              +typedef enum {
              +  RD_KAFKA_CONF_UNKNOWN = (- 2),
              +  RD_KAFKA_CONF_INVALID = (- 1),
              +  RD_KAFKA_CONF_OK = 0,
              +} rd_kafka_conf_res_t;
              +rd_kafka_conf_t *rd_kafka_conf_new(void);
              +void rd_kafka_conf_destroy(rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, size_t filter_cnt, const char **filter);
              +const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk);
              +rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events);
              +void rd_kafka_conf_set_background_event_cb(rd_kafka_conf_t *conf, void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque));
              +void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, void (*dr_cb)(rd_kafka_t *rk, void *payload, size_t len, rd_kafka_resp_err_t err, void *opaque, void *msg_opaque));
              +void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, void (*dr_msg_cb)(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_consume_cb(rd_kafka_conf_t *conf, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_rebalance_cb(rd_kafka_conf_t *conf, void (*rebalance_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque));
              +void rd_kafka_conf_set_offset_commit_cb(rd_kafka_conf_t *conf, void (*offset_commit_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque));
              +void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, void (*error_cb)(rd_kafka_t *rk, int err, const char *reason, void *opaque));
              +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, void (*throttle_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque));
              +void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, void (*log_cb)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque));
              +void rd_kafka_conf_set_oauthbearer_token_refresh_cb(rd_kafka_conf_t *conf, void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, const char *oauthbearer_config, void *opaque));
              +void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, int (*socket_cb)(int domain, int type, int protocol, void *opaque));
              +void rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, int (*connect_cb)(int sockfd, const struct sockaddr *addr, int addrlen, const char *id, void *opaque));
              +void rd_kafka_conf_set_closesocket_cb(rd_kafka_conf_t *conf, int (*closesocket_cb)(int sockfd, void *opaque));
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb(rd_kafka_conf_t *conf, int (*ssl_cert_verify_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int *x509_error, int depth, const char *buf, size_t size, char *errstr, size_t errstr_size, void *opaque));
              +typedef enum rd_kafka_cert_type_t {
              +  RD_KAFKA_CERT_PUBLIC_KEY,
              +  RD_KAFKA_CERT_PRIVATE_KEY,
              +  RD_KAFKA_CERT_CA,
              +  RD_KAFKA_CERT__CNT,
              +} rd_kafka_cert_type_t;
              +typedef enum rd_kafka_cert_enc_t {
              +  RD_KAFKA_CERT_ENC_PKCS12,
              +  RD_KAFKA_CERT_ENC_DER,
              +  RD_KAFKA_CERT_ENC_PEM,
              +  RD_KAFKA_CERT_ENC__CNT,
              +} rd_kafka_cert_enc_t;
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, rd_kafka_cert_type_t cert_type, rd_kafka_cert_enc_t cert_enc, const void *buffer, size_t size, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque);
              +void *rd_kafka_opaque(const rd_kafka_t *rk);
              +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, rd_kafka_topic_conf_t *tconf);
              +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp);
              +const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, size_t *cntp);
              +void rd_kafka_conf_dump_free(const char **arr, size_t cnt);
              +void rd_kafka_conf_properties_show(FILE *fp);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf);
              +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk);
              +void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *rkt_opaque);
              +void rd_kafka_topic_conf_set_partitioner_cb(rd_kafka_topic_conf_t *topic_conf, int32_t (*partitioner)(const rd_kafka_topic_t *rkt, const void *keydata, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque));
              +void rd_kafka_topic_conf_set_msg_order_cmp(rd_kafka_topic_conf_t *topic_conf, int (*msg_order_cmp)(const rd_kafka_message_t *a, const rd_kafka_message_t *b));
              +int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, int32_t partition);
              +int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, char *errstr, size_t errstr_size);
              +void rd_kafka_destroy(rd_kafka_t *rk);
              +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags);
              +const char *rd_kafka_name(const rd_kafka_t *rk);
              +rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk);
              +char *rd_kafka_memberid(const rd_kafka_t *rk);
              +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms);
              +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, rd_kafka_topic_conf_t *conf);
              +void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt);
              +const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt);
              +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt);
              +int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms);
              +void rd_kafka_yield(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_pause_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_resume_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high);
              +rd_kafka_resp_err_t rd_kafka_offsets_for_times(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets, int timeout_ms);
              +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr);
              +rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk);
              +void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu);
              +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, const char *topic, int32_t partition);
              +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk);
              +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst);
              +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu);
              +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, int fd, const void *payload, size_t size);
              +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, void (*event_cb)(rd_kafka_t *rk, void *qev_opaque), void *qev_opaque);
              +int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
              +rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, int timeout_ms);
              +rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms);
              +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, int timeout_ms);
              +ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +rd_kafka_resp_err_t rd_kafka_offsets_store(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets);
              +rd_kafka_resp_err_t rd_kafka_subscribe(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *topics);
              +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics);
              +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_assignment(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **partitions);
              +rd_kafka_resp_err_t rd_kafka_commit(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_message(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_queue(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_queue_t *rkqu, void (*cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_committed(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata(rd_kafka_t *rk);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new(const char *group_id);
              +void rd_kafka_consumer_group_metadata_destroy(rd_kafka_consumer_group_metadata_t *);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_write(const rd_kafka_consumer_group_metadata_t *cgmd, void **bufferp, size_t *sizep);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_read(rd_kafka_consumer_group_metadata_t **cgmdp, const void *buffer, size_t size);
              +int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, void *payload, size_t len, const void *key, size_t keylen, void *msg_opaque);
              +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...);
              +rd_kafka_error_t *rd_kafka_produceva(rd_kafka_t *rk, const rd_kafka_vu_t *vus, size_t cnt);
              +int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, rd_kafka_message_t *rkmessages, int message_cnt);
              +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags);
              +typedef struct rd_kafka_metadata_broker {
              +  int32_t id;
              +  char *host;
              +  int port;
              +} rd_kafka_metadata_broker_t;
              +typedef struct rd_kafka_metadata_partition {
              +  int32_t id;
              +  rd_kafka_resp_err_t err;
              +  int32_t leader;
              +  int replica_cnt;
              +  int32_t *replicas;
              +  int isr_cnt;
              +  int32_t *isrs;
              +} rd_kafka_metadata_partition_t;
              +typedef struct rd_kafka_metadata_topic {
              +  char *topic;
              +  int partition_cnt;
              +  struct rd_kafka_metadata_partition *partitions;
              +  rd_kafka_resp_err_t err;
              +} rd_kafka_metadata_topic_t;
              +typedef struct rd_kafka_metadata {
              +  int broker_cnt;
              +  struct rd_kafka_metadata_broker *brokers;
              +  int topic_cnt;
              +  struct rd_kafka_metadata_topic *topics;
              +  int32_t orig_broker_id;
              +  char *orig_broker_name;
              +} rd_kafka_metadata_t;
              +rd_kafka_resp_err_t rd_kafka_metadata(rd_kafka_t *rk, int all_topics, rd_kafka_topic_t *only_rkt, const struct rd_kafka_metadata **metadatap, int timeout_ms);
              +void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata);
              +struct rd_kafka_group_member_info {
              +  char *member_id;
              +  char *client_id;
              +  char *client_host;
              +  void *member_metadata;
              +  int member_metadata_size;
              +  void *member_assignment;
              +  int member_assignment_size;
              +};
              +struct rd_kafka_group_info {
              +  struct rd_kafka_metadata_broker broker;
              +  char *group;
              +  rd_kafka_resp_err_t err;
              +  char *state;
              +  char *protocol_type;
              +  char *protocol;
              +  struct rd_kafka_group_member_info *members;
              +  int member_cnt;
              +};
              +struct rd_kafka_group_list {
              +  struct rd_kafka_group_info *groups;
              +  int group_cnt;
              +};
              +rd_kafka_resp_err_t rd_kafka_list_groups(rd_kafka_t *rk, const char *group, const struct rd_kafka_group_list **grplistp, int timeout_ms);
              +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist);
              +int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist);
              +void rd_kafka_set_logger(rd_kafka_t *rk, void (*func)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_set_log_level(rd_kafka_t *rk, int level);
              +void rd_kafka_log_print(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +int rd_kafka_outq_len(rd_kafka_t *rk);
              +void rd_kafka_dump(FILE *fp, rd_kafka_t *rk);
              +int rd_kafka_thread_cnt(void);
              +typedef enum rd_kafka_thread_type_t {
              +  RD_KAFKA_THREAD_MAIN,
              +  RD_KAFKA_THREAD_BACKGROUND,
              +  RD_KAFKA_THREAD_BROKER,
              +} rd_kafka_thread_type_t;
              +int rd_kafka_wait_destroyed(int timeout_ms);
              +int rd_kafka_unittest(void);
              +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk);
              +typedef int rd_kafka_event_type_t;
              +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev);
              +void rd_kafka_event_destroy(rd_kafka_event_t *rkev);
              +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev);
              +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, const rd_kafka_message_t **rkmessages, size_t size);
              +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev);
              +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev);
              +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev);
              +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev);
              +int rd_kafka_event_log(rd_kafka_event_t *rkev, const char **fac, const char **str, int *level);
              +int rd_kafka_event_debug_contexts(rd_kafka_event_t *rkev, char *dst, size_t dstsize);
              +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_list_t *rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_t *rd_kafka_event_topic_partition(rd_kafka_event_t *rkev);
              +typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t;
              +const rd_kafka_CreateTopics_result_t *rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteTopics_result_t *rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreatePartitions_result_t *rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConfigs_result_t *rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConfigs_result_t *rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev);
              +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms);
              +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms);
              +typedef rd_kafka_resp_err_t (rd_kafka_plugin_f_conf_init_t)(rd_kafka_conf_t *conf, void **plug_opaquep, char *errstr, size_t errstr_size);
              +typedef rd_kafka_conf_res_t (rd_kafka_interceptor_f_on_conf_set_t)(rd_kafka_conf_t *conf, const char *name, const char *val, char *errstr, size_t errstr_size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_dup_t)(rd_kafka_conf_t *new_conf, const rd_kafka_conf_t *old_conf, size_t filter_cnt, const char **filter, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_destroy_t)(void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_new_t)(rd_kafka_t *rk, const rd_kafka_conf_t *conf, void *ic_opaque, char *errstr, size_t errstr_size);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_send_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_acknowledgement_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_consume_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_commit_t)(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_request_sent_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_start_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_exit_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_new_t *on_new, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_destroy_t *on_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_send_t *on_send, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_consume_t *on_consume, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_commit_t *on_commit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres);
              +typedef enum rd_kafka_admin_op_t {
              +  RD_KAFKA_ADMIN_OP_ANY = 0,
              +  RD_KAFKA_ADMIN_OP_CREATETOPICS,
              +  RD_KAFKA_ADMIN_OP_DELETETOPICS,
              +  RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONFIGS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS,
              +  RD_KAFKA_ADMIN_OP__CNT,
              +} rd_kafka_admin_op_t;
              +typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t;
              +rd_kafka_AdminOptions_t *rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
              +void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, int true_or_false, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, int32_t broker_id, char *errstr, size_t errstr_size);
              +void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, void *ev_opaque);
              +typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t;
              +rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, int num_partitions, int replication_factor, char *errstr, size_t errstr_size);
              +void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic);
              +void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, int32_t partition, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, const char *name, const char *value);
              +void rd_kafka_CreateTopics(rd_kafka_t *rk, rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(const rd_kafka_CreateTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
              +rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic);
              +void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic);
              +void rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt);
              +void rd_kafka_DeleteTopics(rd_kafka_t *rk, rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(const rd_kafka_DeleteTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t;
              +rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new(const char *topic, size_t new_total_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts);
              +void rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment(rd_kafka_NewPartitions_t *new_parts, int32_t new_partition_idx, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_CreatePartitions(rd_kafka_t *rk, rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreatePartitions_result_topics(const rd_kafka_CreatePartitions_result_t *result, size_t *cntp);
              +typedef enum rd_kafka_ConfigSource_t {
              +  RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
              +  RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4,
              +  RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5,
              +  RD_KAFKA_CONFIG_SOURCE__CNT,
              +} rd_kafka_ConfigSource_t;
              +const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource);
              +typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t;
              +const char *rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry);
              +const char *rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry);
              +rd_kafka_ConfigSource_t rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, size_t *cntp);
              +typedef enum rd_kafka_ResourceType_t {
              +  RD_KAFKA_RESOURCE_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_ANY = 1,
              +  RD_KAFKA_RESOURCE_TOPIC = 2,
              +  RD_KAFKA_RESOURCE_GROUP = 3,
              +  RD_KAFKA_RESOURCE_BROKER = 4,
              +  RD_KAFKA_RESOURCE__CNT,
              +} rd_kafka_ResourceType_t;
              +const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype);
              +typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
              +rd_kafka_ConfigResource_t *rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, const char *resname);
              +void rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config);
              +void rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, size_t config_cnt);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, const char *name, const char *value);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, size_t *cntp);
              +rd_kafka_ResourceType_t rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config);
              +void rd_kafka_AlterConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_AlterConfigs_result_resources(const rd_kafka_AlterConfigs_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_DescribeConfigs_result_resources(const rd_kafka_DescribeConfigs_result_t *result, size_t *cntp);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token(rd_kafka_t *rk, const char *token_value, int64_t md_lifetime_ms, const char *md_principal_name, const char **extensions, size_t extension_size, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk, const char *errstr);
              +rd_kafka_error_t *rd_kafka_init_transactions(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_send_offsets_to_transaction(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, const rd_kafka_consumer_group_metadata_t *cgmetadata, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_commit_transaction(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_abort_transaction(rd_kafka_t *rk, int timeout_ms);
              +typedef struct rd_kafka_mock_cluster_s rd_kafka_mock_cluster_t;
              +rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new(rd_kafka_t *rk, int broker_cnt);
              +void rd_kafka_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_t *rd_kafka_mock_cluster_handle(const rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_mock_cluster_t *rd_kafka_handle_mock_cluster(const rd_kafka_t *rk);
              +const char *rd_kafka_mock_cluster_bootstraps(const rd_kafka_mock_cluster_t *mcluster);
              +void rd_kafka_mock_push_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, ...);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_push_request_errors(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int16_t ApiKey, size_t cnt, ...);
              +void rd_kafka_mock_topic_set_error(rd_kafka_mock_cluster_t *mcluster, const char *topic, rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_mock_topic_create(rd_kafka_mock_cluster_t *mcluster, const char *topic, int partition_cnt, int replication_factor);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_leader(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int64_t lo, int64_t hi);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_down(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_up(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rtt(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int rtt_ms);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rack(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, const char *rack);
              +rd_kafka_resp_err_t rd_kafka_mock_coordinator_set(rd_kafka_mock_cluster_t *mcluster, const char *key_type, const char *key, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_set_apiversion(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, int16_t MinVersion, int16_t MaxVersion);
              +'
              +
              +rdkafka.h, rdkafka_mock.h

              +
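The RD_KAFKA_CDEF constant above appears to be the block of C declarations (taken from rdkafka.h and rdkafka_mock.h) that the binding hands to PHP's FFI layer. As an illustration only, loading it manually with FFI::cdef() might look like the sketch below; the constant's exact location, its visibility, and the shared-library file name are assumptions, not something this page states.

<?php
// Minimal sketch, not the library's own bootstrap code: it assumes the
// RD_KAFKA_CDEF string shown on this page is reachable as a constant and
// that librdkafka is installed as librdkafka.so.1.
$ffi = \FFI::cdef(RD_KAFKA_CDEF, 'librdkafka.so.1');

// Every function declared in the cdef block becomes callable on the handle,
// e.g. the version helpers declared near its top:
echo \FFI::string($ffi->rd_kafka_version_str()), PHP_EOL; // e.g. "1.6.0"
printf("0x%08x\n", $ffi->rd_kafka_version());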

              RD_KAFKA_VERSION

              +
              public RD_KAFKA_VERSION = 17170687
              +
              +

              librdkafka version

              +

              Interpreted as hex MM.mm.rr.xx:

• MM = Major
• mm = minor
• rr = revision
• xx = pre-release id (0xff is the final release)

              E.g.: 0x000801ff = 0.8.1

              +
              Remarks
This value should only be used at compile time; for runtime version checks, use rd_kafka_version().
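As a quick illustration of the hex layout described above, a small PHP helper (hypothetical, not part of the library) can split the packed integer back into its components. At runtime, rd_kafka_version_str() already returns the human-readable string, so this decoding mainly matters for compile-time constants such as RD_KAFKA_VERSION.

<?php
// Minimal sketch: decode the MM.mm.rr.xx layout documented above.
// The function name is illustrative and not part of the library API.
function decodeRdKafkaVersion(int $version): string
{
    $major      = ($version >> 24) & 0xff; // MM
    $minor      = ($version >> 16) & 0xff; // mm
    $revision   = ($version >> 8) & 0xff;  // rr
    $prerelease = $version & 0xff;         // xx (0xff means final release)

    return sprintf('%d.%d.%d', $major, $minor, $revision)
        . ($prerelease === 0xff ? '' : sprintf('-pre%d', $prerelease));
}

// 17170687 === 0x010600ff, i.e. librdkafka 1.6.0 (final release).
echo decodeRdKafkaVersion(17170687), PHP_EOL; // "1.6.0"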

              RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE

              +
              public RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76
              +
              +

              Unsupported compression type


              RD_KAFKA_RESP_ERR_END_ALL

              +

              public RD_KAFKA_RESP_ERR_END_ALL = 98
              +
              +enum rd_kafka_resp_err_t

              +

              RD_KAFKA_ADMIN_OP__CNT

              +
              public RD_KAFKA_ADMIN_OP__CNT = 9
              +
              +

              Number of ops defined


              RD_KAFKA_CDEF

              +

              public RD_KAFKA_CDEF = 'typedef long int ssize_t;
              +typedef struct _IO_FILE FILE;
              +typedef long int mode_t;
              +typedef signed int int16_t;
              +typedef unsigned int uint16_t;
              +typedef signed int int32_t;
              +typedef signed long int int64_t;
              +int rd_kafka_version(void);
              +const char *rd_kafka_version_str(void);
              +typedef enum rd_kafka_type_t {
              +  RD_KAFKA_PRODUCER,
              +  RD_KAFKA_CONSUMER,
              +} rd_kafka_type_t;
              +typedef enum rd_kafka_timestamp_type_t {
              +  RD_KAFKA_TIMESTAMP_NOT_AVAILABLE,
              +  RD_KAFKA_TIMESTAMP_CREATE_TIME,
              +  RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME,
              +} rd_kafka_timestamp_type_t;
              +const char *rd_kafka_get_debug_contexts(void);
              +typedef struct rd_kafka_s rd_kafka_t;
              +typedef struct rd_kafka_topic_s rd_kafka_topic_t;
              +typedef struct rd_kafka_conf_s rd_kafka_conf_t;
              +typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t;
              +typedef struct rd_kafka_queue_s rd_kafka_queue_t;
              +typedef struct rd_kafka_op_s rd_kafka_event_t;
              +typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t;
              +typedef struct rd_kafka_consumer_group_metadata_s rd_kafka_consumer_group_metadata_t;
              +typedef struct rd_kafka_error_s {
              +  unsigned int code;
              +  char *errstr;
              +  unsigned char fatal;
              +  unsigned char retriable;
              +  unsigned char txn_requires_abort;
              +} rd_kafka_error_t;
              +typedef struct rd_kafka_headers_s rd_kafka_headers_t;
              +typedef struct rd_kafka_group_result_s rd_kafka_group_result_t;
              +typedef enum {
              +  RD_KAFKA_RESP_ERR__BEGIN = (- 200),
              +  RD_KAFKA_RESP_ERR__BAD_MSG = (- 199),
              +  RD_KAFKA_RESP_ERR__BAD_COMPRESSION = (- 198),
              +  RD_KAFKA_RESP_ERR__DESTROY = (- 197),
              +  RD_KAFKA_RESP_ERR__FAIL = (- 196),
              +  RD_KAFKA_RESP_ERR__TRANSPORT = (- 195),
              +  RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = (- 194),
              +  RD_KAFKA_RESP_ERR__RESOLVE = (- 193),
              +  RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = (- 192),
              +  RD_KAFKA_RESP_ERR__PARTITION_EOF = (- 191),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = (- 190),
              +  RD_KAFKA_RESP_ERR__FS = (- 189),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = (- 188),
              +  RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = (- 187),
              +  RD_KAFKA_RESP_ERR__INVALID_ARG = (- 186),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT = (- 185),
              +  RD_KAFKA_RESP_ERR__QUEUE_FULL = (- 184),
              +  RD_KAFKA_RESP_ERR__ISR_INSUFF = (- 183),
              +  RD_KAFKA_RESP_ERR__NODE_UPDATE = (- 182),
              +  RD_KAFKA_RESP_ERR__SSL = (- 181),
              +  RD_KAFKA_RESP_ERR__WAIT_COORD = (- 180),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = (- 179),
              +  RD_KAFKA_RESP_ERR__IN_PROGRESS = (- 178),
              +  RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = (- 177),
              +  RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = (- 176),
              +  RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = (- 175),
              +  RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = (- 174),
              +  RD_KAFKA_RESP_ERR__CONFLICT = (- 173),
              +  RD_KAFKA_RESP_ERR__STATE = (- 172),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = (- 171),
              +  RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = (- 170),
              +  RD_KAFKA_RESP_ERR__AUTHENTICATION = (- 169),
              +  RD_KAFKA_RESP_ERR__NO_OFFSET = (- 168),
              +  RD_KAFKA_RESP_ERR__OUTDATED = (- 167),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = (- 166),
              +  RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = (- 165),
              +  RD_KAFKA_RESP_ERR__WAIT_CACHE = (- 164),
              +  RD_KAFKA_RESP_ERR__INTR = (- 163),
              +  RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = (- 162),
              +  RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = (- 161),
              +  RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = (- 160),
              +  RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = (- 159),
              +  RD_KAFKA_RESP_ERR__PARTIAL = (- 158),
              +  RD_KAFKA_RESP_ERR__READ_ONLY = (- 157),
              +  RD_KAFKA_RESP_ERR__NOENT = (- 156),
              +  RD_KAFKA_RESP_ERR__UNDERFLOW = (- 155),
              +  RD_KAFKA_RESP_ERR__INVALID_TYPE = (- 154),
              +  RD_KAFKA_RESP_ERR__RETRY = (- 153),
              +  RD_KAFKA_RESP_ERR__PURGE_QUEUE = (- 152),
              +  RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = (- 151),
              +  RD_KAFKA_RESP_ERR__FATAL = (- 150),
              +  RD_KAFKA_RESP_ERR__INCONSISTENT = (- 149),
              +  RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = (- 148),
              +  RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = (- 147),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_BROKER = (- 146),
              +  RD_KAFKA_RESP_ERR__NOT_CONFIGURED = (- 145),
              +  RD_KAFKA_RESP_ERR__FENCED = (- 144),
              +  RD_KAFKA_RESP_ERR__APPLICATION = (- 143),
              +  RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST = (- 142),
              +  RD_KAFKA_RESP_ERR__NOOP = (- 141),
              +  RD_KAFKA_RESP_ERR__END = (- 100),
              +  RD_KAFKA_RESP_ERR_UNKNOWN = (- 1),
              +  RD_KAFKA_RESP_ERR_NO_ERROR = 0,
              +  RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG = 2,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4,
              +  RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5,
              +  RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6,
              +  RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7,
              +  RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8,
              +  RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9,
              +  RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10,
              +  RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11,
              +  RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12,
              +  RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS = 14,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE = 15,
              +  RD_KAFKA_RESP_ERR_NOT_COORDINATOR = 16,
              +  RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17,
              +  RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
              +  RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25,
              +  RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26,
              +  RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27,
              +  RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
              +  RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29,
              +  RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30,
              +  RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
              +  RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35,
              +  RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36,
              +  RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39,
              +  RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40,
              +  RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
              +  RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44,
              +  RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47,
              +  RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49,
              +  RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50,
              +  RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51,
              +  RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52,
              +  RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
              +  RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54,
              +  RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55,
              +  RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56,
              +  RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57,
              +  RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59,
              +  RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66,
              +  RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67,
              +  RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68,
              +  RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69,
              +  RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70,
              +  RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71,
              +  RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72,
              +  RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73,
              +  RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76,
              +  RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77,
              +  RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78,
              +  RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79,
              +  RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80,
              +  RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81,
              +  RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID = 82,
              +  RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE = 83,
              +  RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED = 84,
              +  RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS = 85,
              +  RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC = 86,
              +  RD_KAFKA_RESP_ERR_INVALID_RECORD = 87,
              +  RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT = 88,
              +  RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED = 89,
              +  RD_KAFKA_RESP_ERR_PRODUCER_FENCED = 90,
              +  RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND = 91,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE = 92,
              +  RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL = 93,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET = 94,
              +  RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION = 95,
              +  RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED = 96,
              +  RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE = 97,
              +  RD_KAFKA_RESP_ERR_END_ALL,
              +} rd_kafka_resp_err_t;
              +struct rd_kafka_err_desc {
              +  rd_kafka_resp_err_t code;
              +  char *name;
              +  char *desc;
              +};
              +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, size_t *cntp);
              +const char *rd_kafka_err2str(rd_kafka_resp_err_t err);
              +const char *rd_kafka_err2name(rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_last_error(void);
              +rd_kafka_resp_err_t rd_kafka_errno2err(int errnox);
              +int rd_kafka_errno(void);
              +rd_kafka_resp_err_t rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason);
              +rd_kafka_resp_err_t rd_kafka_error_code(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_name(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_string(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_fatal(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_retriable(const rd_kafka_error_t *error);
              +int rd_kafka_error_txn_requires_abort(const rd_kafka_error_t *error);
              +void rd_kafka_error_destroy(rd_kafka_error_t *error);
              +rd_kafka_error_t *rd_kafka_error_new(rd_kafka_resp_err_t code, const char *fmt, ...);
              +typedef struct rd_kafka_topic_partition_s {
              +  char *topic;
              +  int32_t partition;
              +  int64_t offset;
              +  void *metadata;
              +  size_t metadata_size;
              +  void *opaque;
              +  rd_kafka_resp_err_t err;
              +  void *_private;
              +} rd_kafka_topic_partition_t;
              +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar);
              +typedef struct rd_kafka_topic_partition_list_s {
              +  int cnt;
              +  int size;
              +  rd_kafka_topic_partition_t *elems;
              +} rd_kafka_topic_partition_list_t;
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size);
              +void rd_kafka_topic_partition_list_destroy(rd_kafka_topic_partition_list_t *rkparlist);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_add_range(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t start, int32_t stop);
              +int rd_kafka_topic_partition_list_del(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +int rd_kafka_topic_partition_list_del_by_idx(rd_kafka_topic_partition_list_t *rktparlist, int idx);
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src);
              +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition, int64_t offset);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(const rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_sort(rd_kafka_topic_partition_list_t *rktparlist, int (*cmp)(const void *a, const void *b, void *cmp_opaque), void *cmp_opaque);
              +typedef enum rd_kafka_vtype_t {
              +  RD_KAFKA_VTYPE_END,
              +  RD_KAFKA_VTYPE_TOPIC,
              +  RD_KAFKA_VTYPE_RKT,
              +  RD_KAFKA_VTYPE_PARTITION,
              +  RD_KAFKA_VTYPE_VALUE,
              +  RD_KAFKA_VTYPE_KEY,
              +  RD_KAFKA_VTYPE_OPAQUE,
              +  RD_KAFKA_VTYPE_MSGFLAGS,
              +  RD_KAFKA_VTYPE_TIMESTAMP,
              +  RD_KAFKA_VTYPE_HEADER,
              +  RD_KAFKA_VTYPE_HEADERS,
              +} rd_kafka_vtype_t;
              +typedef struct rd_kafka_vu_s {
              +  rd_kafka_vtype_t vtype;
              +  union {
              +    char *cstr;
              +    rd_kafka_topic_t *rkt;
              +    int i;
              +    int32_t i32;
              +    int64_t i64;
              +    struct {
              +      void *ptr;
              +      size_t size;
              +    } mem;
              +    struct {
              +      char *name;
              +      void *val;
              +      ssize_t size;
              +    } header;
              +    rd_kafka_headers_t *headers;
              +    void *ptr;
              +    char _pad[64];
              +  } u;
              +} rd_kafka_vu_t;
              +rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count);
              +void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs);
              +rd_kafka_headers_t *rd_kafka_headers_copy(const rd_kafka_headers_t *src);
              +rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, const char *name, ssize_t name_size, const void *value, ssize_t value_size);
              +rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, const char *name);
              +rd_kafka_resp_err_t rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get(const rd_kafka_headers_t *hdrs, size_t idx, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, size_t idx, const char **namep, const void **valuep, size_t *sizep);
              +typedef struct rd_kafka_message_s {
              +  rd_kafka_resp_err_t err;
              +  rd_kafka_topic_t *rkt;
              +  int32_t partition;
              +  void *payload;
              +  size_t len;
              +  void *key;
              +  size_t key_len;
              +  int64_t offset;
              +  void *_private;
              +} rd_kafka_message_t;
              +void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);
              +const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage);
              +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, rd_kafka_timestamp_type_t *tstype);
              +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage);
              +int32_t rd_kafka_message_broker_id(const rd_kafka_message_t *rkmessage);
              +rd_kafka_resp_err_t rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +rd_kafka_resp_err_t rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t *hdrs);
              +size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs);
              +typedef enum {
              +  RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0,
              +  RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1,
              +  RD_KAFKA_MSG_STATUS_PERSISTED = 2,
              +} rd_kafka_msg_status_t;
              +rd_kafka_msg_status_t rd_kafka_message_status(const rd_kafka_message_t *rkmessage);
              +typedef enum {
              +  RD_KAFKA_CONF_UNKNOWN = (- 2),
              +  RD_KAFKA_CONF_INVALID = (- 1),
              +  RD_KAFKA_CONF_OK = 0,
              +} rd_kafka_conf_res_t;
              +rd_kafka_conf_t *rd_kafka_conf_new(void);
              +void rd_kafka_conf_destroy(rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, size_t filter_cnt, const char **filter);
              +const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk);
              +rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events);
              +void rd_kafka_conf_set_background_event_cb(rd_kafka_conf_t *conf, void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque));
              +void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, void (*dr_cb)(rd_kafka_t *rk, void *payload, size_t len, rd_kafka_resp_err_t err, void *opaque, void *msg_opaque));
              +void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, void (*dr_msg_cb)(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_consume_cb(rd_kafka_conf_t *conf, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_rebalance_cb(rd_kafka_conf_t *conf, void (*rebalance_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque));
              +void rd_kafka_conf_set_offset_commit_cb(rd_kafka_conf_t *conf, void (*offset_commit_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque));
              +void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, void (*error_cb)(rd_kafka_t *rk, int err, const char *reason, void *opaque));
              +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, void (*throttle_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque));
              +void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, void (*log_cb)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque));
              +void rd_kafka_conf_set_oauthbearer_token_refresh_cb(rd_kafka_conf_t *conf, void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, const char *oauthbearer_config, void *opaque));
              +void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, int (*socket_cb)(int domain, int type, int protocol, void *opaque));
              +void rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, int (*connect_cb)(int sockfd, const struct sockaddr *addr, int addrlen, const char *id, void *opaque));
              +void rd_kafka_conf_set_closesocket_cb(rd_kafka_conf_t *conf, int (*closesocket_cb)(int sockfd, void *opaque));
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb(rd_kafka_conf_t *conf, int (*ssl_cert_verify_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int *x509_error, int depth, const char *buf, size_t size, char *errstr, size_t errstr_size, void *opaque));
              +typedef enum rd_kafka_cert_type_t {
              +  RD_KAFKA_CERT_PUBLIC_KEY,
              +  RD_KAFKA_CERT_PRIVATE_KEY,
              +  RD_KAFKA_CERT_CA,
              +  RD_KAFKA_CERT__CNT,
              +} rd_kafka_cert_type_t;
              +typedef enum rd_kafka_cert_enc_t {
              +  RD_KAFKA_CERT_ENC_PKCS12,
              +  RD_KAFKA_CERT_ENC_DER,
              +  RD_KAFKA_CERT_ENC_PEM,
              +  RD_KAFKA_CERT_ENC__CNT,
              +} rd_kafka_cert_enc_t;
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, rd_kafka_cert_type_t cert_type, rd_kafka_cert_enc_t cert_enc, const void *buffer, size_t size, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque);
              +void *rd_kafka_opaque(const rd_kafka_t *rk);
              +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, rd_kafka_topic_conf_t *tconf);
              +rd_kafka_topic_conf_t *rd_kafka_conf_get_default_topic_conf(rd_kafka_conf_t *conf);
              +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp);
              +const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, size_t *cntp);
              +void rd_kafka_conf_dump_free(const char **arr, size_t cnt);
              +void rd_kafka_conf_properties_show(FILE *fp);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf);
              +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk);
              +void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *rkt_opaque);
              +void rd_kafka_topic_conf_set_partitioner_cb(rd_kafka_topic_conf_t *topic_conf, int32_t (*partitioner)(const rd_kafka_topic_t *rkt, const void *keydata, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque));
              +void rd_kafka_topic_conf_set_msg_order_cmp(rd_kafka_topic_conf_t *topic_conf, int (*msg_order_cmp)(const rd_kafka_message_t *a, const rd_kafka_message_t *b));
              +int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, int32_t partition);
              +int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, char *errstr, size_t errstr_size);
              +void rd_kafka_destroy(rd_kafka_t *rk);
              +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags);
              +const char *rd_kafka_name(const rd_kafka_t *rk);
              +rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk);
              +char *rd_kafka_memberid(const rd_kafka_t *rk);
              +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms);
              +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, rd_kafka_topic_conf_t *conf);
              +void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt);
              +const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt);
              +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt);
              +int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms);
              +void rd_kafka_yield(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_pause_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_resume_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high);
              +rd_kafka_resp_err_t rd_kafka_offsets_for_times(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets, int timeout_ms);
              +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr);
              +rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk);
              +void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu);
              +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, const char *topic, int32_t partition);
              +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk);
              +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst);
              +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu);
              +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, int fd, const void *payload, size_t size);
              +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, void (*event_cb)(rd_kafka_t *rk, void *qev_opaque), void *qev_opaque);
              +void rd_kafka_queue_yield(rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
              +rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_seek_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms);
              +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, int timeout_ms);
              +ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +rd_kafka_resp_err_t rd_kafka_offsets_store(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets);
              +rd_kafka_resp_err_t rd_kafka_subscribe(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *topics);
              +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics);
              +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_incremental_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_error_t *rd_kafka_incremental_unassign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +const char *rd_kafka_rebalance_protocol(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_assignment(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **partitions);
              +int rd_kafka_assignment_lost(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_commit(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_message(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_queue(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_queue_t *rkqu, void (*cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_committed(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata(rd_kafka_t *rk);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new(const char *group_id);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new_with_genid(const char *group_id, int32_t generation_id, const char *member_id, const char *group_instance_id);
              +void rd_kafka_consumer_group_metadata_destroy(rd_kafka_consumer_group_metadata_t *);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_write(const rd_kafka_consumer_group_metadata_t *cgmd, void **bufferp, size_t *sizep);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_read(rd_kafka_consumer_group_metadata_t **cgmdp, const void *buffer, size_t size);
              +int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, void *payload, size_t len, const void *key, size_t keylen, void *msg_opaque);
              +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...);
              +rd_kafka_error_t *rd_kafka_produceva(rd_kafka_t *rk, const rd_kafka_vu_t *vus, size_t cnt);
              +int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, rd_kafka_message_t *rkmessages, int message_cnt);
              +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags);
              +typedef struct rd_kafka_metadata_broker {
              +  int32_t id;
              +  char *host;
              +  int port;
              +} rd_kafka_metadata_broker_t;
              +typedef struct rd_kafka_metadata_partition {
              +  int32_t id;
              +  rd_kafka_resp_err_t err;
              +  int32_t leader;
              +  int replica_cnt;
              +  int32_t *replicas;
              +  int isr_cnt;
              +  int32_t *isrs;
              +} rd_kafka_metadata_partition_t;
              +typedef struct rd_kafka_metadata_topic {
              +  char *topic;
              +  int partition_cnt;
              +  struct rd_kafka_metadata_partition *partitions;
              +  rd_kafka_resp_err_t err;
              +} rd_kafka_metadata_topic_t;
              +typedef struct rd_kafka_metadata {
              +  int broker_cnt;
              +  struct rd_kafka_metadata_broker *brokers;
              +  int topic_cnt;
              +  struct rd_kafka_metadata_topic *topics;
              +  int32_t orig_broker_id;
              +  char *orig_broker_name;
              +} rd_kafka_metadata_t;
              +rd_kafka_resp_err_t rd_kafka_metadata(rd_kafka_t *rk, int all_topics, rd_kafka_topic_t *only_rkt, const struct rd_kafka_metadata **metadatap, int timeout_ms);
              +void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata);
              +struct rd_kafka_group_member_info {
              +  char *member_id;
              +  char *client_id;
              +  char *client_host;
              +  void *member_metadata;
              +  int member_metadata_size;
              +  void *member_assignment;
              +  int member_assignment_size;
              +};
              +struct rd_kafka_group_info {
              +  struct rd_kafka_metadata_broker broker;
              +  char *group;
              +  rd_kafka_resp_err_t err;
              +  char *state;
              +  char *protocol_type;
              +  char *protocol;
              +  struct rd_kafka_group_member_info *members;
              +  int member_cnt;
              +};
              +struct rd_kafka_group_list {
              +  struct rd_kafka_group_info *groups;
              +  int group_cnt;
              +};
              +rd_kafka_resp_err_t rd_kafka_list_groups(rd_kafka_t *rk, const char *group, const struct rd_kafka_group_list **grplistp, int timeout_ms);
              +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist);
              +int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist);
              +void rd_kafka_set_logger(rd_kafka_t *rk, void (*func)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_set_log_level(rd_kafka_t *rk, int level);
              +void rd_kafka_log_print(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +int rd_kafka_outq_len(rd_kafka_t *rk);
              +void rd_kafka_dump(FILE *fp, rd_kafka_t *rk);
              +int rd_kafka_thread_cnt(void);
              +typedef enum rd_kafka_thread_type_t {
              +  RD_KAFKA_THREAD_MAIN,
              +  RD_KAFKA_THREAD_BACKGROUND,
              +  RD_KAFKA_THREAD_BROKER,
              +} rd_kafka_thread_type_t;
              +int rd_kafka_wait_destroyed(int timeout_ms);
              +int rd_kafka_unittest(void);
              +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk);
              +typedef int rd_kafka_event_type_t;
              +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev);
              +void rd_kafka_event_destroy(rd_kafka_event_t *rkev);
              +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev);
              +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, const rd_kafka_message_t **rkmessages, size_t size);
              +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev);
              +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev);
              +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev);
              +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev);
              +int rd_kafka_event_log(rd_kafka_event_t *rkev, const char **fac, const char **str, int *level);
              +int rd_kafka_event_debug_contexts(rd_kafka_event_t *rkev, char *dst, size_t dstsize);
              +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_list_t *rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_t *rd_kafka_event_topic_partition(rd_kafka_event_t *rkev);
              +typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteRecords_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteGroups_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteConsumerGroupOffsets_result_t;
              +const rd_kafka_CreateTopics_result_t *rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteTopics_result_t *rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreatePartitions_result_t *rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConfigs_result_t *rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConfigs_result_t *rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteRecords_result_t *rd_kafka_event_DeleteRecords_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteGroups_result_t *rd_kafka_event_DeleteGroups_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteConsumerGroupOffsets_result_t *rd_kafka_event_DeleteConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
              +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms);
              +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms);
              +typedef rd_kafka_resp_err_t (rd_kafka_plugin_f_conf_init_t)(rd_kafka_conf_t *conf, void **plug_opaquep, char *errstr, size_t errstr_size);
              +typedef rd_kafka_conf_res_t (rd_kafka_interceptor_f_on_conf_set_t)(rd_kafka_conf_t *conf, const char *name, const char *val, char *errstr, size_t errstr_size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_dup_t)(rd_kafka_conf_t *new_conf, const rd_kafka_conf_t *old_conf, size_t filter_cnt, const char **filter, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_destroy_t)(void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_new_t)(rd_kafka_t *rk, const rd_kafka_conf_t *conf, void *ic_opaque, char *errstr, size_t errstr_size);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_send_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_acknowledgement_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_consume_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_commit_t)(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_request_sent_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_start_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_exit_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_new_t *on_new, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_destroy_t *on_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_send_t *on_send, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_consume_t *on_consume, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_commit_t *on_commit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres);
              +const rd_kafka_error_t *rd_kafka_group_result_error(const rd_kafka_group_result_t *groupres);
              +const char *rd_kafka_group_result_name(const rd_kafka_group_result_t *groupres);
              +const rd_kafka_topic_partition_list_t *rd_kafka_group_result_partitions(const rd_kafka_group_result_t *groupres);
              +typedef enum rd_kafka_admin_op_t {
              +  RD_KAFKA_ADMIN_OP_ANY = 0,
              +  RD_KAFKA_ADMIN_OP_CREATETOPICS,
              +  RD_KAFKA_ADMIN_OP_DELETETOPICS,
              +  RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONFIGS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS,
              +  RD_KAFKA_ADMIN_OP_DELETERECORDS,
              +  RD_KAFKA_ADMIN_OP_DELETEGROUPS,
              +  RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS,
              +  RD_KAFKA_ADMIN_OP__CNT,
              +} rd_kafka_admin_op_t;
              +typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t;
              +rd_kafka_AdminOptions_t *rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
              +void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, int true_or_false, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, int32_t broker_id, char *errstr, size_t errstr_size);
              +void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, void *ev_opaque);
              +typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t;
              +rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, int num_partitions, int replication_factor, char *errstr, size_t errstr_size);
              +void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic);
              +void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, int32_t partition, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, const char *name, const char *value);
              +void rd_kafka_CreateTopics(rd_kafka_t *rk, rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(const rd_kafka_CreateTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
              +rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic);
              +void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic);
              +void rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt);
              +void rd_kafka_DeleteTopics(rd_kafka_t *rk, rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(const rd_kafka_DeleteTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t;
              +rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new(const char *topic, size_t new_total_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts);
              +void rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment(rd_kafka_NewPartitions_t *new_parts, int32_t new_partition_idx, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_CreatePartitions(rd_kafka_t *rk, rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreatePartitions_result_topics(const rd_kafka_CreatePartitions_result_t *result, size_t *cntp);
              +typedef enum rd_kafka_ConfigSource_t {
              +  RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
              +  RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4,
              +  RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5,
              +  RD_KAFKA_CONFIG_SOURCE__CNT,
              +} rd_kafka_ConfigSource_t;
              +const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource);
              +typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t;
              +const char *rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry);
              +const char *rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry);
              +rd_kafka_ConfigSource_t rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, size_t *cntp);
              +typedef enum rd_kafka_ResourceType_t {
              +  RD_KAFKA_RESOURCE_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_ANY = 1,
              +  RD_KAFKA_RESOURCE_TOPIC = 2,
              +  RD_KAFKA_RESOURCE_GROUP = 3,
              +  RD_KAFKA_RESOURCE_BROKER = 4,
              +  RD_KAFKA_RESOURCE__CNT,
              +} rd_kafka_ResourceType_t;
              +const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype);
              +typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
              +rd_kafka_ConfigResource_t *rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, const char *resname);
              +void rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config);
              +void rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, size_t config_cnt);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, const char *name, const char *value);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, size_t *cntp);
              +rd_kafka_ResourceType_t rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config);
              +void rd_kafka_AlterConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_AlterConfigs_result_resources(const rd_kafka_AlterConfigs_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_DescribeConfigs_result_resources(const rd_kafka_DescribeConfigs_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteRecords_s rd_kafka_DeleteRecords_t;
              +rd_kafka_DeleteRecords_t *rd_kafka_DeleteRecords_new(const rd_kafka_topic_partition_list_t *before_offsets);
              +void rd_kafka_DeleteRecords_destroy(rd_kafka_DeleteRecords_t *del_records);
              +void rd_kafka_DeleteRecords_destroy_array(rd_kafka_DeleteRecords_t **del_records, size_t del_record_cnt);
              +void rd_kafka_DeleteRecords(rd_kafka_t *rk, rd_kafka_DeleteRecords_t **del_records, size_t del_record_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_partition_list_t *rd_kafka_DeleteRecords_result_offsets(const rd_kafka_DeleteRecords_result_t *result);
              +typedef struct rd_kafka_DeleteGroup_s rd_kafka_DeleteGroup_t;
              +rd_kafka_DeleteGroup_t *rd_kafka_DeleteGroup_new(const char *group);
              +void rd_kafka_DeleteGroup_destroy(rd_kafka_DeleteGroup_t *del_group);
              +void rd_kafka_DeleteGroup_destroy_array(rd_kafka_DeleteGroup_t **del_groups, size_t del_group_cnt);
              +void rd_kafka_DeleteGroups(rd_kafka_t *rk, rd_kafka_DeleteGroup_t **del_groups, size_t del_group_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_DeleteGroups_result_groups(const rd_kafka_DeleteGroups_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteConsumerGroupOffsets_s rd_kafka_DeleteConsumerGroupOffsets_t;
              +rd_kafka_DeleteConsumerGroupOffsets_t *rd_kafka_DeleteConsumerGroupOffsets_new(const char *group, const rd_kafka_topic_partition_list_t *partitions);
              +void rd_kafka_DeleteConsumerGroupOffsets_destroy(rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets);
              +void rd_kafka_DeleteConsumerGroupOffsets_destroy_array(rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, size_t del_grpoffset_cnt);
              +void rd_kafka_DeleteConsumerGroupOffsets(rd_kafka_t *rk, rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, size_t del_grpoffsets_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_DeleteConsumerGroupOffsets_result_groups(const rd_kafka_DeleteConsumerGroupOffsets_result_t *result, size_t *cntp);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token(rd_kafka_t *rk, const char *token_value, int64_t md_lifetime_ms, const char *md_principal_name, const char **extensions, size_t extension_size, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk, const char *errstr);
              +rd_kafka_error_t *rd_kafka_init_transactions(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_send_offsets_to_transaction(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, const rd_kafka_consumer_group_metadata_t *cgmetadata, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_commit_transaction(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_abort_transaction(rd_kafka_t *rk, int timeout_ms);
              +typedef struct rd_kafka_mock_cluster_s rd_kafka_mock_cluster_t;
              +rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new(rd_kafka_t *rk, int broker_cnt);
              +void rd_kafka_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_t *rd_kafka_mock_cluster_handle(const rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_mock_cluster_t *rd_kafka_handle_mock_cluster(const rd_kafka_t *rk);
              +const char *rd_kafka_mock_cluster_bootstraps(const rd_kafka_mock_cluster_t *mcluster);
              +void rd_kafka_mock_clear_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey);
              +void rd_kafka_mock_push_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, ...);
              +void rd_kafka_mock_push_request_errors_array(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, const rd_kafka_resp_err_t *errors);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_push_request_errors(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int16_t ApiKey, size_t cnt, ...);
              +void rd_kafka_mock_topic_set_error(rd_kafka_mock_cluster_t *mcluster, const char *topic, rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_mock_topic_create(rd_kafka_mock_cluster_t *mcluster, const char *topic, int partition_cnt, int replication_factor);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_leader(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int64_t lo, int64_t hi);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_down(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_up(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rtt(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int rtt_ms);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rack(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, const char *rack);
              +rd_kafka_resp_err_t rd_kafka_mock_coordinator_set(rd_kafka_mock_cluster_t *mcluster, const char *key_type, const char *key, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_set_apiversion(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, int16_t MinVersion, int16_t MaxVersion);
              +'
              +
              +rdkafka.h, rdkafka_mock.h

              +
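The cdef above is the full set of librdkafka declarations (drawn from rdkafka.h and rdkafka_mock.h) that the client binds through PHP FFI. As an illustration only — the constant lookup and the library file name below are assumptions, not the client's verified bootstrap code — it could be loaded roughly like this:

<?php
// Minimal sketch, not the client's actual bootstrap: bind the declarations
// in RD_KAFKA_CDEF against the librdkafka shared library via PHP FFI.
// Assumptions: RD_KAFKA_CDEF is reachable as a plain constant here, and the
// library file name ('librdkafka.so.1') matches your platform (it would be
// e.g. librdkafka.dylib on macOS).
$ffi = \FFI::cdef(RD_KAFKA_CDEF, 'librdkafka.so.1');

// Runtime version check, as recommended under RD_KAFKA_VERSION below.
printf("librdkafka runtime version: 0x%08x\n", $ffi->rd_kafka_version());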

RD_KAFKA_VERSION

public RD_KAFKA_VERSION = 17170943

librdkafka version

Interpreted as hex MM.mm.rr.xx:

• MM = Major
• mm = minor
• rr = revision
• xx = pre-release id (0xff is the final release)

E.g.: 0x000801ff = 0.8.1

Remarks
This value should only be used at compile time; for runtime version checks use rd_kafka_version().
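Decoding the value above follows directly from that hex layout: 17170943 is 0x010601ff, i.e. librdkafka 1.6.1 with the final-release id 0xff. A small PHP sketch (the formatting helper is purely illustrative, not part of the client's API):

<?php
// Decode an RD_KAFKA_VERSION-style value (hex MM.mm.rr.xx) into a string.
// Illustrative helper only.
function formatRdKafkaVersion(int $v): string
{
    $major    = ($v >> 24) & 0xff;
    $minor    = ($v >> 16) & 0xff;
    $revision = ($v >> 8) & 0xff;
    $pre      = $v & 0xff; // 0xff means final release

    $s = sprintf('%d.%d.%d', $major, $minor, $revision);
    return $pre === 0xff ? $s : $s . sprintf('~pre%d', $pre);
}

echo formatRdKafkaVersion(17170943), PHP_EOL;   // "1.6.1" (0x010601ff)
echo formatRdKafkaVersion(0x000801ff), PHP_EOL; // "0.8.1", matching the example above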

RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE

public RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76

Unsupported compression type
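A rough usage sketch — assuming, as in the php-rdkafka extension API this client mirrors, that a consumed message object exposes a public err property carrying one of these RD_KAFKA_RESP_ERR_* codes:

<?php
// Hedged sketch: react to a specific broker error code on a consumed message.
// $message is assumed to be an \RdKafka\Message-like object with a public
// $err property; adjust to the client's actual consumer API.
if ($message->err === RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE) {
    // The broker rejected the compression codec used by the producer;
    // surface the error instead of retrying blindly.
    error_log('Broker does not support the requested compression type');
}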

RD_KAFKA_RESP_ERR_END_ALL

public RD_KAFKA_RESP_ERR_END_ALL = 98

enum rd_kafka_resp_err_t

RD_KAFKA_ADMIN_OP__CNT

public RD_KAFKA_ADMIN_OP__CNT = 9

Number of ops defined

RD_KAFKA_CDEF

              public RD_KAFKA_CDEF = 'typedef long int ssize_t;
              +typedef struct _IO_FILE FILE;
              +typedef long int mode_t;
              +typedef signed int int16_t;
              +typedef unsigned int uint16_t;
              +typedef signed int int32_t;
              +typedef signed long int int64_t;
              +int rd_kafka_version(void);
              +const char *rd_kafka_version_str(void);
              +typedef enum rd_kafka_type_t {
              +  RD_KAFKA_PRODUCER,
              +  RD_KAFKA_CONSUMER,
              +} rd_kafka_type_t;
              +typedef enum rd_kafka_timestamp_type_t {
              +  RD_KAFKA_TIMESTAMP_NOT_AVAILABLE,
              +  RD_KAFKA_TIMESTAMP_CREATE_TIME,
              +  RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME,
              +} rd_kafka_timestamp_type_t;
              +const char *rd_kafka_get_debug_contexts(void);
              +typedef struct rd_kafka_s rd_kafka_t;
              +typedef struct rd_kafka_topic_s rd_kafka_topic_t;
              +typedef struct rd_kafka_conf_s rd_kafka_conf_t;
              +typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t;
              +typedef struct rd_kafka_queue_s rd_kafka_queue_t;
              +typedef struct rd_kafka_op_s rd_kafka_event_t;
              +typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t;
              +typedef struct rd_kafka_consumer_group_metadata_s rd_kafka_consumer_group_metadata_t;
              +typedef struct rd_kafka_error_s {
              +  unsigned int code;
              +  char *errstr;
              +  unsigned char fatal;
              +  unsigned char retriable;
              +  unsigned char txn_requires_abort;
              +} rd_kafka_error_t;
              +typedef struct rd_kafka_headers_s rd_kafka_headers_t;
              +typedef struct rd_kafka_group_result_s rd_kafka_group_result_t;
              +typedef enum {
              +  RD_KAFKA_RESP_ERR__BEGIN = (- 200),
              +  RD_KAFKA_RESP_ERR__BAD_MSG = (- 199),
              +  RD_KAFKA_RESP_ERR__BAD_COMPRESSION = (- 198),
              +  RD_KAFKA_RESP_ERR__DESTROY = (- 197),
              +  RD_KAFKA_RESP_ERR__FAIL = (- 196),
              +  RD_KAFKA_RESP_ERR__TRANSPORT = (- 195),
              +  RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = (- 194),
              +  RD_KAFKA_RESP_ERR__RESOLVE = (- 193),
              +  RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = (- 192),
              +  RD_KAFKA_RESP_ERR__PARTITION_EOF = (- 191),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = (- 190),
              +  RD_KAFKA_RESP_ERR__FS = (- 189),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = (- 188),
              +  RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = (- 187),
              +  RD_KAFKA_RESP_ERR__INVALID_ARG = (- 186),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT = (- 185),
              +  RD_KAFKA_RESP_ERR__QUEUE_FULL = (- 184),
              +  RD_KAFKA_RESP_ERR__ISR_INSUFF = (- 183),
              +  RD_KAFKA_RESP_ERR__NODE_UPDATE = (- 182),
              +  RD_KAFKA_RESP_ERR__SSL = (- 181),
              +  RD_KAFKA_RESP_ERR__WAIT_COORD = (- 180),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = (- 179),
              +  RD_KAFKA_RESP_ERR__IN_PROGRESS = (- 178),
              +  RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = (- 177),
              +  RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = (- 176),
              +  RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = (- 175),
              +  RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = (- 174),
              +  RD_KAFKA_RESP_ERR__CONFLICT = (- 173),
              +  RD_KAFKA_RESP_ERR__STATE = (- 172),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = (- 171),
              +  RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = (- 170),
              +  RD_KAFKA_RESP_ERR__AUTHENTICATION = (- 169),
              +  RD_KAFKA_RESP_ERR__NO_OFFSET = (- 168),
              +  RD_KAFKA_RESP_ERR__OUTDATED = (- 167),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = (- 166),
              +  RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = (- 165),
              +  RD_KAFKA_RESP_ERR__WAIT_CACHE = (- 164),
              +  RD_KAFKA_RESP_ERR__INTR = (- 163),
              +  RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = (- 162),
              +  RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = (- 161),
              +  RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = (- 160),
              +  RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = (- 159),
              +  RD_KAFKA_RESP_ERR__PARTIAL = (- 158),
              +  RD_KAFKA_RESP_ERR__READ_ONLY = (- 157),
              +  RD_KAFKA_RESP_ERR__NOENT = (- 156),
              +  RD_KAFKA_RESP_ERR__UNDERFLOW = (- 155),
              +  RD_KAFKA_RESP_ERR__INVALID_TYPE = (- 154),
              +  RD_KAFKA_RESP_ERR__RETRY = (- 153),
              +  RD_KAFKA_RESP_ERR__PURGE_QUEUE = (- 152),
              +  RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = (- 151),
              +  RD_KAFKA_RESP_ERR__FATAL = (- 150),
              +  RD_KAFKA_RESP_ERR__INCONSISTENT = (- 149),
              +  RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = (- 148),
              +  RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = (- 147),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_BROKER = (- 146),
              +  RD_KAFKA_RESP_ERR__NOT_CONFIGURED = (- 145),
              +  RD_KAFKA_RESP_ERR__FENCED = (- 144),
              +  RD_KAFKA_RESP_ERR__APPLICATION = (- 143),
              +  RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST = (- 142),
              +  RD_KAFKA_RESP_ERR__NOOP = (- 141),
              +  RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET = (- 140),
              +  RD_KAFKA_RESP_ERR__END = (- 100),
              +  RD_KAFKA_RESP_ERR_UNKNOWN = (- 1),
              +  RD_KAFKA_RESP_ERR_NO_ERROR = 0,
              +  RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG = 2,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4,
              +  RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5,
              +  RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6,
              +  RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7,
              +  RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8,
              +  RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9,
              +  RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10,
              +  RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11,
              +  RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12,
              +  RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS = 14,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE = 15,
              +  RD_KAFKA_RESP_ERR_NOT_COORDINATOR = 16,
              +  RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17,
              +  RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
              +  RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25,
              +  RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26,
              +  RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27,
              +  RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
              +  RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29,
              +  RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30,
              +  RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
              +  RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35,
              +  RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36,
              +  RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39,
              +  RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40,
              +  RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
              +  RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44,
              +  RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47,
              +  RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49,
              +  RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50,
              +  RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51,
              +  RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52,
              +  RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
              +  RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54,
              +  RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55,
              +  RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56,
              +  RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57,
              +  RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59,
              +  RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66,
              +  RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67,
              +  RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68,
              +  RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69,
              +  RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70,
              +  RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71,
              +  RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72,
              +  RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73,
              +  RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76,
              +  RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77,
              +  RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78,
              +  RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79,
              +  RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80,
              +  RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81,
              +  RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID = 82,
              +  RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE = 83,
              +  RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED = 84,
              +  RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS = 85,
              +  RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC = 86,
              +  RD_KAFKA_RESP_ERR_INVALID_RECORD = 87,
              +  RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT = 88,
              +  RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED = 89,
              +  RD_KAFKA_RESP_ERR_PRODUCER_FENCED = 90,
              +  RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND = 91,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE = 92,
              +  RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL = 93,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET = 94,
              +  RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION = 95,
              +  RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED = 96,
              +  RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE = 97,
              +  RD_KAFKA_RESP_ERR_END_ALL,
              +} rd_kafka_resp_err_t;
              +struct rd_kafka_err_desc {
              +  rd_kafka_resp_err_t code;
              +  char *name;
              +  char *desc;
              +};
              +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, size_t *cntp);
              +const char *rd_kafka_err2str(rd_kafka_resp_err_t err);
              +const char *rd_kafka_err2name(rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_last_error(void);
              +rd_kafka_resp_err_t rd_kafka_errno2err(int errnox);
              +int rd_kafka_errno(void);
              +rd_kafka_resp_err_t rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason);
              +rd_kafka_resp_err_t rd_kafka_error_code(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_name(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_string(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_fatal(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_retriable(const rd_kafka_error_t *error);
              +int rd_kafka_error_txn_requires_abort(const rd_kafka_error_t *error);
              +void rd_kafka_error_destroy(rd_kafka_error_t *error);
              +rd_kafka_error_t *rd_kafka_error_new(rd_kafka_resp_err_t code, const char *fmt, ...);
              +typedef struct rd_kafka_topic_partition_s {
              +  char *topic;
              +  int32_t partition;
              +  int64_t offset;
              +  void *metadata;
              +  size_t metadata_size;
              +  void *opaque;
              +  rd_kafka_resp_err_t err;
              +  void *_private;
              +} rd_kafka_topic_partition_t;
              +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar);
              +typedef struct rd_kafka_topic_partition_list_s {
              +  int cnt;
              +  int size;
              +  rd_kafka_topic_partition_t *elems;
              +} rd_kafka_topic_partition_list_t;
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size);
              +void rd_kafka_topic_partition_list_destroy(rd_kafka_topic_partition_list_t *rkparlist);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_add_range(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t start, int32_t stop);
              +int rd_kafka_topic_partition_list_del(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +int rd_kafka_topic_partition_list_del_by_idx(rd_kafka_topic_partition_list_t *rktparlist, int idx);
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src);
              +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition, int64_t offset);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(const rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_sort(rd_kafka_topic_partition_list_t *rktparlist, int (*cmp)(const void *a, const void *b, void *cmp_opaque), void *cmp_opaque);
              +typedef enum rd_kafka_vtype_t {
              +  RD_KAFKA_VTYPE_END,
              +  RD_KAFKA_VTYPE_TOPIC,
              +  RD_KAFKA_VTYPE_RKT,
              +  RD_KAFKA_VTYPE_PARTITION,
              +  RD_KAFKA_VTYPE_VALUE,
              +  RD_KAFKA_VTYPE_KEY,
              +  RD_KAFKA_VTYPE_OPAQUE,
              +  RD_KAFKA_VTYPE_MSGFLAGS,
              +  RD_KAFKA_VTYPE_TIMESTAMP,
              +  RD_KAFKA_VTYPE_HEADER,
              +  RD_KAFKA_VTYPE_HEADERS,
              +} rd_kafka_vtype_t;
              +typedef struct rd_kafka_vu_s {
              +  rd_kafka_vtype_t vtype;
              +  union {
              +    char *cstr;
              +    rd_kafka_topic_t *rkt;
              +    int i;
              +    int32_t i32;
              +    int64_t i64;
              +    struct {
              +      void *ptr;
              +      size_t size;
              +    } mem;
              +    struct {
              +      char *name;
              +      void *val;
              +      ssize_t size;
              +    } header;
              +    rd_kafka_headers_t *headers;
              +    void *ptr;
              +    char _pad[64];
              +  } u;
              +} rd_kafka_vu_t;
              +rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count);
              +void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs);
              +rd_kafka_headers_t *rd_kafka_headers_copy(const rd_kafka_headers_t *src);
              +rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, const char *name, ssize_t name_size, const void *value, ssize_t value_size);
              +rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, const char *name);
              +rd_kafka_resp_err_t rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get(const rd_kafka_headers_t *hdrs, size_t idx, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, size_t idx, const char **namep, const void **valuep, size_t *sizep);
              +typedef struct rd_kafka_message_s {
              +  rd_kafka_resp_err_t err;
              +  rd_kafka_topic_t *rkt;
              +  int32_t partition;
              +  void *payload;
              +  size_t len;
              +  void *key;
              +  size_t key_len;
              +  int64_t offset;
              +  void *_private;
              +} rd_kafka_message_t;
              +void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);
              +const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage);
              +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, rd_kafka_timestamp_type_t *tstype);
              +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage);
              +int32_t rd_kafka_message_broker_id(const rd_kafka_message_t *rkmessage);
              +rd_kafka_resp_err_t rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +rd_kafka_resp_err_t rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t *hdrs);
              +size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs);
              +typedef enum {
              +  RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0,
              +  RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1,
              +  RD_KAFKA_MSG_STATUS_PERSISTED = 2,
              +} rd_kafka_msg_status_t;
              +rd_kafka_msg_status_t rd_kafka_message_status(const rd_kafka_message_t *rkmessage);
              +typedef enum {
              +  RD_KAFKA_CONF_UNKNOWN = (- 2),
              +  RD_KAFKA_CONF_INVALID = (- 1),
              +  RD_KAFKA_CONF_OK = 0,
              +} rd_kafka_conf_res_t;
              +rd_kafka_conf_t *rd_kafka_conf_new(void);
              +void rd_kafka_conf_destroy(rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, size_t filter_cnt, const char **filter);
              +const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk);
              +rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events);
              +void rd_kafka_conf_set_background_event_cb(rd_kafka_conf_t *conf, void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque));
              +void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, void (*dr_cb)(rd_kafka_t *rk, void *payload, size_t len, rd_kafka_resp_err_t err, void *opaque, void *msg_opaque));
              +void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, void (*dr_msg_cb)(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_consume_cb(rd_kafka_conf_t *conf, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_rebalance_cb(rd_kafka_conf_t *conf, void (*rebalance_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque));
              +void rd_kafka_conf_set_offset_commit_cb(rd_kafka_conf_t *conf, void (*offset_commit_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque));
              +void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, void (*error_cb)(rd_kafka_t *rk, int err, const char *reason, void *opaque));
              +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, void (*throttle_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque));
              +void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, void (*log_cb)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque));
              +void rd_kafka_conf_set_oauthbearer_token_refresh_cb(rd_kafka_conf_t *conf, void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, const char *oauthbearer_config, void *opaque));
              +void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, int (*socket_cb)(int domain, int type, int protocol, void *opaque));
              +void rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, int (*connect_cb)(int sockfd, const struct sockaddr *addr, int addrlen, const char *id, void *opaque));
              +void rd_kafka_conf_set_closesocket_cb(rd_kafka_conf_t *conf, int (*closesocket_cb)(int sockfd, void *opaque));
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb(rd_kafka_conf_t *conf, int (*ssl_cert_verify_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int *x509_error, int depth, const char *buf, size_t size, char *errstr, size_t errstr_size, void *opaque));
              +typedef enum rd_kafka_cert_type_t {
              +  RD_KAFKA_CERT_PUBLIC_KEY,
              +  RD_KAFKA_CERT_PRIVATE_KEY,
              +  RD_KAFKA_CERT_CA,
              +  RD_KAFKA_CERT__CNT,
              +} rd_kafka_cert_type_t;
              +typedef enum rd_kafka_cert_enc_t {
              +  RD_KAFKA_CERT_ENC_PKCS12,
              +  RD_KAFKA_CERT_ENC_DER,
              +  RD_KAFKA_CERT_ENC_PEM,
              +  RD_KAFKA_CERT_ENC__CNT,
              +} rd_kafka_cert_enc_t;
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, rd_kafka_cert_type_t cert_type, rd_kafka_cert_enc_t cert_enc, const void *buffer, size_t size, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque);
              +void *rd_kafka_opaque(const rd_kafka_t *rk);
              +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, rd_kafka_topic_conf_t *tconf);
              +rd_kafka_topic_conf_t *rd_kafka_conf_get_default_topic_conf(rd_kafka_conf_t *conf);
              +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp);
              +const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, size_t *cntp);
              +void rd_kafka_conf_dump_free(const char **arr, size_t cnt);
              +void rd_kafka_conf_properties_show(FILE *fp);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf);
              +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk);
              +void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *rkt_opaque);
              +void rd_kafka_topic_conf_set_partitioner_cb(rd_kafka_topic_conf_t *topic_conf, int32_t (*partitioner)(const rd_kafka_topic_t *rkt, const void *keydata, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque));
              +void rd_kafka_topic_conf_set_msg_order_cmp(rd_kafka_topic_conf_t *topic_conf, int (*msg_order_cmp)(const rd_kafka_message_t *a, const rd_kafka_message_t *b));
              +int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, int32_t partition);
              +int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, char *errstr, size_t errstr_size);
              +void rd_kafka_destroy(rd_kafka_t *rk);
              +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags);
              +const char *rd_kafka_name(const rd_kafka_t *rk);
              +rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk);
              +char *rd_kafka_memberid(const rd_kafka_t *rk);
              +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms);
              +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, rd_kafka_topic_conf_t *conf);
              +void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt);
              +const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt);
              +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt);
              +int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms);
              +void rd_kafka_yield(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_pause_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_resume_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high);
              +rd_kafka_resp_err_t rd_kafka_offsets_for_times(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets, int timeout_ms);
              +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr);
              +rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk);
              +void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu);
              +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, const char *topic, int32_t partition);
              +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk);
              +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst);
              +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu);
              +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, int fd, const void *payload, size_t size);
              +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, void (*event_cb)(rd_kafka_t *rk, void *qev_opaque), void *qev_opaque);
              +void rd_kafka_queue_yield(rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
              +rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_seek_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms);
              +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, int timeout_ms);
              +ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +rd_kafka_resp_err_t rd_kafka_offsets_store(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets);
              +rd_kafka_resp_err_t rd_kafka_subscribe(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *topics);
              +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics);
              +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_incremental_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_error_t *rd_kafka_incremental_unassign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +const char *rd_kafka_rebalance_protocol(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_assignment(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **partitions);
              +int rd_kafka_assignment_lost(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_commit(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_message(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_queue(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_queue_t *rkqu, void (*cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_committed(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata(rd_kafka_t *rk);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new(const char *group_id);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new_with_genid(const char *group_id, int32_t generation_id, const char *member_id, const char *group_instance_id);
              +void rd_kafka_consumer_group_metadata_destroy(rd_kafka_consumer_group_metadata_t *);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_write(const rd_kafka_consumer_group_metadata_t *cgmd, void **bufferp, size_t *sizep);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_read(rd_kafka_consumer_group_metadata_t **cgmdp, const void *buffer, size_t size);
              +int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, void *payload, size_t len, const void *key, size_t keylen, void *msg_opaque);
              +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...);
              +rd_kafka_error_t *rd_kafka_produceva(rd_kafka_t *rk, const rd_kafka_vu_t *vus, size_t cnt);
              +int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, rd_kafka_message_t *rkmessages, int message_cnt);
              +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags);
              +typedef struct rd_kafka_metadata_broker {
              +  int32_t id;
              +  char *host;
              +  int port;
              +} rd_kafka_metadata_broker_t;
              +typedef struct rd_kafka_metadata_partition {
              +  int32_t id;
              +  rd_kafka_resp_err_t err;
              +  int32_t leader;
              +  int replica_cnt;
              +  int32_t *replicas;
              +  int isr_cnt;
              +  int32_t *isrs;
              +} rd_kafka_metadata_partition_t;
              +typedef struct rd_kafka_metadata_topic {
              +  char *topic;
              +  int partition_cnt;
              +  struct rd_kafka_metadata_partition *partitions;
              +  rd_kafka_resp_err_t err;
              +} rd_kafka_metadata_topic_t;
              +typedef struct rd_kafka_metadata {
              +  int broker_cnt;
              +  struct rd_kafka_metadata_broker *brokers;
              +  int topic_cnt;
              +  struct rd_kafka_metadata_topic *topics;
              +  int32_t orig_broker_id;
              +  char *orig_broker_name;
              +} rd_kafka_metadata_t;
              +rd_kafka_resp_err_t rd_kafka_metadata(rd_kafka_t *rk, int all_topics, rd_kafka_topic_t *only_rkt, const struct rd_kafka_metadata **metadatap, int timeout_ms);
              +void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata);
              +struct rd_kafka_group_member_info {
              +  char *member_id;
              +  char *client_id;
              +  char *client_host;
              +  void *member_metadata;
              +  int member_metadata_size;
              +  void *member_assignment;
              +  int member_assignment_size;
              +};
              +struct rd_kafka_group_info {
              +  struct rd_kafka_metadata_broker broker;
              +  char *group;
              +  rd_kafka_resp_err_t err;
              +  char *state;
              +  char *protocol_type;
              +  char *protocol;
              +  struct rd_kafka_group_member_info *members;
              +  int member_cnt;
              +};
              +struct rd_kafka_group_list {
              +  struct rd_kafka_group_info *groups;
              +  int group_cnt;
              +};
              +rd_kafka_resp_err_t rd_kafka_list_groups(rd_kafka_t *rk, const char *group, const struct rd_kafka_group_list **grplistp, int timeout_ms);
              +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist);
              +int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist);
              +void rd_kafka_set_logger(rd_kafka_t *rk, void (*func)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_set_log_level(rd_kafka_t *rk, int level);
              +void rd_kafka_log_print(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +int rd_kafka_outq_len(rd_kafka_t *rk);
              +void rd_kafka_dump(FILE *fp, rd_kafka_t *rk);
              +int rd_kafka_thread_cnt(void);
              +typedef enum rd_kafka_thread_type_t {
              +  RD_KAFKA_THREAD_MAIN,
              +  RD_KAFKA_THREAD_BACKGROUND,
              +  RD_KAFKA_THREAD_BROKER,
              +} rd_kafka_thread_type_t;
              +int rd_kafka_wait_destroyed(int timeout_ms);
              +int rd_kafka_unittest(void);
              +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk);
              +typedef int rd_kafka_event_type_t;
              +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev);
              +void rd_kafka_event_destroy(rd_kafka_event_t *rkev);
              +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev);
              +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, const rd_kafka_message_t **rkmessages, size_t size);
              +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev);
              +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev);
              +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev);
              +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev);
              +int rd_kafka_event_log(rd_kafka_event_t *rkev, const char **fac, const char **str, int *level);
              +int rd_kafka_event_debug_contexts(rd_kafka_event_t *rkev, char *dst, size_t dstsize);
              +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_list_t *rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_t *rd_kafka_event_topic_partition(rd_kafka_event_t *rkev);
              +typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteRecords_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteGroups_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteConsumerGroupOffsets_result_t;
              +const rd_kafka_CreateTopics_result_t *rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteTopics_result_t *rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreatePartitions_result_t *rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConfigs_result_t *rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConfigs_result_t *rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteRecords_result_t *rd_kafka_event_DeleteRecords_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteGroups_result_t *rd_kafka_event_DeleteGroups_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteConsumerGroupOffsets_result_t *rd_kafka_event_DeleteConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
              +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms);
              +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms);
              +typedef rd_kafka_resp_err_t (rd_kafka_plugin_f_conf_init_t)(rd_kafka_conf_t *conf, void **plug_opaquep, char *errstr, size_t errstr_size);
              +typedef rd_kafka_conf_res_t (rd_kafka_interceptor_f_on_conf_set_t)(rd_kafka_conf_t *conf, const char *name, const char *val, char *errstr, size_t errstr_size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_dup_t)(rd_kafka_conf_t *new_conf, const rd_kafka_conf_t *old_conf, size_t filter_cnt, const char **filter, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_destroy_t)(void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_new_t)(rd_kafka_t *rk, const rd_kafka_conf_t *conf, void *ic_opaque, char *errstr, size_t errstr_size);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_send_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_acknowledgement_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_consume_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_commit_t)(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_request_sent_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_response_received_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, int64_t rtt, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_start_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_exit_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_new_t *on_new, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_destroy_t *on_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_send_t *on_send, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_consume_t *on_consume, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_commit_t *on_commit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_response_received(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_response_received_t *on_response_received, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres);
              +const rd_kafka_error_t *rd_kafka_group_result_error(const rd_kafka_group_result_t *groupres);
              +const char *rd_kafka_group_result_name(const rd_kafka_group_result_t *groupres);
              +const rd_kafka_topic_partition_list_t *rd_kafka_group_result_partitions(const rd_kafka_group_result_t *groupres);
              +typedef enum rd_kafka_admin_op_t {
              +  RD_KAFKA_ADMIN_OP_ANY = 0,
              +  RD_KAFKA_ADMIN_OP_CREATETOPICS,
              +  RD_KAFKA_ADMIN_OP_DELETETOPICS,
              +  RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONFIGS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS,
              +  RD_KAFKA_ADMIN_OP_DELETERECORDS,
              +  RD_KAFKA_ADMIN_OP_DELETEGROUPS,
              +  RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS,
              +  RD_KAFKA_ADMIN_OP__CNT,
              +} rd_kafka_admin_op_t;
              +typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t;
              +rd_kafka_AdminOptions_t *rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
              +void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, int true_or_false, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, int32_t broker_id, char *errstr, size_t errstr_size);
              +void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, void *ev_opaque);
              +typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t;
              +rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, int num_partitions, int replication_factor, char *errstr, size_t errstr_size);
              +void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic);
              +void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, int32_t partition, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, const char *name, const char *value);
              +void rd_kafka_CreateTopics(rd_kafka_t *rk, rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(const rd_kafka_CreateTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
              +rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic);
              +void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic);
              +void rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt);
              +void rd_kafka_DeleteTopics(rd_kafka_t *rk, rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(const rd_kafka_DeleteTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t;
              +rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new(const char *topic, size_t new_total_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts);
              +void rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment(rd_kafka_NewPartitions_t *new_parts, int32_t new_partition_idx, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_CreatePartitions(rd_kafka_t *rk, rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreatePartitions_result_topics(const rd_kafka_CreatePartitions_result_t *result, size_t *cntp);
              +typedef enum rd_kafka_ConfigSource_t {
              +  RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
              +  RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4,
              +  RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5,
              +  RD_KAFKA_CONFIG_SOURCE__CNT,
              +} rd_kafka_ConfigSource_t;
              +const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource);
              +typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t;
              +const char *rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry);
              +const char *rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry);
              +rd_kafka_ConfigSource_t rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, size_t *cntp);
              +typedef enum rd_kafka_ResourceType_t {
              +  RD_KAFKA_RESOURCE_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_ANY = 1,
              +  RD_KAFKA_RESOURCE_TOPIC = 2,
              +  RD_KAFKA_RESOURCE_GROUP = 3,
              +  RD_KAFKA_RESOURCE_BROKER = 4,
              +  RD_KAFKA_RESOURCE__CNT,
              +} rd_kafka_ResourceType_t;
              +const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype);
              +typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
              +rd_kafka_ConfigResource_t *rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, const char *resname);
              +void rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config);
              +void rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, size_t config_cnt);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, const char *name, const char *value);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, size_t *cntp);
              +rd_kafka_ResourceType_t rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config);
              +void rd_kafka_AlterConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_AlterConfigs_result_resources(const rd_kafka_AlterConfigs_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_DescribeConfigs_result_resources(const rd_kafka_DescribeConfigs_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteRecords_s rd_kafka_DeleteRecords_t;
              +rd_kafka_DeleteRecords_t *rd_kafka_DeleteRecords_new(const rd_kafka_topic_partition_list_t *before_offsets);
              +void rd_kafka_DeleteRecords_destroy(rd_kafka_DeleteRecords_t *del_records);
              +void rd_kafka_DeleteRecords_destroy_array(rd_kafka_DeleteRecords_t **del_records, size_t del_record_cnt);
              +void rd_kafka_DeleteRecords(rd_kafka_t *rk, rd_kafka_DeleteRecords_t **del_records, size_t del_record_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_partition_list_t *rd_kafka_DeleteRecords_result_offsets(const rd_kafka_DeleteRecords_result_t *result);
              +typedef struct rd_kafka_DeleteGroup_s rd_kafka_DeleteGroup_t;
              +rd_kafka_DeleteGroup_t *rd_kafka_DeleteGroup_new(const char *group);
              +void rd_kafka_DeleteGroup_destroy(rd_kafka_DeleteGroup_t *del_group);
              +void rd_kafka_DeleteGroup_destroy_array(rd_kafka_DeleteGroup_t **del_groups, size_t del_group_cnt);
              +void rd_kafka_DeleteGroups(rd_kafka_t *rk, rd_kafka_DeleteGroup_t **del_groups, size_t del_group_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_DeleteGroups_result_groups(const rd_kafka_DeleteGroups_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteConsumerGroupOffsets_s rd_kafka_DeleteConsumerGroupOffsets_t;
              +rd_kafka_DeleteConsumerGroupOffsets_t *rd_kafka_DeleteConsumerGroupOffsets_new(const char *group, const rd_kafka_topic_partition_list_t *partitions);
              +void rd_kafka_DeleteConsumerGroupOffsets_destroy(rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets);
              +void rd_kafka_DeleteConsumerGroupOffsets_destroy_array(rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, size_t del_grpoffset_cnt);
              +void rd_kafka_DeleteConsumerGroupOffsets(rd_kafka_t *rk, rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, size_t del_grpoffsets_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_DeleteConsumerGroupOffsets_result_groups(const rd_kafka_DeleteConsumerGroupOffsets_result_t *result, size_t *cntp);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token(rd_kafka_t *rk, const char *token_value, int64_t md_lifetime_ms, const char *md_principal_name, const char **extensions, size_t extension_size, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk, const char *errstr);
              +rd_kafka_error_t *rd_kafka_init_transactions(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_send_offsets_to_transaction(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, const rd_kafka_consumer_group_metadata_t *cgmetadata, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_commit_transaction(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_abort_transaction(rd_kafka_t *rk, int timeout_ms);
              +typedef struct rd_kafka_mock_cluster_s rd_kafka_mock_cluster_t;
              +rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new(rd_kafka_t *rk, int broker_cnt);
              +void rd_kafka_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_t *rd_kafka_mock_cluster_handle(const rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_mock_cluster_t *rd_kafka_handle_mock_cluster(const rd_kafka_t *rk);
              +const char *rd_kafka_mock_cluster_bootstraps(const rd_kafka_mock_cluster_t *mcluster);
              +void rd_kafka_mock_clear_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey);
              +void rd_kafka_mock_push_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, ...);
              +void rd_kafka_mock_push_request_errors_array(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, const rd_kafka_resp_err_t *errors);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_push_request_errors(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int16_t ApiKey, size_t cnt, ...);
              +void rd_kafka_mock_topic_set_error(rd_kafka_mock_cluster_t *mcluster, const char *topic, rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_mock_topic_create(rd_kafka_mock_cluster_t *mcluster, const char *topic, int partition_cnt, int replication_factor);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_leader(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int64_t lo, int64_t hi);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_down(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_up(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rtt(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int rtt_ms);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rack(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, const char *rack);
              +rd_kafka_resp_err_t rd_kafka_mock_coordinator_set(rd_kafka_mock_cluster_t *mcluster, const char *key_type, const char *key, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_set_apiversion(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, int16_t MinVersion, int16_t MaxVersion);
              +'
              +
              +rdkafka.h, rdkafka_mock.h

              +
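              The declarations above, noted as coming from rdkafka.h and rdkafka_mock.h, are the C definitions that an FFI-based client can hand to PHP's FFI layer (requires the FFI extension). A minimal, hypothetical sketch; the shared-library name is an assumption and varies by platform:
              +<?php
              +// Illustration only: bind librdkafka through PHP FFI using the documented cdef constant.
              +// 'librdkafka.so.1' is an assumed library name; adjust it for your OS/installation.
              +$kafka = \FFI::cdef(\RD_KAFKA_CDEF, 'librdkafka.so.1');
              +// Any function declared in the cdef is now callable, e.g. the runtime version query:
              +printf("librdkafka runtime version: 0x%08x\n", $kafka->rd_kafka_version());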

              RD_KAFKA_VERSION

              +
              public RD_KAFKA_VERSION = 17171199
              +
              +

              librdkafka version

              +

              Interpreted as hex MM.mm.rr.xx:

              +
              • MM = Major
              • mm = minor
              • rr = revision
              • xx = pre-release id (0xff is the final release)
              +

              E.g.: 0x000801ff = 0.8.1

              +
              Remarks
              This value should only be used at compile time; for runtime version checks, use rd_kafka_version()
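              As a worked example of the MM.mm.rr.xx layout, here is a minimal sketch that unpacks the documented value (17171199 = 0x010602ff, i.e. librdkafka 1.6.2):
              +<?php
              +// Decode the packed librdkafka version integer (MM.mm.rr.xx, as described above).
              +$v = 17171199; // RD_KAFKA_VERSION as documented; prefer rd_kafka_version() for runtime checks
              +$pre = $v & 0xFF; // 0xff marks a final release
              +printf(
              +    "%d.%d.%d%s\n",
              +    ($v >> 24) & 0xFF, // MM = Major
              +    ($v >> 16) & 0xFF, // mm = minor
              +    ($v >> 8) & 0xFF,  // rr = revision
              +    $pre === 0xFF ? '' : sprintf('-pre%x', $pre)
              +);
              +// Prints "1.6.2"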

              RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE

              +
              public RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76
              +
              +

              Unsupported compression type


              RD_KAFKA_RESP_ERR_END_ALL

              +

              public RD_KAFKA_RESP_ERR_END_ALL = 98
              +
              +enum rd_kafka_resp_err_t

              +

              RD_KAFKA_ADMIN_OP__CNT

              +
              public RD_KAFKA_ADMIN_OP__CNT = 9
              +
              +

              Number of ops defined


              RD_KAFKA_CDEF

              +

              public RD_KAFKA_CDEF = 'typedef long int ssize_t;
              +typedef struct _IO_FILE FILE;
              +typedef long int mode_t;
              +typedef signed int int16_t;
              +typedef unsigned int uint16_t;
              +typedef signed int int32_t;
              +typedef signed long int int64_t;
              +int rd_kafka_version(void);
              +const char *rd_kafka_version_str(void);
              +typedef enum rd_kafka_type_t {
              +  RD_KAFKA_PRODUCER,
              +  RD_KAFKA_CONSUMER,
              +} rd_kafka_type_t;
              +typedef enum rd_kafka_timestamp_type_t {
              +  RD_KAFKA_TIMESTAMP_NOT_AVAILABLE,
              +  RD_KAFKA_TIMESTAMP_CREATE_TIME,
              +  RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME,
              +} rd_kafka_timestamp_type_t;
              +const char *rd_kafka_get_debug_contexts(void);
              +typedef struct rd_kafka_s rd_kafka_t;
              +typedef struct rd_kafka_topic_s rd_kafka_topic_t;
              +typedef struct rd_kafka_conf_s rd_kafka_conf_t;
              +typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t;
              +typedef struct rd_kafka_queue_s rd_kafka_queue_t;
              +typedef struct rd_kafka_op_s rd_kafka_event_t;
              +typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t;
              +typedef struct rd_kafka_consumer_group_metadata_s rd_kafka_consumer_group_metadata_t;
              +typedef struct rd_kafka_error_s {
              +  unsigned int code;
              +  char *errstr;
              +  unsigned char fatal;
              +  unsigned char retriable;
              +  unsigned char txn_requires_abort;
              +} rd_kafka_error_t;
              +typedef struct rd_kafka_headers_s rd_kafka_headers_t;
              +typedef struct rd_kafka_group_result_s rd_kafka_group_result_t;
              +typedef enum {
              +  RD_KAFKA_RESP_ERR__BEGIN = (- 200),
              +  RD_KAFKA_RESP_ERR__BAD_MSG = (- 199),
              +  RD_KAFKA_RESP_ERR__BAD_COMPRESSION = (- 198),
              +  RD_KAFKA_RESP_ERR__DESTROY = (- 197),
              +  RD_KAFKA_RESP_ERR__FAIL = (- 196),
              +  RD_KAFKA_RESP_ERR__TRANSPORT = (- 195),
              +  RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = (- 194),
              +  RD_KAFKA_RESP_ERR__RESOLVE = (- 193),
              +  RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = (- 192),
              +  RD_KAFKA_RESP_ERR__PARTITION_EOF = (- 191),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = (- 190),
              +  RD_KAFKA_RESP_ERR__FS = (- 189),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = (- 188),
              +  RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = (- 187),
              +  RD_KAFKA_RESP_ERR__INVALID_ARG = (- 186),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT = (- 185),
              +  RD_KAFKA_RESP_ERR__QUEUE_FULL = (- 184),
              +  RD_KAFKA_RESP_ERR__ISR_INSUFF = (- 183),
              +  RD_KAFKA_RESP_ERR__NODE_UPDATE = (- 182),
              +  RD_KAFKA_RESP_ERR__SSL = (- 181),
              +  RD_KAFKA_RESP_ERR__WAIT_COORD = (- 180),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = (- 179),
              +  RD_KAFKA_RESP_ERR__IN_PROGRESS = (- 178),
              +  RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = (- 177),
              +  RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = (- 176),
              +  RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = (- 175),
              +  RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = (- 174),
              +  RD_KAFKA_RESP_ERR__CONFLICT = (- 173),
              +  RD_KAFKA_RESP_ERR__STATE = (- 172),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = (- 171),
              +  RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = (- 170),
              +  RD_KAFKA_RESP_ERR__AUTHENTICATION = (- 169),
              +  RD_KAFKA_RESP_ERR__NO_OFFSET = (- 168),
              +  RD_KAFKA_RESP_ERR__OUTDATED = (- 167),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = (- 166),
              +  RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = (- 165),
              +  RD_KAFKA_RESP_ERR__WAIT_CACHE = (- 164),
              +  RD_KAFKA_RESP_ERR__INTR = (- 163),
              +  RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = (- 162),
              +  RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = (- 161),
              +  RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = (- 160),
              +  RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = (- 159),
              +  RD_KAFKA_RESP_ERR__PARTIAL = (- 158),
              +  RD_KAFKA_RESP_ERR__READ_ONLY = (- 157),
              +  RD_KAFKA_RESP_ERR__NOENT = (- 156),
              +  RD_KAFKA_RESP_ERR__UNDERFLOW = (- 155),
              +  RD_KAFKA_RESP_ERR__INVALID_TYPE = (- 154),
              +  RD_KAFKA_RESP_ERR__RETRY = (- 153),
              +  RD_KAFKA_RESP_ERR__PURGE_QUEUE = (- 152),
              +  RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = (- 151),
              +  RD_KAFKA_RESP_ERR__FATAL = (- 150),
              +  RD_KAFKA_RESP_ERR__INCONSISTENT = (- 149),
              +  RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = (- 148),
              +  RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = (- 147),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_BROKER = (- 146),
              +  RD_KAFKA_RESP_ERR__NOT_CONFIGURED = (- 145),
              +  RD_KAFKA_RESP_ERR__FENCED = (- 144),
              +  RD_KAFKA_RESP_ERR__APPLICATION = (- 143),
              +  RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST = (- 142),
              +  RD_KAFKA_RESP_ERR__NOOP = (- 141),
              +  RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET = (- 140),
              +  RD_KAFKA_RESP_ERR__END = (- 100),
              +  RD_KAFKA_RESP_ERR_UNKNOWN = (- 1),
              +  RD_KAFKA_RESP_ERR_NO_ERROR = 0,
              +  RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG = 2,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4,
              +  RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5,
              +  RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6,
              +  RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7,
              +  RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8,
              +  RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9,
              +  RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10,
              +  RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11,
              +  RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12,
              +  RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS = 14,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE = 15,
              +  RD_KAFKA_RESP_ERR_NOT_COORDINATOR = 16,
              +  RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17,
              +  RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
              +  RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25,
              +  RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26,
              +  RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27,
              +  RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
              +  RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29,
              +  RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30,
              +  RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
              +  RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35,
              +  RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36,
              +  RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39,
              +  RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40,
              +  RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
              +  RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44,
              +  RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47,
              +  RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49,
              +  RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50,
              +  RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51,
              +  RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52,
              +  RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
              +  RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54,
              +  RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55,
              +  RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56,
              +  RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57,
              +  RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59,
              +  RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66,
              +  RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67,
              +  RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68,
              +  RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69,
              +  RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70,
              +  RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71,
              +  RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72,
              +  RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73,
              +  RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76,
              +  RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77,
              +  RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78,
              +  RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79,
              +  RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80,
              +  RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81,
              +  RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID = 82,
              +  RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE = 83,
              +  RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED = 84,
              +  RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS = 85,
              +  RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC = 86,
              +  RD_KAFKA_RESP_ERR_INVALID_RECORD = 87,
              +  RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT = 88,
              +  RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED = 89,
              +  RD_KAFKA_RESP_ERR_PRODUCER_FENCED = 90,
              +  RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND = 91,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE = 92,
              +  RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL = 93,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET = 94,
              +  RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION = 95,
              +  RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED = 96,
              +  RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE = 97,
              +  RD_KAFKA_RESP_ERR_END_ALL,
              +} rd_kafka_resp_err_t;
              +struct rd_kafka_err_desc {
              +  rd_kafka_resp_err_t code;
              +  char *name;
              +  char *desc;
              +};
              +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, size_t *cntp);
              +const char *rd_kafka_err2str(rd_kafka_resp_err_t err);
              +const char *rd_kafka_err2name(rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_last_error(void);
              +rd_kafka_resp_err_t rd_kafka_errno2err(int errnox);
              +int rd_kafka_errno(void);
              +rd_kafka_resp_err_t rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason);
              +rd_kafka_resp_err_t rd_kafka_error_code(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_name(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_string(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_fatal(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_retriable(const rd_kafka_error_t *error);
              +int rd_kafka_error_txn_requires_abort(const rd_kafka_error_t *error);
              +void rd_kafka_error_destroy(rd_kafka_error_t *error);
              +rd_kafka_error_t *rd_kafka_error_new(rd_kafka_resp_err_t code, const char *fmt, ...);
              +typedef struct rd_kafka_topic_partition_s {
              +  char *topic;
              +  int32_t partition;
              +  int64_t offset;
              +  void *metadata;
              +  size_t metadata_size;
              +  void *opaque;
              +  rd_kafka_resp_err_t err;
              +  void *_private;
              +} rd_kafka_topic_partition_t;
              +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar);
              +typedef struct rd_kafka_topic_partition_list_s {
              +  int cnt;
              +  int size;
              +  rd_kafka_topic_partition_t *elems;
              +} rd_kafka_topic_partition_list_t;
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size);
              +void rd_kafka_topic_partition_list_destroy(rd_kafka_topic_partition_list_t *rkparlist);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_add_range(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t start, int32_t stop);
              +int rd_kafka_topic_partition_list_del(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +int rd_kafka_topic_partition_list_del_by_idx(rd_kafka_topic_partition_list_t *rktparlist, int idx);
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src);
              +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition, int64_t offset);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(const rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_sort(rd_kafka_topic_partition_list_t *rktparlist, int (*cmp)(const void *a, const void *b, void *cmp_opaque), void *cmp_opaque);
              +typedef enum rd_kafka_vtype_t {
              +  RD_KAFKA_VTYPE_END,
              +  RD_KAFKA_VTYPE_TOPIC,
              +  RD_KAFKA_VTYPE_RKT,
              +  RD_KAFKA_VTYPE_PARTITION,
              +  RD_KAFKA_VTYPE_VALUE,
              +  RD_KAFKA_VTYPE_KEY,
              +  RD_KAFKA_VTYPE_OPAQUE,
              +  RD_KAFKA_VTYPE_MSGFLAGS,
              +  RD_KAFKA_VTYPE_TIMESTAMP,
              +  RD_KAFKA_VTYPE_HEADER,
              +  RD_KAFKA_VTYPE_HEADERS,
              +} rd_kafka_vtype_t;
              +typedef struct rd_kafka_vu_s {
              +  rd_kafka_vtype_t vtype;
              +  union {
              +    char *cstr;
              +    rd_kafka_topic_t *rkt;
              +    int i;
              +    int32_t i32;
              +    int64_t i64;
              +    struct {
              +      void *ptr;
              +      size_t size;
              +    } mem;
              +    struct {
              +      char *name;
              +      void *val;
              +      ssize_t size;
              +    } header;
              +    rd_kafka_headers_t *headers;
              +    void *ptr;
              +    char _pad[64];
              +  } u;
              +} rd_kafka_vu_t;
              +rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count);
              +void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs);
              +rd_kafka_headers_t *rd_kafka_headers_copy(const rd_kafka_headers_t *src);
              +rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, const char *name, ssize_t name_size, const void *value, ssize_t value_size);
              +rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, const char *name);
              +rd_kafka_resp_err_t rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get(const rd_kafka_headers_t *hdrs, size_t idx, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, size_t idx, const char **namep, const void **valuep, size_t *sizep);
              +typedef struct rd_kafka_message_s {
              +  rd_kafka_resp_err_t err;
              +  rd_kafka_topic_t *rkt;
              +  int32_t partition;
              +  void *payload;
              +  size_t len;
              +  void *key;
              +  size_t key_len;
              +  int64_t offset;
              +  void *_private;
              +} rd_kafka_message_t;
              +void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);
              +const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage);
              +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, rd_kafka_timestamp_type_t *tstype);
              +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage);
              +int32_t rd_kafka_message_broker_id(const rd_kafka_message_t *rkmessage);
              +rd_kafka_resp_err_t rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +rd_kafka_resp_err_t rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t *hdrs);
              +size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs);
              +typedef enum {
              +  RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0,
              +  RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1,
              +  RD_KAFKA_MSG_STATUS_PERSISTED = 2,
              +} rd_kafka_msg_status_t;
              +rd_kafka_msg_status_t rd_kafka_message_status(const rd_kafka_message_t *rkmessage);
              +typedef enum {
              +  RD_KAFKA_CONF_UNKNOWN = (- 2),
              +  RD_KAFKA_CONF_INVALID = (- 1),
              +  RD_KAFKA_CONF_OK = 0,
              +} rd_kafka_conf_res_t;
              +rd_kafka_conf_t *rd_kafka_conf_new(void);
              +void rd_kafka_conf_destroy(rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, size_t filter_cnt, const char **filter);
              +const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk);
              +rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events);
              +void rd_kafka_conf_set_background_event_cb(rd_kafka_conf_t *conf, void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque));
              +void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, void (*dr_cb)(rd_kafka_t *rk, void *payload, size_t len, rd_kafka_resp_err_t err, void *opaque, void *msg_opaque));
              +void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, void (*dr_msg_cb)(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_consume_cb(rd_kafka_conf_t *conf, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_rebalance_cb(rd_kafka_conf_t *conf, void (*rebalance_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque));
              +void rd_kafka_conf_set_offset_commit_cb(rd_kafka_conf_t *conf, void (*offset_commit_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque));
              +void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, void (*error_cb)(rd_kafka_t *rk, int err, const char *reason, void *opaque));
              +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, void (*throttle_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque));
              +void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, void (*log_cb)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque));
              +void rd_kafka_conf_set_oauthbearer_token_refresh_cb(rd_kafka_conf_t *conf, void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, const char *oauthbearer_config, void *opaque));
              +void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, int (*socket_cb)(int domain, int type, int protocol, void *opaque));
              +void rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, int (*connect_cb)(int sockfd, const struct sockaddr *addr, int addrlen, const char *id, void *opaque));
              +void rd_kafka_conf_set_closesocket_cb(rd_kafka_conf_t *conf, int (*closesocket_cb)(int sockfd, void *opaque));
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb(rd_kafka_conf_t *conf, int (*ssl_cert_verify_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int *x509_error, int depth, const char *buf, size_t size, char *errstr, size_t errstr_size, void *opaque));
              +typedef enum rd_kafka_cert_type_t {
              +  RD_KAFKA_CERT_PUBLIC_KEY,
              +  RD_KAFKA_CERT_PRIVATE_KEY,
              +  RD_KAFKA_CERT_CA,
              +  RD_KAFKA_CERT__CNT,
              +} rd_kafka_cert_type_t;
              +typedef enum rd_kafka_cert_enc_t {
              +  RD_KAFKA_CERT_ENC_PKCS12,
              +  RD_KAFKA_CERT_ENC_DER,
              +  RD_KAFKA_CERT_ENC_PEM,
              +  RD_KAFKA_CERT_ENC__CNT,
              +} rd_kafka_cert_enc_t;
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, rd_kafka_cert_type_t cert_type, rd_kafka_cert_enc_t cert_enc, const void *buffer, size_t size, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque);
              +void *rd_kafka_opaque(const rd_kafka_t *rk);
              +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, rd_kafka_topic_conf_t *tconf);
              +rd_kafka_topic_conf_t *rd_kafka_conf_get_default_topic_conf(rd_kafka_conf_t *conf);
              +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp);
              +const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, size_t *cntp);
              +void rd_kafka_conf_dump_free(const char **arr, size_t cnt);
              +void rd_kafka_conf_properties_show(FILE *fp);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf);
              +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk);
              +void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *rkt_opaque);
              +void rd_kafka_topic_conf_set_partitioner_cb(rd_kafka_topic_conf_t *topic_conf, int32_t (*partitioner)(const rd_kafka_topic_t *rkt, const void *keydata, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque));
              +void rd_kafka_topic_conf_set_msg_order_cmp(rd_kafka_topic_conf_t *topic_conf, int (*msg_order_cmp)(const rd_kafka_message_t *a, const rd_kafka_message_t *b));
              +int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, int32_t partition);
              +int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, char *errstr, size_t errstr_size);
              +void rd_kafka_destroy(rd_kafka_t *rk);
              +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags);
              +const char *rd_kafka_name(const rd_kafka_t *rk);
              +rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk);
              +char *rd_kafka_memberid(const rd_kafka_t *rk);
              +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms);
              +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, rd_kafka_topic_conf_t *conf);
              +void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt);
              +const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt);
              +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt);
              +int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms);
              +void rd_kafka_yield(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_pause_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_resume_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high);
              +rd_kafka_resp_err_t rd_kafka_offsets_for_times(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets, int timeout_ms);
              +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr);
              +rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk);
              +void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu);
              +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, const char *topic, int32_t partition);
              +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk);
              +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst);
              +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu);
              +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, int fd, const void *payload, size_t size);
              +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, void (*event_cb)(rd_kafka_t *rk, void *qev_opaque), void *qev_opaque);
              +void rd_kafka_queue_yield(rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
              +rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_seek_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms);
              +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, int timeout_ms);
              +ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +rd_kafka_resp_err_t rd_kafka_offsets_store(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets);
              +rd_kafka_resp_err_t rd_kafka_subscribe(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *topics);
              +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics);
              +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_incremental_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_error_t *rd_kafka_incremental_unassign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +const char *rd_kafka_rebalance_protocol(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_assignment(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **partitions);
              +int rd_kafka_assignment_lost(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_commit(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_message(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_queue(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_queue_t *rkqu, void (*cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_committed(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata(rd_kafka_t *rk);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new(const char *group_id);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new_with_genid(const char *group_id, int32_t generation_id, const char *member_id, const char *group_instance_id);
              +void rd_kafka_consumer_group_metadata_destroy(rd_kafka_consumer_group_metadata_t *);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_write(const rd_kafka_consumer_group_metadata_t *cgmd, void **bufferp, size_t *sizep);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_read(rd_kafka_consumer_group_metadata_t **cgmdp, const void *buffer, size_t size);
              +int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, void *payload, size_t len, const void *key, size_t keylen, void *msg_opaque);
              +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...);
              +rd_kafka_error_t *rd_kafka_produceva(rd_kafka_t *rk, const rd_kafka_vu_t *vus, size_t cnt);
              +int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, rd_kafka_message_t *rkmessages, int message_cnt);
              +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags);
              +typedef struct rd_kafka_metadata_broker {
              +  int32_t id;
              +  char *host;
              +  int port;
              +} rd_kafka_metadata_broker_t;
              +typedef struct rd_kafka_metadata_partition {
              +  int32_t id;
              +  rd_kafka_resp_err_t err;
              +  int32_t leader;
              +  int replica_cnt;
              +  int32_t *replicas;
              +  int isr_cnt;
              +  int32_t *isrs;
              +} rd_kafka_metadata_partition_t;
              +typedef struct rd_kafka_metadata_topic {
              +  char *topic;
              +  int partition_cnt;
              +  struct rd_kafka_metadata_partition *partitions;
              +  rd_kafka_resp_err_t err;
              +} rd_kafka_metadata_topic_t;
              +typedef struct rd_kafka_metadata {
              +  int broker_cnt;
              +  struct rd_kafka_metadata_broker *brokers;
              +  int topic_cnt;
              +  struct rd_kafka_metadata_topic *topics;
              +  int32_t orig_broker_id;
              +  char *orig_broker_name;
              +} rd_kafka_metadata_t;
              +rd_kafka_resp_err_t rd_kafka_metadata(rd_kafka_t *rk, int all_topics, rd_kafka_topic_t *only_rkt, const struct rd_kafka_metadata **metadatap, int timeout_ms);
              +void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata);
              +struct rd_kafka_group_member_info {
              +  char *member_id;
              +  char *client_id;
              +  char *client_host;
              +  void *member_metadata;
              +  int member_metadata_size;
              +  void *member_assignment;
              +  int member_assignment_size;
              +};
              +struct rd_kafka_group_info {
              +  struct rd_kafka_metadata_broker broker;
              +  char *group;
              +  rd_kafka_resp_err_t err;
              +  char *state;
              +  char *protocol_type;
              +  char *protocol;
              +  struct rd_kafka_group_member_info *members;
              +  int member_cnt;
              +};
              +struct rd_kafka_group_list {
              +  struct rd_kafka_group_info *groups;
              +  int group_cnt;
              +};
              +rd_kafka_resp_err_t rd_kafka_list_groups(rd_kafka_t *rk, const char *group, const struct rd_kafka_group_list **grplistp, int timeout_ms);
              +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist);
              +int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist);
              +void rd_kafka_set_logger(rd_kafka_t *rk, void (*func)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_set_log_level(rd_kafka_t *rk, int level);
              +void rd_kafka_log_print(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +int rd_kafka_outq_len(rd_kafka_t *rk);
              +void rd_kafka_dump(FILE *fp, rd_kafka_t *rk);
              +int rd_kafka_thread_cnt(void);
              +typedef enum rd_kafka_thread_type_t {
              +  RD_KAFKA_THREAD_MAIN,
              +  RD_KAFKA_THREAD_BACKGROUND,
              +  RD_KAFKA_THREAD_BROKER,
              +} rd_kafka_thread_type_t;
              +int rd_kafka_wait_destroyed(int timeout_ms);
              +int rd_kafka_unittest(void);
              +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk);
              +typedef int rd_kafka_event_type_t;
              +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev);
              +void rd_kafka_event_destroy(rd_kafka_event_t *rkev);
              +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev);
              +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, const rd_kafka_message_t **rkmessages, size_t size);
              +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev);
              +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev);
              +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev);
              +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev);
              +int rd_kafka_event_log(rd_kafka_event_t *rkev, const char **fac, const char **str, int *level);
              +int rd_kafka_event_debug_contexts(rd_kafka_event_t *rkev, char *dst, size_t dstsize);
              +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_list_t *rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_t *rd_kafka_event_topic_partition(rd_kafka_event_t *rkev);
              +typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteRecords_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteGroups_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteConsumerGroupOffsets_result_t;
              +const rd_kafka_CreateTopics_result_t *rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteTopics_result_t *rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreatePartitions_result_t *rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConfigs_result_t *rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConfigs_result_t *rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteRecords_result_t *rd_kafka_event_DeleteRecords_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteGroups_result_t *rd_kafka_event_DeleteGroups_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteConsumerGroupOffsets_result_t *rd_kafka_event_DeleteConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
              +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms);
              +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms);
              +typedef rd_kafka_resp_err_t (rd_kafka_plugin_f_conf_init_t)(rd_kafka_conf_t *conf, void **plug_opaquep, char *errstr, size_t errstr_size);
              +typedef rd_kafka_conf_res_t (rd_kafka_interceptor_f_on_conf_set_t)(rd_kafka_conf_t *conf, const char *name, const char *val, char *errstr, size_t errstr_size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_dup_t)(rd_kafka_conf_t *new_conf, const rd_kafka_conf_t *old_conf, size_t filter_cnt, const char **filter, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_destroy_t)(void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_new_t)(rd_kafka_t *rk, const rd_kafka_conf_t *conf, void *ic_opaque, char *errstr, size_t errstr_size);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_send_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_acknowledgement_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_consume_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_commit_t)(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_request_sent_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_response_received_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, int64_t rtt, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_start_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_exit_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_new_t *on_new, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_destroy_t *on_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_send_t *on_send, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_consume_t *on_consume, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_commit_t *on_commit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_response_received(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_response_received_t *on_response_received, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres);
              +const rd_kafka_error_t *rd_kafka_group_result_error(const rd_kafka_group_result_t *groupres);
              +const char *rd_kafka_group_result_name(const rd_kafka_group_result_t *groupres);
              +const rd_kafka_topic_partition_list_t *rd_kafka_group_result_partitions(const rd_kafka_group_result_t *groupres);
              +typedef enum rd_kafka_admin_op_t {
              +  RD_KAFKA_ADMIN_OP_ANY = 0,
              +  RD_KAFKA_ADMIN_OP_CREATETOPICS,
              +  RD_KAFKA_ADMIN_OP_DELETETOPICS,
              +  RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONFIGS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS,
              +  RD_KAFKA_ADMIN_OP_DELETERECORDS,
              +  RD_KAFKA_ADMIN_OP_DELETEGROUPS,
              +  RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS,
              +  RD_KAFKA_ADMIN_OP__CNT,
              +} rd_kafka_admin_op_t;
              +typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t;
              +rd_kafka_AdminOptions_t *rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
              +void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, int true_or_false, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, int32_t broker_id, char *errstr, size_t errstr_size);
              +void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, void *ev_opaque);
              +typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t;
              +rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, int num_partitions, int replication_factor, char *errstr, size_t errstr_size);
              +void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic);
              +void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, int32_t partition, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, const char *name, const char *value);
              +void rd_kafka_CreateTopics(rd_kafka_t *rk, rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(const rd_kafka_CreateTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
              +rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic);
              +void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic);
              +void rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt);
              +void rd_kafka_DeleteTopics(rd_kafka_t *rk, rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(const rd_kafka_DeleteTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t;
              +rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new(const char *topic, size_t new_total_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts);
              +void rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment(rd_kafka_NewPartitions_t *new_parts, int32_t new_partition_idx, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_CreatePartitions(rd_kafka_t *rk, rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreatePartitions_result_topics(const rd_kafka_CreatePartitions_result_t *result, size_t *cntp);
              +typedef enum rd_kafka_ConfigSource_t {
              +  RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
              +  RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4,
              +  RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5,
              +  RD_KAFKA_CONFIG_SOURCE__CNT,
              +} rd_kafka_ConfigSource_t;
              +const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource);
              +typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t;
              +const char *rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry);
              +const char *rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry);
              +rd_kafka_ConfigSource_t rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, size_t *cntp);
              +typedef enum rd_kafka_ResourceType_t {
              +  RD_KAFKA_RESOURCE_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_ANY = 1,
              +  RD_KAFKA_RESOURCE_TOPIC = 2,
              +  RD_KAFKA_RESOURCE_GROUP = 3,
              +  RD_KAFKA_RESOURCE_BROKER = 4,
              +  RD_KAFKA_RESOURCE__CNT,
              +} rd_kafka_ResourceType_t;
              +const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype);
              +typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
              +rd_kafka_ConfigResource_t *rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, const char *resname);
              +void rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config);
              +void rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, size_t config_cnt);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, const char *name, const char *value);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, size_t *cntp);
              +rd_kafka_ResourceType_t rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config);
              +void rd_kafka_AlterConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_AlterConfigs_result_resources(const rd_kafka_AlterConfigs_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_DescribeConfigs_result_resources(const rd_kafka_DescribeConfigs_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteRecords_s rd_kafka_DeleteRecords_t;
              +rd_kafka_DeleteRecords_t *rd_kafka_DeleteRecords_new(const rd_kafka_topic_partition_list_t *before_offsets);
              +void rd_kafka_DeleteRecords_destroy(rd_kafka_DeleteRecords_t *del_records);
              +void rd_kafka_DeleteRecords_destroy_array(rd_kafka_DeleteRecords_t **del_records, size_t del_record_cnt);
              +void rd_kafka_DeleteRecords(rd_kafka_t *rk, rd_kafka_DeleteRecords_t **del_records, size_t del_record_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_partition_list_t *rd_kafka_DeleteRecords_result_offsets(const rd_kafka_DeleteRecords_result_t *result);
              +typedef struct rd_kafka_DeleteGroup_s rd_kafka_DeleteGroup_t;
              +rd_kafka_DeleteGroup_t *rd_kafka_DeleteGroup_new(const char *group);
              +void rd_kafka_DeleteGroup_destroy(rd_kafka_DeleteGroup_t *del_group);
              +void rd_kafka_DeleteGroup_destroy_array(rd_kafka_DeleteGroup_t **del_groups, size_t del_group_cnt);
              +void rd_kafka_DeleteGroups(rd_kafka_t *rk, rd_kafka_DeleteGroup_t **del_groups, size_t del_group_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_DeleteGroups_result_groups(const rd_kafka_DeleteGroups_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteConsumerGroupOffsets_s rd_kafka_DeleteConsumerGroupOffsets_t;
              +rd_kafka_DeleteConsumerGroupOffsets_t *rd_kafka_DeleteConsumerGroupOffsets_new(const char *group, const rd_kafka_topic_partition_list_t *partitions);
              +void rd_kafka_DeleteConsumerGroupOffsets_destroy(rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets);
              +void rd_kafka_DeleteConsumerGroupOffsets_destroy_array(rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, size_t del_grpoffset_cnt);
              +void rd_kafka_DeleteConsumerGroupOffsets(rd_kafka_t *rk, rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, size_t del_grpoffsets_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_DeleteConsumerGroupOffsets_result_groups(const rd_kafka_DeleteConsumerGroupOffsets_result_t *result, size_t *cntp);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token(rd_kafka_t *rk, const char *token_value, int64_t md_lifetime_ms, const char *md_principal_name, const char **extensions, size_t extension_size, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk, const char *errstr);
              +rd_kafka_error_t *rd_kafka_init_transactions(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_send_offsets_to_transaction(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, const rd_kafka_consumer_group_metadata_t *cgmetadata, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_commit_transaction(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_abort_transaction(rd_kafka_t *rk, int timeout_ms);
              +typedef struct rd_kafka_mock_cluster_s rd_kafka_mock_cluster_t;
              +rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new(rd_kafka_t *rk, int broker_cnt);
              +void rd_kafka_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_t *rd_kafka_mock_cluster_handle(const rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_mock_cluster_t *rd_kafka_handle_mock_cluster(const rd_kafka_t *rk);
              +const char *rd_kafka_mock_cluster_bootstraps(const rd_kafka_mock_cluster_t *mcluster);
              +void rd_kafka_mock_clear_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey);
              +void rd_kafka_mock_push_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, ...);
              +void rd_kafka_mock_push_request_errors_array(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, const rd_kafka_resp_err_t *errors);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_push_request_errors(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int16_t ApiKey, size_t cnt, ...);
              +void rd_kafka_mock_topic_set_error(rd_kafka_mock_cluster_t *mcluster, const char *topic, rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_mock_topic_create(rd_kafka_mock_cluster_t *mcluster, const char *topic, int partition_cnt, int replication_factor);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_leader(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int64_t lo, int64_t hi);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_down(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_up(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rtt(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int rtt_ms);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rack(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, const char *rack);
              +rd_kafka_resp_err_t rd_kafka_mock_coordinator_set(rd_kafka_mock_cluster_t *mcluster, const char *key_type, const char *key, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_set_apiversion(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, int16_t MinVersion, int16_t MaxVersion);
              +'
              +
              +rdkafka.h, rdkafka_mock.h

              +
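As a rough illustration (not part of the generated reference), a cdef string such as the one above can be handed to PHP's FFI extension. The shared-library name below is an assumption and varies per platform; the binding itself resolves the actual library to load.

<?php
// Minimal sketch: parse the declarations and bind them to librdkafka.
// Assumptions: the constant is reachable as RD_KAFKA_CDEF in this scope
// and the shared object is installed as "librdkafka.so.1".
$kafka = \FFI::cdef(RD_KAFKA_CDEF, 'librdkafka.so.1');

// Any function declared above is now callable, e.g. the runtime version:
$version = $kafka->rd_kafka_version();                    // int, e.g. 0x010700ff
$name    = \FFI::string($kafka->rd_kafka_version_str());  // string, e.g. "1.7.0"
printf("librdkafka %s (0x%08x)\n", $name, $version);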

              RD_KAFKA_VERSION

              +
              public RD_KAFKA_VERSION = 17236223
              +
              +

              librdkafka version

              +

              Interpreted as hex MM.mm.rr.xx:

• MM = Major
• mm = minor
• rr = revision
• xx = pre-release id (0xff is the final release)

              E.g.: 0x000801ff = 0.8.1

              +
              Remarks
This value should only be used at compile time; for runtime version checks, use rd_kafka_version().
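A quick sketch of the interpretation above in plain PHP (the helper name is made up for illustration and is not part of the library):

<?php
// Hypothetical helper: decode an RD_KAFKA_VERSION-style integer (0xMMmmrrxx).
function decodeRdKafkaVersion(int $version): string
{
    $major      = ($version >> 24) & 0xFF; // MM
    $minor      = ($version >> 16) & 0xFF; // mm
    $revision   = ($version >> 8) & 0xFF;  // rr
    $preRelease = $version & 0xFF;         // xx, 0xff marks a final release

    return sprintf('%d.%d.%d%s', $major, $minor, $revision,
        $preRelease === 0xFF ? '' : sprintf('-pre%d', $preRelease));
}

echo decodeRdKafkaVersion(0x000801FF); // "0.8.1"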

              RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE

              +
              public RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76
              +
              +

              Unsupported compression type
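Error codes like this one can be translated into their symbolic name and description through rd_kafka_err2name() and rd_kafka_err2str(), both declared in RD_KAFKA_CDEF. A sketch, assuming an FFI handle such as $kafka from the earlier FFI::cdef example:

<?php
$err = 76; // RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE

echo \FFI::string($kafka->rd_kafka_err2name($err)), PHP_EOL; // symbolic name of the code
echo \FFI::string($kafka->rd_kafka_err2str($err)), PHP_EOL;  // human-readable description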


              RD_KAFKA_RESP_ERR_END_ALL

              +

              public RD_KAFKA_RESP_ERR_END_ALL = 98
              +
+enum rd_kafka_resp_err_t (marks the end of the broker error codes)

              +

              RD_KAFKA_ADMIN_OP__CNT

              +
              public RD_KAFKA_ADMIN_OP__CNT = 9
              +
              +

              Number of ops defined


              RD_KAFKA_CDEF

              +

              public RD_KAFKA_CDEF = 'typedef long int ssize_t;
              +typedef struct _IO_FILE FILE;
              +typedef long int mode_t;
              +typedef signed int int16_t;
              +typedef unsigned int uint16_t;
              +typedef signed int int32_t;
              +typedef signed long int int64_t;
              +int rd_kafka_version(void);
              +const char *rd_kafka_version_str(void);
              +typedef enum rd_kafka_type_t {
              +  RD_KAFKA_PRODUCER,
              +  RD_KAFKA_CONSUMER,
              +} rd_kafka_type_t;
              +typedef enum rd_kafka_timestamp_type_t {
              +  RD_KAFKA_TIMESTAMP_NOT_AVAILABLE,
              +  RD_KAFKA_TIMESTAMP_CREATE_TIME,
              +  RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME,
              +} rd_kafka_timestamp_type_t;
              +const char *rd_kafka_get_debug_contexts(void);
              +typedef struct rd_kafka_s rd_kafka_t;
              +typedef struct rd_kafka_topic_s rd_kafka_topic_t;
              +typedef struct rd_kafka_conf_s rd_kafka_conf_t;
              +typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t;
              +typedef struct rd_kafka_queue_s rd_kafka_queue_t;
              +typedef struct rd_kafka_op_s rd_kafka_event_t;
              +typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t;
              +typedef struct rd_kafka_consumer_group_metadata_s rd_kafka_consumer_group_metadata_t;
              +typedef struct rd_kafka_error_s {
              +  unsigned int code;
              +  char *errstr;
              +  unsigned char fatal;
              +  unsigned char retriable;
              +  unsigned char txn_requires_abort;
              +} rd_kafka_error_t;
              +typedef struct rd_kafka_headers_s rd_kafka_headers_t;
              +typedef struct rd_kafka_group_result_s rd_kafka_group_result_t;
              +typedef enum {
              +  RD_KAFKA_RESP_ERR__BEGIN = (- 200),
              +  RD_KAFKA_RESP_ERR__BAD_MSG = (- 199),
              +  RD_KAFKA_RESP_ERR__BAD_COMPRESSION = (- 198),
              +  RD_KAFKA_RESP_ERR__DESTROY = (- 197),
              +  RD_KAFKA_RESP_ERR__FAIL = (- 196),
              +  RD_KAFKA_RESP_ERR__TRANSPORT = (- 195),
              +  RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = (- 194),
              +  RD_KAFKA_RESP_ERR__RESOLVE = (- 193),
              +  RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = (- 192),
              +  RD_KAFKA_RESP_ERR__PARTITION_EOF = (- 191),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = (- 190),
              +  RD_KAFKA_RESP_ERR__FS = (- 189),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = (- 188),
              +  RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = (- 187),
              +  RD_KAFKA_RESP_ERR__INVALID_ARG = (- 186),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT = (- 185),
              +  RD_KAFKA_RESP_ERR__QUEUE_FULL = (- 184),
              +  RD_KAFKA_RESP_ERR__ISR_INSUFF = (- 183),
              +  RD_KAFKA_RESP_ERR__NODE_UPDATE = (- 182),
              +  RD_KAFKA_RESP_ERR__SSL = (- 181),
              +  RD_KAFKA_RESP_ERR__WAIT_COORD = (- 180),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = (- 179),
              +  RD_KAFKA_RESP_ERR__IN_PROGRESS = (- 178),
              +  RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = (- 177),
              +  RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = (- 176),
              +  RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = (- 175),
              +  RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = (- 174),
              +  RD_KAFKA_RESP_ERR__CONFLICT = (- 173),
              +  RD_KAFKA_RESP_ERR__STATE = (- 172),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = (- 171),
              +  RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = (- 170),
              +  RD_KAFKA_RESP_ERR__AUTHENTICATION = (- 169),
              +  RD_KAFKA_RESP_ERR__NO_OFFSET = (- 168),
              +  RD_KAFKA_RESP_ERR__OUTDATED = (- 167),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = (- 166),
              +  RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = (- 165),
              +  RD_KAFKA_RESP_ERR__WAIT_CACHE = (- 164),
              +  RD_KAFKA_RESP_ERR__INTR = (- 163),
              +  RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = (- 162),
              +  RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = (- 161),
              +  RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = (- 160),
              +  RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = (- 159),
              +  RD_KAFKA_RESP_ERR__PARTIAL = (- 158),
              +  RD_KAFKA_RESP_ERR__READ_ONLY = (- 157),
              +  RD_KAFKA_RESP_ERR__NOENT = (- 156),
              +  RD_KAFKA_RESP_ERR__UNDERFLOW = (- 155),
              +  RD_KAFKA_RESP_ERR__INVALID_TYPE = (- 154),
              +  RD_KAFKA_RESP_ERR__RETRY = (- 153),
              +  RD_KAFKA_RESP_ERR__PURGE_QUEUE = (- 152),
              +  RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = (- 151),
              +  RD_KAFKA_RESP_ERR__FATAL = (- 150),
              +  RD_KAFKA_RESP_ERR__INCONSISTENT = (- 149),
              +  RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = (- 148),
              +  RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = (- 147),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_BROKER = (- 146),
              +  RD_KAFKA_RESP_ERR__NOT_CONFIGURED = (- 145),
              +  RD_KAFKA_RESP_ERR__FENCED = (- 144),
              +  RD_KAFKA_RESP_ERR__APPLICATION = (- 143),
              +  RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST = (- 142),
              +  RD_KAFKA_RESP_ERR__NOOP = (- 141),
              +  RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET = (- 140),
              +  RD_KAFKA_RESP_ERR__END = (- 100),
              +  RD_KAFKA_RESP_ERR_UNKNOWN = (- 1),
              +  RD_KAFKA_RESP_ERR_NO_ERROR = 0,
              +  RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG = 2,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4,
              +  RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5,
              +  RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6,
              +  RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7,
              +  RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8,
              +  RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9,
              +  RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10,
              +  RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11,
              +  RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12,
              +  RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS = 14,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE = 15,
              +  RD_KAFKA_RESP_ERR_NOT_COORDINATOR = 16,
              +  RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17,
              +  RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
              +  RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25,
              +  RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26,
              +  RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27,
              +  RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
              +  RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29,
              +  RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30,
              +  RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
              +  RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35,
              +  RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36,
              +  RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39,
              +  RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40,
              +  RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
              +  RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44,
              +  RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47,
              +  RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49,
              +  RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50,
              +  RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51,
              +  RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52,
              +  RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
              +  RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54,
              +  RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55,
              +  RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56,
              +  RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57,
              +  RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59,
              +  RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66,
              +  RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67,
              +  RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68,
              +  RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69,
              +  RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70,
              +  RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71,
              +  RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72,
              +  RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73,
              +  RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76,
              +  RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77,
              +  RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78,
              +  RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79,
              +  RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80,
              +  RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81,
              +  RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID = 82,
              +  RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE = 83,
              +  RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED = 84,
              +  RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS = 85,
              +  RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC = 86,
              +  RD_KAFKA_RESP_ERR_INVALID_RECORD = 87,
              +  RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT = 88,
              +  RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED = 89,
              +  RD_KAFKA_RESP_ERR_PRODUCER_FENCED = 90,
              +  RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND = 91,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE = 92,
              +  RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL = 93,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET = 94,
              +  RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION = 95,
              +  RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED = 96,
              +  RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE = 97,
              +  RD_KAFKA_RESP_ERR_END_ALL,
              +} rd_kafka_resp_err_t;
              +struct rd_kafka_err_desc {
              +  rd_kafka_resp_err_t code;
              +  char *name;
              +  char *desc;
              +};
              +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, size_t *cntp);
              +const char *rd_kafka_err2str(rd_kafka_resp_err_t err);
              +const char *rd_kafka_err2name(rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_last_error(void);
              +rd_kafka_resp_err_t rd_kafka_errno2err(int errnox);
              +int rd_kafka_errno(void);
              +rd_kafka_resp_err_t rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason);
              +rd_kafka_resp_err_t rd_kafka_error_code(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_name(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_string(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_fatal(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_retriable(const rd_kafka_error_t *error);
              +int rd_kafka_error_txn_requires_abort(const rd_kafka_error_t *error);
              +void rd_kafka_error_destroy(rd_kafka_error_t *error);
              +rd_kafka_error_t *rd_kafka_error_new(rd_kafka_resp_err_t code, const char *fmt, ...);
              +typedef struct rd_kafka_topic_partition_s {
              +  char *topic;
              +  int32_t partition;
              +  int64_t offset;
              +  void *metadata;
              +  size_t metadata_size;
              +  void *opaque;
              +  rd_kafka_resp_err_t err;
              +  void *_private;
              +} rd_kafka_topic_partition_t;
              +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar);
              +typedef struct rd_kafka_topic_partition_list_s {
              +  int cnt;
              +  int size;
              +  rd_kafka_topic_partition_t *elems;
              +} rd_kafka_topic_partition_list_t;
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size);
              +void rd_kafka_topic_partition_list_destroy(rd_kafka_topic_partition_list_t *rkparlist);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_add_range(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t start, int32_t stop);
              +int rd_kafka_topic_partition_list_del(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +int rd_kafka_topic_partition_list_del_by_idx(rd_kafka_topic_partition_list_t *rktparlist, int idx);
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src);
              +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition, int64_t offset);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(const rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_sort(rd_kafka_topic_partition_list_t *rktparlist, int (*cmp)(const void *a, const void *b, void *cmp_opaque), void *cmp_opaque);
              +typedef enum rd_kafka_vtype_t {
              +  RD_KAFKA_VTYPE_END,
              +  RD_KAFKA_VTYPE_TOPIC,
              +  RD_KAFKA_VTYPE_RKT,
              +  RD_KAFKA_VTYPE_PARTITION,
              +  RD_KAFKA_VTYPE_VALUE,
              +  RD_KAFKA_VTYPE_KEY,
              +  RD_KAFKA_VTYPE_OPAQUE,
              +  RD_KAFKA_VTYPE_MSGFLAGS,
              +  RD_KAFKA_VTYPE_TIMESTAMP,
              +  RD_KAFKA_VTYPE_HEADER,
              +  RD_KAFKA_VTYPE_HEADERS,
              +} rd_kafka_vtype_t;
              +typedef struct rd_kafka_vu_s {
              +  rd_kafka_vtype_t vtype;
              +  union {
              +    char *cstr;
              +    rd_kafka_topic_t *rkt;
              +    int i;
              +    int32_t i32;
              +    int64_t i64;
              +    struct {
              +      void *ptr;
              +      size_t size;
              +    } mem;
              +    struct {
              +      char *name;
              +      void *val;
              +      ssize_t size;
              +    } header;
              +    rd_kafka_headers_t *headers;
              +    void *ptr;
              +    char _pad[64];
              +  } u;
              +} rd_kafka_vu_t;
              +rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count);
              +void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs);
              +rd_kafka_headers_t *rd_kafka_headers_copy(const rd_kafka_headers_t *src);
              +rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, const char *name, ssize_t name_size, const void *value, ssize_t value_size);
              +rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, const char *name);
              +rd_kafka_resp_err_t rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get(const rd_kafka_headers_t *hdrs, size_t idx, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, size_t idx, const char **namep, const void **valuep, size_t *sizep);
              +typedef struct rd_kafka_message_s {
              +  rd_kafka_resp_err_t err;
              +  rd_kafka_topic_t *rkt;
              +  int32_t partition;
              +  void *payload;
              +  size_t len;
              +  void *key;
              +  size_t key_len;
              +  int64_t offset;
              +  void *_private;
              +} rd_kafka_message_t;
              +void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);
              +const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage);
              +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, rd_kafka_timestamp_type_t *tstype);
              +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage);
              +int32_t rd_kafka_message_broker_id(const rd_kafka_message_t *rkmessage);
              +rd_kafka_resp_err_t rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +rd_kafka_resp_err_t rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t *hdrs);
              +size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs);
              +typedef enum {
              +  RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0,
              +  RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1,
              +  RD_KAFKA_MSG_STATUS_PERSISTED = 2,
              +} rd_kafka_msg_status_t;
              +rd_kafka_msg_status_t rd_kafka_message_status(const rd_kafka_message_t *rkmessage);
              +typedef enum {
              +  RD_KAFKA_CONF_UNKNOWN = (- 2),
              +  RD_KAFKA_CONF_INVALID = (- 1),
              +  RD_KAFKA_CONF_OK = 0,
              +} rd_kafka_conf_res_t;
              +rd_kafka_conf_t *rd_kafka_conf_new(void);
              +void rd_kafka_conf_destroy(rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, size_t filter_cnt, const char **filter);
              +const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk);
              +rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events);
              +void rd_kafka_conf_set_background_event_cb(rd_kafka_conf_t *conf, void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque));
              +void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, void (*dr_cb)(rd_kafka_t *rk, void *payload, size_t len, rd_kafka_resp_err_t err, void *opaque, void *msg_opaque));
              +void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, void (*dr_msg_cb)(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_consume_cb(rd_kafka_conf_t *conf, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_rebalance_cb(rd_kafka_conf_t *conf, void (*rebalance_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque));
              +void rd_kafka_conf_set_offset_commit_cb(rd_kafka_conf_t *conf, void (*offset_commit_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque));
              +void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, void (*error_cb)(rd_kafka_t *rk, int err, const char *reason, void *opaque));
              +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, void (*throttle_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque));
              +void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, void (*log_cb)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque));
              +void rd_kafka_conf_set_oauthbearer_token_refresh_cb(rd_kafka_conf_t *conf, void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, const char *oauthbearer_config, void *opaque));
              +void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, int (*socket_cb)(int domain, int type, int protocol, void *opaque));
              +void rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, int (*connect_cb)(int sockfd, const struct sockaddr *addr, int addrlen, const char *id, void *opaque));
              +void rd_kafka_conf_set_closesocket_cb(rd_kafka_conf_t *conf, int (*closesocket_cb)(int sockfd, void *opaque));
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb(rd_kafka_conf_t *conf, int (*ssl_cert_verify_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int *x509_error, int depth, const char *buf, size_t size, char *errstr, size_t errstr_size, void *opaque));
              +typedef enum rd_kafka_cert_type_t {
              +  RD_KAFKA_CERT_PUBLIC_KEY,
              +  RD_KAFKA_CERT_PRIVATE_KEY,
              +  RD_KAFKA_CERT_CA,
              +  RD_KAFKA_CERT__CNT,
              +} rd_kafka_cert_type_t;
              +typedef enum rd_kafka_cert_enc_t {
              +  RD_KAFKA_CERT_ENC_PKCS12,
              +  RD_KAFKA_CERT_ENC_DER,
              +  RD_KAFKA_CERT_ENC_PEM,
              +  RD_KAFKA_CERT_ENC__CNT,
              +} rd_kafka_cert_enc_t;
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, rd_kafka_cert_type_t cert_type, rd_kafka_cert_enc_t cert_enc, const void *buffer, size_t size, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_engine_callback_data(rd_kafka_conf_t *conf, void *callback_data);
              +void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque);
              +void *rd_kafka_opaque(const rd_kafka_t *rk);
              +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, rd_kafka_topic_conf_t *tconf);
              +rd_kafka_topic_conf_t *rd_kafka_conf_get_default_topic_conf(rd_kafka_conf_t *conf);
              +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp);
              +const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, size_t *cntp);
              +void rd_kafka_conf_dump_free(const char **arr, size_t cnt);
              +void rd_kafka_conf_properties_show(FILE *fp);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf);
              +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk);
              +void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *rkt_opaque);
              +void rd_kafka_topic_conf_set_partitioner_cb(rd_kafka_topic_conf_t *topic_conf, int32_t (*partitioner)(const rd_kafka_topic_t *rkt, const void *keydata, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque));
              +void rd_kafka_topic_conf_set_msg_order_cmp(rd_kafka_topic_conf_t *topic_conf, int (*msg_order_cmp)(const rd_kafka_message_t *a, const rd_kafka_message_t *b));
              +int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, int32_t partition);
              +int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, char *errstr, size_t errstr_size);
              +void rd_kafka_destroy(rd_kafka_t *rk);
              +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags);
              +const char *rd_kafka_name(const rd_kafka_t *rk);
              +rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk);
              +char *rd_kafka_memberid(const rd_kafka_t *rk);
              +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms);
              +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, rd_kafka_topic_conf_t *conf);
              +void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt);
              +const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt);
              +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt);
              +int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms);
              +void rd_kafka_yield(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_pause_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_resume_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high);
              +rd_kafka_resp_err_t rd_kafka_offsets_for_times(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets, int timeout_ms);
              +void *rd_kafka_mem_calloc(rd_kafka_t *rk, size_t num, size_t size);
              +void *rd_kafka_mem_malloc(rd_kafka_t *rk, size_t size);
              +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr);
              +rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk);
              +void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu);
              +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, const char *topic, int32_t partition);
              +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk);
              +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst);
              +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu);
              +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, int fd, const void *payload, size_t size);
              +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, void (*event_cb)(rd_kafka_t *rk, void *qev_opaque), void *qev_opaque);
              +void rd_kafka_queue_yield(rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
              +rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_seek_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms);
              +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, int timeout_ms);
              +ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +rd_kafka_resp_err_t rd_kafka_offsets_store(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets);
              +rd_kafka_resp_err_t rd_kafka_subscribe(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *topics);
              +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics);
              +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_incremental_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_error_t *rd_kafka_incremental_unassign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +const char *rd_kafka_rebalance_protocol(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_assignment(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **partitions);
              +int rd_kafka_assignment_lost(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_commit(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_message(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_queue(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_queue_t *rkqu, void (*cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_committed(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata(rd_kafka_t *rk);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new(const char *group_id);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new_with_genid(const char *group_id, int32_t generation_id, const char *member_id, const char *group_instance_id);
              +void rd_kafka_consumer_group_metadata_destroy(rd_kafka_consumer_group_metadata_t *);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_write(const rd_kafka_consumer_group_metadata_t *cgmd, void **bufferp, size_t *sizep);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_read(rd_kafka_consumer_group_metadata_t **cgmdp, const void *buffer, size_t size);
              +int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, void *payload, size_t len, const void *key, size_t keylen, void *msg_opaque);
              +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...);
              +rd_kafka_error_t *rd_kafka_produceva(rd_kafka_t *rk, const rd_kafka_vu_t *vus, size_t cnt);
              +int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, rd_kafka_message_t *rkmessages, int message_cnt);
              +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags);
              +typedef struct rd_kafka_metadata_broker {
              +  int32_t id;
              +  char *host;
              +  int port;
              +} rd_kafka_metadata_broker_t;
              +typedef struct rd_kafka_metadata_partition {
              +  int32_t id;
              +  rd_kafka_resp_err_t err;
              +  int32_t leader;
              +  int replica_cnt;
              +  int32_t *replicas;
              +  int isr_cnt;
              +  int32_t *isrs;
              +} rd_kafka_metadata_partition_t;
              +typedef struct rd_kafka_metadata_topic {
              +  char *topic;
              +  int partition_cnt;
              +  struct rd_kafka_metadata_partition *partitions;
              +  rd_kafka_resp_err_t err;
              +} rd_kafka_metadata_topic_t;
              +typedef struct rd_kafka_metadata {
              +  int broker_cnt;
              +  struct rd_kafka_metadata_broker *brokers;
              +  int topic_cnt;
              +  struct rd_kafka_metadata_topic *topics;
              +  int32_t orig_broker_id;
              +  char *orig_broker_name;
              +} rd_kafka_metadata_t;
              +rd_kafka_resp_err_t rd_kafka_metadata(rd_kafka_t *rk, int all_topics, rd_kafka_topic_t *only_rkt, const struct rd_kafka_metadata **metadatap, int timeout_ms);
              +void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata);
              +struct rd_kafka_group_member_info {
              +  char *member_id;
              +  char *client_id;
              +  char *client_host;
              +  void *member_metadata;
              +  int member_metadata_size;
              +  void *member_assignment;
              +  int member_assignment_size;
              +};
              +struct rd_kafka_group_info {
              +  struct rd_kafka_metadata_broker broker;
              +  char *group;
              +  rd_kafka_resp_err_t err;
              +  char *state;
              +  char *protocol_type;
              +  char *protocol;
              +  struct rd_kafka_group_member_info *members;
              +  int member_cnt;
              +};
              +struct rd_kafka_group_list {
              +  struct rd_kafka_group_info *groups;
              +  int group_cnt;
              +};
              +rd_kafka_resp_err_t rd_kafka_list_groups(rd_kafka_t *rk, const char *group, const struct rd_kafka_group_list **grplistp, int timeout_ms);
              +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist);
              +int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist);
              +void rd_kafka_set_logger(rd_kafka_t *rk, void (*func)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_set_log_level(rd_kafka_t *rk, int level);
              +void rd_kafka_log_print(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +int rd_kafka_outq_len(rd_kafka_t *rk);
              +void rd_kafka_dump(FILE *fp, rd_kafka_t *rk);
              +int rd_kafka_thread_cnt(void);
              +typedef enum rd_kafka_thread_type_t {
              +  RD_KAFKA_THREAD_MAIN,
              +  RD_KAFKA_THREAD_BACKGROUND,
              +  RD_KAFKA_THREAD_BROKER,
              +} rd_kafka_thread_type_t;
              +int rd_kafka_wait_destroyed(int timeout_ms);
              +int rd_kafka_unittest(void);
              +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk);
              +typedef int rd_kafka_event_type_t;
              +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev);
              +void rd_kafka_event_destroy(rd_kafka_event_t *rkev);
              +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev);
              +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, const rd_kafka_message_t **rkmessages, size_t size);
              +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev);
              +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev);
              +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev);
              +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev);
              +int rd_kafka_event_log(rd_kafka_event_t *rkev, const char **fac, const char **str, int *level);
              +int rd_kafka_event_debug_contexts(rd_kafka_event_t *rkev, char *dst, size_t dstsize);
              +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_list_t *rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_t *rd_kafka_event_topic_partition(rd_kafka_event_t *rkev);
              +typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteRecords_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteGroups_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteConsumerGroupOffsets_result_t;
              +const rd_kafka_CreateTopics_result_t *rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteTopics_result_t *rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreatePartitions_result_t *rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConfigs_result_t *rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConfigs_result_t *rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteRecords_result_t *rd_kafka_event_DeleteRecords_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteGroups_result_t *rd_kafka_event_DeleteGroups_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteConsumerGroupOffsets_result_t *rd_kafka_event_DeleteConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
              +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms);
              +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms);
              +typedef rd_kafka_resp_err_t (rd_kafka_plugin_f_conf_init_t)(rd_kafka_conf_t *conf, void **plug_opaquep, char *errstr, size_t errstr_size);
              +typedef rd_kafka_conf_res_t (rd_kafka_interceptor_f_on_conf_set_t)(rd_kafka_conf_t *conf, const char *name, const char *val, char *errstr, size_t errstr_size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_dup_t)(rd_kafka_conf_t *new_conf, const rd_kafka_conf_t *old_conf, size_t filter_cnt, const char **filter, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_destroy_t)(void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_new_t)(rd_kafka_t *rk, const rd_kafka_conf_t *conf, void *ic_opaque, char *errstr, size_t errstr_size);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_send_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_acknowledgement_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_consume_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_commit_t)(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_request_sent_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_response_received_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, int64_t rtt, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_start_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_exit_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_new_t *on_new, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_destroy_t *on_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_send_t *on_send, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_consume_t *on_consume, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_commit_t *on_commit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_response_received(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_response_received_t *on_response_received, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres);
              +const rd_kafka_error_t *rd_kafka_group_result_error(const rd_kafka_group_result_t *groupres);
              +const char *rd_kafka_group_result_name(const rd_kafka_group_result_t *groupres);
              +const rd_kafka_topic_partition_list_t *rd_kafka_group_result_partitions(const rd_kafka_group_result_t *groupres);
              +typedef enum rd_kafka_admin_op_t {
              +  RD_KAFKA_ADMIN_OP_ANY = 0,
              +  RD_KAFKA_ADMIN_OP_CREATETOPICS,
              +  RD_KAFKA_ADMIN_OP_DELETETOPICS,
              +  RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONFIGS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS,
              +  RD_KAFKA_ADMIN_OP_DELETERECORDS,
              +  RD_KAFKA_ADMIN_OP_DELETEGROUPS,
              +  RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS,
              +  RD_KAFKA_ADMIN_OP__CNT,
              +} rd_kafka_admin_op_t;
              +typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t;
              +rd_kafka_AdminOptions_t *rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
              +void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, int true_or_false, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, int32_t broker_id, char *errstr, size_t errstr_size);
              +void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, void *ev_opaque);
              +typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t;
              +rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, int num_partitions, int replication_factor, char *errstr, size_t errstr_size);
              +void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic);
              +void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, int32_t partition, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, const char *name, const char *value);
              +void rd_kafka_CreateTopics(rd_kafka_t *rk, rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(const rd_kafka_CreateTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
              +rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic);
              +void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic);
              +void rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt);
              +void rd_kafka_DeleteTopics(rd_kafka_t *rk, rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(const rd_kafka_DeleteTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t;
              +rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new(const char *topic, size_t new_total_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts);
              +void rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment(rd_kafka_NewPartitions_t *new_parts, int32_t new_partition_idx, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_CreatePartitions(rd_kafka_t *rk, rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreatePartitions_result_topics(const rd_kafka_CreatePartitions_result_t *result, size_t *cntp);
              +typedef enum rd_kafka_ConfigSource_t {
              +  RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
              +  RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4,
              +  RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5,
              +  RD_KAFKA_CONFIG_SOURCE__CNT,
              +} rd_kafka_ConfigSource_t;
              +const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource);
              +typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t;
              +const char *rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry);
              +const char *rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry);
              +rd_kafka_ConfigSource_t rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, size_t *cntp);
              +typedef enum rd_kafka_ResourceType_t {
              +  RD_KAFKA_RESOURCE_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_ANY = 1,
              +  RD_KAFKA_RESOURCE_TOPIC = 2,
              +  RD_KAFKA_RESOURCE_GROUP = 3,
              +  RD_KAFKA_RESOURCE_BROKER = 4,
              +  RD_KAFKA_RESOURCE__CNT,
              +} rd_kafka_ResourceType_t;
              +const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype);
              +typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
              +rd_kafka_ConfigResource_t *rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, const char *resname);
              +void rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config);
              +void rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, size_t config_cnt);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, const char *name, const char *value);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, size_t *cntp);
              +rd_kafka_ResourceType_t rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config);
              +void rd_kafka_AlterConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_AlterConfigs_result_resources(const rd_kafka_AlterConfigs_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_DescribeConfigs_result_resources(const rd_kafka_DescribeConfigs_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteRecords_s rd_kafka_DeleteRecords_t;
              +rd_kafka_DeleteRecords_t *rd_kafka_DeleteRecords_new(const rd_kafka_topic_partition_list_t *before_offsets);
              +void rd_kafka_DeleteRecords_destroy(rd_kafka_DeleteRecords_t *del_records);
              +void rd_kafka_DeleteRecords_destroy_array(rd_kafka_DeleteRecords_t **del_records, size_t del_record_cnt);
              +void rd_kafka_DeleteRecords(rd_kafka_t *rk, rd_kafka_DeleteRecords_t **del_records, size_t del_record_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_partition_list_t *rd_kafka_DeleteRecords_result_offsets(const rd_kafka_DeleteRecords_result_t *result);
              +typedef struct rd_kafka_DeleteGroup_s rd_kafka_DeleteGroup_t;
              +rd_kafka_DeleteGroup_t *rd_kafka_DeleteGroup_new(const char *group);
              +void rd_kafka_DeleteGroup_destroy(rd_kafka_DeleteGroup_t *del_group);
              +void rd_kafka_DeleteGroup_destroy_array(rd_kafka_DeleteGroup_t **del_groups, size_t del_group_cnt);
              +void rd_kafka_DeleteGroups(rd_kafka_t *rk, rd_kafka_DeleteGroup_t **del_groups, size_t del_group_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_DeleteGroups_result_groups(const rd_kafka_DeleteGroups_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteConsumerGroupOffsets_s rd_kafka_DeleteConsumerGroupOffsets_t;
              +rd_kafka_DeleteConsumerGroupOffsets_t *rd_kafka_DeleteConsumerGroupOffsets_new(const char *group, const rd_kafka_topic_partition_list_t *partitions);
              +void rd_kafka_DeleteConsumerGroupOffsets_destroy(rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets);
              +void rd_kafka_DeleteConsumerGroupOffsets_destroy_array(rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, size_t del_grpoffset_cnt);
              +void rd_kafka_DeleteConsumerGroupOffsets(rd_kafka_t *rk, rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, size_t del_grpoffsets_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_DeleteConsumerGroupOffsets_result_groups(const rd_kafka_DeleteConsumerGroupOffsets_result_t *result, size_t *cntp);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token(rd_kafka_t *rk, const char *token_value, int64_t md_lifetime_ms, const char *md_principal_name, const char **extensions, size_t extension_size, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk, const char *errstr);
              +rd_kafka_error_t *rd_kafka_init_transactions(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_send_offsets_to_transaction(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, const rd_kafka_consumer_group_metadata_t *cgmetadata, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_commit_transaction(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_abort_transaction(rd_kafka_t *rk, int timeout_ms);
              +typedef struct rd_kafka_mock_cluster_s rd_kafka_mock_cluster_t;
              +rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new(rd_kafka_t *rk, int broker_cnt);
              +void rd_kafka_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_t *rd_kafka_mock_cluster_handle(const rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_mock_cluster_t *rd_kafka_handle_mock_cluster(const rd_kafka_t *rk);
              +const char *rd_kafka_mock_cluster_bootstraps(const rd_kafka_mock_cluster_t *mcluster);
              +void rd_kafka_mock_clear_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey);
              +void rd_kafka_mock_push_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, ...);
              +void rd_kafka_mock_push_request_errors_array(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, const rd_kafka_resp_err_t *errors);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_push_request_error_rtts(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int16_t ApiKey, size_t cnt, ...);
              +void rd_kafka_mock_topic_set_error(rd_kafka_mock_cluster_t *mcluster, const char *topic, rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_mock_topic_create(rd_kafka_mock_cluster_t *mcluster, const char *topic, int partition_cnt, int replication_factor);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_leader(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int64_t lo, int64_t hi);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_down(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_up(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rtt(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int rtt_ms);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rack(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, const char *rack);
              +rd_kafka_resp_err_t rd_kafka_mock_coordinator_set(rd_kafka_mock_cluster_t *mcluster, const char *key_type, const char *key, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_set_apiversion(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, int16_t MinVersion, int16_t MaxVersion);
              +'
              +
              +rdkafka.h, rdkafka_mock.h

              +
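The declarations above form the full RD_KAFKA_CDEF string (generated from rdkafka.h and rdkafka_mock.h). A hedged sketch of how such a cdef string can be handed to PHP's built-in FFI follows; for brevity only the two version functions from the declarations above are declared, and the shared-library name "librdkafka.so.1" is an assumption that depends on the platform.

<?php
// Hedged sketch: loading librdkafka through PHP FFI with a cdef string.
// Only rd_kafka_version()/rd_kafka_version_str() from the declarations above
// are declared here; "librdkafka.so.1" is an assumed, platform-dependent name.
$cdef = <<<'CDEF'
int rd_kafka_version(void);
const char *rd_kafka_version_str(void);
CDEF;

$ffi = FFI::cdef($cdef, 'librdkafka.so.1');

// Pointer returns come back as FFI\CData; FFI::string() converts char* to a PHP string.
printf(
    "runtime librdkafka %s (0x%08x)\n",
    FFI::string($ffi->rd_kafka_version_str()),
    $ffi->rd_kafka_version()
);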

              RD_KAFKA_VERSION

              +
              public RD_KAFKA_VERSION = 17301759
              +
              +

              librdkafka version

              +

              Interpreted as hex MM.mm.rr.xx:

              +
• MM = Major
• mm = minor
• rr = revision
• xx = pre-release id (0xff is the final release)
              +

              E.g.: 0x000801ff = 0.8.1

              +
              Remarks
This value should only be used at compile time; for runtime version checks, use rd_kafka_version().
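A minimal plain-PHP sketch of this decoding (no library calls; variable names are illustrative):

<?php
// Decode a packed librdkafka version integer of the form 0xMMmmrrxx.
// 17301759 == 0x010800ff, i.e. librdkafka 1.8.0 (0xff marks a final release).
$version = 17301759;

$major      = ($version >> 24) & 0xff;
$minor      = ($version >> 16) & 0xff;
$revision   = ($version >> 8) & 0xff;
$preRelease = $version & 0xff;

printf("librdkafka %d.%d.%d (pre-release id 0x%02x)\n", $major, $minor, $revision, $preRelease);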

              RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE

              +
              public RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76
              +
              +

              Unsupported compression type


              RD_KAFKA_RESP_ERR_END_ALL

              +

              public RD_KAFKA_RESP_ERR_END_ALL = 98
              +
+enum rd_kafka_resp_err_t (marks the end of the error code enum)

              +

              RD_KAFKA_ADMIN_OP__CNT

              +
              public RD_KAFKA_ADMIN_OP__CNT = 9
              +
              +

              Number of ops defined


              RD_KAFKA_CDEF

              +

              public RD_KAFKA_CDEF = 'typedef long int ssize_t;
              +typedef struct _IO_FILE FILE;
              +typedef long int mode_t;
              +typedef signed int int16_t;
              +typedef unsigned int uint16_t;
              +typedef signed int int32_t;
              +typedef signed long int int64_t;
              +int rd_kafka_version(void);
              +const char *rd_kafka_version_str(void);
              +typedef enum rd_kafka_type_t {
              +  RD_KAFKA_PRODUCER,
              +  RD_KAFKA_CONSUMER,
              +} rd_kafka_type_t;
              +typedef enum rd_kafka_timestamp_type_t {
              +  RD_KAFKA_TIMESTAMP_NOT_AVAILABLE,
              +  RD_KAFKA_TIMESTAMP_CREATE_TIME,
              +  RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME,
              +} rd_kafka_timestamp_type_t;
              +const char *rd_kafka_get_debug_contexts(void);
              +typedef struct rd_kafka_s rd_kafka_t;
              +typedef struct rd_kafka_topic_s rd_kafka_topic_t;
              +typedef struct rd_kafka_conf_s rd_kafka_conf_t;
              +typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t;
              +typedef struct rd_kafka_queue_s rd_kafka_queue_t;
              +typedef struct rd_kafka_op_s rd_kafka_event_t;
              +typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t;
              +typedef struct rd_kafka_consumer_group_metadata_s rd_kafka_consumer_group_metadata_t;
              +typedef struct rd_kafka_error_s {
              +  unsigned int code;
              +  char *errstr;
              +  unsigned char fatal;
              +  unsigned char retriable;
              +  unsigned char txn_requires_abort;
              +} rd_kafka_error_t;
              +typedef struct rd_kafka_headers_s rd_kafka_headers_t;
              +typedef struct rd_kafka_group_result_s rd_kafka_group_result_t;
              +typedef enum {
              +  RD_KAFKA_RESP_ERR__BEGIN = (- 200),
              +  RD_KAFKA_RESP_ERR__BAD_MSG = (- 199),
              +  RD_KAFKA_RESP_ERR__BAD_COMPRESSION = (- 198),
              +  RD_KAFKA_RESP_ERR__DESTROY = (- 197),
              +  RD_KAFKA_RESP_ERR__FAIL = (- 196),
              +  RD_KAFKA_RESP_ERR__TRANSPORT = (- 195),
              +  RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = (- 194),
              +  RD_KAFKA_RESP_ERR__RESOLVE = (- 193),
              +  RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = (- 192),
              +  RD_KAFKA_RESP_ERR__PARTITION_EOF = (- 191),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = (- 190),
              +  RD_KAFKA_RESP_ERR__FS = (- 189),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = (- 188),
              +  RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = (- 187),
              +  RD_KAFKA_RESP_ERR__INVALID_ARG = (- 186),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT = (- 185),
              +  RD_KAFKA_RESP_ERR__QUEUE_FULL = (- 184),
              +  RD_KAFKA_RESP_ERR__ISR_INSUFF = (- 183),
              +  RD_KAFKA_RESP_ERR__NODE_UPDATE = (- 182),
              +  RD_KAFKA_RESP_ERR__SSL = (- 181),
              +  RD_KAFKA_RESP_ERR__WAIT_COORD = (- 180),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = (- 179),
              +  RD_KAFKA_RESP_ERR__IN_PROGRESS = (- 178),
              +  RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = (- 177),
              +  RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = (- 176),
              +  RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = (- 175),
              +  RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = (- 174),
              +  RD_KAFKA_RESP_ERR__CONFLICT = (- 173),
              +  RD_KAFKA_RESP_ERR__STATE = (- 172),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = (- 171),
              +  RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = (- 170),
              +  RD_KAFKA_RESP_ERR__AUTHENTICATION = (- 169),
              +  RD_KAFKA_RESP_ERR__NO_OFFSET = (- 168),
              +  RD_KAFKA_RESP_ERR__OUTDATED = (- 167),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = (- 166),
              +  RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = (- 165),
              +  RD_KAFKA_RESP_ERR__WAIT_CACHE = (- 164),
              +  RD_KAFKA_RESP_ERR__INTR = (- 163),
              +  RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = (- 162),
              +  RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = (- 161),
              +  RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = (- 160),
              +  RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = (- 159),
              +  RD_KAFKA_RESP_ERR__PARTIAL = (- 158),
              +  RD_KAFKA_RESP_ERR__READ_ONLY = (- 157),
              +  RD_KAFKA_RESP_ERR__NOENT = (- 156),
              +  RD_KAFKA_RESP_ERR__UNDERFLOW = (- 155),
              +  RD_KAFKA_RESP_ERR__INVALID_TYPE = (- 154),
              +  RD_KAFKA_RESP_ERR__RETRY = (- 153),
              +  RD_KAFKA_RESP_ERR__PURGE_QUEUE = (- 152),
              +  RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = (- 151),
              +  RD_KAFKA_RESP_ERR__FATAL = (- 150),
              +  RD_KAFKA_RESP_ERR__INCONSISTENT = (- 149),
              +  RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = (- 148),
              +  RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = (- 147),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_BROKER = (- 146),
              +  RD_KAFKA_RESP_ERR__NOT_CONFIGURED = (- 145),
              +  RD_KAFKA_RESP_ERR__FENCED = (- 144),
              +  RD_KAFKA_RESP_ERR__APPLICATION = (- 143),
              +  RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST = (- 142),
              +  RD_KAFKA_RESP_ERR__NOOP = (- 141),
              +  RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET = (- 140),
              +  RD_KAFKA_RESP_ERR__END = (- 100),
              +  RD_KAFKA_RESP_ERR_UNKNOWN = (- 1),
              +  RD_KAFKA_RESP_ERR_NO_ERROR = 0,
              +  RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG = 2,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4,
              +  RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5,
              +  RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6,
              +  RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7,
              +  RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8,
              +  RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9,
              +  RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10,
              +  RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11,
              +  RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12,
              +  RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS = 14,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE = 15,
              +  RD_KAFKA_RESP_ERR_NOT_COORDINATOR = 16,
              +  RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17,
              +  RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
              +  RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25,
              +  RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26,
              +  RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27,
              +  RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
              +  RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29,
              +  RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30,
              +  RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
              +  RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35,
              +  RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36,
              +  RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39,
              +  RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40,
              +  RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
              +  RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44,
              +  RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47,
              +  RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49,
              +  RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50,
              +  RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51,
              +  RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52,
              +  RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
              +  RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54,
              +  RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55,
              +  RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56,
              +  RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57,
              +  RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59,
              +  RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66,
              +  RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67,
              +  RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68,
              +  RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69,
              +  RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70,
              +  RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71,
              +  RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72,
              +  RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73,
              +  RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76,
              +  RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77,
              +  RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78,
              +  RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79,
              +  RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80,
              +  RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81,
              +  RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID = 82,
              +  RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE = 83,
              +  RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED = 84,
              +  RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS = 85,
              +  RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC = 86,
              +  RD_KAFKA_RESP_ERR_INVALID_RECORD = 87,
              +  RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT = 88,
              +  RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED = 89,
              +  RD_KAFKA_RESP_ERR_PRODUCER_FENCED = 90,
              +  RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND = 91,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE = 92,
              +  RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL = 93,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET = 94,
              +  RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION = 95,
              +  RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED = 96,
              +  RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE = 97,
              +  RD_KAFKA_RESP_ERR_END_ALL,
              +} rd_kafka_resp_err_t;
              +struct rd_kafka_err_desc {
              +  rd_kafka_resp_err_t code;
              +  char *name;
              +  char *desc;
              +};
              +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, size_t *cntp);
              +const char *rd_kafka_err2str(rd_kafka_resp_err_t err);
              +const char *rd_kafka_err2name(rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_last_error(void);
              +rd_kafka_resp_err_t rd_kafka_errno2err(int errnox);
              +int rd_kafka_errno(void);
              +rd_kafka_resp_err_t rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason);
              +rd_kafka_resp_err_t rd_kafka_error_code(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_name(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_string(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_fatal(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_retriable(const rd_kafka_error_t *error);
              +int rd_kafka_error_txn_requires_abort(const rd_kafka_error_t *error);
              +void rd_kafka_error_destroy(rd_kafka_error_t *error);
              +rd_kafka_error_t *rd_kafka_error_new(rd_kafka_resp_err_t code, const char *fmt, ...);
              +typedef struct rd_kafka_topic_partition_s {
              +  char *topic;
              +  int32_t partition;
              +  int64_t offset;
              +  void *metadata;
              +  size_t metadata_size;
              +  void *opaque;
              +  rd_kafka_resp_err_t err;
              +  void *_private;
              +} rd_kafka_topic_partition_t;
              +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar);
              +typedef struct rd_kafka_topic_partition_list_s {
              +  int cnt;
              +  int size;
              +  rd_kafka_topic_partition_t *elems;
              +} rd_kafka_topic_partition_list_t;
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size);
              +void rd_kafka_topic_partition_list_destroy(rd_kafka_topic_partition_list_t *rkparlist);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_add_range(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t start, int32_t stop);
              +int rd_kafka_topic_partition_list_del(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +int rd_kafka_topic_partition_list_del_by_idx(rd_kafka_topic_partition_list_t *rktparlist, int idx);
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src);
              +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition, int64_t offset);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(const rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_sort(rd_kafka_topic_partition_list_t *rktparlist, int (*cmp)(const void *a, const void *b, void *cmp_opaque), void *cmp_opaque);
              +typedef enum rd_kafka_vtype_t {
              +  RD_KAFKA_VTYPE_END,
              +  RD_KAFKA_VTYPE_TOPIC,
              +  RD_KAFKA_VTYPE_RKT,
              +  RD_KAFKA_VTYPE_PARTITION,
              +  RD_KAFKA_VTYPE_VALUE,
              +  RD_KAFKA_VTYPE_KEY,
              +  RD_KAFKA_VTYPE_OPAQUE,
              +  RD_KAFKA_VTYPE_MSGFLAGS,
              +  RD_KAFKA_VTYPE_TIMESTAMP,
              +  RD_KAFKA_VTYPE_HEADER,
              +  RD_KAFKA_VTYPE_HEADERS,
              +} rd_kafka_vtype_t;
              +typedef struct rd_kafka_vu_s {
              +  rd_kafka_vtype_t vtype;
              +  union {
              +    char *cstr;
              +    rd_kafka_topic_t *rkt;
              +    int i;
              +    int32_t i32;
              +    int64_t i64;
              +    struct {
              +      void *ptr;
              +      size_t size;
              +    } mem;
              +    struct {
              +      char *name;
              +      void *val;
              +      ssize_t size;
              +    } header;
              +    rd_kafka_headers_t *headers;
              +    void *ptr;
              +    char _pad[64];
              +  } u;
              +} rd_kafka_vu_t;
              +rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count);
              +void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs);
              +rd_kafka_headers_t *rd_kafka_headers_copy(const rd_kafka_headers_t *src);
              +rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, const char *name, ssize_t name_size, const void *value, ssize_t value_size);
              +rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, const char *name);
              +rd_kafka_resp_err_t rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get(const rd_kafka_headers_t *hdrs, size_t idx, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, size_t idx, const char **namep, const void **valuep, size_t *sizep);
              +typedef struct rd_kafka_message_s {
              +  rd_kafka_resp_err_t err;
              +  rd_kafka_topic_t *rkt;
              +  int32_t partition;
              +  void *payload;
              +  size_t len;
              +  void *key;
              +  size_t key_len;
              +  int64_t offset;
              +  void *_private;
              +} rd_kafka_message_t;
              +void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);
              +const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage);
              +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, rd_kafka_timestamp_type_t *tstype);
              +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage);
              +int32_t rd_kafka_message_broker_id(const rd_kafka_message_t *rkmessage);
              +rd_kafka_resp_err_t rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +rd_kafka_resp_err_t rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t *hdrs);
              +size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs);
              +typedef enum {
              +  RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0,
              +  RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1,
              +  RD_KAFKA_MSG_STATUS_PERSISTED = 2,
              +} rd_kafka_msg_status_t;
              +rd_kafka_msg_status_t rd_kafka_message_status(const rd_kafka_message_t *rkmessage);
              +typedef enum {
              +  RD_KAFKA_CONF_UNKNOWN = (- 2),
              +  RD_KAFKA_CONF_INVALID = (- 1),
              +  RD_KAFKA_CONF_OK = 0,
              +} rd_kafka_conf_res_t;
              +rd_kafka_conf_t *rd_kafka_conf_new(void);
              +void rd_kafka_conf_destroy(rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, size_t filter_cnt, const char **filter);
              +const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk);
              +rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events);
              +void rd_kafka_conf_set_background_event_cb(rd_kafka_conf_t *conf, void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque));
              +void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, void (*dr_cb)(rd_kafka_t *rk, void *payload, size_t len, rd_kafka_resp_err_t err, void *opaque, void *msg_opaque));
              +void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, void (*dr_msg_cb)(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_consume_cb(rd_kafka_conf_t *conf, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_rebalance_cb(rd_kafka_conf_t *conf, void (*rebalance_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque));
              +void rd_kafka_conf_set_offset_commit_cb(rd_kafka_conf_t *conf, void (*offset_commit_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque));
              +void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, void (*error_cb)(rd_kafka_t *rk, int err, const char *reason, void *opaque));
              +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, void (*throttle_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque));
              +void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, void (*log_cb)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque));
              +void rd_kafka_conf_set_oauthbearer_token_refresh_cb(rd_kafka_conf_t *conf, void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, const char *oauthbearer_config, void *opaque));
              +void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, int (*socket_cb)(int domain, int type, int protocol, void *opaque));
              +void rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, int (*connect_cb)(int sockfd, const struct sockaddr *addr, int addrlen, const char *id, void *opaque));
              +void rd_kafka_conf_set_closesocket_cb(rd_kafka_conf_t *conf, int (*closesocket_cb)(int sockfd, void *opaque));
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb(rd_kafka_conf_t *conf, int (*ssl_cert_verify_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int *x509_error, int depth, const char *buf, size_t size, char *errstr, size_t errstr_size, void *opaque));
              +typedef enum rd_kafka_cert_type_t {
              +  RD_KAFKA_CERT_PUBLIC_KEY,
              +  RD_KAFKA_CERT_PRIVATE_KEY,
              +  RD_KAFKA_CERT_CA,
              +  RD_KAFKA_CERT__CNT,
              +} rd_kafka_cert_type_t;
              +typedef enum rd_kafka_cert_enc_t {
              +  RD_KAFKA_CERT_ENC_PKCS12,
              +  RD_KAFKA_CERT_ENC_DER,
              +  RD_KAFKA_CERT_ENC_PEM,
              +  RD_KAFKA_CERT_ENC__CNT,
              +} rd_kafka_cert_enc_t;
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, rd_kafka_cert_type_t cert_type, rd_kafka_cert_enc_t cert_enc, const void *buffer, size_t size, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_engine_callback_data(rd_kafka_conf_t *conf, void *callback_data);
              +void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque);
              +void *rd_kafka_opaque(const rd_kafka_t *rk);
              +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, rd_kafka_topic_conf_t *tconf);
              +rd_kafka_topic_conf_t *rd_kafka_conf_get_default_topic_conf(rd_kafka_conf_t *conf);
              +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp);
              +const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, size_t *cntp);
              +void rd_kafka_conf_dump_free(const char **arr, size_t cnt);
              +void rd_kafka_conf_properties_show(FILE *fp);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf);
              +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk);
              +void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *rkt_opaque);
              +void rd_kafka_topic_conf_set_partitioner_cb(rd_kafka_topic_conf_t *topic_conf, int32_t (*partitioner)(const rd_kafka_topic_t *rkt, const void *keydata, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque));
              +void rd_kafka_topic_conf_set_msg_order_cmp(rd_kafka_topic_conf_t *topic_conf, int (*msg_order_cmp)(const rd_kafka_message_t *a, const rd_kafka_message_t *b));
              +int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, int32_t partition);
              +int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, char *errstr, size_t errstr_size);
              +void rd_kafka_destroy(rd_kafka_t *rk);
              +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags);
              +const char *rd_kafka_name(const rd_kafka_t *rk);
              +rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk);
              +char *rd_kafka_memberid(const rd_kafka_t *rk);
              +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms);
              +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, rd_kafka_topic_conf_t *conf);
              +void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt);
              +const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt);
              +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt);
              +int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms);
              +void rd_kafka_yield(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_pause_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_resume_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high);
              +rd_kafka_resp_err_t rd_kafka_offsets_for_times(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets, int timeout_ms);
              +void *rd_kafka_mem_calloc(rd_kafka_t *rk, size_t num, size_t size);
              +void *rd_kafka_mem_malloc(rd_kafka_t *rk, size_t size);
              +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr);
              +rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk);
              +void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu);
              +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, const char *topic, int32_t partition);
              +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk);
              +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst);
              +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu);
              +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, int fd, const void *payload, size_t size);
              +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, void (*event_cb)(rd_kafka_t *rk, void *qev_opaque), void *qev_opaque);
              +void rd_kafka_queue_yield(rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
              +rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_seek_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms);
              +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, int timeout_ms);
              +ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +rd_kafka_resp_err_t rd_kafka_offsets_store(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets);
              +rd_kafka_resp_err_t rd_kafka_subscribe(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *topics);
              +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics);
              +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_incremental_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_error_t *rd_kafka_incremental_unassign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +const char *rd_kafka_rebalance_protocol(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_assignment(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **partitions);
              +int rd_kafka_assignment_lost(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_commit(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_message(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_queue(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_queue_t *rkqu, void (*cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_committed(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata(rd_kafka_t *rk);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new(const char *group_id);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new_with_genid(const char *group_id, int32_t generation_id, const char *member_id, const char *group_instance_id);
              +void rd_kafka_consumer_group_metadata_destroy(rd_kafka_consumer_group_metadata_t *);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_write(const rd_kafka_consumer_group_metadata_t *cgmd, void **bufferp, size_t *sizep);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_read(rd_kafka_consumer_group_metadata_t **cgmdp, const void *buffer, size_t size);
              +int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, void *payload, size_t len, const void *key, size_t keylen, void *msg_opaque);
              +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...);
              +rd_kafka_error_t *rd_kafka_produceva(rd_kafka_t *rk, const rd_kafka_vu_t *vus, size_t cnt);
              +int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, rd_kafka_message_t *rkmessages, int message_cnt);
              +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags);
              +typedef struct rd_kafka_metadata_broker {
              +  int32_t id;
              +  char *host;
              +  int port;
              +} rd_kafka_metadata_broker_t;
              +typedef struct rd_kafka_metadata_partition {
              +  int32_t id;
              +  rd_kafka_resp_err_t err;
              +  int32_t leader;
              +  int replica_cnt;
              +  int32_t *replicas;
              +  int isr_cnt;
              +  int32_t *isrs;
              +} rd_kafka_metadata_partition_t;
              +typedef struct rd_kafka_metadata_topic {
              +  char *topic;
              +  int partition_cnt;
              +  struct rd_kafka_metadata_partition *partitions;
              +  rd_kafka_resp_err_t err;
              +} rd_kafka_metadata_topic_t;
              +typedef struct rd_kafka_metadata {
              +  int broker_cnt;
              +  struct rd_kafka_metadata_broker *brokers;
              +  int topic_cnt;
              +  struct rd_kafka_metadata_topic *topics;
              +  int32_t orig_broker_id;
              +  char *orig_broker_name;
              +} rd_kafka_metadata_t;
              +rd_kafka_resp_err_t rd_kafka_metadata(rd_kafka_t *rk, int all_topics, rd_kafka_topic_t *only_rkt, const struct rd_kafka_metadata **metadatap, int timeout_ms);
              +void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata);
              +struct rd_kafka_group_member_info {
              +  char *member_id;
              +  char *client_id;
              +  char *client_host;
              +  void *member_metadata;
              +  int member_metadata_size;
              +  void *member_assignment;
              +  int member_assignment_size;
              +};
              +struct rd_kafka_group_info {
              +  struct rd_kafka_metadata_broker broker;
              +  char *group;
              +  rd_kafka_resp_err_t err;
              +  char *state;
              +  char *protocol_type;
              +  char *protocol;
              +  struct rd_kafka_group_member_info *members;
              +  int member_cnt;
              +};
              +struct rd_kafka_group_list {
              +  struct rd_kafka_group_info *groups;
              +  int group_cnt;
              +};
              +rd_kafka_resp_err_t rd_kafka_list_groups(rd_kafka_t *rk, const char *group, const struct rd_kafka_group_list **grplistp, int timeout_ms);
              +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist);
              +int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist);
              +void rd_kafka_set_logger(rd_kafka_t *rk, void (*func)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_set_log_level(rd_kafka_t *rk, int level);
              +void rd_kafka_log_print(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +int rd_kafka_outq_len(rd_kafka_t *rk);
              +void rd_kafka_dump(FILE *fp, rd_kafka_t *rk);
              +int rd_kafka_thread_cnt(void);
              +typedef enum rd_kafka_thread_type_t {
              +  RD_KAFKA_THREAD_MAIN,
              +  RD_KAFKA_THREAD_BACKGROUND,
              +  RD_KAFKA_THREAD_BROKER,
              +} rd_kafka_thread_type_t;
              +int rd_kafka_wait_destroyed(int timeout_ms);
              +int rd_kafka_unittest(void);
              +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk);
              +typedef int rd_kafka_event_type_t;
              +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev);
              +void rd_kafka_event_destroy(rd_kafka_event_t *rkev);
              +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev);
              +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, const rd_kafka_message_t **rkmessages, size_t size);
              +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev);
              +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev);
              +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev);
              +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev);
              +int rd_kafka_event_log(rd_kafka_event_t *rkev, const char **fac, const char **str, int *level);
              +int rd_kafka_event_debug_contexts(rd_kafka_event_t *rkev, char *dst, size_t dstsize);
              +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_list_t *rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_t *rd_kafka_event_topic_partition(rd_kafka_event_t *rkev);
              +typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteRecords_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteGroups_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteConsumerGroupOffsets_result_t;
              +const rd_kafka_CreateTopics_result_t *rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteTopics_result_t *rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreatePartitions_result_t *rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConfigs_result_t *rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConfigs_result_t *rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteRecords_result_t *rd_kafka_event_DeleteRecords_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteGroups_result_t *rd_kafka_event_DeleteGroups_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteConsumerGroupOffsets_result_t *rd_kafka_event_DeleteConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
              +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms);
              +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms);
              +typedef rd_kafka_resp_err_t (rd_kafka_plugin_f_conf_init_t)(rd_kafka_conf_t *conf, void **plug_opaquep, char *errstr, size_t errstr_size);
              +typedef rd_kafka_conf_res_t (rd_kafka_interceptor_f_on_conf_set_t)(rd_kafka_conf_t *conf, const char *name, const char *val, char *errstr, size_t errstr_size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_dup_t)(rd_kafka_conf_t *new_conf, const rd_kafka_conf_t *old_conf, size_t filter_cnt, const char **filter, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_destroy_t)(void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_new_t)(rd_kafka_t *rk, const rd_kafka_conf_t *conf, void *ic_opaque, char *errstr, size_t errstr_size);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_send_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_acknowledgement_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_consume_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_commit_t)(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_request_sent_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_response_received_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, int64_t rtt, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_start_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_exit_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_new_t *on_new, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_destroy_t *on_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_send_t *on_send, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_consume_t *on_consume, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_commit_t *on_commit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_response_received(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_response_received_t *on_response_received, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres);
              +const rd_kafka_error_t *rd_kafka_group_result_error(const rd_kafka_group_result_t *groupres);
              +const char *rd_kafka_group_result_name(const rd_kafka_group_result_t *groupres);
              +const rd_kafka_topic_partition_list_t *rd_kafka_group_result_partitions(const rd_kafka_group_result_t *groupres);
              +typedef enum rd_kafka_admin_op_t {
              +  RD_KAFKA_ADMIN_OP_ANY = 0,
              +  RD_KAFKA_ADMIN_OP_CREATETOPICS,
              +  RD_KAFKA_ADMIN_OP_DELETETOPICS,
              +  RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONFIGS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS,
              +  RD_KAFKA_ADMIN_OP_DELETERECORDS,
              +  RD_KAFKA_ADMIN_OP_DELETEGROUPS,
              +  RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS,
              +  RD_KAFKA_ADMIN_OP__CNT,
              +} rd_kafka_admin_op_t;
              +typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t;
              +rd_kafka_AdminOptions_t *rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
              +void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, int true_or_false, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, int32_t broker_id, char *errstr, size_t errstr_size);
              +void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, void *ev_opaque);
              +typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t;
              +rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, int num_partitions, int replication_factor, char *errstr, size_t errstr_size);
              +void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic);
              +void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, int32_t partition, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, const char *name, const char *value);
              +void rd_kafka_CreateTopics(rd_kafka_t *rk, rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(const rd_kafka_CreateTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
              +rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic);
              +void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic);
              +void rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt);
              +void rd_kafka_DeleteTopics(rd_kafka_t *rk, rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(const rd_kafka_DeleteTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t;
              +rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new(const char *topic, size_t new_total_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts);
              +void rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment(rd_kafka_NewPartitions_t *new_parts, int32_t new_partition_idx, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_CreatePartitions(rd_kafka_t *rk, rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreatePartitions_result_topics(const rd_kafka_CreatePartitions_result_t *result, size_t *cntp);
              +typedef enum rd_kafka_ConfigSource_t {
              +  RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
              +  RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4,
              +  RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5,
              +  RD_KAFKA_CONFIG_SOURCE__CNT,
              +} rd_kafka_ConfigSource_t;
              +const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource);
              +typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t;
              +const char *rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry);
              +const char *rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry);
              +rd_kafka_ConfigSource_t rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, size_t *cntp);
              +typedef enum rd_kafka_ResourceType_t {
              +  RD_KAFKA_RESOURCE_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_ANY = 1,
              +  RD_KAFKA_RESOURCE_TOPIC = 2,
              +  RD_KAFKA_RESOURCE_GROUP = 3,
              +  RD_KAFKA_RESOURCE_BROKER = 4,
              +  RD_KAFKA_RESOURCE__CNT,
              +} rd_kafka_ResourceType_t;
              +const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype);
              +typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
              +rd_kafka_ConfigResource_t *rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, const char *resname);
              +void rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config);
              +void rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, size_t config_cnt);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, const char *name, const char *value);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, size_t *cntp);
              +rd_kafka_ResourceType_t rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config);
              +void rd_kafka_AlterConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_AlterConfigs_result_resources(const rd_kafka_AlterConfigs_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_DescribeConfigs_result_resources(const rd_kafka_DescribeConfigs_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteRecords_s rd_kafka_DeleteRecords_t;
              +rd_kafka_DeleteRecords_t *rd_kafka_DeleteRecords_new(const rd_kafka_topic_partition_list_t *before_offsets);
              +void rd_kafka_DeleteRecords_destroy(rd_kafka_DeleteRecords_t *del_records);
              +void rd_kafka_DeleteRecords_destroy_array(rd_kafka_DeleteRecords_t **del_records, size_t del_record_cnt);
              +void rd_kafka_DeleteRecords(rd_kafka_t *rk, rd_kafka_DeleteRecords_t **del_records, size_t del_record_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_partition_list_t *rd_kafka_DeleteRecords_result_offsets(const rd_kafka_DeleteRecords_result_t *result);
              +typedef struct rd_kafka_DeleteGroup_s rd_kafka_DeleteGroup_t;
              +rd_kafka_DeleteGroup_t *rd_kafka_DeleteGroup_new(const char *group);
              +void rd_kafka_DeleteGroup_destroy(rd_kafka_DeleteGroup_t *del_group);
              +void rd_kafka_DeleteGroup_destroy_array(rd_kafka_DeleteGroup_t **del_groups, size_t del_group_cnt);
              +void rd_kafka_DeleteGroups(rd_kafka_t *rk, rd_kafka_DeleteGroup_t **del_groups, size_t del_group_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_DeleteGroups_result_groups(const rd_kafka_DeleteGroups_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteConsumerGroupOffsets_s rd_kafka_DeleteConsumerGroupOffsets_t;
              +rd_kafka_DeleteConsumerGroupOffsets_t *rd_kafka_DeleteConsumerGroupOffsets_new(const char *group, const rd_kafka_topic_partition_list_t *partitions);
              +void rd_kafka_DeleteConsumerGroupOffsets_destroy(rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets);
              +void rd_kafka_DeleteConsumerGroupOffsets_destroy_array(rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, size_t del_grpoffset_cnt);
              +void rd_kafka_DeleteConsumerGroupOffsets(rd_kafka_t *rk, rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, size_t del_grpoffsets_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_DeleteConsumerGroupOffsets_result_groups(const rd_kafka_DeleteConsumerGroupOffsets_result_t *result, size_t *cntp);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token(rd_kafka_t *rk, const char *token_value, int64_t md_lifetime_ms, const char *md_principal_name, const char **extensions, size_t extension_size, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk, const char *errstr);
              +rd_kafka_error_t *rd_kafka_init_transactions(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_send_offsets_to_transaction(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, const rd_kafka_consumer_group_metadata_t *cgmetadata, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_commit_transaction(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_abort_transaction(rd_kafka_t *rk, int timeout_ms);
              +typedef struct rd_kafka_mock_cluster_s rd_kafka_mock_cluster_t;
              +rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new(rd_kafka_t *rk, int broker_cnt);
              +void rd_kafka_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_t *rd_kafka_mock_cluster_handle(const rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_mock_cluster_t *rd_kafka_handle_mock_cluster(const rd_kafka_t *rk);
              +const char *rd_kafka_mock_cluster_bootstraps(const rd_kafka_mock_cluster_t *mcluster);
              +void rd_kafka_mock_clear_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey);
              +void rd_kafka_mock_push_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, ...);
              +void rd_kafka_mock_push_request_errors_array(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, const rd_kafka_resp_err_t *errors);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_push_request_error_rtts(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int16_t ApiKey, size_t cnt, ...);
              +void rd_kafka_mock_topic_set_error(rd_kafka_mock_cluster_t *mcluster, const char *topic, rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_mock_topic_create(rd_kafka_mock_cluster_t *mcluster, const char *topic, int partition_cnt, int replication_factor);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_leader(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int64_t lo, int64_t hi);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_down(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_up(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rtt(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int rtt_ms);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rack(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, const char *rack);
              +rd_kafka_resp_err_t rd_kafka_mock_coordinator_set(rd_kafka_mock_cluster_t *mcluster, const char *key_type, const char *key, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_set_apiversion(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, int16_t MinVersion, int16_t MaxVersion);
              +'
+
+rdkafka.h, rdkafka_mock.h
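
The declarations above (the tail of the RD_KAFKA_CDEF string) cover rdkafka.h and rdkafka_mock.h and are intended to be consumed by PHP's FFI extension. A minimal sketch of that idea, declaring only the two version functions so it stays self-contained; the shared-library file name is an assumption and depends on how librdkafka is installed:

<?php
// Hypothetical sketch: hand a cdef string (here a tiny subset of RD_KAFKA_CDEF)
// to PHP's FFI extension and call into librdkafka.
// The library name "librdkafka.so.1" is an assumption.
$ffi = \FFI::cdef(
    'int rd_kafka_version(void);
     const char *rd_kafka_version_str(void);',
    'librdkafka.so.1'
);

// rd_kafka_version_str() returns a C string; FFI::string() copies it into PHP.
printf(
    "librdkafka %s (0x%08x)\n",
    \FFI::string($ffi->rd_kafka_version_str()),
    $ffi->rd_kafka_version()
);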

RD_KAFKA_VERSION

public RD_KAFKA_VERSION = 17302271

librdkafka version

Interpreted as hex MM.mm.rr.xx:

• MM = Major
• mm = minor
• rr = revision
• xx = pre-release id (0xff is the final release)

E.g.: 0x000801ff = 0.8.1

Remarks
This value should only be used at compile time; for runtime version checks use rd_kafka_version().
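
As a quick illustration of the MM.mm.rr.xx layout described above, a small sketch that unpacks the integer with bit shifts (the helper name is ours, not part of the client):

<?php
// Sketch: decode a librdkafka version integer laid out as hex MM.mm.rr.xx.
// 0xff in the lowest byte marks a final release.
function decodeRdKafkaVersion(int $version): string
{
    $major    = ($version >> 24) & 0xFF;
    $minor    = ($version >> 16) & 0xFF;
    $revision = ($version >> 8) & 0xFF;
    $pre      = $version & 0xFF;

    return sprintf('%d.%d.%d', $major, $minor, $revision)
        . ($pre === 0xFF ? '' : sprintf('-pre%d', $pre));
}

// 17302271 = 0x010802ff, i.e. librdkafka 1.8.2 (final release)
echo decodeRdKafkaVersion(17302271), PHP_EOL; // "1.8.2"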

RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE

public RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76

Unsupported compression type

RD_KAFKA_RESP_ERR_END_ALL

public RD_KAFKA_RESP_ERR_END_ALL = 98

enum rd_kafka_resp_err_t
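
Since RD_KAFKA_RESP_ERR_END_ALL (= 98) is the last member of the rd_kafka_resp_err_t enum in the declarations above, it can serve as an exclusive upper bound when sanity-checking a numeric broker error code. A small sketch under that assumption, using the documented values directly so it stays self-contained:

<?php
// Sketch: check that a numeric code falls inside the documented broker
// error range [RD_KAFKA_RESP_ERR_NO_ERROR, RD_KAFKA_RESP_ERR_END_ALL).
// The literal values mirror the constants documented on this page.
const ERR_NO_ERROR = 0;   // RD_KAFKA_RESP_ERR_NO_ERROR
const ERR_END_ALL  = 98;  // RD_KAFKA_RESP_ERR_END_ALL

function isKnownBrokerErrorCode(int $err): bool
{
    return $err >= ERR_NO_ERROR && $err < ERR_END_ALL;
}

var_dump(isKnownBrokerErrorCode(76));  // RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE => true
var_dump(isKnownBrokerErrorCode(123)); // outside the documented range => false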

RD_KAFKA_ADMIN_OP__CNT

public RD_KAFKA_ADMIN_OP__CNT = 9

Number of ops defined

RD_KAFKA_CDEF

              public RD_KAFKA_CDEF = 'typedef long int ssize_t;
              +typedef struct _IO_FILE FILE;
              +typedef long int mode_t;
              +typedef signed int int16_t;
              +typedef unsigned int uint16_t;
              +typedef signed int int32_t;
              +typedef signed long int int64_t;
              +int rd_kafka_version(void);
              +const char *rd_kafka_version_str(void);
              +typedef enum rd_kafka_type_t {
              +  RD_KAFKA_PRODUCER,
              +  RD_KAFKA_CONSUMER,
              +} rd_kafka_type_t;
              +typedef enum rd_kafka_timestamp_type_t {
              +  RD_KAFKA_TIMESTAMP_NOT_AVAILABLE,
              +  RD_KAFKA_TIMESTAMP_CREATE_TIME,
              +  RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME,
              +} rd_kafka_timestamp_type_t;
              +const char *rd_kafka_get_debug_contexts(void);
              +typedef struct rd_kafka_s rd_kafka_t;
              +typedef struct rd_kafka_topic_s rd_kafka_topic_t;
              +typedef struct rd_kafka_conf_s rd_kafka_conf_t;
              +typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t;
              +typedef struct rd_kafka_queue_s rd_kafka_queue_t;
              +typedef struct rd_kafka_op_s rd_kafka_event_t;
              +typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t;
              +typedef struct rd_kafka_consumer_group_metadata_s rd_kafka_consumer_group_metadata_t;
              +typedef struct rd_kafka_error_s {
              +  unsigned int code;
              +  char *errstr;
              +  unsigned char fatal;
              +  unsigned char retriable;
              +  unsigned char txn_requires_abort;
              +} rd_kafka_error_t;
              +typedef struct rd_kafka_headers_s rd_kafka_headers_t;
              +typedef struct rd_kafka_group_result_s rd_kafka_group_result_t;
              +typedef enum {
              +  RD_KAFKA_RESP_ERR__BEGIN = (- 200),
              +  RD_KAFKA_RESP_ERR__BAD_MSG = (- 199),
              +  RD_KAFKA_RESP_ERR__BAD_COMPRESSION = (- 198),
              +  RD_KAFKA_RESP_ERR__DESTROY = (- 197),
              +  RD_KAFKA_RESP_ERR__FAIL = (- 196),
              +  RD_KAFKA_RESP_ERR__TRANSPORT = (- 195),
              +  RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = (- 194),
              +  RD_KAFKA_RESP_ERR__RESOLVE = (- 193),
              +  RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = (- 192),
              +  RD_KAFKA_RESP_ERR__PARTITION_EOF = (- 191),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = (- 190),
              +  RD_KAFKA_RESP_ERR__FS = (- 189),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = (- 188),
              +  RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = (- 187),
              +  RD_KAFKA_RESP_ERR__INVALID_ARG = (- 186),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT = (- 185),
              +  RD_KAFKA_RESP_ERR__QUEUE_FULL = (- 184),
              +  RD_KAFKA_RESP_ERR__ISR_INSUFF = (- 183),
              +  RD_KAFKA_RESP_ERR__NODE_UPDATE = (- 182),
              +  RD_KAFKA_RESP_ERR__SSL = (- 181),
              +  RD_KAFKA_RESP_ERR__WAIT_COORD = (- 180),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = (- 179),
              +  RD_KAFKA_RESP_ERR__IN_PROGRESS = (- 178),
              +  RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = (- 177),
              +  RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = (- 176),
              +  RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = (- 175),
              +  RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = (- 174),
              +  RD_KAFKA_RESP_ERR__CONFLICT = (- 173),
              +  RD_KAFKA_RESP_ERR__STATE = (- 172),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = (- 171),
              +  RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = (- 170),
              +  RD_KAFKA_RESP_ERR__AUTHENTICATION = (- 169),
              +  RD_KAFKA_RESP_ERR__NO_OFFSET = (- 168),
              +  RD_KAFKA_RESP_ERR__OUTDATED = (- 167),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = (- 166),
              +  RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = (- 165),
              +  RD_KAFKA_RESP_ERR__WAIT_CACHE = (- 164),
              +  RD_KAFKA_RESP_ERR__INTR = (- 163),
              +  RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = (- 162),
              +  RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = (- 161),
              +  RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = (- 160),
              +  RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = (- 159),
              +  RD_KAFKA_RESP_ERR__PARTIAL = (- 158),
              +  RD_KAFKA_RESP_ERR__READ_ONLY = (- 157),
              +  RD_KAFKA_RESP_ERR__NOENT = (- 156),
              +  RD_KAFKA_RESP_ERR__UNDERFLOW = (- 155),
              +  RD_KAFKA_RESP_ERR__INVALID_TYPE = (- 154),
              +  RD_KAFKA_RESP_ERR__RETRY = (- 153),
              +  RD_KAFKA_RESP_ERR__PURGE_QUEUE = (- 152),
              +  RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = (- 151),
              +  RD_KAFKA_RESP_ERR__FATAL = (- 150),
              +  RD_KAFKA_RESP_ERR__INCONSISTENT = (- 149),
              +  RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = (- 148),
              +  RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = (- 147),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_BROKER = (- 146),
              +  RD_KAFKA_RESP_ERR__NOT_CONFIGURED = (- 145),
              +  RD_KAFKA_RESP_ERR__FENCED = (- 144),
              +  RD_KAFKA_RESP_ERR__APPLICATION = (- 143),
              +  RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST = (- 142),
              +  RD_KAFKA_RESP_ERR__NOOP = (- 141),
              +  RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET = (- 140),
              +  RD_KAFKA_RESP_ERR__END = (- 100),
              +  RD_KAFKA_RESP_ERR_UNKNOWN = (- 1),
              +  RD_KAFKA_RESP_ERR_NO_ERROR = 0,
              +  RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG = 2,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4,
              +  RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5,
              +  RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6,
              +  RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7,
              +  RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8,
              +  RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9,
              +  RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10,
              +  RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11,
              +  RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12,
              +  RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS = 14,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE = 15,
              +  RD_KAFKA_RESP_ERR_NOT_COORDINATOR = 16,
              +  RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17,
              +  RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
              +  RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25,
              +  RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26,
              +  RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27,
              +  RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
              +  RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29,
              +  RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30,
              +  RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
              +  RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35,
              +  RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36,
              +  RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39,
              +  RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40,
              +  RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
              +  RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44,
              +  RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47,
              +  RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49,
              +  RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50,
              +  RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51,
              +  RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52,
              +  RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
              +  RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54,
              +  RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55,
              +  RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56,
              +  RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57,
              +  RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59,
              +  RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66,
              +  RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67,
              +  RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68,
              +  RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69,
              +  RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70,
              +  RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71,
              +  RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72,
              +  RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73,
              +  RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76,
              +  RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77,
              +  RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78,
              +  RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79,
              +  RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80,
              +  RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81,
              +  RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID = 82,
              +  RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE = 83,
              +  RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED = 84,
              +  RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS = 85,
              +  RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC = 86,
              +  RD_KAFKA_RESP_ERR_INVALID_RECORD = 87,
              +  RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT = 88,
              +  RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED = 89,
              +  RD_KAFKA_RESP_ERR_PRODUCER_FENCED = 90,
              +  RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND = 91,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE = 92,
              +  RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL = 93,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET = 94,
              +  RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION = 95,
              +  RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED = 96,
              +  RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE = 97,
              +  RD_KAFKA_RESP_ERR_END_ALL,
              +} rd_kafka_resp_err_t;
              +struct rd_kafka_err_desc {
              +  rd_kafka_resp_err_t code;
              +  char *name;
              +  char *desc;
              +};
              +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, size_t *cntp);
              +const char *rd_kafka_err2str(rd_kafka_resp_err_t err);
              +const char *rd_kafka_err2name(rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_last_error(void);
              +rd_kafka_resp_err_t rd_kafka_errno2err(int errnox);
              +int rd_kafka_errno(void);
              +rd_kafka_resp_err_t rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason);
              +rd_kafka_resp_err_t rd_kafka_error_code(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_name(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_string(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_fatal(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_retriable(const rd_kafka_error_t *error);
              +int rd_kafka_error_txn_requires_abort(const rd_kafka_error_t *error);
              +void rd_kafka_error_destroy(rd_kafka_error_t *error);
              +rd_kafka_error_t *rd_kafka_error_new(rd_kafka_resp_err_t code, const char *fmt, ...);
              +typedef struct rd_kafka_topic_partition_s {
              +  char *topic;
              +  int32_t partition;
              +  int64_t offset;
              +  void *metadata;
              +  size_t metadata_size;
              +  void *opaque;
              +  rd_kafka_resp_err_t err;
              +  void *_private;
              +} rd_kafka_topic_partition_t;
              +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar);
              +typedef struct rd_kafka_topic_partition_list_s {
              +  int cnt;
              +  int size;
              +  rd_kafka_topic_partition_t *elems;
              +} rd_kafka_topic_partition_list_t;
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size);
              +void rd_kafka_topic_partition_list_destroy(rd_kafka_topic_partition_list_t *rkparlist);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_add_range(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t start, int32_t stop);
              +int rd_kafka_topic_partition_list_del(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +int rd_kafka_topic_partition_list_del_by_idx(rd_kafka_topic_partition_list_t *rktparlist, int idx);
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src);
              +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition, int64_t offset);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(const rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_sort(rd_kafka_topic_partition_list_t *rktparlist, int (*cmp)(const void *a, const void *b, void *cmp_opaque), void *cmp_opaque);
              +typedef enum rd_kafka_vtype_t {
              +  RD_KAFKA_VTYPE_END,
              +  RD_KAFKA_VTYPE_TOPIC,
              +  RD_KAFKA_VTYPE_RKT,
              +  RD_KAFKA_VTYPE_PARTITION,
              +  RD_KAFKA_VTYPE_VALUE,
              +  RD_KAFKA_VTYPE_KEY,
              +  RD_KAFKA_VTYPE_OPAQUE,
              +  RD_KAFKA_VTYPE_MSGFLAGS,
              +  RD_KAFKA_VTYPE_TIMESTAMP,
              +  RD_KAFKA_VTYPE_HEADER,
              +  RD_KAFKA_VTYPE_HEADERS,
              +} rd_kafka_vtype_t;
              +typedef struct rd_kafka_vu_s {
              +  rd_kafka_vtype_t vtype;
              +  union {
              +    char *cstr;
              +    rd_kafka_topic_t *rkt;
              +    int i;
              +    int32_t i32;
              +    int64_t i64;
              +    struct {
              +      void *ptr;
              +      size_t size;
              +    } mem;
              +    struct {
              +      char *name;
              +      void *val;
              +      ssize_t size;
              +    } header;
              +    rd_kafka_headers_t *headers;
              +    void *ptr;
              +    char _pad[64];
              +  } u;
              +} rd_kafka_vu_t;
              +rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count);
              +void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs);
              +rd_kafka_headers_t *rd_kafka_headers_copy(const rd_kafka_headers_t *src);
              +rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, const char *name, ssize_t name_size, const void *value, ssize_t value_size);
              +rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, const char *name);
              +rd_kafka_resp_err_t rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get(const rd_kafka_headers_t *hdrs, size_t idx, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, size_t idx, const char **namep, const void **valuep, size_t *sizep);
              +typedef struct rd_kafka_message_s {
              +  rd_kafka_resp_err_t err;
              +  rd_kafka_topic_t *rkt;
              +  int32_t partition;
              +  void *payload;
              +  size_t len;
              +  void *key;
              +  size_t key_len;
              +  int64_t offset;
              +  void *_private;
              +} rd_kafka_message_t;
              +void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);
              +const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage);
              +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, rd_kafka_timestamp_type_t *tstype);
              +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage);
              +int32_t rd_kafka_message_broker_id(const rd_kafka_message_t *rkmessage);
              +rd_kafka_resp_err_t rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +rd_kafka_resp_err_t rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t *hdrs);
              +size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs);
              +typedef enum {
              +  RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0,
              +  RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1,
              +  RD_KAFKA_MSG_STATUS_PERSISTED = 2,
              +} rd_kafka_msg_status_t;
              +rd_kafka_msg_status_t rd_kafka_message_status(const rd_kafka_message_t *rkmessage);
              +typedef enum {
              +  RD_KAFKA_CONF_UNKNOWN = (- 2),
              +  RD_KAFKA_CONF_INVALID = (- 1),
              +  RD_KAFKA_CONF_OK = 0,
              +} rd_kafka_conf_res_t;
              +rd_kafka_conf_t *rd_kafka_conf_new(void);
              +void rd_kafka_conf_destroy(rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, size_t filter_cnt, const char **filter);
              +const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk);
              +rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events);
              +void rd_kafka_conf_set_background_event_cb(rd_kafka_conf_t *conf, void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque));
              +void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, void (*dr_cb)(rd_kafka_t *rk, void *payload, size_t len, rd_kafka_resp_err_t err, void *opaque, void *msg_opaque));
              +void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, void (*dr_msg_cb)(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_consume_cb(rd_kafka_conf_t *conf, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_rebalance_cb(rd_kafka_conf_t *conf, void (*rebalance_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque));
              +void rd_kafka_conf_set_offset_commit_cb(rd_kafka_conf_t *conf, void (*offset_commit_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque));
              +void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, void (*error_cb)(rd_kafka_t *rk, int err, const char *reason, void *opaque));
              +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, void (*throttle_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque));
              +void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, void (*log_cb)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque));
              +void rd_kafka_conf_set_oauthbearer_token_refresh_cb(rd_kafka_conf_t *conf, void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, const char *oauthbearer_config, void *opaque));
              +void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, int (*socket_cb)(int domain, int type, int protocol, void *opaque));
              +void rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, int (*connect_cb)(int sockfd, const struct sockaddr *addr, int addrlen, const char *id, void *opaque));
              +void rd_kafka_conf_set_closesocket_cb(rd_kafka_conf_t *conf, int (*closesocket_cb)(int sockfd, void *opaque));
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb(rd_kafka_conf_t *conf, int (*ssl_cert_verify_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int *x509_error, int depth, const char *buf, size_t size, char *errstr, size_t errstr_size, void *opaque));
              +typedef enum rd_kafka_cert_type_t {
              +  RD_KAFKA_CERT_PUBLIC_KEY,
              +  RD_KAFKA_CERT_PRIVATE_KEY,
              +  RD_KAFKA_CERT_CA,
              +  RD_KAFKA_CERT__CNT,
              +} rd_kafka_cert_type_t;
              +typedef enum rd_kafka_cert_enc_t {
              +  RD_KAFKA_CERT_ENC_PKCS12,
              +  RD_KAFKA_CERT_ENC_DER,
              +  RD_KAFKA_CERT_ENC_PEM,
              +  RD_KAFKA_CERT_ENC__CNT,
              +} rd_kafka_cert_enc_t;
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, rd_kafka_cert_type_t cert_type, rd_kafka_cert_enc_t cert_enc, const void *buffer, size_t size, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_engine_callback_data(rd_kafka_conf_t *conf, void *callback_data);
              +void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque);
              +void *rd_kafka_opaque(const rd_kafka_t *rk);
              +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, rd_kafka_topic_conf_t *tconf);
              +rd_kafka_topic_conf_t *rd_kafka_conf_get_default_topic_conf(rd_kafka_conf_t *conf);
              +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp);
              +const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, size_t *cntp);
              +void rd_kafka_conf_dump_free(const char **arr, size_t cnt);
              +void rd_kafka_conf_properties_show(FILE *fp);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf);
              +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk);
              +void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *rkt_opaque);
              +void rd_kafka_topic_conf_set_partitioner_cb(rd_kafka_topic_conf_t *topic_conf, int32_t (*partitioner)(const rd_kafka_topic_t *rkt, const void *keydata, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque));
              +void rd_kafka_topic_conf_set_msg_order_cmp(rd_kafka_topic_conf_t *topic_conf, int (*msg_order_cmp)(const rd_kafka_message_t *a, const rd_kafka_message_t *b));
              +int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, int32_t partition);
              +int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, char *errstr, size_t errstr_size);
              +void rd_kafka_destroy(rd_kafka_t *rk);
              +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags);
              +const char *rd_kafka_name(const rd_kafka_t *rk);
              +rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk);
              +char *rd_kafka_memberid(const rd_kafka_t *rk);
              +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms);
              +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, rd_kafka_topic_conf_t *conf);
              +void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt);
              +const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt);
              +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt);
              +int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms);
              +void rd_kafka_yield(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_pause_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_resume_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high);
              +rd_kafka_resp_err_t rd_kafka_offsets_for_times(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets, int timeout_ms);
              +void *rd_kafka_mem_calloc(rd_kafka_t *rk, size_t num, size_t size);
              +void *rd_kafka_mem_malloc(rd_kafka_t *rk, size_t size);
              +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr);
              +rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk);
              +void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu);
              +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, const char *topic, int32_t partition);
              +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk);
              +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst);
              +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu);
              +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, int fd, const void *payload, size_t size);
              +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, void (*event_cb)(rd_kafka_t *rk, void *qev_opaque), void *qev_opaque);
              +void rd_kafka_queue_yield(rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
              +rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_seek_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms);
              +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, int timeout_ms);
              +ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +rd_kafka_resp_err_t rd_kafka_offsets_store(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets);
              +rd_kafka_resp_err_t rd_kafka_subscribe(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *topics);
              +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics);
              +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_incremental_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_error_t *rd_kafka_incremental_unassign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +const char *rd_kafka_rebalance_protocol(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_assignment(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **partitions);
              +int rd_kafka_assignment_lost(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_commit(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_message(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_queue(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_queue_t *rkqu, void (*cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_committed(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata(rd_kafka_t *rk);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new(const char *group_id);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new_with_genid(const char *group_id, int32_t generation_id, const char *member_id, const char *group_instance_id);
              +void rd_kafka_consumer_group_metadata_destroy(rd_kafka_consumer_group_metadata_t *);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_write(const rd_kafka_consumer_group_metadata_t *cgmd, void **bufferp, size_t *sizep);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_read(rd_kafka_consumer_group_metadata_t **cgmdp, const void *buffer, size_t size);
              +int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, void *payload, size_t len, const void *key, size_t keylen, void *msg_opaque);
              +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...);
              +rd_kafka_error_t *rd_kafka_produceva(rd_kafka_t *rk, const rd_kafka_vu_t *vus, size_t cnt);
              +int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, rd_kafka_message_t *rkmessages, int message_cnt);
              +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags);
              +typedef struct rd_kafka_metadata_broker {
              +  int32_t id;
              +  char *host;
              +  int port;
              +} rd_kafka_metadata_broker_t;
              +typedef struct rd_kafka_metadata_partition {
              +  int32_t id;
              +  rd_kafka_resp_err_t err;
              +  int32_t leader;
              +  int replica_cnt;
              +  int32_t *replicas;
              +  int isr_cnt;
              +  int32_t *isrs;
              +} rd_kafka_metadata_partition_t;
              +typedef struct rd_kafka_metadata_topic {
              +  char *topic;
              +  int partition_cnt;
              +  struct rd_kafka_metadata_partition *partitions;
              +  rd_kafka_resp_err_t err;
              +} rd_kafka_metadata_topic_t;
              +typedef struct rd_kafka_metadata {
              +  int broker_cnt;
              +  struct rd_kafka_metadata_broker *brokers;
              +  int topic_cnt;
              +  struct rd_kafka_metadata_topic *topics;
              +  int32_t orig_broker_id;
              +  char *orig_broker_name;
              +} rd_kafka_metadata_t;
              +rd_kafka_resp_err_t rd_kafka_metadata(rd_kafka_t *rk, int all_topics, rd_kafka_topic_t *only_rkt, const struct rd_kafka_metadata **metadatap, int timeout_ms);
              +void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata);
              +struct rd_kafka_group_member_info {
              +  char *member_id;
              +  char *client_id;
              +  char *client_host;
              +  void *member_metadata;
              +  int member_metadata_size;
              +  void *member_assignment;
              +  int member_assignment_size;
              +};
              +struct rd_kafka_group_info {
              +  struct rd_kafka_metadata_broker broker;
              +  char *group;
              +  rd_kafka_resp_err_t err;
              +  char *state;
              +  char *protocol_type;
              +  char *protocol;
              +  struct rd_kafka_group_member_info *members;
              +  int member_cnt;
              +};
              +struct rd_kafka_group_list {
              +  struct rd_kafka_group_info *groups;
              +  int group_cnt;
              +};
              +rd_kafka_resp_err_t rd_kafka_list_groups(rd_kafka_t *rk, const char *group, const struct rd_kafka_group_list **grplistp, int timeout_ms);
              +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist);
              +int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist);
              +void rd_kafka_set_logger(rd_kafka_t *rk, void (*func)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_set_log_level(rd_kafka_t *rk, int level);
              +void rd_kafka_log_print(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +int rd_kafka_outq_len(rd_kafka_t *rk);
              +void rd_kafka_dump(FILE *fp, rd_kafka_t *rk);
              +int rd_kafka_thread_cnt(void);
              +typedef enum rd_kafka_thread_type_t {
              +  RD_KAFKA_THREAD_MAIN,
              +  RD_KAFKA_THREAD_BACKGROUND,
              +  RD_KAFKA_THREAD_BROKER,
              +} rd_kafka_thread_type_t;
              +int rd_kafka_wait_destroyed(int timeout_ms);
              +int rd_kafka_unittest(void);
              +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk);
              +typedef int rd_kafka_event_type_t;
              +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev);
              +void rd_kafka_event_destroy(rd_kafka_event_t *rkev);
              +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev);
              +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, const rd_kafka_message_t **rkmessages, size_t size);
              +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev);
              +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev);
              +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev);
              +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev);
              +int rd_kafka_event_log(rd_kafka_event_t *rkev, const char **fac, const char **str, int *level);
              +int rd_kafka_event_debug_contexts(rd_kafka_event_t *rkev, char *dst, size_t dstsize);
              +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_list_t *rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_t *rd_kafka_event_topic_partition(rd_kafka_event_t *rkev);
              +typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteRecords_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteGroups_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteConsumerGroupOffsets_result_t;
              +const rd_kafka_CreateTopics_result_t *rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteTopics_result_t *rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreatePartitions_result_t *rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConfigs_result_t *rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConfigs_result_t *rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteRecords_result_t *rd_kafka_event_DeleteRecords_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteGroups_result_t *rd_kafka_event_DeleteGroups_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteConsumerGroupOffsets_result_t *rd_kafka_event_DeleteConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
              +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms);
              +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms);
              +typedef rd_kafka_resp_err_t (rd_kafka_plugin_f_conf_init_t)(rd_kafka_conf_t *conf, void **plug_opaquep, char *errstr, size_t errstr_size);
              +typedef rd_kafka_conf_res_t (rd_kafka_interceptor_f_on_conf_set_t)(rd_kafka_conf_t *conf, const char *name, const char *val, char *errstr, size_t errstr_size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_dup_t)(rd_kafka_conf_t *new_conf, const rd_kafka_conf_t *old_conf, size_t filter_cnt, const char **filter, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_destroy_t)(void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_new_t)(rd_kafka_t *rk, const rd_kafka_conf_t *conf, void *ic_opaque, char *errstr, size_t errstr_size);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_send_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_acknowledgement_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_consume_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_commit_t)(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_request_sent_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_response_received_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, int64_t rtt, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_start_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_exit_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_new_t *on_new, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_destroy_t *on_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_send_t *on_send, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_consume_t *on_consume, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_commit_t *on_commit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_response_received(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_response_received_t *on_response_received, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres);
              +const rd_kafka_error_t *rd_kafka_group_result_error(const rd_kafka_group_result_t *groupres);
              +const char *rd_kafka_group_result_name(const rd_kafka_group_result_t *groupres);
              +const rd_kafka_topic_partition_list_t *rd_kafka_group_result_partitions(const rd_kafka_group_result_t *groupres);
              +typedef enum rd_kafka_admin_op_t {
              +  RD_KAFKA_ADMIN_OP_ANY = 0,
              +  RD_KAFKA_ADMIN_OP_CREATETOPICS,
              +  RD_KAFKA_ADMIN_OP_DELETETOPICS,
              +  RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONFIGS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS,
              +  RD_KAFKA_ADMIN_OP_DELETERECORDS,
              +  RD_KAFKA_ADMIN_OP_DELETEGROUPS,
              +  RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS,
              +  RD_KAFKA_ADMIN_OP__CNT,
              +} rd_kafka_admin_op_t;
              +typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t;
              +rd_kafka_AdminOptions_t *rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
              +void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, int true_or_false, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, int32_t broker_id, char *errstr, size_t errstr_size);
              +void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, void *ev_opaque);
              +typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t;
              +rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, int num_partitions, int replication_factor, char *errstr, size_t errstr_size);
              +void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic);
              +void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, int32_t partition, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, const char *name, const char *value);
              +void rd_kafka_CreateTopics(rd_kafka_t *rk, rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(const rd_kafka_CreateTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
              +rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic);
              +void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic);
              +void rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt);
              +void rd_kafka_DeleteTopics(rd_kafka_t *rk, rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(const rd_kafka_DeleteTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t;
              +rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new(const char *topic, size_t new_total_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts);
              +void rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment(rd_kafka_NewPartitions_t *new_parts, int32_t new_partition_idx, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_CreatePartitions(rd_kafka_t *rk, rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreatePartitions_result_topics(const rd_kafka_CreatePartitions_result_t *result, size_t *cntp);
              +typedef enum rd_kafka_ConfigSource_t {
              +  RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
              +  RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4,
              +  RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5,
              +  RD_KAFKA_CONFIG_SOURCE__CNT,
              +} rd_kafka_ConfigSource_t;
              +const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource);
              +typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t;
              +const char *rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry);
              +const char *rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry);
              +rd_kafka_ConfigSource_t rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, size_t *cntp);
              +typedef enum rd_kafka_ResourceType_t {
              +  RD_KAFKA_RESOURCE_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_ANY = 1,
              +  RD_KAFKA_RESOURCE_TOPIC = 2,
              +  RD_KAFKA_RESOURCE_GROUP = 3,
              +  RD_KAFKA_RESOURCE_BROKER = 4,
              +  RD_KAFKA_RESOURCE__CNT,
              +} rd_kafka_ResourceType_t;
              +const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype);
              +typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
              +rd_kafka_ConfigResource_t *rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, const char *resname);
              +void rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config);
              +void rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, size_t config_cnt);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, const char *name, const char *value);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, size_t *cntp);
              +rd_kafka_ResourceType_t rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config);
              +void rd_kafka_AlterConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_AlterConfigs_result_resources(const rd_kafka_AlterConfigs_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_DescribeConfigs_result_resources(const rd_kafka_DescribeConfigs_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteRecords_s rd_kafka_DeleteRecords_t;
              +rd_kafka_DeleteRecords_t *rd_kafka_DeleteRecords_new(const rd_kafka_topic_partition_list_t *before_offsets);
              +void rd_kafka_DeleteRecords_destroy(rd_kafka_DeleteRecords_t *del_records);
              +void rd_kafka_DeleteRecords_destroy_array(rd_kafka_DeleteRecords_t **del_records, size_t del_record_cnt);
              +void rd_kafka_DeleteRecords(rd_kafka_t *rk, rd_kafka_DeleteRecords_t **del_records, size_t del_record_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_partition_list_t *rd_kafka_DeleteRecords_result_offsets(const rd_kafka_DeleteRecords_result_t *result);
              +typedef struct rd_kafka_DeleteGroup_s rd_kafka_DeleteGroup_t;
              +rd_kafka_DeleteGroup_t *rd_kafka_DeleteGroup_new(const char *group);
              +void rd_kafka_DeleteGroup_destroy(rd_kafka_DeleteGroup_t *del_group);
              +void rd_kafka_DeleteGroup_destroy_array(rd_kafka_DeleteGroup_t **del_groups, size_t del_group_cnt);
              +void rd_kafka_DeleteGroups(rd_kafka_t *rk, rd_kafka_DeleteGroup_t **del_groups, size_t del_group_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_DeleteGroups_result_groups(const rd_kafka_DeleteGroups_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteConsumerGroupOffsets_s rd_kafka_DeleteConsumerGroupOffsets_t;
              +rd_kafka_DeleteConsumerGroupOffsets_t *rd_kafka_DeleteConsumerGroupOffsets_new(const char *group, const rd_kafka_topic_partition_list_t *partitions);
              +void rd_kafka_DeleteConsumerGroupOffsets_destroy(rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets);
              +void rd_kafka_DeleteConsumerGroupOffsets_destroy_array(rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, size_t del_grpoffset_cnt);
              +void rd_kafka_DeleteConsumerGroupOffsets(rd_kafka_t *rk, rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, size_t del_grpoffsets_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_DeleteConsumerGroupOffsets_result_groups(const rd_kafka_DeleteConsumerGroupOffsets_result_t *result, size_t *cntp);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token(rd_kafka_t *rk, const char *token_value, int64_t md_lifetime_ms, const char *md_principal_name, const char **extensions, size_t extension_size, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk, const char *errstr);
              +rd_kafka_error_t *rd_kafka_init_transactions(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_send_offsets_to_transaction(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, const rd_kafka_consumer_group_metadata_t *cgmetadata, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_commit_transaction(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_abort_transaction(rd_kafka_t *rk, int timeout_ms);
              +typedef struct rd_kafka_mock_cluster_s rd_kafka_mock_cluster_t;
              +rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new(rd_kafka_t *rk, int broker_cnt);
              +void rd_kafka_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_t *rd_kafka_mock_cluster_handle(const rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_mock_cluster_t *rd_kafka_handle_mock_cluster(const rd_kafka_t *rk);
              +const char *rd_kafka_mock_cluster_bootstraps(const rd_kafka_mock_cluster_t *mcluster);
              +void rd_kafka_mock_clear_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey);
              +void rd_kafka_mock_push_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, ...);
              +void rd_kafka_mock_push_request_errors_array(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, const rd_kafka_resp_err_t *errors);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_push_request_error_rtts(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int16_t ApiKey, size_t cnt, ...);
              +void rd_kafka_mock_topic_set_error(rd_kafka_mock_cluster_t *mcluster, const char *topic, rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_mock_topic_create(rd_kafka_mock_cluster_t *mcluster, const char *topic, int partition_cnt, int replication_factor);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_leader(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int64_t lo, int64_t hi);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_down(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_up(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rtt(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int rtt_ms);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rack(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, const char *rack);
              +rd_kafka_resp_err_t rd_kafka_mock_coordinator_set(rd_kafka_mock_cluster_t *mcluster, const char *key_type, const char *key, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_set_apiversion(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, int16_t MinVersion, int16_t MaxVersion);
              +'
              +rdkafka.h, rdkafka_mock.h
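The declaration string above (extracted from rdkafka.h and rdkafka_mock.h) is the kind of input PHP's FFI layer accepts. A minimal, illustrative sketch of consuming the constant directly, assuming PHP's FFI extension is enabled and librdkafka is installed as librdkafka.so.1 (the library name is an assumption, not the binding's own bootstrap code):

<?php
// Illustrative only: hand the cdef string to PHP's FFI layer and call two
// functions declared in it. The shared-library name is an assumption for a
// typical Linux install; adjust it for your platform.
$kafka = \FFI::cdef(RD_KAFKA_CDEF, 'librdkafka.so.1');

printf(
    "runtime librdkafka: %s (0x%08x)\n",
    \FFI::string($kafka->rd_kafka_version_str()),
    $kafka->rd_kafka_version()
);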


              RD_KAFKA_VERSION

              public RD_KAFKA_VERSION = 17367295

              librdkafka version


              Interpreted as hex MM.mm.rr.xx:

• MM = Major
• mm = minor
• rr = revision
• xx = pre-release id (0xff is the final release)

              E.g.: 0x000801ff = 0.8.1

              Remarks
This value should only be used at compile time; for runtime version checks, use rd_kafka_version().
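The hex layout can be unpacked with a few bit shifts; a minimal sketch in plain PHP, assuming only the RD_KAFKA_VERSION constant documented above:

<?php
// Decode the compile-time RD_KAFKA_VERSION constant (e.g. 0x010900ff)
// into its MM.mm.rr.xx components.
$version = RD_KAFKA_VERSION;

$major      = ($version >> 24) & 0xff;
$minor      = ($version >> 16) & 0xff;
$revision   = ($version >> 8) & 0xff;
$prerelease = $version & 0xff; // 0xff marks a final release

printf(
    "built against librdkafka %d.%d.%d%s\n",
    $major,
    $minor,
    $revision,
    $prerelease === 0xff ? '' : sprintf(' (pre-release 0x%02x)', $prerelease)
);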

              RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE

              public RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76

              Unsupported compression type


              RD_KAFKA_RESP_ERR_END_ALL


              public RD_KAFKA_RESP_ERR_END_ALL = 98

enum rd_kafka_resp_err_t


              RD_KAFKA_ADMIN_OP__CNT

              public RD_KAFKA_ADMIN_OP__CNT = 12

              Number of ops defined


              RD_KAFKA_CDEF


              public RD_KAFKA_CDEF = 'typedef long int ssize_t;
              +typedef struct _IO_FILE FILE;
              +typedef long int mode_t;
              +typedef signed int int16_t;
              +typedef unsigned int uint16_t;
              +typedef signed int int32_t;
              +typedef signed long int int64_t;
              +int rd_kafka_version(void);
              +const char *rd_kafka_version_str(void);
              +typedef enum rd_kafka_type_t {
              +  RD_KAFKA_PRODUCER,
              +  RD_KAFKA_CONSUMER,
              +} rd_kafka_type_t;
              +typedef enum rd_kafka_timestamp_type_t {
              +  RD_KAFKA_TIMESTAMP_NOT_AVAILABLE,
              +  RD_KAFKA_TIMESTAMP_CREATE_TIME,
              +  RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME,
              +} rd_kafka_timestamp_type_t;
              +const char *rd_kafka_get_debug_contexts(void);
              +typedef struct rd_kafka_s rd_kafka_t;
              +typedef struct rd_kafka_topic_s rd_kafka_topic_t;
              +typedef struct rd_kafka_conf_s rd_kafka_conf_t;
              +typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t;
              +typedef struct rd_kafka_queue_s rd_kafka_queue_t;
              +typedef struct rd_kafka_op_s rd_kafka_event_t;
              +typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t;
              +typedef struct rd_kafka_consumer_group_metadata_s rd_kafka_consumer_group_metadata_t;
              +typedef struct rd_kafka_error_s {
              +  unsigned int code;
              +  char *errstr;
              +  unsigned char fatal;
              +  unsigned char retriable;
              +  unsigned char txn_requires_abort;
              +} rd_kafka_error_t;
              +typedef struct rd_kafka_headers_s rd_kafka_headers_t;
              +typedef struct rd_kafka_group_result_s rd_kafka_group_result_t;
              +typedef struct rd_kafka_acl_result_s rd_kafka_acl_result_t;
              +typedef enum {
              +  RD_KAFKA_RESP_ERR__BEGIN = (- 200),
              +  RD_KAFKA_RESP_ERR__BAD_MSG = (- 199),
              +  RD_KAFKA_RESP_ERR__BAD_COMPRESSION = (- 198),
              +  RD_KAFKA_RESP_ERR__DESTROY = (- 197),
              +  RD_KAFKA_RESP_ERR__FAIL = (- 196),
              +  RD_KAFKA_RESP_ERR__TRANSPORT = (- 195),
              +  RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = (- 194),
              +  RD_KAFKA_RESP_ERR__RESOLVE = (- 193),
              +  RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = (- 192),
              +  RD_KAFKA_RESP_ERR__PARTITION_EOF = (- 191),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = (- 190),
              +  RD_KAFKA_RESP_ERR__FS = (- 189),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = (- 188),
              +  RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = (- 187),
              +  RD_KAFKA_RESP_ERR__INVALID_ARG = (- 186),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT = (- 185),
              +  RD_KAFKA_RESP_ERR__QUEUE_FULL = (- 184),
              +  RD_KAFKA_RESP_ERR__ISR_INSUFF = (- 183),
              +  RD_KAFKA_RESP_ERR__NODE_UPDATE = (- 182),
              +  RD_KAFKA_RESP_ERR__SSL = (- 181),
              +  RD_KAFKA_RESP_ERR__WAIT_COORD = (- 180),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = (- 179),
              +  RD_KAFKA_RESP_ERR__IN_PROGRESS = (- 178),
              +  RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = (- 177),
              +  RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = (- 176),
              +  RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = (- 175),
              +  RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = (- 174),
              +  RD_KAFKA_RESP_ERR__CONFLICT = (- 173),
              +  RD_KAFKA_RESP_ERR__STATE = (- 172),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = (- 171),
              +  RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = (- 170),
              +  RD_KAFKA_RESP_ERR__AUTHENTICATION = (- 169),
              +  RD_KAFKA_RESP_ERR__NO_OFFSET = (- 168),
              +  RD_KAFKA_RESP_ERR__OUTDATED = (- 167),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = (- 166),
              +  RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = (- 165),
              +  RD_KAFKA_RESP_ERR__WAIT_CACHE = (- 164),
              +  RD_KAFKA_RESP_ERR__INTR = (- 163),
              +  RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = (- 162),
              +  RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = (- 161),
              +  RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = (- 160),
              +  RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = (- 159),
              +  RD_KAFKA_RESP_ERR__PARTIAL = (- 158),
              +  RD_KAFKA_RESP_ERR__READ_ONLY = (- 157),
              +  RD_KAFKA_RESP_ERR__NOENT = (- 156),
              +  RD_KAFKA_RESP_ERR__UNDERFLOW = (- 155),
              +  RD_KAFKA_RESP_ERR__INVALID_TYPE = (- 154),
              +  RD_KAFKA_RESP_ERR__RETRY = (- 153),
              +  RD_KAFKA_RESP_ERR__PURGE_QUEUE = (- 152),
              +  RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = (- 151),
              +  RD_KAFKA_RESP_ERR__FATAL = (- 150),
              +  RD_KAFKA_RESP_ERR__INCONSISTENT = (- 149),
              +  RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = (- 148),
              +  RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = (- 147),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_BROKER = (- 146),
              +  RD_KAFKA_RESP_ERR__NOT_CONFIGURED = (- 145),
              +  RD_KAFKA_RESP_ERR__FENCED = (- 144),
              +  RD_KAFKA_RESP_ERR__APPLICATION = (- 143),
              +  RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST = (- 142),
              +  RD_KAFKA_RESP_ERR__NOOP = (- 141),
              +  RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET = (- 140),
              +  RD_KAFKA_RESP_ERR__END = (- 100),
              +  RD_KAFKA_RESP_ERR_UNKNOWN = (- 1),
              +  RD_KAFKA_RESP_ERR_NO_ERROR = 0,
              +  RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG = 2,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4,
              +  RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5,
              +  RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6,
              +  RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7,
              +  RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8,
              +  RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9,
              +  RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10,
              +  RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11,
              +  RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12,
              +  RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS = 14,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE = 15,
              +  RD_KAFKA_RESP_ERR_NOT_COORDINATOR = 16,
              +  RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17,
              +  RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
              +  RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25,
              +  RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26,
              +  RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27,
              +  RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
              +  RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29,
              +  RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30,
              +  RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
              +  RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35,
              +  RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36,
              +  RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39,
              +  RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40,
              +  RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
              +  RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44,
              +  RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47,
              +  RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49,
              +  RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50,
              +  RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51,
              +  RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52,
              +  RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
              +  RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54,
              +  RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55,
              +  RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56,
              +  RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57,
              +  RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59,
              +  RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66,
              +  RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67,
              +  RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68,
              +  RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69,
              +  RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70,
              +  RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71,
              +  RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72,
              +  RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73,
              +  RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76,
              +  RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77,
              +  RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78,
              +  RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79,
              +  RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80,
              +  RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81,
              +  RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID = 82,
              +  RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE = 83,
              +  RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED = 84,
              +  RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS = 85,
              +  RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC = 86,
              +  RD_KAFKA_RESP_ERR_INVALID_RECORD = 87,
              +  RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT = 88,
              +  RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED = 89,
              +  RD_KAFKA_RESP_ERR_PRODUCER_FENCED = 90,
              +  RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND = 91,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE = 92,
              +  RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL = 93,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET = 94,
              +  RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION = 95,
              +  RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED = 96,
              +  RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE = 97,
              +  RD_KAFKA_RESP_ERR_END_ALL,
              +} rd_kafka_resp_err_t;
              +struct rd_kafka_err_desc {
              +  rd_kafka_resp_err_t code;
              +  char *name;
              +  char *desc;
              +};
              +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, size_t *cntp);
              +const char *rd_kafka_err2str(rd_kafka_resp_err_t err);
              +const char *rd_kafka_err2name(rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_last_error(void);
              +rd_kafka_resp_err_t rd_kafka_errno2err(int errnox);
              +int rd_kafka_errno(void);
              +rd_kafka_resp_err_t rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason);
              +rd_kafka_resp_err_t rd_kafka_error_code(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_name(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_string(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_fatal(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_retriable(const rd_kafka_error_t *error);
              +int rd_kafka_error_txn_requires_abort(const rd_kafka_error_t *error);
              +void rd_kafka_error_destroy(rd_kafka_error_t *error);
              +rd_kafka_error_t *rd_kafka_error_new(rd_kafka_resp_err_t code, const char *fmt, ...);
              +typedef struct rd_kafka_topic_partition_s {
              +  char *topic;
              +  int32_t partition;
              +  int64_t offset;
              +  void *metadata;
              +  size_t metadata_size;
              +  void *opaque;
              +  rd_kafka_resp_err_t err;
              +  void *_private;
              +} rd_kafka_topic_partition_t;
              +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar);
              +typedef struct rd_kafka_topic_partition_list_s {
              +  int cnt;
              +  int size;
              +  rd_kafka_topic_partition_t *elems;
              +} rd_kafka_topic_partition_list_t;
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size);
              +void rd_kafka_topic_partition_list_destroy(rd_kafka_topic_partition_list_t *rkparlist);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_add_range(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t start, int32_t stop);
              +int rd_kafka_topic_partition_list_del(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +int rd_kafka_topic_partition_list_del_by_idx(rd_kafka_topic_partition_list_t *rktparlist, int idx);
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src);
              +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition, int64_t offset);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(const rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_sort(rd_kafka_topic_partition_list_t *rktparlist, int (*cmp)(const void *a, const void *b, void *cmp_opaque), void *cmp_opaque);
              +typedef enum rd_kafka_vtype_t {
              +  RD_KAFKA_VTYPE_END,
              +  RD_KAFKA_VTYPE_TOPIC,
              +  RD_KAFKA_VTYPE_RKT,
              +  RD_KAFKA_VTYPE_PARTITION,
              +  RD_KAFKA_VTYPE_VALUE,
              +  RD_KAFKA_VTYPE_KEY,
              +  RD_KAFKA_VTYPE_OPAQUE,
              +  RD_KAFKA_VTYPE_MSGFLAGS,
              +  RD_KAFKA_VTYPE_TIMESTAMP,
              +  RD_KAFKA_VTYPE_HEADER,
              +  RD_KAFKA_VTYPE_HEADERS,
              +} rd_kafka_vtype_t;
              +typedef struct rd_kafka_vu_s {
              +  rd_kafka_vtype_t vtype;
              +  union {
              +    char *cstr;
              +    rd_kafka_topic_t *rkt;
              +    int i;
              +    int32_t i32;
              +    int64_t i64;
              +    struct {
              +      void *ptr;
              +      size_t size;
              +    } mem;
              +    struct {
              +      char *name;
              +      void *val;
              +      ssize_t size;
              +    } header;
              +    rd_kafka_headers_t *headers;
              +    void *ptr;
              +    char _pad[64];
              +  } u;
              +} rd_kafka_vu_t;
              +rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count);
              +void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs);
              +rd_kafka_headers_t *rd_kafka_headers_copy(const rd_kafka_headers_t *src);
              +rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, const char *name, ssize_t name_size, const void *value, ssize_t value_size);
              +rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, const char *name);
              +rd_kafka_resp_err_t rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get(const rd_kafka_headers_t *hdrs, size_t idx, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, size_t idx, const char **namep, const void **valuep, size_t *sizep);
              +typedef struct rd_kafka_message_s {
              +  rd_kafka_resp_err_t err;
              +  rd_kafka_topic_t *rkt;
              +  int32_t partition;
              +  void *payload;
              +  size_t len;
              +  void *key;
              +  size_t key_len;
              +  int64_t offset;
              +  void *_private;
              +} rd_kafka_message_t;
              +void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);
              +const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage);
              +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, rd_kafka_timestamp_type_t *tstype);
              +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage);
              +int32_t rd_kafka_message_broker_id(const rd_kafka_message_t *rkmessage);
              +rd_kafka_resp_err_t rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +rd_kafka_resp_err_t rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t *hdrs);
              +size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs);
              +typedef enum {
              +  RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0,
              +  RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1,
              +  RD_KAFKA_MSG_STATUS_PERSISTED = 2,
              +} rd_kafka_msg_status_t;
              +rd_kafka_msg_status_t rd_kafka_message_status(const rd_kafka_message_t *rkmessage);
              +typedef enum {
              +  RD_KAFKA_CONF_UNKNOWN = (- 2),
              +  RD_KAFKA_CONF_INVALID = (- 1),
              +  RD_KAFKA_CONF_OK = 0,
              +} rd_kafka_conf_res_t;
              +rd_kafka_conf_t *rd_kafka_conf_new(void);
              +void rd_kafka_conf_destroy(rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, size_t filter_cnt, const char **filter);
              +const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk);
              +rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events);
              +void rd_kafka_conf_set_background_event_cb(rd_kafka_conf_t *conf, void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque));
              +void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, void (*dr_cb)(rd_kafka_t *rk, void *payload, size_t len, rd_kafka_resp_err_t err, void *opaque, void *msg_opaque));
              +void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, void (*dr_msg_cb)(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_consume_cb(rd_kafka_conf_t *conf, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_rebalance_cb(rd_kafka_conf_t *conf, void (*rebalance_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque));
              +void rd_kafka_conf_set_offset_commit_cb(rd_kafka_conf_t *conf, void (*offset_commit_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque));
              +void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, void (*error_cb)(rd_kafka_t *rk, int err, const char *reason, void *opaque));
              +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, void (*throttle_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque));
              +void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, void (*log_cb)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque));
              +void rd_kafka_conf_set_oauthbearer_token_refresh_cb(rd_kafka_conf_t *conf, void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, const char *oauthbearer_config, void *opaque));
              +void rd_kafka_conf_enable_sasl_queue(rd_kafka_conf_t *conf, int enable);
              +void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, int (*socket_cb)(int domain, int type, int protocol, void *opaque));
              +void rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, int (*connect_cb)(int sockfd, const struct sockaddr *addr, int addrlen, const char *id, void *opaque));
              +void rd_kafka_conf_set_closesocket_cb(rd_kafka_conf_t *conf, int (*closesocket_cb)(int sockfd, void *opaque));
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb(rd_kafka_conf_t *conf, int (*ssl_cert_verify_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int *x509_error, int depth, const char *buf, size_t size, char *errstr, size_t errstr_size, void *opaque));
              +typedef enum rd_kafka_cert_type_t {
              +  RD_KAFKA_CERT_PUBLIC_KEY,
              +  RD_KAFKA_CERT_PRIVATE_KEY,
              +  RD_KAFKA_CERT_CA,
              +  RD_KAFKA_CERT__CNT,
              +} rd_kafka_cert_type_t;
              +typedef enum rd_kafka_cert_enc_t {
              +  RD_KAFKA_CERT_ENC_PKCS12,
              +  RD_KAFKA_CERT_ENC_DER,
              +  RD_KAFKA_CERT_ENC_PEM,
              +  RD_KAFKA_CERT_ENC__CNT,
              +} rd_kafka_cert_enc_t;
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, rd_kafka_cert_type_t cert_type, rd_kafka_cert_enc_t cert_enc, const void *buffer, size_t size, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_engine_callback_data(rd_kafka_conf_t *conf, void *callback_data);
              +void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque);
              +void *rd_kafka_opaque(const rd_kafka_t *rk);
              +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, rd_kafka_topic_conf_t *tconf);
              +rd_kafka_topic_conf_t *rd_kafka_conf_get_default_topic_conf(rd_kafka_conf_t *conf);
              +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp);
              +const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, size_t *cntp);
              +void rd_kafka_conf_dump_free(const char **arr, size_t cnt);
              +void rd_kafka_conf_properties_show(FILE *fp);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf);
              +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk);
              +void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *rkt_opaque);
              +void rd_kafka_topic_conf_set_partitioner_cb(rd_kafka_topic_conf_t *topic_conf, int32_t (*partitioner)(const rd_kafka_topic_t *rkt, const void *keydata, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque));
              +void rd_kafka_topic_conf_set_msg_order_cmp(rd_kafka_topic_conf_t *topic_conf, int (*msg_order_cmp)(const rd_kafka_message_t *a, const rd_kafka_message_t *b));
              +int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, int32_t partition);
              +int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, char *errstr, size_t errstr_size);
              +void rd_kafka_destroy(rd_kafka_t *rk);
              +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags);
              +const char *rd_kafka_name(const rd_kafka_t *rk);
              +rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk);
              +char *rd_kafka_memberid(const rd_kafka_t *rk);
              +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms);
              +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, rd_kafka_topic_conf_t *conf);
              +void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt);
              +const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt);
              +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt);
              +int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms);
              +void rd_kafka_yield(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_pause_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_resume_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high);
              +rd_kafka_resp_err_t rd_kafka_offsets_for_times(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets, int timeout_ms);
              +void *rd_kafka_mem_calloc(rd_kafka_t *rk, size_t num, size_t size);
              +void *rd_kafka_mem_malloc(rd_kafka_t *rk, size_t size);
              +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr);
              +rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk);
              +void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu);
              +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_sasl(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_sasl_background_callbacks_enable(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, const char *topic, int32_t partition);
              +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk);
              +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst);
              +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu);
              +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, int fd, const void *payload, size_t size);
              +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, void (*event_cb)(rd_kafka_t *rk, void *qev_opaque), void *qev_opaque);
              +void rd_kafka_queue_yield(rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
              +rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_seek_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms);
              +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, int timeout_ms);
              +ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +rd_kafka_resp_err_t rd_kafka_offsets_store(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets);
              +rd_kafka_resp_err_t rd_kafka_subscribe(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *topics);
              +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics);
              +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_consumer_close_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consumer_closed(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_incremental_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_error_t *rd_kafka_incremental_unassign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +const char *rd_kafka_rebalance_protocol(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_assignment(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **partitions);
              +int rd_kafka_assignment_lost(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_commit(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_message(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_queue(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_queue_t *rkqu, void (*cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_committed(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata(rd_kafka_t *rk);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new(const char *group_id);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new_with_genid(const char *group_id, int32_t generation_id, const char *member_id, const char *group_instance_id);
              +void rd_kafka_consumer_group_metadata_destroy(rd_kafka_consumer_group_metadata_t *);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_write(const rd_kafka_consumer_group_metadata_t *cgmd, void **bufferp, size_t *sizep);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_read(rd_kafka_consumer_group_metadata_t **cgmdp, const void *buffer, size_t size);
              +int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, void *payload, size_t len, const void *key, size_t keylen, void *msg_opaque);
              +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...);
              +rd_kafka_error_t *rd_kafka_produceva(rd_kafka_t *rk, const rd_kafka_vu_t *vus, size_t cnt);
              +int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, rd_kafka_message_t *rkmessages, int message_cnt);
              +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags);
              +typedef struct rd_kafka_metadata_broker {
              +  int32_t id;
              +  char *host;
              +  int port;
              +} rd_kafka_metadata_broker_t;
              +typedef struct rd_kafka_metadata_partition {
              +  int32_t id;
              +  rd_kafka_resp_err_t err;
              +  int32_t leader;
              +  int replica_cnt;
              +  int32_t *replicas;
              +  int isr_cnt;
              +  int32_t *isrs;
              +} rd_kafka_metadata_partition_t;
              +typedef struct rd_kafka_metadata_topic {
              +  char *topic;
              +  int partition_cnt;
              +  struct rd_kafka_metadata_partition *partitions;
              +  rd_kafka_resp_err_t err;
              +} rd_kafka_metadata_topic_t;
              +typedef struct rd_kafka_metadata {
              +  int broker_cnt;
              +  struct rd_kafka_metadata_broker *brokers;
              +  int topic_cnt;
              +  struct rd_kafka_metadata_topic *topics;
              +  int32_t orig_broker_id;
              +  char *orig_broker_name;
              +} rd_kafka_metadata_t;
              +rd_kafka_resp_err_t rd_kafka_metadata(rd_kafka_t *rk, int all_topics, rd_kafka_topic_t *only_rkt, const struct rd_kafka_metadata **metadatap, int timeout_ms);
              +void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata);
              +struct rd_kafka_group_member_info {
              +  char *member_id;
              +  char *client_id;
              +  char *client_host;
              +  void *member_metadata;
              +  int member_metadata_size;
              +  void *member_assignment;
              +  int member_assignment_size;
              +};
              +struct rd_kafka_group_info {
              +  struct rd_kafka_metadata_broker broker;
              +  char *group;
              +  rd_kafka_resp_err_t err;
              +  char *state;
              +  char *protocol_type;
              +  char *protocol;
              +  struct rd_kafka_group_member_info *members;
              +  int member_cnt;
              +};
              +struct rd_kafka_group_list {
              +  struct rd_kafka_group_info *groups;
              +  int group_cnt;
              +};
              +rd_kafka_resp_err_t rd_kafka_list_groups(rd_kafka_t *rk, const char *group, const struct rd_kafka_group_list **grplistp, int timeout_ms);
              +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist);
              +int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist);
              +void rd_kafka_set_logger(rd_kafka_t *rk, void (*func)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_set_log_level(rd_kafka_t *rk, int level);
              +void rd_kafka_log_print(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +int rd_kafka_outq_len(rd_kafka_t *rk);
              +void rd_kafka_dump(FILE *fp, rd_kafka_t *rk);
              +int rd_kafka_thread_cnt(void);
              +typedef enum rd_kafka_thread_type_t {
              +  RD_KAFKA_THREAD_MAIN,
              +  RD_KAFKA_THREAD_BACKGROUND,
              +  RD_KAFKA_THREAD_BROKER,
              +} rd_kafka_thread_type_t;
              +int rd_kafka_wait_destroyed(int timeout_ms);
              +int rd_kafka_unittest(void);
              +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk);
              +typedef int rd_kafka_event_type_t;
              +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev);
              +void rd_kafka_event_destroy(rd_kafka_event_t *rkev);
              +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev);
              +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, const rd_kafka_message_t **rkmessages, size_t size);
              +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev);
              +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev);
              +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev);
              +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev);
              +int rd_kafka_event_log(rd_kafka_event_t *rkev, const char **fac, const char **str, int *level);
              +int rd_kafka_event_debug_contexts(rd_kafka_event_t *rkev, char *dst, size_t dstsize);
              +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_list_t *rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_t *rd_kafka_event_topic_partition(rd_kafka_event_t *rkev);
              +typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreateAcls_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeAcls_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteAcls_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteRecords_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteGroups_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteConsumerGroupOffsets_result_t;
              +const rd_kafka_CreateTopics_result_t *rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteTopics_result_t *rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreatePartitions_result_t *rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConfigs_result_t *rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConfigs_result_t *rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteRecords_result_t *rd_kafka_event_DeleteRecords_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteGroups_result_t *rd_kafka_event_DeleteGroups_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteConsumerGroupOffsets_result_t *rd_kafka_event_DeleteConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreateAcls_result_t *rd_kafka_event_CreateAcls_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeAcls_result_t *rd_kafka_event_DescribeAcls_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteAcls_result_t *rd_kafka_event_DeleteAcls_result(rd_kafka_event_t *rkev);
              +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms);
              +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms);
              +typedef rd_kafka_resp_err_t (rd_kafka_plugin_f_conf_init_t)(rd_kafka_conf_t *conf, void **plug_opaquep, char *errstr, size_t errstr_size);
              +typedef rd_kafka_conf_res_t (rd_kafka_interceptor_f_on_conf_set_t)(rd_kafka_conf_t *conf, const char *name, const char *val, char *errstr, size_t errstr_size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_dup_t)(rd_kafka_conf_t *new_conf, const rd_kafka_conf_t *old_conf, size_t filter_cnt, const char **filter, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_destroy_t)(void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_new_t)(rd_kafka_t *rk, const rd_kafka_conf_t *conf, void *ic_opaque, char *errstr, size_t errstr_size);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_send_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_acknowledgement_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_consume_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_commit_t)(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_request_sent_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_response_received_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, int64_t rtt, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_start_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_exit_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_new_t *on_new, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_destroy_t *on_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_send_t *on_send, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_consume_t *on_consume, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_commit_t *on_commit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_response_received(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_response_received_t *on_response_received, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres);
              +const rd_kafka_error_t *rd_kafka_group_result_error(const rd_kafka_group_result_t *groupres);
              +const char *rd_kafka_group_result_name(const rd_kafka_group_result_t *groupres);
              +const rd_kafka_topic_partition_list_t *rd_kafka_group_result_partitions(const rd_kafka_group_result_t *groupres);
              +typedef enum rd_kafka_admin_op_t {
              +  RD_KAFKA_ADMIN_OP_ANY = 0,
              +  RD_KAFKA_ADMIN_OP_CREATETOPICS,
              +  RD_KAFKA_ADMIN_OP_DELETETOPICS,
              +  RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONFIGS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS,
              +  RD_KAFKA_ADMIN_OP_DELETERECORDS,
              +  RD_KAFKA_ADMIN_OP_DELETEGROUPS,
              +  RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS,
              +  RD_KAFKA_ADMIN_OP_CREATEACLS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBEACLS,
              +  RD_KAFKA_ADMIN_OP_DELETEACLS,
              +  RD_KAFKA_ADMIN_OP__CNT,
              +} rd_kafka_admin_op_t;
              +typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t;
              +rd_kafka_AdminOptions_t *rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
              +void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, int true_or_false, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, int32_t broker_id, char *errstr, size_t errstr_size);
              +void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, void *ev_opaque);
              +typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t;
              +rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, int num_partitions, int replication_factor, char *errstr, size_t errstr_size);
              +void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic);
              +void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, int32_t partition, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, const char *name, const char *value);
              +void rd_kafka_CreateTopics(rd_kafka_t *rk, rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(const rd_kafka_CreateTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
              +rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic);
              +void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic);
              +void rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt);
              +void rd_kafka_DeleteTopics(rd_kafka_t *rk, rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(const rd_kafka_DeleteTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t;
              +rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new(const char *topic, size_t new_total_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts);
              +void rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment(rd_kafka_NewPartitions_t *new_parts, int32_t new_partition_idx, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_CreatePartitions(rd_kafka_t *rk, rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreatePartitions_result_topics(const rd_kafka_CreatePartitions_result_t *result, size_t *cntp);
              +typedef enum rd_kafka_ConfigSource_t {
              +  RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
              +  RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4,
              +  RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5,
              +  RD_KAFKA_CONFIG_SOURCE__CNT,
              +} rd_kafka_ConfigSource_t;
              +const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource);
              +typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t;
              +const char *rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry);
              +const char *rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry);
              +rd_kafka_ConfigSource_t rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, size_t *cntp);
              +typedef enum rd_kafka_ResourceType_t {
              +  RD_KAFKA_RESOURCE_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_ANY = 1,
              +  RD_KAFKA_RESOURCE_TOPIC = 2,
              +  RD_KAFKA_RESOURCE_GROUP = 3,
              +  RD_KAFKA_RESOURCE_BROKER = 4,
              +  RD_KAFKA_RESOURCE__CNT,
              +} rd_kafka_ResourceType_t;
              +typedef enum rd_kafka_ResourcePatternType_t {
              +  RD_KAFKA_RESOURCE_PATTERN_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_PATTERN_ANY = 1,
              +  RD_KAFKA_RESOURCE_PATTERN_MATCH = 2,
              +  RD_KAFKA_RESOURCE_PATTERN_LITERAL = 3,
              +  RD_KAFKA_RESOURCE_PATTERN_PREFIXED = 4,
              +  RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT,
              +} rd_kafka_ResourcePatternType_t;
              +const char *rd_kafka_ResourcePatternType_name(rd_kafka_ResourcePatternType_t resource_pattern_type);
              +const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype);
              +typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
              +rd_kafka_ConfigResource_t *rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, const char *resname);
              +void rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config);
              +void rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, size_t config_cnt);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, const char *name, const char *value);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, size_t *cntp);
              +rd_kafka_ResourceType_t rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config);
              +void rd_kafka_AlterConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_AlterConfigs_result_resources(const rd_kafka_AlterConfigs_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_DescribeConfigs_result_resources(const rd_kafka_DescribeConfigs_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteRecords_s rd_kafka_DeleteRecords_t;
              +rd_kafka_DeleteRecords_t *rd_kafka_DeleteRecords_new(const rd_kafka_topic_partition_list_t *before_offsets);
              +void rd_kafka_DeleteRecords_destroy(rd_kafka_DeleteRecords_t *del_records);
              +void rd_kafka_DeleteRecords_destroy_array(rd_kafka_DeleteRecords_t **del_records, size_t del_record_cnt);
              +void rd_kafka_DeleteRecords(rd_kafka_t *rk, rd_kafka_DeleteRecords_t **del_records, size_t del_record_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_partition_list_t *rd_kafka_DeleteRecords_result_offsets(const rd_kafka_DeleteRecords_result_t *result);
              +typedef struct rd_kafka_DeleteGroup_s rd_kafka_DeleteGroup_t;
              +rd_kafka_DeleteGroup_t *rd_kafka_DeleteGroup_new(const char *group);
              +void rd_kafka_DeleteGroup_destroy(rd_kafka_DeleteGroup_t *del_group);
              +void rd_kafka_DeleteGroup_destroy_array(rd_kafka_DeleteGroup_t **del_groups, size_t del_group_cnt);
              +void rd_kafka_DeleteGroups(rd_kafka_t *rk, rd_kafka_DeleteGroup_t **del_groups, size_t del_group_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_DeleteGroups_result_groups(const rd_kafka_DeleteGroups_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteConsumerGroupOffsets_s rd_kafka_DeleteConsumerGroupOffsets_t;
              +rd_kafka_DeleteConsumerGroupOffsets_t *rd_kafka_DeleteConsumerGroupOffsets_new(const char *group, const rd_kafka_topic_partition_list_t *partitions);
              +void rd_kafka_DeleteConsumerGroupOffsets_destroy(rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets);
              +void rd_kafka_DeleteConsumerGroupOffsets_destroy_array(rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, size_t del_grpoffset_cnt);
              +void rd_kafka_DeleteConsumerGroupOffsets(rd_kafka_t *rk, rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, size_t del_grpoffsets_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_DeleteConsumerGroupOffsets_result_groups(const rd_kafka_DeleteConsumerGroupOffsets_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_AclBinding_s rd_kafka_AclBinding_t;
              +typedef rd_kafka_AclBinding_t rd_kafka_AclBindingFilter_t;
              +const rd_kafka_error_t *rd_kafka_acl_result_error(const rd_kafka_acl_result_t *aclres);
              +typedef enum rd_kafka_AclOperation_t {
              +  RD_KAFKA_ACL_OPERATION_UNKNOWN = 0,
              +  RD_KAFKA_ACL_OPERATION_ANY = 1,
              +  RD_KAFKA_ACL_OPERATION_ALL = 2,
              +  RD_KAFKA_ACL_OPERATION_READ = 3,
              +  RD_KAFKA_ACL_OPERATION_WRITE = 4,
              +  RD_KAFKA_ACL_OPERATION_CREATE = 5,
              +  RD_KAFKA_ACL_OPERATION_DELETE = 6,
              +  RD_KAFKA_ACL_OPERATION_ALTER = 7,
              +  RD_KAFKA_ACL_OPERATION_DESCRIBE = 8,
              +  RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION = 9,
              +  RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS = 10,
              +  RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS = 11,
              +  RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE = 12,
              +  RD_KAFKA_ACL_OPERATION__CNT,
              +} rd_kafka_AclOperation_t;
              +const char *rd_kafka_AclOperation_name(rd_kafka_AclOperation_t acl_operation);
              +typedef enum rd_kafka_AclPermissionType_t {
              +  RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN = 0,
              +  RD_KAFKA_ACL_PERMISSION_TYPE_ANY = 1,
              +  RD_KAFKA_ACL_PERMISSION_TYPE_DENY = 2,
              +  RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW = 3,
              +  RD_KAFKA_ACL_PERMISSION_TYPE__CNT,
              +} rd_kafka_AclPermissionType_t;
              +const char *rd_kafka_AclPermissionType_name(rd_kafka_AclPermissionType_t acl_permission_type);
              +rd_kafka_AclBinding_t *rd_kafka_AclBinding_new(rd_kafka_ResourceType_t restype, const char *name, rd_kafka_ResourcePatternType_t resource_pattern_type, const char *principal, const char *host, rd_kafka_AclOperation_t operation, rd_kafka_AclPermissionType_t permission_type, char *errstr, size_t errstr_size);
              +rd_kafka_AclBindingFilter_t *rd_kafka_AclBindingFilter_new(rd_kafka_ResourceType_t restype, const char *name, rd_kafka_ResourcePatternType_t resource_pattern_type, const char *principal, const char *host, rd_kafka_AclOperation_t operation, rd_kafka_AclPermissionType_t permission_type, char *errstr, size_t errstr_size);
              +rd_kafka_ResourceType_t rd_kafka_AclBinding_restype(const rd_kafka_AclBinding_t *acl);
              +const char *rd_kafka_AclBinding_name(const rd_kafka_AclBinding_t *acl);
              +const char *rd_kafka_AclBinding_principal(const rd_kafka_AclBinding_t *acl);
              +const char *rd_kafka_AclBinding_host(const rd_kafka_AclBinding_t *acl);
              +rd_kafka_AclOperation_t rd_kafka_AclBinding_operation(const rd_kafka_AclBinding_t *acl);
              +rd_kafka_AclPermissionType_t rd_kafka_AclBinding_permission_type(const rd_kafka_AclBinding_t *acl);
              +rd_kafka_ResourcePatternType_t rd_kafka_AclBinding_resource_pattern_type(const rd_kafka_AclBinding_t *acl);
              +const rd_kafka_error_t *rd_kafka_AclBinding_error(const rd_kafka_AclBinding_t *acl);
              +void rd_kafka_AclBinding_destroy(rd_kafka_AclBinding_t *acl_binding);
              +void rd_kafka_AclBinding_destroy_array(rd_kafka_AclBinding_t **acl_bindings, size_t acl_bindings_cnt);
              +const rd_kafka_acl_result_t **rd_kafka_CreateAcls_result_acls(const rd_kafka_CreateAcls_result_t *result, size_t *cntp);
              +void rd_kafka_CreateAcls(rd_kafka_t *rk, rd_kafka_AclBinding_t **new_acls, size_t new_acls_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_AclBinding_t **rd_kafka_DescribeAcls_result_acls(const rd_kafka_DescribeAcls_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeAcls(rd_kafka_t *rk, rd_kafka_AclBindingFilter_t *acl_filter, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +typedef struct rd_kafka_DeleteAcls_result_response_s rd_kafka_DeleteAcls_result_response_t;
              +const rd_kafka_DeleteAcls_result_response_t **rd_kafka_DeleteAcls_result_responses(const rd_kafka_DeleteAcls_result_t *result, size_t *cntp);
              +const rd_kafka_error_t *rd_kafka_DeleteAcls_result_response_error(const rd_kafka_DeleteAcls_result_response_t *result_response);
              +const rd_kafka_AclBinding_t **rd_kafka_DeleteAcls_result_response_matching_acls(const rd_kafka_DeleteAcls_result_response_t *result_response, size_t *matching_acls_cntp);
              +void rd_kafka_DeleteAcls(rd_kafka_t *rk, rd_kafka_AclBindingFilter_t **del_acls, size_t del_acls_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token(rd_kafka_t *rk, const char *token_value, int64_t md_lifetime_ms, const char *md_principal_name, const char **extensions, size_t extension_size, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk, const char *errstr);
              +rd_kafka_error_t *rd_kafka_init_transactions(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_send_offsets_to_transaction(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, const rd_kafka_consumer_group_metadata_t *cgmetadata, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_commit_transaction(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_abort_transaction(rd_kafka_t *rk, int timeout_ms);
              +typedef struct rd_kafka_mock_cluster_s rd_kafka_mock_cluster_t;
              +rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new(rd_kafka_t *rk, int broker_cnt);
              +void rd_kafka_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_t *rd_kafka_mock_cluster_handle(const rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_mock_cluster_t *rd_kafka_handle_mock_cluster(const rd_kafka_t *rk);
              +const char *rd_kafka_mock_cluster_bootstraps(const rd_kafka_mock_cluster_t *mcluster);
              +void rd_kafka_mock_clear_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey);
              +void rd_kafka_mock_push_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, ...);
              +void rd_kafka_mock_push_request_errors_array(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, const rd_kafka_resp_err_t *errors);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_push_request_error_rtts(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int16_t ApiKey, size_t cnt, ...);
              +void rd_kafka_mock_topic_set_error(rd_kafka_mock_cluster_t *mcluster, const char *topic, rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_mock_topic_create(rd_kafka_mock_cluster_t *mcluster, const char *topic, int partition_cnt, int replication_factor);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_leader(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int64_t lo, int64_t hi);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_down(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_up(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rtt(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int rtt_ms);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rack(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, const char *rack);
              +rd_kafka_resp_err_t rd_kafka_mock_coordinator_set(rd_kafka_mock_cluster_t *mcluster, const char *key_type, const char *key, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_set_apiversion(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, int16_t MinVersion, int16_t MaxVersion);
              +'
              +
              +rdkafka.h, rdkafka_mock.h

              +
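The string above is a plain C declaration block (cdef). As a minimal, hedged sketch that is not part of the generated docs, this is roughly how such a block is consumed with PHP's FFI extension; the shared-library file name and the reduced two-function cdef below are assumptions for illustration only:

<?php
// Sketch only: in practice the full cdef string shown above would be passed;
// a two-function subset keeps this example self-contained.
$cdef = <<<'CDEF'
int rd_kafka_version(void);
const char *rd_kafka_version_str(void);
CDEF;

// The library file name is an assumption; adjust it to your platform.
$ffi = \FFI::cdef($cdef, 'librdkafka.so.1');

$packed = $ffi->rd_kafka_version();       // packed integer, see RD_KAFKA_VERSION below
$verStr = $ffi->rd_kafka_version_str();   // FFI\CData wrapping a const char *
printf("librdkafka %s (0x%08x)\n", \FFI::string($verStr), $packed);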

              RD_KAFKA_VERSION

              +
              public RD_KAFKA_VERSION = 17367551
              +
              +

              librdkafka version

              +

              Interpreted as hex MM.mm.rr.xx:

              +
• MM = Major
• mm = minor
• rr = revision
• xx = pre-release id (0xff is the final release)
              +

              E.g.: 0x000801ff = 0.8.1

              +
              Remarks
This value should only be used at compile time; for runtime version checks, use rd_kafka_version().
+
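As a minimal sketch that is not part of the generated docs, the packed value can be split into its MM.mm.rr.xx parts with plain bit shifts; 17367551 is the value documented above:

<?php
// Decode the hex MM.mm.rr.xx layout described above.
// 17367551 == 0x010901ff, the RD_KAFKA_VERSION value listed on this page.
$version  = 17367551;

$major    = ($version >> 24) & 0xff;  // MM
$minor    = ($version >> 16) & 0xff;  // mm
$revision = ($version >> 8)  & 0xff;  // rr
$pre      =  $version        & 0xff;  // xx, 0xff marks a final release

printf("librdkafka %d.%d.%d%s\n", $major, $minor, $revision,
    $pre === 0xff ? '' : sprintf(' (pre-release 0x%02x)', $pre));
// Prints: librdkafka 1.9.1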

              RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE

              +
              public RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76
              +
              +

              Unsupported compression type

+

              RD_KAFKA_RESP_ERR_END_ALL

              +

              public RD_KAFKA_RESP_ERR_END_ALL = 98
              +
+End marker of enum rd_kafka_resp_err_t

              +

              RD_KAFKA_ADMIN_OP__CNT

              +
              public RD_KAFKA_ADMIN_OP__CNT = 12
              +
              +

              Number of ops defined

+

              RD_KAFKA_CDEF

              +

              public RD_KAFKA_CDEF = 'typedef long int ssize_t;
              +typedef struct _IO_FILE FILE;
              +typedef long int mode_t;
              +typedef signed int int16_t;
              +typedef unsigned int uint16_t;
              +typedef signed int int32_t;
              +typedef signed long int int64_t;
              +int rd_kafka_version(void);
              +const char *rd_kafka_version_str(void);
              +typedef enum rd_kafka_type_t {
              +  RD_KAFKA_PRODUCER,
              +  RD_KAFKA_CONSUMER,
              +} rd_kafka_type_t;
              +typedef enum rd_kafka_timestamp_type_t {
              +  RD_KAFKA_TIMESTAMP_NOT_AVAILABLE,
              +  RD_KAFKA_TIMESTAMP_CREATE_TIME,
              +  RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME,
              +} rd_kafka_timestamp_type_t;
              +const char *rd_kafka_get_debug_contexts(void);
              +typedef struct rd_kafka_s rd_kafka_t;
              +typedef struct rd_kafka_topic_s rd_kafka_topic_t;
              +typedef struct rd_kafka_conf_s rd_kafka_conf_t;
              +typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t;
              +typedef struct rd_kafka_queue_s rd_kafka_queue_t;
              +typedef struct rd_kafka_op_s rd_kafka_event_t;
              +typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t;
              +typedef struct rd_kafka_consumer_group_metadata_s rd_kafka_consumer_group_metadata_t;
              +typedef struct rd_kafka_error_s {
              +  unsigned int code;
              +  char *errstr;
              +  unsigned char fatal;
              +  unsigned char retriable;
              +  unsigned char txn_requires_abort;
              +} rd_kafka_error_t;
              +typedef struct rd_kafka_headers_s rd_kafka_headers_t;
              +typedef struct rd_kafka_group_result_s rd_kafka_group_result_t;
              +typedef struct rd_kafka_acl_result_s rd_kafka_acl_result_t;
              +typedef enum {
              +  RD_KAFKA_RESP_ERR__BEGIN = (- 200),
              +  RD_KAFKA_RESP_ERR__BAD_MSG = (- 199),
              +  RD_KAFKA_RESP_ERR__BAD_COMPRESSION = (- 198),
              +  RD_KAFKA_RESP_ERR__DESTROY = (- 197),
              +  RD_KAFKA_RESP_ERR__FAIL = (- 196),
              +  RD_KAFKA_RESP_ERR__TRANSPORT = (- 195),
              +  RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = (- 194),
              +  RD_KAFKA_RESP_ERR__RESOLVE = (- 193),
              +  RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = (- 192),
              +  RD_KAFKA_RESP_ERR__PARTITION_EOF = (- 191),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = (- 190),
              +  RD_KAFKA_RESP_ERR__FS = (- 189),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = (- 188),
              +  RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = (- 187),
              +  RD_KAFKA_RESP_ERR__INVALID_ARG = (- 186),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT = (- 185),
              +  RD_KAFKA_RESP_ERR__QUEUE_FULL = (- 184),
              +  RD_KAFKA_RESP_ERR__ISR_INSUFF = (- 183),
              +  RD_KAFKA_RESP_ERR__NODE_UPDATE = (- 182),
              +  RD_KAFKA_RESP_ERR__SSL = (- 181),
              +  RD_KAFKA_RESP_ERR__WAIT_COORD = (- 180),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = (- 179),
              +  RD_KAFKA_RESP_ERR__IN_PROGRESS = (- 178),
              +  RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = (- 177),
              +  RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = (- 176),
              +  RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = (- 175),
              +  RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = (- 174),
              +  RD_KAFKA_RESP_ERR__CONFLICT = (- 173),
              +  RD_KAFKA_RESP_ERR__STATE = (- 172),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = (- 171),
              +  RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = (- 170),
              +  RD_KAFKA_RESP_ERR__AUTHENTICATION = (- 169),
              +  RD_KAFKA_RESP_ERR__NO_OFFSET = (- 168),
              +  RD_KAFKA_RESP_ERR__OUTDATED = (- 167),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = (- 166),
              +  RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = (- 165),
              +  RD_KAFKA_RESP_ERR__WAIT_CACHE = (- 164),
              +  RD_KAFKA_RESP_ERR__INTR = (- 163),
              +  RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = (- 162),
              +  RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = (- 161),
              +  RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = (- 160),
              +  RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = (- 159),
              +  RD_KAFKA_RESP_ERR__PARTIAL = (- 158),
              +  RD_KAFKA_RESP_ERR__READ_ONLY = (- 157),
              +  RD_KAFKA_RESP_ERR__NOENT = (- 156),
              +  RD_KAFKA_RESP_ERR__UNDERFLOW = (- 155),
              +  RD_KAFKA_RESP_ERR__INVALID_TYPE = (- 154),
              +  RD_KAFKA_RESP_ERR__RETRY = (- 153),
              +  RD_KAFKA_RESP_ERR__PURGE_QUEUE = (- 152),
              +  RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = (- 151),
              +  RD_KAFKA_RESP_ERR__FATAL = (- 150),
              +  RD_KAFKA_RESP_ERR__INCONSISTENT = (- 149),
              +  RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = (- 148),
              +  RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = (- 147),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_BROKER = (- 146),
              +  RD_KAFKA_RESP_ERR__NOT_CONFIGURED = (- 145),
              +  RD_KAFKA_RESP_ERR__FENCED = (- 144),
              +  RD_KAFKA_RESP_ERR__APPLICATION = (- 143),
              +  RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST = (- 142),
              +  RD_KAFKA_RESP_ERR__NOOP = (- 141),
              +  RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET = (- 140),
              +  RD_KAFKA_RESP_ERR__END = (- 100),
              +  RD_KAFKA_RESP_ERR_UNKNOWN = (- 1),
              +  RD_KAFKA_RESP_ERR_NO_ERROR = 0,
              +  RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG = 2,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4,
              +  RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5,
              +  RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6,
              +  RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7,
              +  RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8,
              +  RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9,
              +  RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10,
              +  RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11,
              +  RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12,
              +  RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS = 14,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE = 15,
              +  RD_KAFKA_RESP_ERR_NOT_COORDINATOR = 16,
              +  RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17,
              +  RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
              +  RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25,
              +  RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26,
              +  RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27,
              +  RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
              +  RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29,
              +  RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30,
              +  RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
              +  RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35,
              +  RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36,
              +  RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39,
              +  RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40,
              +  RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
              +  RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44,
              +  RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47,
              +  RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49,
              +  RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50,
              +  RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51,
              +  RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52,
              +  RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
              +  RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54,
              +  RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55,
              +  RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56,
              +  RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57,
              +  RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59,
              +  RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66,
              +  RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67,
              +  RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68,
              +  RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69,
              +  RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70,
              +  RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71,
              +  RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72,
              +  RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73,
              +  RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76,
              +  RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77,
              +  RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78,
              +  RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79,
              +  RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80,
              +  RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81,
              +  RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID = 82,
              +  RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE = 83,
              +  RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED = 84,
              +  RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS = 85,
              +  RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC = 86,
              +  RD_KAFKA_RESP_ERR_INVALID_RECORD = 87,
              +  RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT = 88,
              +  RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED = 89,
              +  RD_KAFKA_RESP_ERR_PRODUCER_FENCED = 90,
              +  RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND = 91,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE = 92,
              +  RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL = 93,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET = 94,
              +  RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION = 95,
              +  RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED = 96,
              +  RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE = 97,
              +  RD_KAFKA_RESP_ERR_END_ALL,
              +} rd_kafka_resp_err_t;
              +struct rd_kafka_err_desc {
              +  rd_kafka_resp_err_t code;
              +  char *name;
              +  char *desc;
              +};
              +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, size_t *cntp);
              +const char *rd_kafka_err2str(rd_kafka_resp_err_t err);
              +const char *rd_kafka_err2name(rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_last_error(void);
              +rd_kafka_resp_err_t rd_kafka_errno2err(int errnox);
              +int rd_kafka_errno(void);
              +rd_kafka_resp_err_t rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason);
              +rd_kafka_resp_err_t rd_kafka_error_code(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_name(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_string(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_fatal(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_retriable(const rd_kafka_error_t *error);
              +int rd_kafka_error_txn_requires_abort(const rd_kafka_error_t *error);
              +void rd_kafka_error_destroy(rd_kafka_error_t *error);
              +rd_kafka_error_t *rd_kafka_error_new(rd_kafka_resp_err_t code, const char *fmt, ...);
              +typedef struct rd_kafka_topic_partition_s {
              +  char *topic;
              +  int32_t partition;
              +  int64_t offset;
              +  void *metadata;
              +  size_t metadata_size;
              +  void *opaque;
              +  rd_kafka_resp_err_t err;
              +  void *_private;
              +} rd_kafka_topic_partition_t;
              +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar);
              +typedef struct rd_kafka_topic_partition_list_s {
              +  int cnt;
              +  int size;
              +  rd_kafka_topic_partition_t *elems;
              +} rd_kafka_topic_partition_list_t;
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size);
              +void rd_kafka_topic_partition_list_destroy(rd_kafka_topic_partition_list_t *rkparlist);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_add_range(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t start, int32_t stop);
              +int rd_kafka_topic_partition_list_del(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +int rd_kafka_topic_partition_list_del_by_idx(rd_kafka_topic_partition_list_t *rktparlist, int idx);
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src);
              +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition, int64_t offset);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(const rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_sort(rd_kafka_topic_partition_list_t *rktparlist, int (*cmp)(const void *a, const void *b, void *cmp_opaque), void *cmp_opaque);
              +typedef enum rd_kafka_vtype_t {
              +  RD_KAFKA_VTYPE_END,
              +  RD_KAFKA_VTYPE_TOPIC,
              +  RD_KAFKA_VTYPE_RKT,
              +  RD_KAFKA_VTYPE_PARTITION,
              +  RD_KAFKA_VTYPE_VALUE,
              +  RD_KAFKA_VTYPE_KEY,
              +  RD_KAFKA_VTYPE_OPAQUE,
              +  RD_KAFKA_VTYPE_MSGFLAGS,
              +  RD_KAFKA_VTYPE_TIMESTAMP,
              +  RD_KAFKA_VTYPE_HEADER,
              +  RD_KAFKA_VTYPE_HEADERS,
              +} rd_kafka_vtype_t;
              +typedef struct rd_kafka_vu_s {
              +  rd_kafka_vtype_t vtype;
              +  union {
              +    char *cstr;
              +    rd_kafka_topic_t *rkt;
              +    int i;
              +    int32_t i32;
              +    int64_t i64;
              +    struct {
              +      void *ptr;
              +      size_t size;
              +    } mem;
              +    struct {
              +      char *name;
              +      void *val;
              +      ssize_t size;
              +    } header;
              +    rd_kafka_headers_t *headers;
              +    void *ptr;
              +    char _pad[64];
              +  } u;
              +} rd_kafka_vu_t;
              +rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count);
              +void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs);
              +rd_kafka_headers_t *rd_kafka_headers_copy(const rd_kafka_headers_t *src);
              +rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, const char *name, ssize_t name_size, const void *value, ssize_t value_size);
              +rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, const char *name);
              +rd_kafka_resp_err_t rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get(const rd_kafka_headers_t *hdrs, size_t idx, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, size_t idx, const char **namep, const void **valuep, size_t *sizep);
              +typedef struct rd_kafka_message_s {
              +  rd_kafka_resp_err_t err;
              +  rd_kafka_topic_t *rkt;
              +  int32_t partition;
              +  void *payload;
              +  size_t len;
              +  void *key;
              +  size_t key_len;
              +  int64_t offset;
              +  void *_private;
              +} rd_kafka_message_t;
              +void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);
              +const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage);
              +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, rd_kafka_timestamp_type_t *tstype);
              +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage);
              +int32_t rd_kafka_message_broker_id(const rd_kafka_message_t *rkmessage);
              +rd_kafka_resp_err_t rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +rd_kafka_resp_err_t rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t *hdrs);
              +size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs);
              +typedef enum {
              +  RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0,
              +  RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1,
              +  RD_KAFKA_MSG_STATUS_PERSISTED = 2,
              +} rd_kafka_msg_status_t;
              +rd_kafka_msg_status_t rd_kafka_message_status(const rd_kafka_message_t *rkmessage);
              +typedef enum {
              +  RD_KAFKA_CONF_UNKNOWN = (- 2),
              +  RD_KAFKA_CONF_INVALID = (- 1),
              +  RD_KAFKA_CONF_OK = 0,
              +} rd_kafka_conf_res_t;
              +rd_kafka_conf_t *rd_kafka_conf_new(void);
              +void rd_kafka_conf_destroy(rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, size_t filter_cnt, const char **filter);
              +const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk);
              +rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events);
              +void rd_kafka_conf_set_background_event_cb(rd_kafka_conf_t *conf, void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque));
              +void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, void (*dr_cb)(rd_kafka_t *rk, void *payload, size_t len, rd_kafka_resp_err_t err, void *opaque, void *msg_opaque));
              +void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, void (*dr_msg_cb)(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_consume_cb(rd_kafka_conf_t *conf, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_rebalance_cb(rd_kafka_conf_t *conf, void (*rebalance_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque));
              +void rd_kafka_conf_set_offset_commit_cb(rd_kafka_conf_t *conf, void (*offset_commit_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque));
              +void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, void (*error_cb)(rd_kafka_t *rk, int err, const char *reason, void *opaque));
              +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, void (*throttle_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque));
              +void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, void (*log_cb)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque));
              +void rd_kafka_conf_set_oauthbearer_token_refresh_cb(rd_kafka_conf_t *conf, void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, const char *oauthbearer_config, void *opaque));
              +void rd_kafka_conf_enable_sasl_queue(rd_kafka_conf_t *conf, int enable);
              +void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, int (*socket_cb)(int domain, int type, int protocol, void *opaque));
              +void rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, int (*connect_cb)(int sockfd, const struct sockaddr *addr, int addrlen, const char *id, void *opaque));
              +void rd_kafka_conf_set_closesocket_cb(rd_kafka_conf_t *conf, int (*closesocket_cb)(int sockfd, void *opaque));
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb(rd_kafka_conf_t *conf, int (*ssl_cert_verify_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int *x509_error, int depth, const char *buf, size_t size, char *errstr, size_t errstr_size, void *opaque));
              +typedef enum rd_kafka_cert_type_t {
              +  RD_KAFKA_CERT_PUBLIC_KEY,
              +  RD_KAFKA_CERT_PRIVATE_KEY,
              +  RD_KAFKA_CERT_CA,
              +  RD_KAFKA_CERT__CNT,
              +} rd_kafka_cert_type_t;
              +typedef enum rd_kafka_cert_enc_t {
              +  RD_KAFKA_CERT_ENC_PKCS12,
              +  RD_KAFKA_CERT_ENC_DER,
              +  RD_KAFKA_CERT_ENC_PEM,
              +  RD_KAFKA_CERT_ENC__CNT,
              +} rd_kafka_cert_enc_t;
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, rd_kafka_cert_type_t cert_type, rd_kafka_cert_enc_t cert_enc, const void *buffer, size_t size, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_engine_callback_data(rd_kafka_conf_t *conf, void *callback_data);
              +void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque);
              +void *rd_kafka_opaque(const rd_kafka_t *rk);
              +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, rd_kafka_topic_conf_t *tconf);
              +rd_kafka_topic_conf_t *rd_kafka_conf_get_default_topic_conf(rd_kafka_conf_t *conf);
              +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp);
              +const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, size_t *cntp);
              +void rd_kafka_conf_dump_free(const char **arr, size_t cnt);
              +void rd_kafka_conf_properties_show(FILE *fp);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf);
              +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk);
              +void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *rkt_opaque);
              +void rd_kafka_topic_conf_set_partitioner_cb(rd_kafka_topic_conf_t *topic_conf, int32_t (*partitioner)(const rd_kafka_topic_t *rkt, const void *keydata, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque));
              +void rd_kafka_topic_conf_set_msg_order_cmp(rd_kafka_topic_conf_t *topic_conf, int (*msg_order_cmp)(const rd_kafka_message_t *a, const rd_kafka_message_t *b));
              +int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, int32_t partition);
              +int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, char *errstr, size_t errstr_size);
              +void rd_kafka_destroy(rd_kafka_t *rk);
              +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags);
              +const char *rd_kafka_name(const rd_kafka_t *rk);
              +rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk);
              +char *rd_kafka_memberid(const rd_kafka_t *rk);
              +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms);
              +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, rd_kafka_topic_conf_t *conf);
              +void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt);
              +const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt);
              +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt);
              +int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms);
              +void rd_kafka_yield(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_pause_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_resume_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high);
              +rd_kafka_resp_err_t rd_kafka_offsets_for_times(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets, int timeout_ms);
              +void *rd_kafka_mem_calloc(rd_kafka_t *rk, size_t num, size_t size);
              +void *rd_kafka_mem_malloc(rd_kafka_t *rk, size_t size);
              +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr);
              +rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk);
              +void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu);
              +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_sasl(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_sasl_background_callbacks_enable(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, const char *topic, int32_t partition);
              +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk);
              +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst);
              +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu);
              +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, int fd, const void *payload, size_t size);
              +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, void (*event_cb)(rd_kafka_t *rk, void *qev_opaque), void *qev_opaque);
              +void rd_kafka_queue_yield(rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
              +rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_seek_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms);
              +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, int timeout_ms);
              +ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +rd_kafka_resp_err_t rd_kafka_offsets_store(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets);
              +rd_kafka_resp_err_t rd_kafka_subscribe(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *topics);
              +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics);
              +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_consumer_close_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consumer_closed(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_incremental_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_error_t *rd_kafka_incremental_unassign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +const char *rd_kafka_rebalance_protocol(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_assignment(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **partitions);
              +int rd_kafka_assignment_lost(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_commit(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_message(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_queue(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_queue_t *rkqu, void (*cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_committed(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata(rd_kafka_t *rk);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new(const char *group_id);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new_with_genid(const char *group_id, int32_t generation_id, const char *member_id, const char *group_instance_id);
              +void rd_kafka_consumer_group_metadata_destroy(rd_kafka_consumer_group_metadata_t *);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_write(const rd_kafka_consumer_group_metadata_t *cgmd, void **bufferp, size_t *sizep);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_read(rd_kafka_consumer_group_metadata_t **cgmdp, const void *buffer, size_t size);
              +int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, void *payload, size_t len, const void *key, size_t keylen, void *msg_opaque);
              +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...);
              +rd_kafka_error_t *rd_kafka_produceva(rd_kafka_t *rk, const rd_kafka_vu_t *vus, size_t cnt);
              +int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, rd_kafka_message_t *rkmessages, int message_cnt);
              +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags);
              +typedef struct rd_kafka_metadata_broker {
              +  int32_t id;
              +  char *host;
              +  int port;
              +} rd_kafka_metadata_broker_t;
              +typedef struct rd_kafka_metadata_partition {
              +  int32_t id;
              +  rd_kafka_resp_err_t err;
              +  int32_t leader;
              +  int replica_cnt;
              +  int32_t *replicas;
              +  int isr_cnt;
              +  int32_t *isrs;
              +} rd_kafka_metadata_partition_t;
              +typedef struct rd_kafka_metadata_topic {
              +  char *topic;
              +  int partition_cnt;
              +  struct rd_kafka_metadata_partition *partitions;
              +  rd_kafka_resp_err_t err;
              +} rd_kafka_metadata_topic_t;
              +typedef struct rd_kafka_metadata {
              +  int broker_cnt;
              +  struct rd_kafka_metadata_broker *brokers;
              +  int topic_cnt;
              +  struct rd_kafka_metadata_topic *topics;
              +  int32_t orig_broker_id;
              +  char *orig_broker_name;
              +} rd_kafka_metadata_t;
              +rd_kafka_resp_err_t rd_kafka_metadata(rd_kafka_t *rk, int all_topics, rd_kafka_topic_t *only_rkt, const struct rd_kafka_metadata **metadatap, int timeout_ms);
              +void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata);
              +struct rd_kafka_group_member_info {
              +  char *member_id;
              +  char *client_id;
              +  char *client_host;
              +  void *member_metadata;
              +  int member_metadata_size;
              +  void *member_assignment;
              +  int member_assignment_size;
              +};
              +struct rd_kafka_group_info {
              +  struct rd_kafka_metadata_broker broker;
              +  char *group;
              +  rd_kafka_resp_err_t err;
              +  char *state;
              +  char *protocol_type;
              +  char *protocol;
              +  struct rd_kafka_group_member_info *members;
              +  int member_cnt;
              +};
              +struct rd_kafka_group_list {
              +  struct rd_kafka_group_info *groups;
              +  int group_cnt;
              +};
              +rd_kafka_resp_err_t rd_kafka_list_groups(rd_kafka_t *rk, const char *group, const struct rd_kafka_group_list **grplistp, int timeout_ms);
              +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist);
              +int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist);
              +void rd_kafka_set_logger(rd_kafka_t *rk, void (*func)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_set_log_level(rd_kafka_t *rk, int level);
              +void rd_kafka_log_print(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +int rd_kafka_outq_len(rd_kafka_t *rk);
              +void rd_kafka_dump(FILE *fp, rd_kafka_t *rk);
              +int rd_kafka_thread_cnt(void);
              +typedef enum rd_kafka_thread_type_t {
              +  RD_KAFKA_THREAD_MAIN,
              +  RD_KAFKA_THREAD_BACKGROUND,
              +  RD_KAFKA_THREAD_BROKER,
              +} rd_kafka_thread_type_t;
              +int rd_kafka_wait_destroyed(int timeout_ms);
              +int rd_kafka_unittest(void);
              +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk);
              +typedef int rd_kafka_event_type_t;
              +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev);
              +void rd_kafka_event_destroy(rd_kafka_event_t *rkev);
              +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev);
              +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, const rd_kafka_message_t **rkmessages, size_t size);
              +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev);
              +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev);
              +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev);
              +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev);
              +int rd_kafka_event_log(rd_kafka_event_t *rkev, const char **fac, const char **str, int *level);
              +int rd_kafka_event_debug_contexts(rd_kafka_event_t *rkev, char *dst, size_t dstsize);
              +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_list_t *rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_t *rd_kafka_event_topic_partition(rd_kafka_event_t *rkev);
              +typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreateAcls_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeAcls_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteAcls_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteRecords_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteGroups_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteConsumerGroupOffsets_result_t;
              +const rd_kafka_CreateTopics_result_t *rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteTopics_result_t *rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreatePartitions_result_t *rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConfigs_result_t *rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConfigs_result_t *rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteRecords_result_t *rd_kafka_event_DeleteRecords_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteGroups_result_t *rd_kafka_event_DeleteGroups_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteConsumerGroupOffsets_result_t *rd_kafka_event_DeleteConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreateAcls_result_t *rd_kafka_event_CreateAcls_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeAcls_result_t *rd_kafka_event_DescribeAcls_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteAcls_result_t *rd_kafka_event_DeleteAcls_result(rd_kafka_event_t *rkev);
              +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms);
              +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms);
              +typedef rd_kafka_resp_err_t (rd_kafka_plugin_f_conf_init_t)(rd_kafka_conf_t *conf, void **plug_opaquep, char *errstr, size_t errstr_size);
              +typedef rd_kafka_conf_res_t (rd_kafka_interceptor_f_on_conf_set_t)(rd_kafka_conf_t *conf, const char *name, const char *val, char *errstr, size_t errstr_size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_dup_t)(rd_kafka_conf_t *new_conf, const rd_kafka_conf_t *old_conf, size_t filter_cnt, const char **filter, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_destroy_t)(void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_new_t)(rd_kafka_t *rk, const rd_kafka_conf_t *conf, void *ic_opaque, char *errstr, size_t errstr_size);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_send_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_acknowledgement_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_consume_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_commit_t)(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_request_sent_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_response_received_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, int64_t rtt, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_start_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_exit_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_new_t *on_new, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_destroy_t *on_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_send_t *on_send, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_consume_t *on_consume, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_commit_t *on_commit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_response_received(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_response_received_t *on_response_received, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres);
              +const rd_kafka_error_t *rd_kafka_group_result_error(const rd_kafka_group_result_t *groupres);
              +const char *rd_kafka_group_result_name(const rd_kafka_group_result_t *groupres);
              +const rd_kafka_topic_partition_list_t *rd_kafka_group_result_partitions(const rd_kafka_group_result_t *groupres);
              +typedef enum rd_kafka_admin_op_t {
              +  RD_KAFKA_ADMIN_OP_ANY = 0,
              +  RD_KAFKA_ADMIN_OP_CREATETOPICS,
              +  RD_KAFKA_ADMIN_OP_DELETETOPICS,
              +  RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONFIGS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS,
              +  RD_KAFKA_ADMIN_OP_DELETERECORDS,
              +  RD_KAFKA_ADMIN_OP_DELETEGROUPS,
              +  RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS,
              +  RD_KAFKA_ADMIN_OP_CREATEACLS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBEACLS,
              +  RD_KAFKA_ADMIN_OP_DELETEACLS,
              +  RD_KAFKA_ADMIN_OP__CNT,
              +} rd_kafka_admin_op_t;
              +typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t;
              +rd_kafka_AdminOptions_t *rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
              +void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, int true_or_false, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, int32_t broker_id, char *errstr, size_t errstr_size);
              +void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, void *ev_opaque);
              +typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t;
              +rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, int num_partitions, int replication_factor, char *errstr, size_t errstr_size);
              +void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic);
              +void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, int32_t partition, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, const char *name, const char *value);
              +void rd_kafka_CreateTopics(rd_kafka_t *rk, rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(const rd_kafka_CreateTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
              +rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic);
              +void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic);
              +void rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt);
              +void rd_kafka_DeleteTopics(rd_kafka_t *rk, rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(const rd_kafka_DeleteTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t;
              +rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new(const char *topic, size_t new_total_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts);
              +void rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment(rd_kafka_NewPartitions_t *new_parts, int32_t new_partition_idx, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_CreatePartitions(rd_kafka_t *rk, rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreatePartitions_result_topics(const rd_kafka_CreatePartitions_result_t *result, size_t *cntp);
              +typedef enum rd_kafka_ConfigSource_t {
              +  RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
              +  RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4,
              +  RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5,
              +  RD_KAFKA_CONFIG_SOURCE__CNT,
              +} rd_kafka_ConfigSource_t;
              +const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource);
              +typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t;
              +const char *rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry);
              +const char *rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry);
              +rd_kafka_ConfigSource_t rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, size_t *cntp);
              +typedef enum rd_kafka_ResourceType_t {
              +  RD_KAFKA_RESOURCE_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_ANY = 1,
              +  RD_KAFKA_RESOURCE_TOPIC = 2,
              +  RD_KAFKA_RESOURCE_GROUP = 3,
              +  RD_KAFKA_RESOURCE_BROKER = 4,
              +  RD_KAFKA_RESOURCE__CNT,
              +} rd_kafka_ResourceType_t;
              +typedef enum rd_kafka_ResourcePatternType_t {
              +  RD_KAFKA_RESOURCE_PATTERN_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_PATTERN_ANY = 1,
              +  RD_KAFKA_RESOURCE_PATTERN_MATCH = 2,
              +  RD_KAFKA_RESOURCE_PATTERN_LITERAL = 3,
              +  RD_KAFKA_RESOURCE_PATTERN_PREFIXED = 4,
              +  RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT,
              +} rd_kafka_ResourcePatternType_t;
              +const char *rd_kafka_ResourcePatternType_name(rd_kafka_ResourcePatternType_t resource_pattern_type);
              +const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype);
              +typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
              +rd_kafka_ConfigResource_t *rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, const char *resname);
              +void rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config);
              +void rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, size_t config_cnt);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, const char *name, const char *value);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, size_t *cntp);
              +rd_kafka_ResourceType_t rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config);
              +void rd_kafka_AlterConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_AlterConfigs_result_resources(const rd_kafka_AlterConfigs_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_DescribeConfigs_result_resources(const rd_kafka_DescribeConfigs_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteRecords_s rd_kafka_DeleteRecords_t;
              +rd_kafka_DeleteRecords_t *rd_kafka_DeleteRecords_new(const rd_kafka_topic_partition_list_t *before_offsets);
              +void rd_kafka_DeleteRecords_destroy(rd_kafka_DeleteRecords_t *del_records);
              +void rd_kafka_DeleteRecords_destroy_array(rd_kafka_DeleteRecords_t **del_records, size_t del_record_cnt);
              +void rd_kafka_DeleteRecords(rd_kafka_t *rk, rd_kafka_DeleteRecords_t **del_records, size_t del_record_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_partition_list_t *rd_kafka_DeleteRecords_result_offsets(const rd_kafka_DeleteRecords_result_t *result);
              +typedef struct rd_kafka_DeleteGroup_s rd_kafka_DeleteGroup_t;
              +rd_kafka_DeleteGroup_t *rd_kafka_DeleteGroup_new(const char *group);
              +void rd_kafka_DeleteGroup_destroy(rd_kafka_DeleteGroup_t *del_group);
              +void rd_kafka_DeleteGroup_destroy_array(rd_kafka_DeleteGroup_t **del_groups, size_t del_group_cnt);
              +void rd_kafka_DeleteGroups(rd_kafka_t *rk, rd_kafka_DeleteGroup_t **del_groups, size_t del_group_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_DeleteGroups_result_groups(const rd_kafka_DeleteGroups_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteConsumerGroupOffsets_s rd_kafka_DeleteConsumerGroupOffsets_t;
              +rd_kafka_DeleteConsumerGroupOffsets_t *rd_kafka_DeleteConsumerGroupOffsets_new(const char *group, const rd_kafka_topic_partition_list_t *partitions);
              +void rd_kafka_DeleteConsumerGroupOffsets_destroy(rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets);
              +void rd_kafka_DeleteConsumerGroupOffsets_destroy_array(rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, size_t del_grpoffset_cnt);
              +void rd_kafka_DeleteConsumerGroupOffsets(rd_kafka_t *rk, rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, size_t del_grpoffsets_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_DeleteConsumerGroupOffsets_result_groups(const rd_kafka_DeleteConsumerGroupOffsets_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_AclBinding_s rd_kafka_AclBinding_t;
              +typedef rd_kafka_AclBinding_t rd_kafka_AclBindingFilter_t;
              +const rd_kafka_error_t *rd_kafka_acl_result_error(const rd_kafka_acl_result_t *aclres);
              +typedef enum rd_kafka_AclOperation_t {
              +  RD_KAFKA_ACL_OPERATION_UNKNOWN = 0,
              +  RD_KAFKA_ACL_OPERATION_ANY = 1,
              +  RD_KAFKA_ACL_OPERATION_ALL = 2,
              +  RD_KAFKA_ACL_OPERATION_READ = 3,
              +  RD_KAFKA_ACL_OPERATION_WRITE = 4,
              +  RD_KAFKA_ACL_OPERATION_CREATE = 5,
              +  RD_KAFKA_ACL_OPERATION_DELETE = 6,
              +  RD_KAFKA_ACL_OPERATION_ALTER = 7,
              +  RD_KAFKA_ACL_OPERATION_DESCRIBE = 8,
              +  RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION = 9,
              +  RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS = 10,
              +  RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS = 11,
              +  RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE = 12,
              +  RD_KAFKA_ACL_OPERATION__CNT,
              +} rd_kafka_AclOperation_t;
              +const char *rd_kafka_AclOperation_name(rd_kafka_AclOperation_t acl_operation);
              +typedef enum rd_kafka_AclPermissionType_t {
              +  RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN = 0,
              +  RD_KAFKA_ACL_PERMISSION_TYPE_ANY = 1,
              +  RD_KAFKA_ACL_PERMISSION_TYPE_DENY = 2,
              +  RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW = 3,
              +  RD_KAFKA_ACL_PERMISSION_TYPE__CNT,
              +} rd_kafka_AclPermissionType_t;
              +const char *rd_kafka_AclPermissionType_name(rd_kafka_AclPermissionType_t acl_permission_type);
              +rd_kafka_AclBinding_t *rd_kafka_AclBinding_new(rd_kafka_ResourceType_t restype, const char *name, rd_kafka_ResourcePatternType_t resource_pattern_type, const char *principal, const char *host, rd_kafka_AclOperation_t operation, rd_kafka_AclPermissionType_t permission_type, char *errstr, size_t errstr_size);
              +rd_kafka_AclBindingFilter_t *rd_kafka_AclBindingFilter_new(rd_kafka_ResourceType_t restype, const char *name, rd_kafka_ResourcePatternType_t resource_pattern_type, const char *principal, const char *host, rd_kafka_AclOperation_t operation, rd_kafka_AclPermissionType_t permission_type, char *errstr, size_t errstr_size);
              +rd_kafka_ResourceType_t rd_kafka_AclBinding_restype(const rd_kafka_AclBinding_t *acl);
              +const char *rd_kafka_AclBinding_name(const rd_kafka_AclBinding_t *acl);
              +const char *rd_kafka_AclBinding_principal(const rd_kafka_AclBinding_t *acl);
              +const char *rd_kafka_AclBinding_host(const rd_kafka_AclBinding_t *acl);
              +rd_kafka_AclOperation_t rd_kafka_AclBinding_operation(const rd_kafka_AclBinding_t *acl);
              +rd_kafka_AclPermissionType_t rd_kafka_AclBinding_permission_type(const rd_kafka_AclBinding_t *acl);
              +rd_kafka_ResourcePatternType_t rd_kafka_AclBinding_resource_pattern_type(const rd_kafka_AclBinding_t *acl);
              +const rd_kafka_error_t *rd_kafka_AclBinding_error(const rd_kafka_AclBinding_t *acl);
              +void rd_kafka_AclBinding_destroy(rd_kafka_AclBinding_t *acl_binding);
              +void rd_kafka_AclBinding_destroy_array(rd_kafka_AclBinding_t **acl_bindings, size_t acl_bindings_cnt);
              +const rd_kafka_acl_result_t **rd_kafka_CreateAcls_result_acls(const rd_kafka_CreateAcls_result_t *result, size_t *cntp);
              +void rd_kafka_CreateAcls(rd_kafka_t *rk, rd_kafka_AclBinding_t **new_acls, size_t new_acls_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_AclBinding_t **rd_kafka_DescribeAcls_result_acls(const rd_kafka_DescribeAcls_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeAcls(rd_kafka_t *rk, rd_kafka_AclBindingFilter_t *acl_filter, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +typedef struct rd_kafka_DeleteAcls_result_response_s rd_kafka_DeleteAcls_result_response_t;
              +const rd_kafka_DeleteAcls_result_response_t **rd_kafka_DeleteAcls_result_responses(const rd_kafka_DeleteAcls_result_t *result, size_t *cntp);
              +const rd_kafka_error_t *rd_kafka_DeleteAcls_result_response_error(const rd_kafka_DeleteAcls_result_response_t *result_response);
              +const rd_kafka_AclBinding_t **rd_kafka_DeleteAcls_result_response_matching_acls(const rd_kafka_DeleteAcls_result_response_t *result_response, size_t *matching_acls_cntp);
              +void rd_kafka_DeleteAcls(rd_kafka_t *rk, rd_kafka_AclBindingFilter_t **del_acls, size_t del_acls_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token(rd_kafka_t *rk, const char *token_value, int64_t md_lifetime_ms, const char *md_principal_name, const char **extensions, size_t extension_size, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk, const char *errstr);
              +rd_kafka_error_t *rd_kafka_init_transactions(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_send_offsets_to_transaction(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, const rd_kafka_consumer_group_metadata_t *cgmetadata, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_commit_transaction(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_abort_transaction(rd_kafka_t *rk, int timeout_ms);
              +typedef struct rd_kafka_mock_cluster_s rd_kafka_mock_cluster_t;
              +rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new(rd_kafka_t *rk, int broker_cnt);
              +void rd_kafka_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_t *rd_kafka_mock_cluster_handle(const rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_mock_cluster_t *rd_kafka_handle_mock_cluster(const rd_kafka_t *rk);
              +const char *rd_kafka_mock_cluster_bootstraps(const rd_kafka_mock_cluster_t *mcluster);
              +void rd_kafka_mock_clear_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey);
              +void rd_kafka_mock_push_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, ...);
              +void rd_kafka_mock_push_request_errors_array(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, const rd_kafka_resp_err_t *errors);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_push_request_error_rtts(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int16_t ApiKey, size_t cnt, ...);
              +void rd_kafka_mock_topic_set_error(rd_kafka_mock_cluster_t *mcluster, const char *topic, rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_mock_topic_create(rd_kafka_mock_cluster_t *mcluster, const char *topic, int partition_cnt, int replication_factor);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_leader(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int64_t lo, int64_t hi);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_down(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_up(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rtt(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int rtt_ms);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rack(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, const char *rack);
              +rd_kafka_resp_err_t rd_kafka_mock_coordinator_set(rd_kafka_mock_cluster_t *mcluster, const char *key_type, const char *key, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_set_apiversion(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, int16_t MinVersion, int16_t MaxVersion);
              +'
              +
              +rdkafka.h, rdkafka_mock.h
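This cdef string (generated from the rdkafka.h and rdkafka_mock.h headers listed above) is the set of C declarations an FFI-based binding hands to PHP's FFI layer. Below is a minimal sketch of that idea only; the shared-library name and variable names are assumptions for illustration, not part of this library's API.

<?php
// Illustrative sketch: bind librdkafka through PHP FFI using a cdef string
// such as RD_KAFKA_CDEF. The shared-library name below is an assumption.
$ffi = \FFI::cdef(RD_KAFKA_CDEF, 'librdkafka.so.1');

// Call plain C functions declared in the cdef, e.g. the version helpers.
printf(
    "librdkafka %s (0x%08x)\n",
    \FFI::string($ffi->rd_kafka_version_str()),
    $ffi->rd_kafka_version()
);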

              +

              RD_KAFKA_VERSION

              +
              public RD_KAFKA_VERSION = 17367807
              +
              +

              librdkafka version

              +

              Interpreted as hex MM.mm.rr.xx:

              +
• MM = Major
• mm = minor
• rr = revision
• xx = pre-release id (0xff is the final release)

              E.g.: 0x000801ff = 0.8.1

              +
              Remarks
This value should only be used at compile time; for runtime version checks, use rd_kafka_version().
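As a worked example of that layout, the documented value 17367807 is 0x010902ff, i.e. librdkafka 1.9.2 (final release). A small pure-PHP sketch of the decoding (the helper name is illustrative):

<?php
// Decode the MM.mm.rr.xx hex layout described above (illustrative helper).
function decodeRdKafkaVersion(int $version): string
{
    $major      = ($version >> 24) & 0xff; // MM
    $minor      = ($version >> 16) & 0xff; // mm
    $revision   = ($version >> 8)  & 0xff; // rr
    $preRelease = $version         & 0xff; // xx, 0xff means final release

    return sprintf('%d.%d.%d (pre-release id 0x%02x)', $major, $minor, $revision, $preRelease);
}

echo decodeRdKafkaVersion(17367807), PHP_EOL; // 1.9.2 (pre-release id 0xff)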

              RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE

              +
              public RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76
              +
              +

              Unsupported compression type
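The RD_KAFKA_RESP_ERR_* constants mirror librdkafka's rd_kafka_resp_err_t values, so an error code reported by the client can be compared against them directly. A minimal sketch; the helper function is hypothetical, and only constants from this listing (RD_KAFKA_RESP_ERR_NO_ERROR is 0 in the enum above) are relied on:

<?php
// Hypothetical helper: map a raw librdkafka error code to a log message.
// RD_KAFKA_RESP_ERR_NO_ERROR (0) and RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE (76)
// come from the constants listing; everything else is illustrative.
function describeKafkaError(int $err): string
{
    if ($err === RD_KAFKA_RESP_ERR_NO_ERROR) {
        return 'ok';
    }

    if ($err === RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE) {
        return 'broker rejected the compression codec (error 76)';
    }

    return sprintf('kafka error code %d', $err);
}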


              RD_KAFKA_RESP_ERR_END_ALL

              +

              public RD_KAFKA_RESP_ERR_END_ALL = 98
              +
              +enum rd_kafka_resp_err_t

              +

              RD_KAFKA_ADMIN_OP__CNT

              +
              public RD_KAFKA_ADMIN_OP__CNT = 12
              +
              +

              Number of ops defined


              RD_KAFKA_CDEF

              +

              public RD_KAFKA_CDEF = 'typedef long int ssize_t;
              +typedef struct _IO_FILE FILE;
              +typedef long int mode_t;
              +typedef signed int int16_t;
              +typedef unsigned int uint16_t;
              +typedef signed int int32_t;
              +typedef signed long int int64_t;
              +int rd_kafka_version(void);
              +const char *rd_kafka_version_str(void);
              +typedef enum rd_kafka_type_t {
              +  RD_KAFKA_PRODUCER,
              +  RD_KAFKA_CONSUMER,
              +} rd_kafka_type_t;
              +typedef enum rd_kafka_timestamp_type_t {
              +  RD_KAFKA_TIMESTAMP_NOT_AVAILABLE,
              +  RD_KAFKA_TIMESTAMP_CREATE_TIME,
              +  RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME,
              +} rd_kafka_timestamp_type_t;
              +const char *rd_kafka_get_debug_contexts(void);
              +typedef struct rd_kafka_s rd_kafka_t;
              +typedef struct rd_kafka_topic_s rd_kafka_topic_t;
              +typedef struct rd_kafka_conf_s rd_kafka_conf_t;
              +typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t;
              +typedef struct rd_kafka_queue_s rd_kafka_queue_t;
              +typedef struct rd_kafka_op_s rd_kafka_event_t;
              +typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t;
              +typedef struct rd_kafka_consumer_group_metadata_s rd_kafka_consumer_group_metadata_t;
              +typedef struct rd_kafka_error_s {
              +  unsigned int code;
              +  char *errstr;
              +  unsigned char fatal;
              +  unsigned char retriable;
              +  unsigned char txn_requires_abort;
              +} rd_kafka_error_t;
              +typedef struct rd_kafka_headers_s rd_kafka_headers_t;
              +typedef struct rd_kafka_group_result_s rd_kafka_group_result_t;
              +typedef struct rd_kafka_acl_result_s rd_kafka_acl_result_t;
              +typedef enum {
              +  RD_KAFKA_RESP_ERR__BEGIN = (- 200),
              +  RD_KAFKA_RESP_ERR__BAD_MSG = (- 199),
              +  RD_KAFKA_RESP_ERR__BAD_COMPRESSION = (- 198),
              +  RD_KAFKA_RESP_ERR__DESTROY = (- 197),
              +  RD_KAFKA_RESP_ERR__FAIL = (- 196),
              +  RD_KAFKA_RESP_ERR__TRANSPORT = (- 195),
              +  RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = (- 194),
              +  RD_KAFKA_RESP_ERR__RESOLVE = (- 193),
              +  RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = (- 192),
              +  RD_KAFKA_RESP_ERR__PARTITION_EOF = (- 191),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = (- 190),
              +  RD_KAFKA_RESP_ERR__FS = (- 189),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = (- 188),
              +  RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = (- 187),
              +  RD_KAFKA_RESP_ERR__INVALID_ARG = (- 186),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT = (- 185),
              +  RD_KAFKA_RESP_ERR__QUEUE_FULL = (- 184),
              +  RD_KAFKA_RESP_ERR__ISR_INSUFF = (- 183),
              +  RD_KAFKA_RESP_ERR__NODE_UPDATE = (- 182),
              +  RD_KAFKA_RESP_ERR__SSL = (- 181),
              +  RD_KAFKA_RESP_ERR__WAIT_COORD = (- 180),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = (- 179),
              +  RD_KAFKA_RESP_ERR__IN_PROGRESS = (- 178),
              +  RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = (- 177),
              +  RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = (- 176),
              +  RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = (- 175),
              +  RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = (- 174),
              +  RD_KAFKA_RESP_ERR__CONFLICT = (- 173),
              +  RD_KAFKA_RESP_ERR__STATE = (- 172),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = (- 171),
              +  RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = (- 170),
              +  RD_KAFKA_RESP_ERR__AUTHENTICATION = (- 169),
              +  RD_KAFKA_RESP_ERR__NO_OFFSET = (- 168),
              +  RD_KAFKA_RESP_ERR__OUTDATED = (- 167),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = (- 166),
              +  RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = (- 165),
              +  RD_KAFKA_RESP_ERR__WAIT_CACHE = (- 164),
              +  RD_KAFKA_RESP_ERR__INTR = (- 163),
              +  RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = (- 162),
              +  RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = (- 161),
              +  RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = (- 160),
              +  RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = (- 159),
              +  RD_KAFKA_RESP_ERR__PARTIAL = (- 158),
              +  RD_KAFKA_RESP_ERR__READ_ONLY = (- 157),
              +  RD_KAFKA_RESP_ERR__NOENT = (- 156),
              +  RD_KAFKA_RESP_ERR__UNDERFLOW = (- 155),
              +  RD_KAFKA_RESP_ERR__INVALID_TYPE = (- 154),
              +  RD_KAFKA_RESP_ERR__RETRY = (- 153),
              +  RD_KAFKA_RESP_ERR__PURGE_QUEUE = (- 152),
              +  RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = (- 151),
              +  RD_KAFKA_RESP_ERR__FATAL = (- 150),
              +  RD_KAFKA_RESP_ERR__INCONSISTENT = (- 149),
              +  RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = (- 148),
              +  RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = (- 147),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_BROKER = (- 146),
              +  RD_KAFKA_RESP_ERR__NOT_CONFIGURED = (- 145),
              +  RD_KAFKA_RESP_ERR__FENCED = (- 144),
              +  RD_KAFKA_RESP_ERR__APPLICATION = (- 143),
              +  RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST = (- 142),
              +  RD_KAFKA_RESP_ERR__NOOP = (- 141),
              +  RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET = (- 140),
              +  RD_KAFKA_RESP_ERR__END = (- 100),
              +  RD_KAFKA_RESP_ERR_UNKNOWN = (- 1),
              +  RD_KAFKA_RESP_ERR_NO_ERROR = 0,
              +  RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG = 2,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4,
              +  RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5,
              +  RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6,
              +  RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7,
              +  RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8,
              +  RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9,
              +  RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10,
              +  RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11,
              +  RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12,
              +  RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS = 14,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE = 15,
              +  RD_KAFKA_RESP_ERR_NOT_COORDINATOR = 16,
              +  RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17,
              +  RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
              +  RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25,
              +  RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26,
              +  RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27,
              +  RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
              +  RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29,
              +  RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30,
              +  RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
              +  RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35,
              +  RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36,
              +  RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39,
              +  RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40,
              +  RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
              +  RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44,
              +  RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47,
              +  RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49,
              +  RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50,
              +  RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51,
              +  RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52,
              +  RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
              +  RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54,
              +  RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55,
              +  RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56,
              +  RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57,
              +  RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59,
              +  RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66,
              +  RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67,
              +  RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68,
              +  RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69,
              +  RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70,
              +  RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71,
              +  RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72,
              +  RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73,
              +  RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76,
              +  RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77,
              +  RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78,
              +  RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79,
              +  RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80,
              +  RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81,
              +  RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID = 82,
              +  RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE = 83,
              +  RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED = 84,
              +  RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS = 85,
              +  RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC = 86,
              +  RD_KAFKA_RESP_ERR_INVALID_RECORD = 87,
              +  RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT = 88,
              +  RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED = 89,
              +  RD_KAFKA_RESP_ERR_PRODUCER_FENCED = 90,
              +  RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND = 91,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE = 92,
              +  RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL = 93,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET = 94,
              +  RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION = 95,
              +  RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED = 96,
              +  RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE = 97,
              +  RD_KAFKA_RESP_ERR_END_ALL,
              +} rd_kafka_resp_err_t;
              +struct rd_kafka_err_desc {
              +  rd_kafka_resp_err_t code;
              +  char *name;
              +  char *desc;
              +};
              +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, size_t *cntp);
              +const char *rd_kafka_err2str(rd_kafka_resp_err_t err);
              +const char *rd_kafka_err2name(rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_last_error(void);
              +rd_kafka_resp_err_t rd_kafka_errno2err(int errnox);
              +int rd_kafka_errno(void);
              +rd_kafka_resp_err_t rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason);
              +rd_kafka_resp_err_t rd_kafka_error_code(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_name(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_string(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_fatal(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_retriable(const rd_kafka_error_t *error);
              +int rd_kafka_error_txn_requires_abort(const rd_kafka_error_t *error);
              +void rd_kafka_error_destroy(rd_kafka_error_t *error);
              +rd_kafka_error_t *rd_kafka_error_new(rd_kafka_resp_err_t code, const char *fmt, ...);
              +typedef struct rd_kafka_topic_partition_s {
              +  char *topic;
              +  int32_t partition;
              +  int64_t offset;
              +  void *metadata;
              +  size_t metadata_size;
              +  void *opaque;
              +  rd_kafka_resp_err_t err;
              +  void *_private;
              +} rd_kafka_topic_partition_t;
              +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar);
              +typedef struct rd_kafka_topic_partition_list_s {
              +  int cnt;
              +  int size;
              +  rd_kafka_topic_partition_t *elems;
              +} rd_kafka_topic_partition_list_t;
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size);
              +void rd_kafka_topic_partition_list_destroy(rd_kafka_topic_partition_list_t *rkparlist);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_add_range(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t start, int32_t stop);
              +int rd_kafka_topic_partition_list_del(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +int rd_kafka_topic_partition_list_del_by_idx(rd_kafka_topic_partition_list_t *rktparlist, int idx);
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src);
              +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition, int64_t offset);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(const rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_sort(rd_kafka_topic_partition_list_t *rktparlist, int (*cmp)(const void *a, const void *b, void *cmp_opaque), void *cmp_opaque);
              +typedef enum rd_kafka_vtype_t {
              +  RD_KAFKA_VTYPE_END,
              +  RD_KAFKA_VTYPE_TOPIC,
              +  RD_KAFKA_VTYPE_RKT,
              +  RD_KAFKA_VTYPE_PARTITION,
              +  RD_KAFKA_VTYPE_VALUE,
              +  RD_KAFKA_VTYPE_KEY,
              +  RD_KAFKA_VTYPE_OPAQUE,
              +  RD_KAFKA_VTYPE_MSGFLAGS,
              +  RD_KAFKA_VTYPE_TIMESTAMP,
              +  RD_KAFKA_VTYPE_HEADER,
              +  RD_KAFKA_VTYPE_HEADERS,
              +} rd_kafka_vtype_t;
              +typedef struct rd_kafka_vu_s {
              +  rd_kafka_vtype_t vtype;
              +  union {
              +    char *cstr;
              +    rd_kafka_topic_t *rkt;
              +    int i;
              +    int32_t i32;
              +    int64_t i64;
              +    struct {
              +      void *ptr;
              +      size_t size;
              +    } mem;
              +    struct {
              +      char *name;
              +      void *val;
              +      ssize_t size;
              +    } header;
              +    rd_kafka_headers_t *headers;
              +    void *ptr;
              +    char _pad[64];
              +  } u;
              +} rd_kafka_vu_t;
              +rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count);
              +void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs);
              +rd_kafka_headers_t *rd_kafka_headers_copy(const rd_kafka_headers_t *src);
              +rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, const char *name, ssize_t name_size, const void *value, ssize_t value_size);
              +rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, const char *name);
              +rd_kafka_resp_err_t rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get(const rd_kafka_headers_t *hdrs, size_t idx, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, size_t idx, const char **namep, const void **valuep, size_t *sizep);
              +typedef struct rd_kafka_message_s {
              +  rd_kafka_resp_err_t err;
              +  rd_kafka_topic_t *rkt;
              +  int32_t partition;
              +  void *payload;
              +  size_t len;
              +  void *key;
              +  size_t key_len;
              +  int64_t offset;
              +  void *_private;
              +} rd_kafka_message_t;
              +void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);
              +const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage);
              +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, rd_kafka_timestamp_type_t *tstype);
              +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage);
              +int32_t rd_kafka_message_broker_id(const rd_kafka_message_t *rkmessage);
              +rd_kafka_resp_err_t rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +rd_kafka_resp_err_t rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t *hdrs);
              +size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs);
              +typedef enum {
              +  RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0,
              +  RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1,
              +  RD_KAFKA_MSG_STATUS_PERSISTED = 2,
              +} rd_kafka_msg_status_t;
              +rd_kafka_msg_status_t rd_kafka_message_status(const rd_kafka_message_t *rkmessage);
              +typedef enum {
              +  RD_KAFKA_CONF_UNKNOWN = (- 2),
              +  RD_KAFKA_CONF_INVALID = (- 1),
              +  RD_KAFKA_CONF_OK = 0,
              +} rd_kafka_conf_res_t;
              +rd_kafka_conf_t *rd_kafka_conf_new(void);
              +void rd_kafka_conf_destroy(rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, size_t filter_cnt, const char **filter);
              +const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk);
              +rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events);
              +void rd_kafka_conf_set_background_event_cb(rd_kafka_conf_t *conf, void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque));
              +void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, void (*dr_cb)(rd_kafka_t *rk, void *payload, size_t len, rd_kafka_resp_err_t err, void *opaque, void *msg_opaque));
              +void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, void (*dr_msg_cb)(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_consume_cb(rd_kafka_conf_t *conf, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_rebalance_cb(rd_kafka_conf_t *conf, void (*rebalance_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque));
              +void rd_kafka_conf_set_offset_commit_cb(rd_kafka_conf_t *conf, void (*offset_commit_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque));
              +void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, void (*error_cb)(rd_kafka_t *rk, int err, const char *reason, void *opaque));
              +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, void (*throttle_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque));
              +void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, void (*log_cb)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque));
              +void rd_kafka_conf_set_oauthbearer_token_refresh_cb(rd_kafka_conf_t *conf, void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, const char *oauthbearer_config, void *opaque));
              +void rd_kafka_conf_enable_sasl_queue(rd_kafka_conf_t *conf, int enable);
              +void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, int (*socket_cb)(int domain, int type, int protocol, void *opaque));
              +void rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, int (*connect_cb)(int sockfd, const struct sockaddr *addr, int addrlen, const char *id, void *opaque));
              +void rd_kafka_conf_set_closesocket_cb(rd_kafka_conf_t *conf, int (*closesocket_cb)(int sockfd, void *opaque));
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb(rd_kafka_conf_t *conf, int (*ssl_cert_verify_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int *x509_error, int depth, const char *buf, size_t size, char *errstr, size_t errstr_size, void *opaque));
              +typedef enum rd_kafka_cert_type_t {
              +  RD_KAFKA_CERT_PUBLIC_KEY,
              +  RD_KAFKA_CERT_PRIVATE_KEY,
              +  RD_KAFKA_CERT_CA,
              +  RD_KAFKA_CERT__CNT,
              +} rd_kafka_cert_type_t;
              +typedef enum rd_kafka_cert_enc_t {
              +  RD_KAFKA_CERT_ENC_PKCS12,
              +  RD_KAFKA_CERT_ENC_DER,
              +  RD_KAFKA_CERT_ENC_PEM,
              +  RD_KAFKA_CERT_ENC__CNT,
              +} rd_kafka_cert_enc_t;
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, rd_kafka_cert_type_t cert_type, rd_kafka_cert_enc_t cert_enc, const void *buffer, size_t size, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_engine_callback_data(rd_kafka_conf_t *conf, void *callback_data);
              +void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque);
              +void *rd_kafka_opaque(const rd_kafka_t *rk);
              +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, rd_kafka_topic_conf_t *tconf);
              +rd_kafka_topic_conf_t *rd_kafka_conf_get_default_topic_conf(rd_kafka_conf_t *conf);
              +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp);
              +const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, size_t *cntp);
              +void rd_kafka_conf_dump_free(const char **arr, size_t cnt);
              +void rd_kafka_conf_properties_show(FILE *fp);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf);
              +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk);
              +void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *rkt_opaque);
              +void rd_kafka_topic_conf_set_partitioner_cb(rd_kafka_topic_conf_t *topic_conf, int32_t (*partitioner)(const rd_kafka_topic_t *rkt, const void *keydata, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque));
              +void rd_kafka_topic_conf_set_msg_order_cmp(rd_kafka_topic_conf_t *topic_conf, int (*msg_order_cmp)(const rd_kafka_message_t *a, const rd_kafka_message_t *b));
              +int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, int32_t partition);
              +int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, char *errstr, size_t errstr_size);
              +void rd_kafka_destroy(rd_kafka_t *rk);
              +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags);
              +const char *rd_kafka_name(const rd_kafka_t *rk);
              +rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk);
              +char *rd_kafka_memberid(const rd_kafka_t *rk);
              +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms);
              +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, rd_kafka_topic_conf_t *conf);
              +void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt);
              +const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt);
              +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt);
              +int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms);
              +void rd_kafka_yield(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_pause_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_resume_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high);
              +rd_kafka_resp_err_t rd_kafka_offsets_for_times(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets, int timeout_ms);
              +void *rd_kafka_mem_calloc(rd_kafka_t *rk, size_t num, size_t size);
              +void *rd_kafka_mem_malloc(rd_kafka_t *rk, size_t size);
              +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr);
              +rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk);
              +void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu);
              +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_sasl(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_sasl_background_callbacks_enable(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, const char *topic, int32_t partition);
              +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk);
              +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst);
              +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu);
              +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, int fd, const void *payload, size_t size);
              +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, void (*event_cb)(rd_kafka_t *rk, void *qev_opaque), void *qev_opaque);
              +void rd_kafka_queue_yield(rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
              +rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_seek_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms);
              +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, int timeout_ms);
              +ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +rd_kafka_resp_err_t rd_kafka_offsets_store(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets);
              +rd_kafka_resp_err_t rd_kafka_subscribe(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *topics);
              +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics);
              +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_consumer_close_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consumer_closed(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_incremental_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_error_t *rd_kafka_incremental_unassign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +const char *rd_kafka_rebalance_protocol(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_assignment(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **partitions);
              +int rd_kafka_assignment_lost(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_commit(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_message(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_queue(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_queue_t *rkqu, void (*cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_committed(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata(rd_kafka_t *rk);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new(const char *group_id);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new_with_genid(const char *group_id, int32_t generation_id, const char *member_id, const char *group_instance_id);
              +void rd_kafka_consumer_group_metadata_destroy(rd_kafka_consumer_group_metadata_t *);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_write(const rd_kafka_consumer_group_metadata_t *cgmd, void **bufferp, size_t *sizep);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_read(rd_kafka_consumer_group_metadata_t **cgmdp, const void *buffer, size_t size);
              +int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, void *payload, size_t len, const void *key, size_t keylen, void *msg_opaque);
              +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...);
              +rd_kafka_error_t *rd_kafka_produceva(rd_kafka_t *rk, const rd_kafka_vu_t *vus, size_t cnt);
              +int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, rd_kafka_message_t *rkmessages, int message_cnt);
              +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags);
              +typedef struct rd_kafka_metadata_broker {
              +  int32_t id;
              +  char *host;
              +  int port;
              +} rd_kafka_metadata_broker_t;
              +typedef struct rd_kafka_metadata_partition {
              +  int32_t id;
              +  rd_kafka_resp_err_t err;
              +  int32_t leader;
              +  int replica_cnt;
              +  int32_t *replicas;
              +  int isr_cnt;
              +  int32_t *isrs;
              +} rd_kafka_metadata_partition_t;
              +typedef struct rd_kafka_metadata_topic {
              +  char *topic;
              +  int partition_cnt;
              +  struct rd_kafka_metadata_partition *partitions;
              +  rd_kafka_resp_err_t err;
              +} rd_kafka_metadata_topic_t;
              +typedef struct rd_kafka_metadata {
              +  int broker_cnt;
              +  struct rd_kafka_metadata_broker *brokers;
              +  int topic_cnt;
              +  struct rd_kafka_metadata_topic *topics;
              +  int32_t orig_broker_id;
              +  char *orig_broker_name;
              +} rd_kafka_metadata_t;
              +rd_kafka_resp_err_t rd_kafka_metadata(rd_kafka_t *rk, int all_topics, rd_kafka_topic_t *only_rkt, const struct rd_kafka_metadata **metadatap, int timeout_ms);
              +void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata);
              +struct rd_kafka_group_member_info {
              +  char *member_id;
              +  char *client_id;
              +  char *client_host;
              +  void *member_metadata;
              +  int member_metadata_size;
              +  void *member_assignment;
              +  int member_assignment_size;
              +};
              +struct rd_kafka_group_info {
              +  struct rd_kafka_metadata_broker broker;
              +  char *group;
              +  rd_kafka_resp_err_t err;
              +  char *state;
              +  char *protocol_type;
              +  char *protocol;
              +  struct rd_kafka_group_member_info *members;
              +  int member_cnt;
              +};
              +struct rd_kafka_group_list {
              +  struct rd_kafka_group_info *groups;
              +  int group_cnt;
              +};
              +rd_kafka_resp_err_t rd_kafka_list_groups(rd_kafka_t *rk, const char *group, const struct rd_kafka_group_list **grplistp, int timeout_ms);
              +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist);
              +int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist);
              +void rd_kafka_set_logger(rd_kafka_t *rk, void (*func)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_set_log_level(rd_kafka_t *rk, int level);
              +void rd_kafka_log_print(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +int rd_kafka_outq_len(rd_kafka_t *rk);
              +void rd_kafka_dump(FILE *fp, rd_kafka_t *rk);
              +int rd_kafka_thread_cnt(void);
              +typedef enum rd_kafka_thread_type_t {
              +  RD_KAFKA_THREAD_MAIN,
              +  RD_KAFKA_THREAD_BACKGROUND,
              +  RD_KAFKA_THREAD_BROKER,
              +} rd_kafka_thread_type_t;
              +int rd_kafka_wait_destroyed(int timeout_ms);
              +int rd_kafka_unittest(void);
              +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk);
              +typedef int rd_kafka_event_type_t;
              +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev);
              +void rd_kafka_event_destroy(rd_kafka_event_t *rkev);
              +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev);
              +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, const rd_kafka_message_t **rkmessages, size_t size);
              +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev);
              +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev);
              +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev);
              +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev);
              +int rd_kafka_event_log(rd_kafka_event_t *rkev, const char **fac, const char **str, int *level);
              +int rd_kafka_event_debug_contexts(rd_kafka_event_t *rkev, char *dst, size_t dstsize);
              +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_list_t *rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_t *rd_kafka_event_topic_partition(rd_kafka_event_t *rkev);
              +typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreateAcls_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeAcls_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteAcls_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteRecords_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteGroups_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteConsumerGroupOffsets_result_t;
              +const rd_kafka_CreateTopics_result_t *rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteTopics_result_t *rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreatePartitions_result_t *rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConfigs_result_t *rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConfigs_result_t *rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteRecords_result_t *rd_kafka_event_DeleteRecords_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteGroups_result_t *rd_kafka_event_DeleteGroups_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteConsumerGroupOffsets_result_t *rd_kafka_event_DeleteConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreateAcls_result_t *rd_kafka_event_CreateAcls_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeAcls_result_t *rd_kafka_event_DescribeAcls_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteAcls_result_t *rd_kafka_event_DeleteAcls_result(rd_kafka_event_t *rkev);
              +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms);
              +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms);
              +typedef rd_kafka_resp_err_t (rd_kafka_plugin_f_conf_init_t)(rd_kafka_conf_t *conf, void **plug_opaquep, char *errstr, size_t errstr_size);
              +typedef rd_kafka_conf_res_t (rd_kafka_interceptor_f_on_conf_set_t)(rd_kafka_conf_t *conf, const char *name, const char *val, char *errstr, size_t errstr_size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_dup_t)(rd_kafka_conf_t *new_conf, const rd_kafka_conf_t *old_conf, size_t filter_cnt, const char **filter, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_destroy_t)(void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_new_t)(rd_kafka_t *rk, const rd_kafka_conf_t *conf, void *ic_opaque, char *errstr, size_t errstr_size);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_send_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_acknowledgement_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_consume_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_commit_t)(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_request_sent_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_response_received_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, int64_t rtt, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_start_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_exit_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_new_t *on_new, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_destroy_t *on_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_send_t *on_send, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_consume_t *on_consume, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_commit_t *on_commit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_response_received(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_response_received_t *on_response_received, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres);
              +const rd_kafka_error_t *rd_kafka_group_result_error(const rd_kafka_group_result_t *groupres);
              +const char *rd_kafka_group_result_name(const rd_kafka_group_result_t *groupres);
              +const rd_kafka_topic_partition_list_t *rd_kafka_group_result_partitions(const rd_kafka_group_result_t *groupres);
              +typedef enum rd_kafka_admin_op_t {
              +  RD_KAFKA_ADMIN_OP_ANY = 0,
              +  RD_KAFKA_ADMIN_OP_CREATETOPICS,
              +  RD_KAFKA_ADMIN_OP_DELETETOPICS,
              +  RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONFIGS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS,
              +  RD_KAFKA_ADMIN_OP_DELETERECORDS,
              +  RD_KAFKA_ADMIN_OP_DELETEGROUPS,
              +  RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS,
              +  RD_KAFKA_ADMIN_OP_CREATEACLS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBEACLS,
              +  RD_KAFKA_ADMIN_OP_DELETEACLS,
              +  RD_KAFKA_ADMIN_OP__CNT,
              +} rd_kafka_admin_op_t;
              +typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t;
              +rd_kafka_AdminOptions_t *rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
              +void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, int true_or_false, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, int32_t broker_id, char *errstr, size_t errstr_size);
              +void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, void *ev_opaque);
              +typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t;
              +rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, int num_partitions, int replication_factor, char *errstr, size_t errstr_size);
              +void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic);
              +void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, int32_t partition, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, const char *name, const char *value);
              +void rd_kafka_CreateTopics(rd_kafka_t *rk, rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(const rd_kafka_CreateTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
              +rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic);
              +void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic);
              +void rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt);
              +void rd_kafka_DeleteTopics(rd_kafka_t *rk, rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(const rd_kafka_DeleteTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t;
              +rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new(const char *topic, size_t new_total_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts);
              +void rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment(rd_kafka_NewPartitions_t *new_parts, int32_t new_partition_idx, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_CreatePartitions(rd_kafka_t *rk, rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreatePartitions_result_topics(const rd_kafka_CreatePartitions_result_t *result, size_t *cntp);
              +typedef enum rd_kafka_ConfigSource_t {
              +  RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
              +  RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4,
              +  RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5,
              +  RD_KAFKA_CONFIG_SOURCE__CNT,
              +} rd_kafka_ConfigSource_t;
              +const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource);
              +typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t;
              +const char *rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry);
              +const char *rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry);
              +rd_kafka_ConfigSource_t rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, size_t *cntp);
              +typedef enum rd_kafka_ResourceType_t {
              +  RD_KAFKA_RESOURCE_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_ANY = 1,
              +  RD_KAFKA_RESOURCE_TOPIC = 2,
              +  RD_KAFKA_RESOURCE_GROUP = 3,
              +  RD_KAFKA_RESOURCE_BROKER = 4,
              +  RD_KAFKA_RESOURCE__CNT,
              +} rd_kafka_ResourceType_t;
              +typedef enum rd_kafka_ResourcePatternType_t {
              +  RD_KAFKA_RESOURCE_PATTERN_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_PATTERN_ANY = 1,
              +  RD_KAFKA_RESOURCE_PATTERN_MATCH = 2,
              +  RD_KAFKA_RESOURCE_PATTERN_LITERAL = 3,
              +  RD_KAFKA_RESOURCE_PATTERN_PREFIXED = 4,
              +  RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT,
              +} rd_kafka_ResourcePatternType_t;
              +const char *rd_kafka_ResourcePatternType_name(rd_kafka_ResourcePatternType_t resource_pattern_type);
              +const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype);
              +typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
              +rd_kafka_ConfigResource_t *rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, const char *resname);
              +void rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config);
              +void rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, size_t config_cnt);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, const char *name, const char *value);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, size_t *cntp);
              +rd_kafka_ResourceType_t rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config);
              +void rd_kafka_AlterConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_AlterConfigs_result_resources(const rd_kafka_AlterConfigs_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_DescribeConfigs_result_resources(const rd_kafka_DescribeConfigs_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteRecords_s rd_kafka_DeleteRecords_t;
              +rd_kafka_DeleteRecords_t *rd_kafka_DeleteRecords_new(const rd_kafka_topic_partition_list_t *before_offsets);
              +void rd_kafka_DeleteRecords_destroy(rd_kafka_DeleteRecords_t *del_records);
              +void rd_kafka_DeleteRecords_destroy_array(rd_kafka_DeleteRecords_t **del_records, size_t del_record_cnt);
              +void rd_kafka_DeleteRecords(rd_kafka_t *rk, rd_kafka_DeleteRecords_t **del_records, size_t del_record_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_partition_list_t *rd_kafka_DeleteRecords_result_offsets(const rd_kafka_DeleteRecords_result_t *result);
              +typedef struct rd_kafka_DeleteGroup_s rd_kafka_DeleteGroup_t;
              +rd_kafka_DeleteGroup_t *rd_kafka_DeleteGroup_new(const char *group);
              +void rd_kafka_DeleteGroup_destroy(rd_kafka_DeleteGroup_t *del_group);
              +void rd_kafka_DeleteGroup_destroy_array(rd_kafka_DeleteGroup_t **del_groups, size_t del_group_cnt);
              +void rd_kafka_DeleteGroups(rd_kafka_t *rk, rd_kafka_DeleteGroup_t **del_groups, size_t del_group_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_DeleteGroups_result_groups(const rd_kafka_DeleteGroups_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteConsumerGroupOffsets_s rd_kafka_DeleteConsumerGroupOffsets_t;
              +rd_kafka_DeleteConsumerGroupOffsets_t *rd_kafka_DeleteConsumerGroupOffsets_new(const char *group, const rd_kafka_topic_partition_list_t *partitions);
              +void rd_kafka_DeleteConsumerGroupOffsets_destroy(rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets);
              +void rd_kafka_DeleteConsumerGroupOffsets_destroy_array(rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, size_t del_grpoffset_cnt);
              +void rd_kafka_DeleteConsumerGroupOffsets(rd_kafka_t *rk, rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, size_t del_grpoffsets_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_DeleteConsumerGroupOffsets_result_groups(const rd_kafka_DeleteConsumerGroupOffsets_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_AclBinding_s rd_kafka_AclBinding_t;
              +typedef rd_kafka_AclBinding_t rd_kafka_AclBindingFilter_t;
              +const rd_kafka_error_t *rd_kafka_acl_result_error(const rd_kafka_acl_result_t *aclres);
              +typedef enum rd_kafka_AclOperation_t {
              +  RD_KAFKA_ACL_OPERATION_UNKNOWN = 0,
              +  RD_KAFKA_ACL_OPERATION_ANY = 1,
              +  RD_KAFKA_ACL_OPERATION_ALL = 2,
              +  RD_KAFKA_ACL_OPERATION_READ = 3,
              +  RD_KAFKA_ACL_OPERATION_WRITE = 4,
              +  RD_KAFKA_ACL_OPERATION_CREATE = 5,
              +  RD_KAFKA_ACL_OPERATION_DELETE = 6,
              +  RD_KAFKA_ACL_OPERATION_ALTER = 7,
              +  RD_KAFKA_ACL_OPERATION_DESCRIBE = 8,
              +  RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION = 9,
              +  RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS = 10,
              +  RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS = 11,
              +  RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE = 12,
              +  RD_KAFKA_ACL_OPERATION__CNT,
              +} rd_kafka_AclOperation_t;
              +const char *rd_kafka_AclOperation_name(rd_kafka_AclOperation_t acl_operation);
              +typedef enum rd_kafka_AclPermissionType_t {
              +  RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN = 0,
              +  RD_KAFKA_ACL_PERMISSION_TYPE_ANY = 1,
              +  RD_KAFKA_ACL_PERMISSION_TYPE_DENY = 2,
              +  RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW = 3,
              +  RD_KAFKA_ACL_PERMISSION_TYPE__CNT,
              +} rd_kafka_AclPermissionType_t;
              +const char *rd_kafka_AclPermissionType_name(rd_kafka_AclPermissionType_t acl_permission_type);
              +rd_kafka_AclBinding_t *rd_kafka_AclBinding_new(rd_kafka_ResourceType_t restype, const char *name, rd_kafka_ResourcePatternType_t resource_pattern_type, const char *principal, const char *host, rd_kafka_AclOperation_t operation, rd_kafka_AclPermissionType_t permission_type, char *errstr, size_t errstr_size);
              +rd_kafka_AclBindingFilter_t *rd_kafka_AclBindingFilter_new(rd_kafka_ResourceType_t restype, const char *name, rd_kafka_ResourcePatternType_t resource_pattern_type, const char *principal, const char *host, rd_kafka_AclOperation_t operation, rd_kafka_AclPermissionType_t permission_type, char *errstr, size_t errstr_size);
              +rd_kafka_ResourceType_t rd_kafka_AclBinding_restype(const rd_kafka_AclBinding_t *acl);
              +const char *rd_kafka_AclBinding_name(const rd_kafka_AclBinding_t *acl);
              +const char *rd_kafka_AclBinding_principal(const rd_kafka_AclBinding_t *acl);
              +const char *rd_kafka_AclBinding_host(const rd_kafka_AclBinding_t *acl);
              +rd_kafka_AclOperation_t rd_kafka_AclBinding_operation(const rd_kafka_AclBinding_t *acl);
              +rd_kafka_AclPermissionType_t rd_kafka_AclBinding_permission_type(const rd_kafka_AclBinding_t *acl);
              +rd_kafka_ResourcePatternType_t rd_kafka_AclBinding_resource_pattern_type(const rd_kafka_AclBinding_t *acl);
              +const rd_kafka_error_t *rd_kafka_AclBinding_error(const rd_kafka_AclBinding_t *acl);
              +void rd_kafka_AclBinding_destroy(rd_kafka_AclBinding_t *acl_binding);
              +void rd_kafka_AclBinding_destroy_array(rd_kafka_AclBinding_t **acl_bindings, size_t acl_bindings_cnt);
              +const rd_kafka_acl_result_t **rd_kafka_CreateAcls_result_acls(const rd_kafka_CreateAcls_result_t *result, size_t *cntp);
              +void rd_kafka_CreateAcls(rd_kafka_t *rk, rd_kafka_AclBinding_t **new_acls, size_t new_acls_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_AclBinding_t **rd_kafka_DescribeAcls_result_acls(const rd_kafka_DescribeAcls_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeAcls(rd_kafka_t *rk, rd_kafka_AclBindingFilter_t *acl_filter, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +typedef struct rd_kafka_DeleteAcls_result_response_s rd_kafka_DeleteAcls_result_response_t;
              +const rd_kafka_DeleteAcls_result_response_t **rd_kafka_DeleteAcls_result_responses(const rd_kafka_DeleteAcls_result_t *result, size_t *cntp);
              +const rd_kafka_error_t *rd_kafka_DeleteAcls_result_response_error(const rd_kafka_DeleteAcls_result_response_t *result_response);
              +const rd_kafka_AclBinding_t **rd_kafka_DeleteAcls_result_response_matching_acls(const rd_kafka_DeleteAcls_result_response_t *result_response, size_t *matching_acls_cntp);
              +void rd_kafka_DeleteAcls(rd_kafka_t *rk, rd_kafka_AclBindingFilter_t **del_acls, size_t del_acls_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token(rd_kafka_t *rk, const char *token_value, int64_t md_lifetime_ms, const char *md_principal_name, const char **extensions, size_t extension_size, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk, const char *errstr);
              +rd_kafka_error_t *rd_kafka_init_transactions(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_send_offsets_to_transaction(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, const rd_kafka_consumer_group_metadata_t *cgmetadata, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_commit_transaction(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_abort_transaction(rd_kafka_t *rk, int timeout_ms);
              +typedef struct rd_kafka_mock_cluster_s rd_kafka_mock_cluster_t;
              +rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new(rd_kafka_t *rk, int broker_cnt);
              +void rd_kafka_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_t *rd_kafka_mock_cluster_handle(const rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_mock_cluster_t *rd_kafka_handle_mock_cluster(const rd_kafka_t *rk);
              +const char *rd_kafka_mock_cluster_bootstraps(const rd_kafka_mock_cluster_t *mcluster);
              +void rd_kafka_mock_clear_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey);
              +void rd_kafka_mock_push_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, ...);
              +void rd_kafka_mock_push_request_errors_array(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, const rd_kafka_resp_err_t *errors);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_push_request_error_rtts(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int16_t ApiKey, size_t cnt, ...);
              +void rd_kafka_mock_topic_set_error(rd_kafka_mock_cluster_t *mcluster, const char *topic, rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_mock_topic_create(rd_kafka_mock_cluster_t *mcluster, const char *topic, int partition_cnt, int replication_factor);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_leader(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int64_t lo, int64_t hi);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_down(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_up(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rtt(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int rtt_ms);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rack(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, const char *rack);
              +rd_kafka_resp_err_t rd_kafka_mock_coordinator_set(rd_kafka_mock_cluster_t *mcluster, const char *key_type, const char *key, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_set_apiversion(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, int16_t MinVersion, int16_t MaxVersion);
              +'
              +
              +rdkafka.h, rdkafka_mock.h
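
The declarations above are the tail end of the raw librdkafka definitions (taken from rdkafka.h and rdkafka_mock.h) that this binding is built on. Purely as orientation, the following minimal C sketch shows how a few of the declared consumer calls fit together; the broker address, group id and topic name are placeholder values, and error handling is reduced to a bare minimum rather than being production-ready.

#include <stdio.h>
#include <librdkafka/rdkafka.h>

int main(void) {
    char errstr[512];

    /* Configure and create a high-level consumer (all names below are placeholders). */
    rd_kafka_conf_t *conf = rd_kafka_conf_new();
    rd_kafka_conf_set(conf, "bootstrap.servers", "localhost:9092", errstr, sizeof(errstr));
    rd_kafka_conf_set(conf, "group.id", "example-group", errstr, sizeof(errstr));

    rd_kafka_t *rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
    if (!rk) {
        fprintf(stderr, "rd_kafka_new failed: %s\n", errstr);
        return 1;
    }
    rd_kafka_poll_set_consumer(rk);              /* route events to the consumer queue */

    /* Subscribe to a single topic. */
    rd_kafka_topic_partition_list_t *topics = rd_kafka_topic_partition_list_new(1);
    rd_kafka_topic_partition_list_add(topics, "example-topic", -1 /* unassigned partition */);
    rd_kafka_subscribe(rk, topics);
    rd_kafka_topic_partition_list_destroy(topics);

    /* Poll a handful of messages, then shut down. */
    for (int i = 0; i < 10; i++) {
        rd_kafka_message_t *msg = rd_kafka_consumer_poll(rk, 1000 /* ms */);
        if (!msg)
            continue;                            /* poll timeout, no message */
        if (msg->err)
            fprintf(stderr, "consume error: %s\n", rd_kafka_message_errstr(msg));
        else
            printf("offset %lld: %.*s\n", (long long)msg->offset,
                   (int)msg->len, (const char *)msg->payload);
        rd_kafka_message_destroy(msg);
    }

    rd_kafka_consumer_close(rk);
    rd_kafka_destroy(rk);
    return 0;
}
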

              +

              RD_KAFKA_VERSION

              +
              public RD_KAFKA_VERSION = 33554687
              +
              +

              librdkafka version

              +

              Interpreted as hex MM.mm.rr.xx:

              +
• MM = Major
• mm = minor
• rr = revision
• xx = pre-release id (0xff is the final release)
              +

              E.g.: 0x000801ff = 0.8.1

              +
              Remarks
              This value should only be used at compile time; for runtime version checks, use rd_kafka_version()
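
A small C sketch of that decoding, using rd_kafka_version() and rd_kafka_version_str() as suggested in the remark above (each byte of the integer is masked out in MM.mm.rr.xx order):

#include <stdio.h>
#include <librdkafka/rdkafka.h>

int main(void) {
    int v = rd_kafka_version();          /* runtime version as 0xMMmmrrxx           */
    printf("librdkafka %d.%d.%d (pre-release id 0x%02x) - \"%s\"\n",
           (v >> 24) & 0xff,             /* MM = major                              */
           (v >> 16) & 0xff,             /* mm = minor                              */
           (v >> 8)  & 0xff,             /* rr = revision                           */
           v & 0xff,                     /* xx = pre-release id, 0xff means final   */
           rd_kafka_version_str());      /* human-readable version string           */
    return 0;
}
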

              RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE

              +
              public RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76
              +
              +

              Unsupported compression type
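
As a quick, non-authoritative illustration of how such a numeric code is typically turned into something readable, librdkafka's rd_kafka_err2name() and rd_kafka_err2str() map a rd_kafka_resp_err_t value to its symbolic name and description:

#include <stdio.h>
#include <librdkafka/rdkafka.h>

int main(void) {
    rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE; /* = 76 */
    printf("%d -> %s: %s\n", (int)err, rd_kafka_err2name(err), rd_kafka_err2str(err));
    return 0;
}
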


              RD_KAFKA_RESP_ERR_END_ALL

              +

              public RD_KAFKA_RESP_ERR_END_ALL = 98
              +
              +enum rd_kafka_resp_err_t

              +

              RD_KAFKA_ADMIN_OP__CNT

              +
              public RD_KAFKA_ADMIN_OP__CNT = 16
              +
              +

              Number of ops defined


              RD_KAFKA_CDEF

              +

              public RD_KAFKA_CDEF = 'typedef long int ssize_t;
              +typedef struct _IO_FILE FILE;
              +typedef long int mode_t;
              +typedef signed int int16_t;
              +typedef unsigned int uint16_t;
              +typedef signed int int32_t;
              +typedef signed long int int64_t;
              +int rd_kafka_version(void);
              +const char *rd_kafka_version_str(void);
              +typedef enum rd_kafka_type_t {
              +  RD_KAFKA_PRODUCER,
              +  RD_KAFKA_CONSUMER,
              +} rd_kafka_type_t;
              +typedef enum rd_kafka_timestamp_type_t {
              +  RD_KAFKA_TIMESTAMP_NOT_AVAILABLE,
              +  RD_KAFKA_TIMESTAMP_CREATE_TIME,
              +  RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME,
              +} rd_kafka_timestamp_type_t;
              +const char *rd_kafka_get_debug_contexts(void);
              +typedef struct rd_kafka_s rd_kafka_t;
              +typedef struct rd_kafka_topic_s rd_kafka_topic_t;
              +typedef struct rd_kafka_conf_s rd_kafka_conf_t;
              +typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t;
              +typedef struct rd_kafka_queue_s rd_kafka_queue_t;
              +typedef struct rd_kafka_op_s rd_kafka_event_t;
              +typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t;
              +typedef struct rd_kafka_consumer_group_metadata_s rd_kafka_consumer_group_metadata_t;
              +typedef struct rd_kafka_error_s {
              +  unsigned int code;
              +  char *errstr;
              +  unsigned char fatal;
              +  unsigned char retriable;
              +  unsigned char txn_requires_abort;
              +} rd_kafka_error_t;
              +typedef struct rd_kafka_headers_s rd_kafka_headers_t;
              +typedef struct rd_kafka_group_result_s rd_kafka_group_result_t;
              +typedef struct rd_kafka_acl_result_s rd_kafka_acl_result_t;
              +typedef enum {
              +  RD_KAFKA_RESP_ERR__BEGIN = (- 200),
              +  RD_KAFKA_RESP_ERR__BAD_MSG = (- 199),
              +  RD_KAFKA_RESP_ERR__BAD_COMPRESSION = (- 198),
              +  RD_KAFKA_RESP_ERR__DESTROY = (- 197),
              +  RD_KAFKA_RESP_ERR__FAIL = (- 196),
              +  RD_KAFKA_RESP_ERR__TRANSPORT = (- 195),
              +  RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = (- 194),
              +  RD_KAFKA_RESP_ERR__RESOLVE = (- 193),
              +  RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = (- 192),
              +  RD_KAFKA_RESP_ERR__PARTITION_EOF = (- 191),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = (- 190),
              +  RD_KAFKA_RESP_ERR__FS = (- 189),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = (- 188),
              +  RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = (- 187),
              +  RD_KAFKA_RESP_ERR__INVALID_ARG = (- 186),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT = (- 185),
              +  RD_KAFKA_RESP_ERR__QUEUE_FULL = (- 184),
              +  RD_KAFKA_RESP_ERR__ISR_INSUFF = (- 183),
              +  RD_KAFKA_RESP_ERR__NODE_UPDATE = (- 182),
              +  RD_KAFKA_RESP_ERR__SSL = (- 181),
              +  RD_KAFKA_RESP_ERR__WAIT_COORD = (- 180),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = (- 179),
              +  RD_KAFKA_RESP_ERR__IN_PROGRESS = (- 178),
              +  RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = (- 177),
              +  RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = (- 176),
              +  RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = (- 175),
              +  RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = (- 174),
              +  RD_KAFKA_RESP_ERR__CONFLICT = (- 173),
              +  RD_KAFKA_RESP_ERR__STATE = (- 172),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = (- 171),
              +  RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = (- 170),
              +  RD_KAFKA_RESP_ERR__AUTHENTICATION = (- 169),
              +  RD_KAFKA_RESP_ERR__NO_OFFSET = (- 168),
              +  RD_KAFKA_RESP_ERR__OUTDATED = (- 167),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = (- 166),
              +  RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = (- 165),
              +  RD_KAFKA_RESP_ERR__WAIT_CACHE = (- 164),
              +  RD_KAFKA_RESP_ERR__INTR = (- 163),
              +  RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = (- 162),
              +  RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = (- 161),
              +  RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = (- 160),
              +  RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = (- 159),
              +  RD_KAFKA_RESP_ERR__PARTIAL = (- 158),
              +  RD_KAFKA_RESP_ERR__READ_ONLY = (- 157),
              +  RD_KAFKA_RESP_ERR__NOENT = (- 156),
              +  RD_KAFKA_RESP_ERR__UNDERFLOW = (- 155),
              +  RD_KAFKA_RESP_ERR__INVALID_TYPE = (- 154),
              +  RD_KAFKA_RESP_ERR__RETRY = (- 153),
              +  RD_KAFKA_RESP_ERR__PURGE_QUEUE = (- 152),
              +  RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = (- 151),
              +  RD_KAFKA_RESP_ERR__FATAL = (- 150),
              +  RD_KAFKA_RESP_ERR__INCONSISTENT = (- 149),
              +  RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = (- 148),
              +  RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = (- 147),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_BROKER = (- 146),
              +  RD_KAFKA_RESP_ERR__NOT_CONFIGURED = (- 145),
              +  RD_KAFKA_RESP_ERR__FENCED = (- 144),
              +  RD_KAFKA_RESP_ERR__APPLICATION = (- 143),
              +  RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST = (- 142),
              +  RD_KAFKA_RESP_ERR__NOOP = (- 141),
              +  RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET = (- 140),
              +  RD_KAFKA_RESP_ERR__END = (- 100),
              +  RD_KAFKA_RESP_ERR_UNKNOWN = (- 1),
              +  RD_KAFKA_RESP_ERR_NO_ERROR = 0,
              +  RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG = 2,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4,
              +  RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5,
              +  RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6,
              +  RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7,
              +  RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8,
              +  RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9,
              +  RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10,
              +  RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11,
              +  RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12,
              +  RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS = 14,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE = 15,
              +  RD_KAFKA_RESP_ERR_NOT_COORDINATOR = 16,
              +  RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17,
              +  RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
              +  RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25,
              +  RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26,
              +  RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27,
              +  RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
              +  RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29,
              +  RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30,
              +  RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
              +  RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35,
              +  RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36,
              +  RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39,
              +  RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40,
              +  RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
              +  RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44,
              +  RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47,
              +  RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49,
              +  RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50,
              +  RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51,
              +  RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52,
              +  RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
              +  RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54,
              +  RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55,
              +  RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56,
              +  RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57,
              +  RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59,
              +  RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66,
              +  RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67,
              +  RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68,
              +  RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69,
              +  RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70,
              +  RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71,
              +  RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72,
              +  RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73,
              +  RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76,
              +  RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77,
              +  RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78,
              +  RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79,
              +  RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80,
              +  RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81,
              +  RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID = 82,
              +  RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE = 83,
              +  RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED = 84,
              +  RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS = 85,
              +  RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC = 86,
              +  RD_KAFKA_RESP_ERR_INVALID_RECORD = 87,
              +  RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT = 88,
              +  RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED = 89,
              +  RD_KAFKA_RESP_ERR_PRODUCER_FENCED = 90,
              +  RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND = 91,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE = 92,
              +  RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL = 93,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET = 94,
              +  RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION = 95,
              +  RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED = 96,
              +  RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE = 97,
              +  RD_KAFKA_RESP_ERR_END_ALL,
              +} rd_kafka_resp_err_t;
              +struct rd_kafka_err_desc {
              +  rd_kafka_resp_err_t code;
              +  char *name;
              +  char *desc;
              +};
              +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, size_t *cntp);
              +const char *rd_kafka_err2str(rd_kafka_resp_err_t err);
              +const char *rd_kafka_err2name(rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_last_error(void);
              +rd_kafka_resp_err_t rd_kafka_errno2err(int errnox);
              +int rd_kafka_errno(void);
              +rd_kafka_resp_err_t rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason);
              +rd_kafka_resp_err_t rd_kafka_error_code(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_name(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_string(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_fatal(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_retriable(const rd_kafka_error_t *error);
              +int rd_kafka_error_txn_requires_abort(const rd_kafka_error_t *error);
              +void rd_kafka_error_destroy(rd_kafka_error_t *error);
              +rd_kafka_error_t *rd_kafka_error_new(rd_kafka_resp_err_t code, const char *fmt, ...);
              +typedef struct rd_kafka_topic_partition_s {
              +  char *topic;
              +  int32_t partition;
              +  int64_t offset;
              +  void *metadata;
              +  size_t metadata_size;
              +  void *opaque;
              +  rd_kafka_resp_err_t err;
              +  void *_private;
              +} rd_kafka_topic_partition_t;
              +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar);
              +typedef struct rd_kafka_topic_partition_list_s {
              +  int cnt;
              +  int size;
              +  rd_kafka_topic_partition_t *elems;
              +} rd_kafka_topic_partition_list_t;
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size);
              +void rd_kafka_topic_partition_list_destroy(rd_kafka_topic_partition_list_t *rkparlist);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_add_range(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t start, int32_t stop);
              +int rd_kafka_topic_partition_list_del(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +int rd_kafka_topic_partition_list_del_by_idx(rd_kafka_topic_partition_list_t *rktparlist, int idx);
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src);
              +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition, int64_t offset);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(const rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_sort(rd_kafka_topic_partition_list_t *rktparlist, int (*cmp)(const void *a, const void *b, void *cmp_opaque), void *cmp_opaque);
              +typedef enum rd_kafka_vtype_t {
              +  RD_KAFKA_VTYPE_END,
              +  RD_KAFKA_VTYPE_TOPIC,
              +  RD_KAFKA_VTYPE_RKT,
              +  RD_KAFKA_VTYPE_PARTITION,
              +  RD_KAFKA_VTYPE_VALUE,
              +  RD_KAFKA_VTYPE_KEY,
              +  RD_KAFKA_VTYPE_OPAQUE,
              +  RD_KAFKA_VTYPE_MSGFLAGS,
              +  RD_KAFKA_VTYPE_TIMESTAMP,
              +  RD_KAFKA_VTYPE_HEADER,
              +  RD_KAFKA_VTYPE_HEADERS,
              +} rd_kafka_vtype_t;
              +typedef struct rd_kafka_vu_s {
              +  rd_kafka_vtype_t vtype;
              +  union {
              +    char *cstr;
              +    rd_kafka_topic_t *rkt;
              +    int i;
              +    int32_t i32;
              +    int64_t i64;
              +    struct {
              +      void *ptr;
              +      size_t size;
              +    } mem;
              +    struct {
              +      char *name;
              +      void *val;
              +      ssize_t size;
              +    } header;
              +    rd_kafka_headers_t *headers;
              +    void *ptr;
              +    char _pad[64];
              +  } u;
              +} rd_kafka_vu_t;
              +rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count);
              +void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs);
              +rd_kafka_headers_t *rd_kafka_headers_copy(const rd_kafka_headers_t *src);
              +rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, const char *name, ssize_t name_size, const void *value, ssize_t value_size);
              +rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, const char *name);
              +rd_kafka_resp_err_t rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get(const rd_kafka_headers_t *hdrs, size_t idx, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, size_t idx, const char **namep, const void **valuep, size_t *sizep);
              +typedef struct rd_kafka_message_s {
              +  rd_kafka_resp_err_t err;
              +  rd_kafka_topic_t *rkt;
              +  int32_t partition;
              +  void *payload;
              +  size_t len;
              +  void *key;
              +  size_t key_len;
              +  int64_t offset;
              +  void *_private;
              +} rd_kafka_message_t;
              +void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);
              +const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage);
              +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, rd_kafka_timestamp_type_t *tstype);
              +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage);
              +int32_t rd_kafka_message_broker_id(const rd_kafka_message_t *rkmessage);
              +rd_kafka_resp_err_t rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +rd_kafka_resp_err_t rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t *hdrs);
              +size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs);
              +typedef enum {
              +  RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0,
              +  RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1,
              +  RD_KAFKA_MSG_STATUS_PERSISTED = 2,
              +} rd_kafka_msg_status_t;
              +rd_kafka_msg_status_t rd_kafka_message_status(const rd_kafka_message_t *rkmessage);
              +typedef enum {
              +  RD_KAFKA_CONF_UNKNOWN = (- 2),
              +  RD_KAFKA_CONF_INVALID = (- 1),
              +  RD_KAFKA_CONF_OK = 0,
              +} rd_kafka_conf_res_t;
              +rd_kafka_conf_t *rd_kafka_conf_new(void);
              +void rd_kafka_conf_destroy(rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, size_t filter_cnt, const char **filter);
              +const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk);
              +rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events);
              +void rd_kafka_conf_set_background_event_cb(rd_kafka_conf_t *conf, void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque));
              +void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, void (*dr_cb)(rd_kafka_t *rk, void *payload, size_t len, rd_kafka_resp_err_t err, void *opaque, void *msg_opaque));
              +void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, void (*dr_msg_cb)(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_consume_cb(rd_kafka_conf_t *conf, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_rebalance_cb(rd_kafka_conf_t *conf, void (*rebalance_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque));
              +void rd_kafka_conf_set_offset_commit_cb(rd_kafka_conf_t *conf, void (*offset_commit_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque));
              +void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, void (*error_cb)(rd_kafka_t *rk, int err, const char *reason, void *opaque));
              +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, void (*throttle_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque));
              +void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, void (*log_cb)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque));
              +void rd_kafka_conf_set_oauthbearer_token_refresh_cb(rd_kafka_conf_t *conf, void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, const char *oauthbearer_config, void *opaque));
              +void rd_kafka_conf_enable_sasl_queue(rd_kafka_conf_t *conf, int enable);
              +void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, int (*socket_cb)(int domain, int type, int protocol, void *opaque));
              +void rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, int (*connect_cb)(int sockfd, const struct sockaddr *addr, int addrlen, const char *id, void *opaque));
              +void rd_kafka_conf_set_closesocket_cb(rd_kafka_conf_t *conf, int (*closesocket_cb)(int sockfd, void *opaque));
              +struct addrinfo;
              +void rd_kafka_conf_set_resolve_cb(rd_kafka_conf_t *conf, int (*resolve_cb)(const char *node, const char *service, const struct addrinfo *hints, struct addrinfo **res, void *opaque));
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb(rd_kafka_conf_t *conf, int (*ssl_cert_verify_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int *x509_error, int depth, const char *buf, size_t size, char *errstr, size_t errstr_size, void *opaque));
              +typedef enum rd_kafka_cert_type_t {
              +  RD_KAFKA_CERT_PUBLIC_KEY,
              +  RD_KAFKA_CERT_PRIVATE_KEY,
              +  RD_KAFKA_CERT_CA,
              +  RD_KAFKA_CERT__CNT,
              +} rd_kafka_cert_type_t;
              +typedef enum rd_kafka_cert_enc_t {
              +  RD_KAFKA_CERT_ENC_PKCS12,
              +  RD_KAFKA_CERT_ENC_DER,
              +  RD_KAFKA_CERT_ENC_PEM,
              +  RD_KAFKA_CERT_ENC__CNT,
              +} rd_kafka_cert_enc_t;
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, rd_kafka_cert_type_t cert_type, rd_kafka_cert_enc_t cert_enc, const void *buffer, size_t size, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_engine_callback_data(rd_kafka_conf_t *conf, void *callback_data);
              +void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque);
              +void *rd_kafka_opaque(const rd_kafka_t *rk);
              +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, rd_kafka_topic_conf_t *tconf);
              +rd_kafka_topic_conf_t *rd_kafka_conf_get_default_topic_conf(rd_kafka_conf_t *conf);
              +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp);
              +const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, size_t *cntp);
              +void rd_kafka_conf_dump_free(const char **arr, size_t cnt);
              +void rd_kafka_conf_properties_show(FILE *fp);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf);
              +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk);
              +void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *rkt_opaque);
              +void rd_kafka_topic_conf_set_partitioner_cb(rd_kafka_topic_conf_t *topic_conf, int32_t (*partitioner)(const rd_kafka_topic_t *rkt, const void *keydata, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque));
              +void rd_kafka_topic_conf_set_msg_order_cmp(rd_kafka_topic_conf_t *topic_conf, int (*msg_order_cmp)(const rd_kafka_message_t *a, const rd_kafka_message_t *b));
              +int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, int32_t partition);
              +int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, char *errstr, size_t errstr_size);
              +void rd_kafka_destroy(rd_kafka_t *rk);
              +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags);
              +const char *rd_kafka_name(const rd_kafka_t *rk);
              +rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk);
              +char *rd_kafka_memberid(const rd_kafka_t *rk);
              +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms);
              +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, rd_kafka_topic_conf_t *conf);
              +void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt);
              +const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt);
              +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt);
              +int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms);
              +void rd_kafka_yield(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_pause_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_resume_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high);
              +rd_kafka_resp_err_t rd_kafka_offsets_for_times(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets, int timeout_ms);
              +void *rd_kafka_mem_calloc(rd_kafka_t *rk, size_t num, size_t size);
              +void *rd_kafka_mem_malloc(rd_kafka_t *rk, size_t size);
              +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr);
              +rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk);
              +void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu);
              +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_sasl(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_sasl_background_callbacks_enable(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_sasl_set_credentials(rd_kafka_t *rk, const char *username, const char *password);
              +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, const char *topic, int32_t partition);
              +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk);
              +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst);
              +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu);
              +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, int fd, const void *payload, size_t size);
              +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, void (*event_cb)(rd_kafka_t *rk, void *qev_opaque), void *qev_opaque);
              +void rd_kafka_queue_yield(rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
              +rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_seek_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms);
              +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, int timeout_ms);
              +ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +rd_kafka_resp_err_t rd_kafka_offsets_store(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets);
              +rd_kafka_resp_err_t rd_kafka_subscribe(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *topics);
              +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics);
              +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_consumer_close_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consumer_closed(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_incremental_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_error_t *rd_kafka_incremental_unassign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +const char *rd_kafka_rebalance_protocol(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_assignment(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **partitions);
              +int rd_kafka_assignment_lost(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_commit(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_message(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_queue(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_queue_t *rkqu, void (*cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_committed(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata(rd_kafka_t *rk);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new(const char *group_id);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new_with_genid(const char *group_id, int32_t generation_id, const char *member_id, const char *group_instance_id);
              +void rd_kafka_consumer_group_metadata_destroy(rd_kafka_consumer_group_metadata_t *);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_write(const rd_kafka_consumer_group_metadata_t *cgmd, void **bufferp, size_t *sizep);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_read(rd_kafka_consumer_group_metadata_t **cgmdp, const void *buffer, size_t size);
              +int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, void *payload, size_t len, const void *key, size_t keylen, void *msg_opaque);
              +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...);
              +rd_kafka_error_t *rd_kafka_produceva(rd_kafka_t *rk, const rd_kafka_vu_t *vus, size_t cnt);
              +int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, rd_kafka_message_t *rkmessages, int message_cnt);
              +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags);
              +typedef struct rd_kafka_metadata_broker {
              +  int32_t id;
              +  char *host;
              +  int port;
              +} rd_kafka_metadata_broker_t;
              +typedef struct rd_kafka_metadata_partition {
              +  int32_t id;
              +  rd_kafka_resp_err_t err;
              +  int32_t leader;
              +  int replica_cnt;
              +  int32_t *replicas;
              +  int isr_cnt;
              +  int32_t *isrs;
              +} rd_kafka_metadata_partition_t;
              +typedef struct rd_kafka_metadata_topic {
              +  char *topic;
              +  int partition_cnt;
              +  struct rd_kafka_metadata_partition *partitions;
              +  rd_kafka_resp_err_t err;
              +} rd_kafka_metadata_topic_t;
              +typedef struct rd_kafka_metadata {
              +  int broker_cnt;
              +  struct rd_kafka_metadata_broker *brokers;
              +  int topic_cnt;
              +  struct rd_kafka_metadata_topic *topics;
              +  int32_t orig_broker_id;
              +  char *orig_broker_name;
              +} rd_kafka_metadata_t;
              +rd_kafka_resp_err_t rd_kafka_metadata(rd_kafka_t *rk, int all_topics, rd_kafka_topic_t *only_rkt, const struct rd_kafka_metadata **metadatap, int timeout_ms);
              +void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata);
              +typedef struct rd_kafka_Node_s rd_kafka_Node_t;
              +int rd_kafka_Node_id(const rd_kafka_Node_t *node);
              +const char *rd_kafka_Node_host(const rd_kafka_Node_t *node);
              +uint16_t rd_kafka_Node_port(const rd_kafka_Node_t *node);
              +struct rd_kafka_group_member_info {
              +  char *member_id;
              +  char *client_id;
              +  char *client_host;
              +  void *member_metadata;
              +  int member_metadata_size;
              +  void *member_assignment;
              +  int member_assignment_size;
              +};
              +typedef enum {
              +  RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN = 0,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_PREPARING_REBALANCE = 1,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_COMPLETING_REBALANCE = 2,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_STABLE = 3,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_DEAD = 4,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY = 5,
              +  RD_KAFKA_CONSUMER_GROUP_STATE__CNT,
              +} rd_kafka_consumer_group_state_t;
              +struct rd_kafka_group_info {
              +  struct rd_kafka_metadata_broker broker;
              +  char *group;
              +  rd_kafka_resp_err_t err;
              +  char *state;
              +  char *protocol_type;
              +  char *protocol;
              +  struct rd_kafka_group_member_info *members;
              +  int member_cnt;
              +};
              +struct rd_kafka_group_list {
              +  struct rd_kafka_group_info *groups;
              +  int group_cnt;
              +};
              +rd_kafka_resp_err_t rd_kafka_list_groups(rd_kafka_t *rk, const char *group, const struct rd_kafka_group_list **grplistp, int timeout_ms);
              +const char *rd_kafka_consumer_group_state_name(rd_kafka_consumer_group_state_t state);
              +rd_kafka_consumer_group_state_t rd_kafka_consumer_group_state_code(const char *name);
              +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist);
              +int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist);
              +void rd_kafka_set_logger(rd_kafka_t *rk, void (*func)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_set_log_level(rd_kafka_t *rk, int level);
              +void rd_kafka_log_print(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +int rd_kafka_outq_len(rd_kafka_t *rk);
              +void rd_kafka_dump(FILE *fp, rd_kafka_t *rk);
              +int rd_kafka_thread_cnt(void);
              +typedef enum rd_kafka_thread_type_t {
              +  RD_KAFKA_THREAD_MAIN,
              +  RD_KAFKA_THREAD_BACKGROUND,
              +  RD_KAFKA_THREAD_BROKER,
              +} rd_kafka_thread_type_t;
              +int rd_kafka_wait_destroyed(int timeout_ms);
              +int rd_kafka_unittest(void);
              +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk);
              +typedef int rd_kafka_event_type_t;
              +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev);
              +void rd_kafka_event_destroy(rd_kafka_event_t *rkev);
              +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev);
              +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, const rd_kafka_message_t **rkmessages, size_t size);
              +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev);
              +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev);
              +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev);
              +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev);
              +int rd_kafka_event_log(rd_kafka_event_t *rkev, const char **fac, const char **str, int *level);
              +int rd_kafka_event_debug_contexts(rd_kafka_event_t *rkev, char *dst, size_t dstsize);
              +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_list_t *rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_t *rd_kafka_event_topic_partition(rd_kafka_event_t *rkev);
              +typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreateAcls_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeAcls_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteAcls_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteRecords_result_t;
              +typedef rd_kafka_event_t rd_kafka_ListConsumerGroups_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConsumerGroups_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteGroups_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteConsumerGroupOffsets_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConsumerGroupOffsets_result_t;
              +typedef rd_kafka_event_t rd_kafka_ListConsumerGroupOffsets_result_t;
              +const rd_kafka_CreateTopics_result_t *rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteTopics_result_t *rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreatePartitions_result_t *rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConfigs_result_t *rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConfigs_result_t *rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteRecords_result_t *rd_kafka_event_DeleteRecords_result(rd_kafka_event_t *rkev);
              +const rd_kafka_ListConsumerGroups_result_t *rd_kafka_event_ListConsumerGroups_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConsumerGroups_result_t *rd_kafka_event_DescribeConsumerGroups_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteGroups_result_t *rd_kafka_event_DeleteGroups_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteConsumerGroupOffsets_result_t *rd_kafka_event_DeleteConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreateAcls_result_t *rd_kafka_event_CreateAcls_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeAcls_result_t *rd_kafka_event_DescribeAcls_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteAcls_result_t *rd_kafka_event_DeleteAcls_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConsumerGroupOffsets_result_t *rd_kafka_event_AlterConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
              +const rd_kafka_ListConsumerGroupOffsets_result_t *rd_kafka_event_ListConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
              +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms);
              +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms);
              +typedef rd_kafka_resp_err_t (rd_kafka_plugin_f_conf_init_t)(rd_kafka_conf_t *conf, void **plug_opaquep, char *errstr, size_t errstr_size);
              +typedef rd_kafka_conf_res_t (rd_kafka_interceptor_f_on_conf_set_t)(rd_kafka_conf_t *conf, const char *name, const char *val, char *errstr, size_t errstr_size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_dup_t)(rd_kafka_conf_t *new_conf, const rd_kafka_conf_t *old_conf, size_t filter_cnt, const char **filter, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_destroy_t)(void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_new_t)(rd_kafka_t *rk, const rd_kafka_conf_t *conf, void *ic_opaque, char *errstr, size_t errstr_size);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_send_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_acknowledgement_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_consume_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_commit_t)(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_request_sent_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_response_received_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, int64_t rtt, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_start_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_exit_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_broker_state_change_t)(rd_kafka_t *rk, int32_t broker_id, const char *secproto, const char *name, int port, const char *state, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_new_t *on_new, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_destroy_t *on_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_send_t *on_send, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_consume_t *on_consume, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_commit_t *on_commit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_response_received(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_response_received_t *on_response_received, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_broker_state_change(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_broker_state_change_t *on_broker_state_change, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres);
              +const rd_kafka_error_t *rd_kafka_group_result_error(const rd_kafka_group_result_t *groupres);
              +const char *rd_kafka_group_result_name(const rd_kafka_group_result_t *groupres);
              +const rd_kafka_topic_partition_list_t *rd_kafka_group_result_partitions(const rd_kafka_group_result_t *groupres);
              +typedef enum rd_kafka_admin_op_t {
              +  RD_KAFKA_ADMIN_OP_ANY = 0,
              +  RD_KAFKA_ADMIN_OP_CREATETOPICS,
              +  RD_KAFKA_ADMIN_OP_DELETETOPICS,
              +  RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONFIGS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS,
              +  RD_KAFKA_ADMIN_OP_DELETERECORDS,
              +  RD_KAFKA_ADMIN_OP_DELETEGROUPS,
              +  RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS,
              +  RD_KAFKA_ADMIN_OP_CREATEACLS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBEACLS,
              +  RD_KAFKA_ADMIN_OP_DELETEACLS,
              +  RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS,
              +  RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS,
              +  RD_KAFKA_ADMIN_OP__CNT,
              +} rd_kafka_admin_op_t;
              +typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t;
              +rd_kafka_AdminOptions_t *rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
              +void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, int true_or_false, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, int32_t broker_id, char *errstr, size_t errstr_size);
              +rd_kafka_error_t *rd_kafka_AdminOptions_set_require_stable_offsets(rd_kafka_AdminOptions_t *options, int true_or_false);
              +rd_kafka_error_t *rd_kafka_AdminOptions_set_match_consumer_group_states(rd_kafka_AdminOptions_t *options, const rd_kafka_consumer_group_state_t *consumer_group_states, size_t consumer_group_states_cnt);
              +void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, void *ev_opaque);
              +typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t;
              +rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, int num_partitions, int replication_factor, char *errstr, size_t errstr_size);
              +void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic);
              +void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, int32_t partition, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, const char *name, const char *value);
              +void rd_kafka_CreateTopics(rd_kafka_t *rk, rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(const rd_kafka_CreateTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
              +rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic);
              +void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic);
              +void rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt);
              +void rd_kafka_DeleteTopics(rd_kafka_t *rk, rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(const rd_kafka_DeleteTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t;
              +rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new(const char *topic, size_t new_total_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts);
              +void rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment(rd_kafka_NewPartitions_t *new_parts, int32_t new_partition_idx, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_CreatePartitions(rd_kafka_t *rk, rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreatePartitions_result_topics(const rd_kafka_CreatePartitions_result_t *result, size_t *cntp);
              +typedef enum rd_kafka_ConfigSource_t {
              +  RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
              +  RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4,
              +  RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5,
              +  RD_KAFKA_CONFIG_SOURCE__CNT,
              +} rd_kafka_ConfigSource_t;
              +const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource);
              +typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t;
              +const char *rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry);
              +const char *rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry);
              +rd_kafka_ConfigSource_t rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, size_t *cntp);
              +typedef enum rd_kafka_ResourceType_t {
              +  RD_KAFKA_RESOURCE_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_ANY = 1,
              +  RD_KAFKA_RESOURCE_TOPIC = 2,
              +  RD_KAFKA_RESOURCE_GROUP = 3,
              +  RD_KAFKA_RESOURCE_BROKER = 4,
              +  RD_KAFKA_RESOURCE__CNT,
              +} rd_kafka_ResourceType_t;
              +typedef enum rd_kafka_ResourcePatternType_t {
              +  RD_KAFKA_RESOURCE_PATTERN_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_PATTERN_ANY = 1,
              +  RD_KAFKA_RESOURCE_PATTERN_MATCH = 2,
              +  RD_KAFKA_RESOURCE_PATTERN_LITERAL = 3,
              +  RD_KAFKA_RESOURCE_PATTERN_PREFIXED = 4,
              +  RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT,
              +} rd_kafka_ResourcePatternType_t;
              +const char *rd_kafka_ResourcePatternType_name(rd_kafka_ResourcePatternType_t resource_pattern_type);
              +const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype);
              +typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
              +rd_kafka_ConfigResource_t *rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, const char *resname);
              +void rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config);
              +void rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, size_t config_cnt);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, const char *name, const char *value);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, size_t *cntp);
              +rd_kafka_ResourceType_t rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config);
              +void rd_kafka_AlterConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_AlterConfigs_result_resources(const rd_kafka_AlterConfigs_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_DescribeConfigs_result_resources(const rd_kafka_DescribeConfigs_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteRecords_s rd_kafka_DeleteRecords_t;
              +rd_kafka_DeleteRecords_t *rd_kafka_DeleteRecords_new(const rd_kafka_topic_partition_list_t *before_offsets);
              +void rd_kafka_DeleteRecords_destroy(rd_kafka_DeleteRecords_t *del_records);
              +void rd_kafka_DeleteRecords_destroy_array(rd_kafka_DeleteRecords_t **del_records, size_t del_record_cnt);
              +void rd_kafka_DeleteRecords(rd_kafka_t *rk, rd_kafka_DeleteRecords_t **del_records, size_t del_record_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_partition_list_t *rd_kafka_DeleteRecords_result_offsets(const rd_kafka_DeleteRecords_result_t *result);
              +typedef struct rd_kafka_ConsumerGroupListing_s rd_kafka_ConsumerGroupListing_t;
              +typedef struct rd_kafka_ListConsumerGroupsResult_s rd_kafka_ListConsumerGroupsResult_t;
              +void rd_kafka_ListConsumerGroups(rd_kafka_t *rk, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const char *rd_kafka_ConsumerGroupListing_group_id(const rd_kafka_ConsumerGroupListing_t *grplist);
              +int rd_kafka_ConsumerGroupListing_is_simple_consumer_group(const rd_kafka_ConsumerGroupListing_t *grplist);
              +rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupListing_state(const rd_kafka_ConsumerGroupListing_t *grplist);
              +const rd_kafka_ConsumerGroupListing_t **rd_kafka_ListConsumerGroups_result_valid(const rd_kafka_ListConsumerGroups_result_t *result, size_t *cntp);
              +const rd_kafka_error_t **rd_kafka_ListConsumerGroups_result_errors(const rd_kafka_ListConsumerGroups_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_ConsumerGroupDescription_s rd_kafka_ConsumerGroupDescription_t;
              +typedef struct rd_kafka_MemberDescription_s rd_kafka_MemberDescription_t;
              +typedef struct rd_kafka_MemberAssignment_s rd_kafka_MemberAssignment_t;
              +void rd_kafka_DescribeConsumerGroups(rd_kafka_t *rk, const char **groups, size_t groups_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConsumerGroupDescription_t **rd_kafka_DescribeConsumerGroups_result_groups(const rd_kafka_DescribeConsumerGroups_result_t *result, size_t *cntp);
              +const char *rd_kafka_ConsumerGroupDescription_group_id(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +const rd_kafka_error_t *rd_kafka_ConsumerGroupDescription_error(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +int rd_kafka_ConsumerGroupDescription_is_simple_consumer_group(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +const char *rd_kafka_ConsumerGroupDescription_partition_assignor(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupDescription_state(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +const rd_kafka_Node_t *rd_kafka_ConsumerGroupDescription_coordinator(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +size_t rd_kafka_ConsumerGroupDescription_member_count(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +const rd_kafka_MemberDescription_t *rd_kafka_ConsumerGroupDescription_member(const rd_kafka_ConsumerGroupDescription_t *grpdesc, size_t idx);
              +const char *rd_kafka_MemberDescription_client_id(const rd_kafka_MemberDescription_t *member);
              +const char *rd_kafka_MemberDescription_group_instance_id(const rd_kafka_MemberDescription_t *member);
              +const char *rd_kafka_MemberDescription_consumer_id(const rd_kafka_MemberDescription_t *member);
              +const char *rd_kafka_MemberDescription_host(const rd_kafka_MemberDescription_t *member);
              +const rd_kafka_MemberAssignment_t *rd_kafka_MemberDescription_assignment(const rd_kafka_MemberDescription_t *member);
              +const rd_kafka_topic_partition_list_t *rd_kafka_MemberAssignment_partitions(const rd_kafka_MemberAssignment_t *assignment);
              +typedef struct rd_kafka_DeleteGroup_s rd_kafka_DeleteGroup_t;
              +rd_kafka_DeleteGroup_t *rd_kafka_DeleteGroup_new(const char *group);
              +void rd_kafka_DeleteGroup_destroy(rd_kafka_DeleteGroup_t *del_group);
              +void rd_kafka_DeleteGroup_destroy_array(rd_kafka_DeleteGroup_t **del_groups, size_t del_group_cnt);
              +void rd_kafka_DeleteGroups(rd_kafka_t *rk, rd_kafka_DeleteGroup_t **del_groups, size_t del_group_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_DeleteGroups_result_groups(const rd_kafka_DeleteGroups_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_ListConsumerGroupOffsets_s rd_kafka_ListConsumerGroupOffsets_t;
              +rd_kafka_ListConsumerGroupOffsets_t *rd_kafka_ListConsumerGroupOffsets_new(const char *group_id, const rd_kafka_topic_partition_list_t *partitions);
              +void rd_kafka_ListConsumerGroupOffsets_destroy(rd_kafka_ListConsumerGroupOffsets_t *list_grpoffsets);
              +void rd_kafka_ListConsumerGroupOffsets_destroy_array(rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets, size_t list_grpoffset_cnt);
              +void rd_kafka_ListConsumerGroupOffsets(rd_kafka_t *rk, rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets, size_t list_grpoffsets_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_ListConsumerGroupOffsets_result_groups(const rd_kafka_ListConsumerGroupOffsets_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_AlterConsumerGroupOffsets_s rd_kafka_AlterConsumerGroupOffsets_t;
              +rd_kafka_AlterConsumerGroupOffsets_t *rd_kafka_AlterConsumerGroupOffsets_new(const char *group_id, const rd_kafka_topic_partition_list_t *partitions);
              +void rd_kafka_AlterConsumerGroupOffsets_destroy(rd_kafka_AlterConsumerGroupOffsets_t *alter_grpoffsets);
              +void rd_kafka_AlterConsumerGroupOffsets_destroy_array(rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets, size_t alter_grpoffset_cnt);
              +void rd_kafka_AlterConsumerGroupOffsets(rd_kafka_t *rk, rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets, size_t alter_grpoffsets_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_AlterConsumerGroupOffsets_result_groups(const rd_kafka_AlterConsumerGroupOffsets_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteConsumerGroupOffsets_s rd_kafka_DeleteConsumerGroupOffsets_t;
              +rd_kafka_DeleteConsumerGroupOffsets_t *rd_kafka_DeleteConsumerGroupOffsets_new(const char *group, const rd_kafka_topic_partition_list_t *partitions);
              +void rd_kafka_DeleteConsumerGroupOffsets_destroy(rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets);
              +void rd_kafka_DeleteConsumerGroupOffsets_destroy_array(rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, size_t del_grpoffset_cnt);
              +void rd_kafka_DeleteConsumerGroupOffsets(rd_kafka_t *rk, rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, size_t del_grpoffsets_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_DeleteConsumerGroupOffsets_result_groups(const rd_kafka_DeleteConsumerGroupOffsets_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_AclBinding_s rd_kafka_AclBinding_t;
              +typedef rd_kafka_AclBinding_t rd_kafka_AclBindingFilter_t;
              +const rd_kafka_error_t *rd_kafka_acl_result_error(const rd_kafka_acl_result_t *aclres);
              +typedef enum rd_kafka_AclOperation_t {
              +  RD_KAFKA_ACL_OPERATION_UNKNOWN = 0,
              +  RD_KAFKA_ACL_OPERATION_ANY = 1,
              +  RD_KAFKA_ACL_OPERATION_ALL = 2,
              +  RD_KAFKA_ACL_OPERATION_READ = 3,
              +  RD_KAFKA_ACL_OPERATION_WRITE = 4,
              +  RD_KAFKA_ACL_OPERATION_CREATE = 5,
              +  RD_KAFKA_ACL_OPERATION_DELETE = 6,
              +  RD_KAFKA_ACL_OPERATION_ALTER = 7,
              +  RD_KAFKA_ACL_OPERATION_DESCRIBE = 8,
              +  RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION = 9,
              +  RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS = 10,
              +  RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS = 11,
              +  RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE = 12,
              +  RD_KAFKA_ACL_OPERATION__CNT,
              +} rd_kafka_AclOperation_t;
              +const char *rd_kafka_AclOperation_name(rd_kafka_AclOperation_t acl_operation);
              +typedef enum rd_kafka_AclPermissionType_t {
              +  RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN = 0,
              +  RD_KAFKA_ACL_PERMISSION_TYPE_ANY = 1,
              +  RD_KAFKA_ACL_PERMISSION_TYPE_DENY = 2,
              +  RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW = 3,
              +  RD_KAFKA_ACL_PERMISSION_TYPE__CNT,
              +} rd_kafka_AclPermissionType_t;
              +const char *rd_kafka_AclPermissionType_name(rd_kafka_AclPermissionType_t acl_permission_type);
              +rd_kafka_AclBinding_t *rd_kafka_AclBinding_new(rd_kafka_ResourceType_t restype, const char *name, rd_kafka_ResourcePatternType_t resource_pattern_type, const char *principal, const char *host, rd_kafka_AclOperation_t operation, rd_kafka_AclPermissionType_t permission_type, char *errstr, size_t errstr_size);
              +rd_kafka_AclBindingFilter_t *rd_kafka_AclBindingFilter_new(rd_kafka_ResourceType_t restype, const char *name, rd_kafka_ResourcePatternType_t resource_pattern_type, const char *principal, const char *host, rd_kafka_AclOperation_t operation, rd_kafka_AclPermissionType_t permission_type, char *errstr, size_t errstr_size);
              +rd_kafka_ResourceType_t rd_kafka_AclBinding_restype(const rd_kafka_AclBinding_t *acl);
              +const char *rd_kafka_AclBinding_name(const rd_kafka_AclBinding_t *acl);
              +const char *rd_kafka_AclBinding_principal(const rd_kafka_AclBinding_t *acl);
              +const char *rd_kafka_AclBinding_host(const rd_kafka_AclBinding_t *acl);
              +rd_kafka_AclOperation_t rd_kafka_AclBinding_operation(const rd_kafka_AclBinding_t *acl);
              +rd_kafka_AclPermissionType_t rd_kafka_AclBinding_permission_type(const rd_kafka_AclBinding_t *acl);
              +rd_kafka_ResourcePatternType_t rd_kafka_AclBinding_resource_pattern_type(const rd_kafka_AclBinding_t *acl);
              +const rd_kafka_error_t *rd_kafka_AclBinding_error(const rd_kafka_AclBinding_t *acl);
              +void rd_kafka_AclBinding_destroy(rd_kafka_AclBinding_t *acl_binding);
              +void rd_kafka_AclBinding_destroy_array(rd_kafka_AclBinding_t **acl_bindings, size_t acl_bindings_cnt);
              +const rd_kafka_acl_result_t **rd_kafka_CreateAcls_result_acls(const rd_kafka_CreateAcls_result_t *result, size_t *cntp);
              +void rd_kafka_CreateAcls(rd_kafka_t *rk, rd_kafka_AclBinding_t **new_acls, size_t new_acls_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_AclBinding_t **rd_kafka_DescribeAcls_result_acls(const rd_kafka_DescribeAcls_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeAcls(rd_kafka_t *rk, rd_kafka_AclBindingFilter_t *acl_filter, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +typedef struct rd_kafka_DeleteAcls_result_response_s rd_kafka_DeleteAcls_result_response_t;
              +const rd_kafka_DeleteAcls_result_response_t **rd_kafka_DeleteAcls_result_responses(const rd_kafka_DeleteAcls_result_t *result, size_t *cntp);
              +const rd_kafka_error_t *rd_kafka_DeleteAcls_result_response_error(const rd_kafka_DeleteAcls_result_response_t *result_response);
              +const rd_kafka_AclBinding_t **rd_kafka_DeleteAcls_result_response_matching_acls(const rd_kafka_DeleteAcls_result_response_t *result_response, size_t *matching_acls_cntp);
              +void rd_kafka_DeleteAcls(rd_kafka_t *rk, rd_kafka_AclBindingFilter_t **del_acls, size_t del_acls_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token(rd_kafka_t *rk, const char *token_value, int64_t md_lifetime_ms, const char *md_principal_name, const char **extensions, size_t extension_size, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk, const char *errstr);
              +rd_kafka_error_t *rd_kafka_init_transactions(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_send_offsets_to_transaction(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, const rd_kafka_consumer_group_metadata_t *cgmetadata, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_commit_transaction(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_abort_transaction(rd_kafka_t *rk, int timeout_ms);
              +typedef struct rd_kafka_mock_cluster_s rd_kafka_mock_cluster_t;
              +rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new(rd_kafka_t *rk, int broker_cnt);
              +void rd_kafka_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_t *rd_kafka_mock_cluster_handle(const rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_mock_cluster_t *rd_kafka_handle_mock_cluster(const rd_kafka_t *rk);
              +const char *rd_kafka_mock_cluster_bootstraps(const rd_kafka_mock_cluster_t *mcluster);
              +void rd_kafka_mock_clear_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey);
              +void rd_kafka_mock_push_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, ...);
              +void rd_kafka_mock_push_request_errors_array(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, const rd_kafka_resp_err_t *errors);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_push_request_error_rtts(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int16_t ApiKey, size_t cnt, ...);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_error_stack_cnt(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int16_t ApiKey, size_t *cntp);
              +void rd_kafka_mock_topic_set_error(rd_kafka_mock_cluster_t *mcluster, const char *topic, rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_mock_topic_create(rd_kafka_mock_cluster_t *mcluster, const char *topic, int partition_cnt, int replication_factor);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_leader(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int64_t lo, int64_t hi);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_down(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_up(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rtt(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int rtt_ms);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rack(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, const char *rack);
              +rd_kafka_resp_err_t rd_kafka_mock_coordinator_set(rd_kafka_mock_cluster_t *mcluster, const char *key_type, const char *key, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_set_apiversion(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, int16_t MinVersion, int16_t MaxVersion);
              +'
              +
              +rdkafka.h, rdkafka_mock.h
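               +
               +These declarations are plain C prototypes, so a cdef string like this can be handed to PHP's FFI extension to bind against an installed librdkafka. A minimal, hedged sketch (not the library's own bootstrap code; the shared-library name 'librdkafka.so.1' is an assumption that varies by platform, and only two of the declarations above are repeated for brevity):
               +
               +<?php
               +// Bind a small subset of the cdef declarations to librdkafka via FFI and query its version.
               +$cdef = '
               +int rd_kafka_version(void);
               +const char *rd_kafka_version_str(void);
               +';
               +
               +$ffi = \FFI::cdef($cdef, 'librdkafka.so.1'); // library name/path is platform-dependent (assumption)
               +
               +printf(
               +    "librdkafka %s (0x%08x)\n",
               +    \FFI::string($ffi->rd_kafka_version_str()),
               +    $ffi->rd_kafka_version()
               +);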

               +RD_KAFKA_VERSION
               +
               +public RD_KAFKA_VERSION = 33554687
               +
               +librdkafka version
               +
               +Interpreted as hex MM.mm.rr.xx:
               +
               +• MM = Major
               +• mm = minor
               +• rr = revision
               +• xx = pre-release id (0xff is the final release)
               +
               +E.g.: 0x000801ff = 0.8.1
               +
               +Remarks
               +This value should only be used at compile time; for runtime version checks, use rd_kafka_version().
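               +
               +Because the components are packed one byte each, they can be recovered with simple bit shifts. A minimal sketch in PHP (illustrative only, not part of the library API; the variable names are assumptions) decoding the constant shown above:
               +
               +<?php
               +// Decode the packed librdkafka version constant (hex MM.mm.rr.xx, one byte per component).
               +$version = 33554687; // RD_KAFKA_VERSION, i.e. 0x020000ff
               +
               +$major      = ($version >> 24) & 0xff; // 2
               +$minor      = ($version >> 16) & 0xff; // 0
               +$revision   = ($version >> 8)  & 0xff; // 0
               +$prerelease = $version & 0xff;         // 0xff means final release
               +
               +printf(
               +    "librdkafka %d.%d.%d%s\n",
               +    $major,
               +    $minor,
               +    $revision,
               +    $prerelease === 0xff ? '' : sprintf(' (pre-release %d)', $prerelease)
               +);
               +
               +As the remark above notes, this decodes the compile-time constant only; call rd_kafka_version() for the runtime version.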

               +RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE
               +
               +public RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76
               +
               +Unsupported compression type
               +
               +RD_KAFKA_RESP_ERR_END_ALL
               +
               +public RD_KAFKA_RESP_ERR_END_ALL = 98
               +
               +enum rd_kafka_resp_err_t
               +
               +RD_KAFKA_ADMIN_OP__CNT
               +
               +public RD_KAFKA_ADMIN_OP__CNT = 16
               +
               +Number of ops defined
               +
               +RD_KAFKA_CDEF
               +
               +public RD_KAFKA_CDEF = 'typedef long int ssize_t;
              +typedef struct _IO_FILE FILE;
              +typedef long int mode_t;
              +typedef signed int int16_t;
              +typedef unsigned int uint16_t;
              +typedef signed int int32_t;
              +typedef signed long int int64_t;
              +int rd_kafka_version(void);
              +const char *rd_kafka_version_str(void);
              +typedef enum rd_kafka_type_t {
              +  RD_KAFKA_PRODUCER,
              +  RD_KAFKA_CONSUMER,
              +} rd_kafka_type_t;
              +typedef enum rd_kafka_timestamp_type_t {
              +  RD_KAFKA_TIMESTAMP_NOT_AVAILABLE,
              +  RD_KAFKA_TIMESTAMP_CREATE_TIME,
              +  RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME,
              +} rd_kafka_timestamp_type_t;
              +const char *rd_kafka_get_debug_contexts(void);
              +typedef struct rd_kafka_s rd_kafka_t;
              +typedef struct rd_kafka_topic_s rd_kafka_topic_t;
              +typedef struct rd_kafka_conf_s rd_kafka_conf_t;
              +typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t;
              +typedef struct rd_kafka_queue_s rd_kafka_queue_t;
              +typedef struct rd_kafka_op_s rd_kafka_event_t;
              +typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t;
              +typedef struct rd_kafka_consumer_group_metadata_s rd_kafka_consumer_group_metadata_t;
              +typedef struct rd_kafka_error_s {
              +  unsigned int code;
              +  char *errstr;
              +  unsigned char fatal;
              +  unsigned char retriable;
              +  unsigned char txn_requires_abort;
              +} rd_kafka_error_t;
              +typedef struct rd_kafka_headers_s rd_kafka_headers_t;
              +typedef struct rd_kafka_group_result_s rd_kafka_group_result_t;
              +typedef struct rd_kafka_acl_result_s rd_kafka_acl_result_t;
              +typedef enum {
              +  RD_KAFKA_RESP_ERR__BEGIN = (- 200),
              +  RD_KAFKA_RESP_ERR__BAD_MSG = (- 199),
              +  RD_KAFKA_RESP_ERR__BAD_COMPRESSION = (- 198),
              +  RD_KAFKA_RESP_ERR__DESTROY = (- 197),
              +  RD_KAFKA_RESP_ERR__FAIL = (- 196),
              +  RD_KAFKA_RESP_ERR__TRANSPORT = (- 195),
              +  RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = (- 194),
              +  RD_KAFKA_RESP_ERR__RESOLVE = (- 193),
              +  RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = (- 192),
              +  RD_KAFKA_RESP_ERR__PARTITION_EOF = (- 191),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = (- 190),
              +  RD_KAFKA_RESP_ERR__FS = (- 189),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = (- 188),
              +  RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = (- 187),
              +  RD_KAFKA_RESP_ERR__INVALID_ARG = (- 186),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT = (- 185),
              +  RD_KAFKA_RESP_ERR__QUEUE_FULL = (- 184),
              +  RD_KAFKA_RESP_ERR__ISR_INSUFF = (- 183),
              +  RD_KAFKA_RESP_ERR__NODE_UPDATE = (- 182),
              +  RD_KAFKA_RESP_ERR__SSL = (- 181),
              +  RD_KAFKA_RESP_ERR__WAIT_COORD = (- 180),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = (- 179),
              +  RD_KAFKA_RESP_ERR__IN_PROGRESS = (- 178),
              +  RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = (- 177),
              +  RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = (- 176),
              +  RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = (- 175),
              +  RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = (- 174),
              +  RD_KAFKA_RESP_ERR__CONFLICT = (- 173),
              +  RD_KAFKA_RESP_ERR__STATE = (- 172),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = (- 171),
              +  RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = (- 170),
              +  RD_KAFKA_RESP_ERR__AUTHENTICATION = (- 169),
              +  RD_KAFKA_RESP_ERR__NO_OFFSET = (- 168),
              +  RD_KAFKA_RESP_ERR__OUTDATED = (- 167),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = (- 166),
              +  RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = (- 165),
              +  RD_KAFKA_RESP_ERR__WAIT_CACHE = (- 164),
              +  RD_KAFKA_RESP_ERR__INTR = (- 163),
              +  RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = (- 162),
              +  RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = (- 161),
              +  RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = (- 160),
              +  RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = (- 159),
              +  RD_KAFKA_RESP_ERR__PARTIAL = (- 158),
              +  RD_KAFKA_RESP_ERR__READ_ONLY = (- 157),
              +  RD_KAFKA_RESP_ERR__NOENT = (- 156),
              +  RD_KAFKA_RESP_ERR__UNDERFLOW = (- 155),
              +  RD_KAFKA_RESP_ERR__INVALID_TYPE = (- 154),
              +  RD_KAFKA_RESP_ERR__RETRY = (- 153),
              +  RD_KAFKA_RESP_ERR__PURGE_QUEUE = (- 152),
              +  RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = (- 151),
              +  RD_KAFKA_RESP_ERR__FATAL = (- 150),
              +  RD_KAFKA_RESP_ERR__INCONSISTENT = (- 149),
              +  RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = (- 148),
              +  RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = (- 147),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_BROKER = (- 146),
              +  RD_KAFKA_RESP_ERR__NOT_CONFIGURED = (- 145),
              +  RD_KAFKA_RESP_ERR__FENCED = (- 144),
              +  RD_KAFKA_RESP_ERR__APPLICATION = (- 143),
              +  RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST = (- 142),
              +  RD_KAFKA_RESP_ERR__NOOP = (- 141),
              +  RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET = (- 140),
              +  RD_KAFKA_RESP_ERR__END = (- 100),
              +  RD_KAFKA_RESP_ERR_UNKNOWN = (- 1),
              +  RD_KAFKA_RESP_ERR_NO_ERROR = 0,
              +  RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG = 2,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4,
              +  RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5,
              +  RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6,
              +  RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7,
              +  RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8,
              +  RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9,
              +  RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10,
              +  RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11,
              +  RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12,
              +  RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS = 14,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE = 15,
              +  RD_KAFKA_RESP_ERR_NOT_COORDINATOR = 16,
              +  RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17,
              +  RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
              +  RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25,
              +  RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26,
              +  RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27,
              +  RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
              +  RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29,
              +  RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30,
              +  RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
              +  RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35,
              +  RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36,
              +  RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39,
              +  RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40,
              +  RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
              +  RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44,
              +  RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47,
              +  RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49,
              +  RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50,
              +  RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51,
              +  RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52,
              +  RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
              +  RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54,
              +  RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55,
              +  RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56,
              +  RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57,
              +  RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59,
              +  RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66,
              +  RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67,
              +  RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68,
              +  RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69,
              +  RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70,
              +  RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71,
              +  RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72,
              +  RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73,
              +  RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76,
              +  RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77,
              +  RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78,
              +  RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79,
              +  RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80,
              +  RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81,
              +  RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID = 82,
              +  RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE = 83,
              +  RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED = 84,
              +  RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS = 85,
              +  RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC = 86,
              +  RD_KAFKA_RESP_ERR_INVALID_RECORD = 87,
              +  RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT = 88,
              +  RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED = 89,
              +  RD_KAFKA_RESP_ERR_PRODUCER_FENCED = 90,
              +  RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND = 91,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE = 92,
              +  RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL = 93,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET = 94,
              +  RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION = 95,
              +  RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED = 96,
              +  RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE = 97,
              +  RD_KAFKA_RESP_ERR_END_ALL,
              +} rd_kafka_resp_err_t;
              +struct rd_kafka_err_desc {
              +  rd_kafka_resp_err_t code;
              +  char *name;
              +  char *desc;
              +};
              +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, size_t *cntp);
              +const char *rd_kafka_err2str(rd_kafka_resp_err_t err);
              +const char *rd_kafka_err2name(rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_last_error(void);
              +rd_kafka_resp_err_t rd_kafka_errno2err(int errnox);
              +int rd_kafka_errno(void);
              +rd_kafka_resp_err_t rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason);
              +rd_kafka_resp_err_t rd_kafka_error_code(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_name(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_string(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_fatal(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_retriable(const rd_kafka_error_t *error);
              +int rd_kafka_error_txn_requires_abort(const rd_kafka_error_t *error);
              +void rd_kafka_error_destroy(rd_kafka_error_t *error);
              +rd_kafka_error_t *rd_kafka_error_new(rd_kafka_resp_err_t code, const char *fmt, ...);
              +typedef struct rd_kafka_topic_partition_s {
              +  char *topic;
              +  int32_t partition;
              +  int64_t offset;
              +  void *metadata;
              +  size_t metadata_size;
              +  void *opaque;
              +  rd_kafka_resp_err_t err;
              +  void *_private;
              +} rd_kafka_topic_partition_t;
              +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar);
              +typedef struct rd_kafka_topic_partition_list_s {
              +  int cnt;
              +  int size;
              +  rd_kafka_topic_partition_t *elems;
              +} rd_kafka_topic_partition_list_t;
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size);
              +void rd_kafka_topic_partition_list_destroy(rd_kafka_topic_partition_list_t *rkparlist);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_add_range(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t start, int32_t stop);
              +int rd_kafka_topic_partition_list_del(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +int rd_kafka_topic_partition_list_del_by_idx(rd_kafka_topic_partition_list_t *rktparlist, int idx);
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src);
              +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition, int64_t offset);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(const rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_sort(rd_kafka_topic_partition_list_t *rktparlist, int (*cmp)(const void *a, const void *b, void *cmp_opaque), void *cmp_opaque);
              +typedef enum rd_kafka_vtype_t {
              +  RD_KAFKA_VTYPE_END,
              +  RD_KAFKA_VTYPE_TOPIC,
              +  RD_KAFKA_VTYPE_RKT,
              +  RD_KAFKA_VTYPE_PARTITION,
              +  RD_KAFKA_VTYPE_VALUE,
              +  RD_KAFKA_VTYPE_KEY,
              +  RD_KAFKA_VTYPE_OPAQUE,
              +  RD_KAFKA_VTYPE_MSGFLAGS,
              +  RD_KAFKA_VTYPE_TIMESTAMP,
              +  RD_KAFKA_VTYPE_HEADER,
              +  RD_KAFKA_VTYPE_HEADERS,
              +} rd_kafka_vtype_t;
              +typedef struct rd_kafka_vu_s {
              +  rd_kafka_vtype_t vtype;
              +  union {
              +    char *cstr;
              +    rd_kafka_topic_t *rkt;
              +    int i;
              +    int32_t i32;
              +    int64_t i64;
              +    struct {
              +      void *ptr;
              +      size_t size;
              +    } mem;
              +    struct {
              +      char *name;
              +      void *val;
              +      ssize_t size;
              +    } header;
              +    rd_kafka_headers_t *headers;
              +    void *ptr;
              +    char _pad[64];
              +  } u;
              +} rd_kafka_vu_t;
              +rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count);
              +void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs);
              +rd_kafka_headers_t *rd_kafka_headers_copy(const rd_kafka_headers_t *src);
              +rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, const char *name, ssize_t name_size, const void *value, ssize_t value_size);
              +rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, const char *name);
              +rd_kafka_resp_err_t rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get(const rd_kafka_headers_t *hdrs, size_t idx, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, size_t idx, const char **namep, const void **valuep, size_t *sizep);
              +typedef struct rd_kafka_message_s {
              +  rd_kafka_resp_err_t err;
              +  rd_kafka_topic_t *rkt;
              +  int32_t partition;
              +  void *payload;
              +  size_t len;
              +  void *key;
              +  size_t key_len;
              +  int64_t offset;
              +  void *_private;
              +} rd_kafka_message_t;
              +void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);
              +const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage);
              +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, rd_kafka_timestamp_type_t *tstype);
              +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage);
              +int32_t rd_kafka_message_broker_id(const rd_kafka_message_t *rkmessage);
              +rd_kafka_resp_err_t rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +rd_kafka_resp_err_t rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t *hdrs);
              +size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs);
              +typedef enum {
              +  RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0,
              +  RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1,
              +  RD_KAFKA_MSG_STATUS_PERSISTED = 2,
              +} rd_kafka_msg_status_t;
              +rd_kafka_msg_status_t rd_kafka_message_status(const rd_kafka_message_t *rkmessage);
              +typedef enum {
              +  RD_KAFKA_CONF_UNKNOWN = (- 2),
              +  RD_KAFKA_CONF_INVALID = (- 1),
              +  RD_KAFKA_CONF_OK = 0,
              +} rd_kafka_conf_res_t;
              +rd_kafka_conf_t *rd_kafka_conf_new(void);
              +void rd_kafka_conf_destroy(rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, size_t filter_cnt, const char **filter);
              +const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk);
              +rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events);
              +void rd_kafka_conf_set_background_event_cb(rd_kafka_conf_t *conf, void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque));
              +void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, void (*dr_cb)(rd_kafka_t *rk, void *payload, size_t len, rd_kafka_resp_err_t err, void *opaque, void *msg_opaque));
              +void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, void (*dr_msg_cb)(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_consume_cb(rd_kafka_conf_t *conf, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_rebalance_cb(rd_kafka_conf_t *conf, void (*rebalance_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque));
              +void rd_kafka_conf_set_offset_commit_cb(rd_kafka_conf_t *conf, void (*offset_commit_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque));
              +void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, void (*error_cb)(rd_kafka_t *rk, int err, const char *reason, void *opaque));
              +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, void (*throttle_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque));
              +void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, void (*log_cb)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque));
              +void rd_kafka_conf_set_oauthbearer_token_refresh_cb(rd_kafka_conf_t *conf, void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, const char *oauthbearer_config, void *opaque));
              +void rd_kafka_conf_enable_sasl_queue(rd_kafka_conf_t *conf, int enable);
              +void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, int (*socket_cb)(int domain, int type, int protocol, void *opaque));
              +void rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, int (*connect_cb)(int sockfd, const struct sockaddr *addr, int addrlen, const char *id, void *opaque));
              +void rd_kafka_conf_set_closesocket_cb(rd_kafka_conf_t *conf, int (*closesocket_cb)(int sockfd, void *opaque));
              +struct addrinfo;
              +void rd_kafka_conf_set_resolve_cb(rd_kafka_conf_t *conf, int (*resolve_cb)(const char *node, const char *service, const struct addrinfo *hints, struct addrinfo **res, void *opaque));
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb(rd_kafka_conf_t *conf, int (*ssl_cert_verify_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int *x509_error, int depth, const char *buf, size_t size, char *errstr, size_t errstr_size, void *opaque));
              +typedef enum rd_kafka_cert_type_t {
              +  RD_KAFKA_CERT_PUBLIC_KEY,
              +  RD_KAFKA_CERT_PRIVATE_KEY,
              +  RD_KAFKA_CERT_CA,
              +  RD_KAFKA_CERT__CNT,
              +} rd_kafka_cert_type_t;
              +typedef enum rd_kafka_cert_enc_t {
              +  RD_KAFKA_CERT_ENC_PKCS12,
              +  RD_KAFKA_CERT_ENC_DER,
              +  RD_KAFKA_CERT_ENC_PEM,
              +  RD_KAFKA_CERT_ENC__CNT,
              +} rd_kafka_cert_enc_t;
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, rd_kafka_cert_type_t cert_type, rd_kafka_cert_enc_t cert_enc, const void *buffer, size_t size, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_engine_callback_data(rd_kafka_conf_t *conf, void *callback_data);
              +void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque);
              +void *rd_kafka_opaque(const rd_kafka_t *rk);
              +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, rd_kafka_topic_conf_t *tconf);
              +rd_kafka_topic_conf_t *rd_kafka_conf_get_default_topic_conf(rd_kafka_conf_t *conf);
              +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp);
              +const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, size_t *cntp);
              +void rd_kafka_conf_dump_free(const char **arr, size_t cnt);
              +void rd_kafka_conf_properties_show(FILE *fp);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf);
              +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk);
              +void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *rkt_opaque);
              +void rd_kafka_topic_conf_set_partitioner_cb(rd_kafka_topic_conf_t *topic_conf, int32_t (*partitioner)(const rd_kafka_topic_t *rkt, const void *keydata, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque));
              +void rd_kafka_topic_conf_set_msg_order_cmp(rd_kafka_topic_conf_t *topic_conf, int (*msg_order_cmp)(const rd_kafka_message_t *a, const rd_kafka_message_t *b));
              +int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, int32_t partition);
              +int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, char *errstr, size_t errstr_size);
              +void rd_kafka_destroy(rd_kafka_t *rk);
              +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags);
              +const char *rd_kafka_name(const rd_kafka_t *rk);
              +rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk);
              +char *rd_kafka_memberid(const rd_kafka_t *rk);
              +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms);
              +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, rd_kafka_topic_conf_t *conf);
              +void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt);
              +const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt);
              +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt);
              +int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms);
              +void rd_kafka_yield(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_pause_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_resume_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high);
              +rd_kafka_resp_err_t rd_kafka_offsets_for_times(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets, int timeout_ms);
              +void *rd_kafka_mem_calloc(rd_kafka_t *rk, size_t num, size_t size);
              +void *rd_kafka_mem_malloc(rd_kafka_t *rk, size_t size);
              +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr);
              +rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk);
              +void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu);
              +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_sasl(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_sasl_background_callbacks_enable(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_sasl_set_credentials(rd_kafka_t *rk, const char *username, const char *password);
              +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, const char *topic, int32_t partition);
              +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk);
              +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst);
              +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu);
              +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, int fd, const void *payload, size_t size);
              +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, void (*event_cb)(rd_kafka_t *rk, void *qev_opaque), void *qev_opaque);
              +void rd_kafka_queue_yield(rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
              +rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_seek_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms);
              +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, int timeout_ms);
              +ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +rd_kafka_resp_err_t rd_kafka_offsets_store(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets);
              +rd_kafka_resp_err_t rd_kafka_subscribe(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *topics);
              +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics);
              +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_consumer_close_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consumer_closed(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_incremental_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_error_t *rd_kafka_incremental_unassign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +const char *rd_kafka_rebalance_protocol(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_assignment(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **partitions);
              +int rd_kafka_assignment_lost(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_commit(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_message(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_queue(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_queue_t *rkqu, void (*cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_committed(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata(rd_kafka_t *rk);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new(const char *group_id);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new_with_genid(const char *group_id, int32_t generation_id, const char *member_id, const char *group_instance_id);
              +void rd_kafka_consumer_group_metadata_destroy(rd_kafka_consumer_group_metadata_t *);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_write(const rd_kafka_consumer_group_metadata_t *cgmd, void **bufferp, size_t *sizep);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_read(rd_kafka_consumer_group_metadata_t **cgmdp, const void *buffer, size_t size);
              +int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, void *payload, size_t len, const void *key, size_t keylen, void *msg_opaque);
              +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...);
              +rd_kafka_error_t *rd_kafka_produceva(rd_kafka_t *rk, const rd_kafka_vu_t *vus, size_t cnt);
              +int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, rd_kafka_message_t *rkmessages, int message_cnt);
              +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags);
              +typedef struct rd_kafka_metadata_broker {
              +  int32_t id;
              +  char *host;
              +  int port;
              +} rd_kafka_metadata_broker_t;
              +typedef struct rd_kafka_metadata_partition {
              +  int32_t id;
              +  rd_kafka_resp_err_t err;
              +  int32_t leader;
              +  int replica_cnt;
              +  int32_t *replicas;
              +  int isr_cnt;
              +  int32_t *isrs;
              +} rd_kafka_metadata_partition_t;
              +typedef struct rd_kafka_metadata_topic {
              +  char *topic;
              +  int partition_cnt;
              +  struct rd_kafka_metadata_partition *partitions;
              +  rd_kafka_resp_err_t err;
              +} rd_kafka_metadata_topic_t;
              +typedef struct rd_kafka_metadata {
              +  int broker_cnt;
              +  struct rd_kafka_metadata_broker *brokers;
              +  int topic_cnt;
              +  struct rd_kafka_metadata_topic *topics;
              +  int32_t orig_broker_id;
              +  char *orig_broker_name;
              +} rd_kafka_metadata_t;
              +rd_kafka_resp_err_t rd_kafka_metadata(rd_kafka_t *rk, int all_topics, rd_kafka_topic_t *only_rkt, const struct rd_kafka_metadata **metadatap, int timeout_ms);
              +void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata);
              +typedef struct rd_kafka_Node_s rd_kafka_Node_t;
              +int rd_kafka_Node_id(const rd_kafka_Node_t *node);
              +const char *rd_kafka_Node_host(const rd_kafka_Node_t *node);
              +uint16_t rd_kafka_Node_port(const rd_kafka_Node_t *node);
              +struct rd_kafka_group_member_info {
              +  char *member_id;
              +  char *client_id;
              +  char *client_host;
              +  void *member_metadata;
              +  int member_metadata_size;
              +  void *member_assignment;
              +  int member_assignment_size;
              +};
              +typedef enum {
              +  RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN = 0,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_PREPARING_REBALANCE = 1,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_COMPLETING_REBALANCE = 2,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_STABLE = 3,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_DEAD = 4,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY = 5,
              +  RD_KAFKA_CONSUMER_GROUP_STATE__CNT,
              +} rd_kafka_consumer_group_state_t;
              +struct rd_kafka_group_info {
              +  struct rd_kafka_metadata_broker broker;
              +  char *group;
              +  rd_kafka_resp_err_t err;
              +  char *state;
              +  char *protocol_type;
              +  char *protocol;
              +  struct rd_kafka_group_member_info *members;
              +  int member_cnt;
              +};
              +struct rd_kafka_group_list {
              +  struct rd_kafka_group_info *groups;
              +  int group_cnt;
              +};
              +rd_kafka_resp_err_t rd_kafka_list_groups(rd_kafka_t *rk, const char *group, const struct rd_kafka_group_list **grplistp, int timeout_ms);
              +const char *rd_kafka_consumer_group_state_name(rd_kafka_consumer_group_state_t state);
              +rd_kafka_consumer_group_state_t rd_kafka_consumer_group_state_code(const char *name);
              +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist);
              +int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist);
              +void rd_kafka_set_logger(rd_kafka_t *rk, void (*func)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_set_log_level(rd_kafka_t *rk, int level);
              +void rd_kafka_log_print(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +int rd_kafka_outq_len(rd_kafka_t *rk);
              +void rd_kafka_dump(FILE *fp, rd_kafka_t *rk);
              +int rd_kafka_thread_cnt(void);
              +typedef enum rd_kafka_thread_type_t {
              +  RD_KAFKA_THREAD_MAIN,
              +  RD_KAFKA_THREAD_BACKGROUND,
              +  RD_KAFKA_THREAD_BROKER,
              +} rd_kafka_thread_type_t;
              +int rd_kafka_wait_destroyed(int timeout_ms);
              +int rd_kafka_unittest(void);
              +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk);
              +typedef int rd_kafka_event_type_t;
              +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev);
              +void rd_kafka_event_destroy(rd_kafka_event_t *rkev);
              +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev);
              +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, const rd_kafka_message_t **rkmessages, size_t size);
              +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev);
              +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev);
              +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev);
              +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev);
              +int rd_kafka_event_log(rd_kafka_event_t *rkev, const char **fac, const char **str, int *level);
              +int rd_kafka_event_debug_contexts(rd_kafka_event_t *rkev, char *dst, size_t dstsize);
              +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_list_t *rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_t *rd_kafka_event_topic_partition(rd_kafka_event_t *rkev);
              +typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreateAcls_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeAcls_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteAcls_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteRecords_result_t;
              +typedef rd_kafka_event_t rd_kafka_ListConsumerGroups_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConsumerGroups_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteGroups_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteConsumerGroupOffsets_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConsumerGroupOffsets_result_t;
              +typedef rd_kafka_event_t rd_kafka_ListConsumerGroupOffsets_result_t;
              +const rd_kafka_CreateTopics_result_t *rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteTopics_result_t *rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreatePartitions_result_t *rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConfigs_result_t *rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConfigs_result_t *rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteRecords_result_t *rd_kafka_event_DeleteRecords_result(rd_kafka_event_t *rkev);
              +const rd_kafka_ListConsumerGroups_result_t *rd_kafka_event_ListConsumerGroups_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConsumerGroups_result_t *rd_kafka_event_DescribeConsumerGroups_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteGroups_result_t *rd_kafka_event_DeleteGroups_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteConsumerGroupOffsets_result_t *rd_kafka_event_DeleteConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreateAcls_result_t *rd_kafka_event_CreateAcls_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeAcls_result_t *rd_kafka_event_DescribeAcls_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteAcls_result_t *rd_kafka_event_DeleteAcls_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConsumerGroupOffsets_result_t *rd_kafka_event_AlterConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
              +const rd_kafka_ListConsumerGroupOffsets_result_t *rd_kafka_event_ListConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
              +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms);
              +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms);
              +typedef rd_kafka_resp_err_t (rd_kafka_plugin_f_conf_init_t)(rd_kafka_conf_t *conf, void **plug_opaquep, char *errstr, size_t errstr_size);
              +typedef rd_kafka_conf_res_t (rd_kafka_interceptor_f_on_conf_set_t)(rd_kafka_conf_t *conf, const char *name, const char *val, char *errstr, size_t errstr_size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_dup_t)(rd_kafka_conf_t *new_conf, const rd_kafka_conf_t *old_conf, size_t filter_cnt, const char **filter, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_destroy_t)(void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_new_t)(rd_kafka_t *rk, const rd_kafka_conf_t *conf, void *ic_opaque, char *errstr, size_t errstr_size);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_send_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_acknowledgement_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_consume_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_commit_t)(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_request_sent_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_response_received_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, int64_t rtt, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_start_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_exit_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_broker_state_change_t)(rd_kafka_t *rk, int32_t broker_id, const char *secproto, const char *name, int port, const char *state, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_new_t *on_new, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_destroy_t *on_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_send_t *on_send, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_consume_t *on_consume, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_commit_t *on_commit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_response_received(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_response_received_t *on_response_received, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_broker_state_change(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_broker_state_change_t *on_broker_state_change, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres);
              +const rd_kafka_error_t *rd_kafka_group_result_error(const rd_kafka_group_result_t *groupres);
              +const char *rd_kafka_group_result_name(const rd_kafka_group_result_t *groupres);
              +const rd_kafka_topic_partition_list_t *rd_kafka_group_result_partitions(const rd_kafka_group_result_t *groupres);
              +typedef enum rd_kafka_admin_op_t {
              +  RD_KAFKA_ADMIN_OP_ANY = 0,
              +  RD_KAFKA_ADMIN_OP_CREATETOPICS,
              +  RD_KAFKA_ADMIN_OP_DELETETOPICS,
              +  RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONFIGS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS,
              +  RD_KAFKA_ADMIN_OP_DELETERECORDS,
              +  RD_KAFKA_ADMIN_OP_DELETEGROUPS,
              +  RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS,
              +  RD_KAFKA_ADMIN_OP_CREATEACLS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBEACLS,
              +  RD_KAFKA_ADMIN_OP_DELETEACLS,
              +  RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS,
              +  RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS,
              +  RD_KAFKA_ADMIN_OP__CNT,
              +} rd_kafka_admin_op_t;
              +typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t;
              +rd_kafka_AdminOptions_t *rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
              +void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, int true_or_false, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, int32_t broker_id, char *errstr, size_t errstr_size);
              +rd_kafka_error_t *rd_kafka_AdminOptions_set_require_stable_offsets(rd_kafka_AdminOptions_t *options, int true_or_false);
              +rd_kafka_error_t *rd_kafka_AdminOptions_set_match_consumer_group_states(rd_kafka_AdminOptions_t *options, const rd_kafka_consumer_group_state_t *consumer_group_states, size_t consumer_group_states_cnt);
              +void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, void *ev_opaque);
              +typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t;
              +rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, int num_partitions, int replication_factor, char *errstr, size_t errstr_size);
              +void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic);
              +void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, int32_t partition, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, const char *name, const char *value);
              +void rd_kafka_CreateTopics(rd_kafka_t *rk, rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(const rd_kafka_CreateTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
              +rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic);
              +void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic);
              +void rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt);
              +void rd_kafka_DeleteTopics(rd_kafka_t *rk, rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(const rd_kafka_DeleteTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t;
              +rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new(const char *topic, size_t new_total_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts);
              +void rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment(rd_kafka_NewPartitions_t *new_parts, int32_t new_partition_idx, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_CreatePartitions(rd_kafka_t *rk, rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreatePartitions_result_topics(const rd_kafka_CreatePartitions_result_t *result, size_t *cntp);
              +typedef enum rd_kafka_ConfigSource_t {
              +  RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
              +  RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4,
              +  RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5,
              +  RD_KAFKA_CONFIG_SOURCE__CNT,
              +} rd_kafka_ConfigSource_t;
              +const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource);
              +typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t;
              +const char *rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry);
              +const char *rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry);
              +rd_kafka_ConfigSource_t rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, size_t *cntp);
              +typedef enum rd_kafka_ResourceType_t {
              +  RD_KAFKA_RESOURCE_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_ANY = 1,
              +  RD_KAFKA_RESOURCE_TOPIC = 2,
              +  RD_KAFKA_RESOURCE_GROUP = 3,
              +  RD_KAFKA_RESOURCE_BROKER = 4,
              +  RD_KAFKA_RESOURCE__CNT,
              +} rd_kafka_ResourceType_t;
              +typedef enum rd_kafka_ResourcePatternType_t {
              +  RD_KAFKA_RESOURCE_PATTERN_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_PATTERN_ANY = 1,
              +  RD_KAFKA_RESOURCE_PATTERN_MATCH = 2,
              +  RD_KAFKA_RESOURCE_PATTERN_LITERAL = 3,
              +  RD_KAFKA_RESOURCE_PATTERN_PREFIXED = 4,
              +  RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT,
              +} rd_kafka_ResourcePatternType_t;
              +const char *rd_kafka_ResourcePatternType_name(rd_kafka_ResourcePatternType_t resource_pattern_type);
              +const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype);
              +typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
              +rd_kafka_ConfigResource_t *rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, const char *resname);
              +void rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config);
              +void rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, size_t config_cnt);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, const char *name, const char *value);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, size_t *cntp);
              +rd_kafka_ResourceType_t rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config);
              +void rd_kafka_AlterConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_AlterConfigs_result_resources(const rd_kafka_AlterConfigs_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_DescribeConfigs_result_resources(const rd_kafka_DescribeConfigs_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteRecords_s rd_kafka_DeleteRecords_t;
              +rd_kafka_DeleteRecords_t *rd_kafka_DeleteRecords_new(const rd_kafka_topic_partition_list_t *before_offsets);
              +void rd_kafka_DeleteRecords_destroy(rd_kafka_DeleteRecords_t *del_records);
              +void rd_kafka_DeleteRecords_destroy_array(rd_kafka_DeleteRecords_t **del_records, size_t del_record_cnt);
              +void rd_kafka_DeleteRecords(rd_kafka_t *rk, rd_kafka_DeleteRecords_t **del_records, size_t del_record_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_partition_list_t *rd_kafka_DeleteRecords_result_offsets(const rd_kafka_DeleteRecords_result_t *result);
              +typedef struct rd_kafka_ConsumerGroupListing_s rd_kafka_ConsumerGroupListing_t;
              +typedef struct rd_kafka_ListConsumerGroupsResult_s rd_kafka_ListConsumerGroupsResult_t;
              +void rd_kafka_ListConsumerGroups(rd_kafka_t *rk, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const char *rd_kafka_ConsumerGroupListing_group_id(const rd_kafka_ConsumerGroupListing_t *grplist);
              +int rd_kafka_ConsumerGroupListing_is_simple_consumer_group(const rd_kafka_ConsumerGroupListing_t *grplist);
              +rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupListing_state(const rd_kafka_ConsumerGroupListing_t *grplist);
              +const rd_kafka_ConsumerGroupListing_t **rd_kafka_ListConsumerGroups_result_valid(const rd_kafka_ListConsumerGroups_result_t *result, size_t *cntp);
              +const rd_kafka_error_t **rd_kafka_ListConsumerGroups_result_errors(const rd_kafka_ListConsumerGroups_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_ConsumerGroupDescription_s rd_kafka_ConsumerGroupDescription_t;
              +typedef struct rd_kafka_MemberDescription_s rd_kafka_MemberDescription_t;
              +typedef struct rd_kafka_MemberAssignment_s rd_kafka_MemberAssignment_t;
              +void rd_kafka_DescribeConsumerGroups(rd_kafka_t *rk, const char **groups, size_t groups_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConsumerGroupDescription_t **rd_kafka_DescribeConsumerGroups_result_groups(const rd_kafka_DescribeConsumerGroups_result_t *result, size_t *cntp);
              +const char *rd_kafka_ConsumerGroupDescription_group_id(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +const rd_kafka_error_t *rd_kafka_ConsumerGroupDescription_error(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +int rd_kafka_ConsumerGroupDescription_is_simple_consumer_group(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +const char *rd_kafka_ConsumerGroupDescription_partition_assignor(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupDescription_state(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +const rd_kafka_Node_t *rd_kafka_ConsumerGroupDescription_coordinator(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +size_t rd_kafka_ConsumerGroupDescription_member_count(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +const rd_kafka_MemberDescription_t *rd_kafka_ConsumerGroupDescription_member(const rd_kafka_ConsumerGroupDescription_t *grpdesc, size_t idx);
              +const char *rd_kafka_MemberDescription_client_id(const rd_kafka_MemberDescription_t *member);
              +const char *rd_kafka_MemberDescription_group_instance_id(const rd_kafka_MemberDescription_t *member);
              +const char *rd_kafka_MemberDescription_consumer_id(const rd_kafka_MemberDescription_t *member);
              +const char *rd_kafka_MemberDescription_host(const rd_kafka_MemberDescription_t *member);
              +const rd_kafka_MemberAssignment_t *rd_kafka_MemberDescription_assignment(const rd_kafka_MemberDescription_t *member);
              +const rd_kafka_topic_partition_list_t *rd_kafka_MemberAssignment_partitions(const rd_kafka_MemberAssignment_t *assignment);
              +typedef struct rd_kafka_DeleteGroup_s rd_kafka_DeleteGroup_t;
              +rd_kafka_DeleteGroup_t *rd_kafka_DeleteGroup_new(const char *group);
              +void rd_kafka_DeleteGroup_destroy(rd_kafka_DeleteGroup_t *del_group);
              +void rd_kafka_DeleteGroup_destroy_array(rd_kafka_DeleteGroup_t **del_groups, size_t del_group_cnt);
              +void rd_kafka_DeleteGroups(rd_kafka_t *rk, rd_kafka_DeleteGroup_t **del_groups, size_t del_group_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_DeleteGroups_result_groups(const rd_kafka_DeleteGroups_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_ListConsumerGroupOffsets_s rd_kafka_ListConsumerGroupOffsets_t;
              +rd_kafka_ListConsumerGroupOffsets_t *rd_kafka_ListConsumerGroupOffsets_new(const char *group_id, const rd_kafka_topic_partition_list_t *partitions);
              +void rd_kafka_ListConsumerGroupOffsets_destroy(rd_kafka_ListConsumerGroupOffsets_t *list_grpoffsets);
              +void rd_kafka_ListConsumerGroupOffsets_destroy_array(rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets, size_t list_grpoffset_cnt);
              +void rd_kafka_ListConsumerGroupOffsets(rd_kafka_t *rk, rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets, size_t list_grpoffsets_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_ListConsumerGroupOffsets_result_groups(const rd_kafka_ListConsumerGroupOffsets_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_AlterConsumerGroupOffsets_s rd_kafka_AlterConsumerGroupOffsets_t;
              +rd_kafka_AlterConsumerGroupOffsets_t *rd_kafka_AlterConsumerGroupOffsets_new(const char *group_id, const rd_kafka_topic_partition_list_t *partitions);
              +void rd_kafka_AlterConsumerGroupOffsets_destroy(rd_kafka_AlterConsumerGroupOffsets_t *alter_grpoffsets);
              +void rd_kafka_AlterConsumerGroupOffsets_destroy_array(rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets, size_t alter_grpoffset_cnt);
              +void rd_kafka_AlterConsumerGroupOffsets(rd_kafka_t *rk, rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets, size_t alter_grpoffsets_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_AlterConsumerGroupOffsets_result_groups(const rd_kafka_AlterConsumerGroupOffsets_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteConsumerGroupOffsets_s rd_kafka_DeleteConsumerGroupOffsets_t;
              +rd_kafka_DeleteConsumerGroupOffsets_t *rd_kafka_DeleteConsumerGroupOffsets_new(const char *group, const rd_kafka_topic_partition_list_t *partitions);
              +void rd_kafka_DeleteConsumerGroupOffsets_destroy(rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets);
              +void rd_kafka_DeleteConsumerGroupOffsets_destroy_array(rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, size_t del_grpoffset_cnt);
              +void rd_kafka_DeleteConsumerGroupOffsets(rd_kafka_t *rk, rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, size_t del_grpoffsets_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_DeleteConsumerGroupOffsets_result_groups(const rd_kafka_DeleteConsumerGroupOffsets_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_AclBinding_s rd_kafka_AclBinding_t;
              +typedef rd_kafka_AclBinding_t rd_kafka_AclBindingFilter_t;
              +const rd_kafka_error_t *rd_kafka_acl_result_error(const rd_kafka_acl_result_t *aclres);
              +typedef enum rd_kafka_AclOperation_t {
              +  RD_KAFKA_ACL_OPERATION_UNKNOWN = 0,
              +  RD_KAFKA_ACL_OPERATION_ANY = 1,
              +  RD_KAFKA_ACL_OPERATION_ALL = 2,
              +  RD_KAFKA_ACL_OPERATION_READ = 3,
              +  RD_KAFKA_ACL_OPERATION_WRITE = 4,
              +  RD_KAFKA_ACL_OPERATION_CREATE = 5,
              +  RD_KAFKA_ACL_OPERATION_DELETE = 6,
              +  RD_KAFKA_ACL_OPERATION_ALTER = 7,
              +  RD_KAFKA_ACL_OPERATION_DESCRIBE = 8,
              +  RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION = 9,
              +  RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS = 10,
              +  RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS = 11,
              +  RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE = 12,
              +  RD_KAFKA_ACL_OPERATION__CNT,
              +} rd_kafka_AclOperation_t;
              +const char *rd_kafka_AclOperation_name(rd_kafka_AclOperation_t acl_operation);
              +typedef enum rd_kafka_AclPermissionType_t {
              +  RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN = 0,
              +  RD_KAFKA_ACL_PERMISSION_TYPE_ANY = 1,
              +  RD_KAFKA_ACL_PERMISSION_TYPE_DENY = 2,
              +  RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW = 3,
              +  RD_KAFKA_ACL_PERMISSION_TYPE__CNT,
              +} rd_kafka_AclPermissionType_t;
              +const char *rd_kafka_AclPermissionType_name(rd_kafka_AclPermissionType_t acl_permission_type);
              +rd_kafka_AclBinding_t *rd_kafka_AclBinding_new(rd_kafka_ResourceType_t restype, const char *name, rd_kafka_ResourcePatternType_t resource_pattern_type, const char *principal, const char *host, rd_kafka_AclOperation_t operation, rd_kafka_AclPermissionType_t permission_type, char *errstr, size_t errstr_size);
              +rd_kafka_AclBindingFilter_t *rd_kafka_AclBindingFilter_new(rd_kafka_ResourceType_t restype, const char *name, rd_kafka_ResourcePatternType_t resource_pattern_type, const char *principal, const char *host, rd_kafka_AclOperation_t operation, rd_kafka_AclPermissionType_t permission_type, char *errstr, size_t errstr_size);
              +rd_kafka_ResourceType_t rd_kafka_AclBinding_restype(const rd_kafka_AclBinding_t *acl);
              +const char *rd_kafka_AclBinding_name(const rd_kafka_AclBinding_t *acl);
              +const char *rd_kafka_AclBinding_principal(const rd_kafka_AclBinding_t *acl);
              +const char *rd_kafka_AclBinding_host(const rd_kafka_AclBinding_t *acl);
              +rd_kafka_AclOperation_t rd_kafka_AclBinding_operation(const rd_kafka_AclBinding_t *acl);
              +rd_kafka_AclPermissionType_t rd_kafka_AclBinding_permission_type(const rd_kafka_AclBinding_t *acl);
              +rd_kafka_ResourcePatternType_t rd_kafka_AclBinding_resource_pattern_type(const rd_kafka_AclBinding_t *acl);
              +const rd_kafka_error_t *rd_kafka_AclBinding_error(const rd_kafka_AclBinding_t *acl);
              +void rd_kafka_AclBinding_destroy(rd_kafka_AclBinding_t *acl_binding);
              +void rd_kafka_AclBinding_destroy_array(rd_kafka_AclBinding_t **acl_bindings, size_t acl_bindings_cnt);
              +const rd_kafka_acl_result_t **rd_kafka_CreateAcls_result_acls(const rd_kafka_CreateAcls_result_t *result, size_t *cntp);
              +void rd_kafka_CreateAcls(rd_kafka_t *rk, rd_kafka_AclBinding_t **new_acls, size_t new_acls_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_AclBinding_t **rd_kafka_DescribeAcls_result_acls(const rd_kafka_DescribeAcls_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeAcls(rd_kafka_t *rk, rd_kafka_AclBindingFilter_t *acl_filter, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +typedef struct rd_kafka_DeleteAcls_result_response_s rd_kafka_DeleteAcls_result_response_t;
              +const rd_kafka_DeleteAcls_result_response_t **rd_kafka_DeleteAcls_result_responses(const rd_kafka_DeleteAcls_result_t *result, size_t *cntp);
              +const rd_kafka_error_t *rd_kafka_DeleteAcls_result_response_error(const rd_kafka_DeleteAcls_result_response_t *result_response);
              +const rd_kafka_AclBinding_t **rd_kafka_DeleteAcls_result_response_matching_acls(const rd_kafka_DeleteAcls_result_response_t *result_response, size_t *matching_acls_cntp);
              +void rd_kafka_DeleteAcls(rd_kafka_t *rk, rd_kafka_AclBindingFilter_t **del_acls, size_t del_acls_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token(rd_kafka_t *rk, const char *token_value, int64_t md_lifetime_ms, const char *md_principal_name, const char **extensions, size_t extension_size, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk, const char *errstr);
              +rd_kafka_error_t *rd_kafka_init_transactions(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_send_offsets_to_transaction(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, const rd_kafka_consumer_group_metadata_t *cgmetadata, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_commit_transaction(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_abort_transaction(rd_kafka_t *rk, int timeout_ms);
              +typedef struct rd_kafka_mock_cluster_s rd_kafka_mock_cluster_t;
              +rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new(rd_kafka_t *rk, int broker_cnt);
              +void rd_kafka_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_t *rd_kafka_mock_cluster_handle(const rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_mock_cluster_t *rd_kafka_handle_mock_cluster(const rd_kafka_t *rk);
              +const char *rd_kafka_mock_cluster_bootstraps(const rd_kafka_mock_cluster_t *mcluster);
              +void rd_kafka_mock_clear_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey);
              +void rd_kafka_mock_push_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, ...);
              +void rd_kafka_mock_push_request_errors_array(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, const rd_kafka_resp_err_t *errors);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_push_request_error_rtts(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int16_t ApiKey, size_t cnt, ...);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_error_stack_cnt(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int16_t ApiKey, size_t *cntp);
              +void rd_kafka_mock_topic_set_error(rd_kafka_mock_cluster_t *mcluster, const char *topic, rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_mock_topic_create(rd_kafka_mock_cluster_t *mcluster, const char *topic, int partition_cnt, int replication_factor);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_leader(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int64_t lo, int64_t hi);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_down(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_up(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rtt(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int rtt_ms);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rack(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, const char *rack);
              +rd_kafka_resp_err_t rd_kafka_mock_coordinator_set(rd_kafka_mock_cluster_t *mcluster, const char *key_type, const char *key, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_set_apiversion(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, int16_t MinVersion, int16_t MaxVersion);
              +'
              +
              +rdkafka.h, rdkafka_mock.h

              +
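The cdef string that ends above bundles librdkafka declarations (rdkafka.h and rdkafka_mock.h) in the form expected by PHP's FFI::cdef(). The following minimal sketch shows that mechanism with a two-function excerpt instead of the full constant; the library name "librdkafka.so.1" is an assumption about the local install.

<?php
// Minimal sketch: same declaration style as the cdef above, reduced to two
// functions so the example stays self-contained. Assumes librdkafka is
// installed and loadable as "librdkafka.so.1" (adjust for your platform).
$kafka = \FFI::cdef(
    'int rd_kafka_version(void);
     const char *rd_kafka_version_str(void);',
    'librdkafka.so.1'
);

// Pointer returns come back as FFI\CData; FFI::string() reads the NUL-terminated text.
printf(
    "librdkafka %s (0x%08x)\n",
    \FFI::string($kafka->rd_kafka_version_str()),
    $kafka->rd_kafka_version()
);

The full cdef constant serves the same purpose for the complete API surface: every declaration it lists becomes available on the resulting FFI handle.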

              RD_KAFKA_VERSION

              +
              public RD_KAFKA_VERSION = 33555199
              +
              +

              librdkafka version

              +

              Interpreted as hex MM.mm.rr.xx:

• MM = Major
• mm = minor
• rr = revision
• xx = pre-release id (0xff is the final release)

              E.g.: 0x000801ff = 0.8.1

              +
              Remarks
This value should only be used during compile time; for runtime checks of the version, use rd_kafka_version()
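To make the encoding concrete, the sketch below unpacks the documented value into its parts; per the remark above, the same arithmetic applies to the value returned by rd_kafka_version() at runtime.

<?php
// Sketch: unpacking the hex-encoded librdkafka version (MM.mm.rr.xx).
// 33555199 is the constant value documented above, i.e. 0x020002ff.
$version = 33555199;

$major      = ($version >> 24) & 0xff;
$minor      = ($version >> 16) & 0xff;
$revision   = ($version >> 8)  & 0xff;
$prerelease = $version & 0xff;   // 0xff marks a final release

printf(
    "%d.%d.%d%s\n",
    $major,
    $minor,
    $revision,
    $prerelease === 0xff ? '' : sprintf('~pre%d', $prerelease)
);
// Prints: 2.0.2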

              RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE

              +
              public RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76
              +
              +

              Unsupported compression type


              RD_KAFKA_RESP_ERR_END_ALL

              +

              public RD_KAFKA_RESP_ERR_END_ALL = 98
              +
              +enum rd_kafka_resp_err_t

              +

              RD_KAFKA_ADMIN_OP__CNT

              +
              public RD_KAFKA_ADMIN_OP__CNT = 16
              +
              +

              Number of ops defined


              RD_KAFKA_CDEF

              +

              public RD_KAFKA_CDEF = 'typedef long int ssize_t;
              +typedef struct _IO_FILE FILE;
              +typedef long int mode_t;
              +typedef signed int int16_t;
              +typedef unsigned int uint16_t;
              +typedef signed int int32_t;
              +typedef signed long int int64_t;
              +int rd_kafka_version(void);
              +const char *rd_kafka_version_str(void);
              +typedef enum rd_kafka_type_t {
              +  RD_KAFKA_PRODUCER,
              +  RD_KAFKA_CONSUMER,
              +} rd_kafka_type_t;
              +typedef enum rd_kafka_timestamp_type_t {
              +  RD_KAFKA_TIMESTAMP_NOT_AVAILABLE,
              +  RD_KAFKA_TIMESTAMP_CREATE_TIME,
              +  RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME,
              +} rd_kafka_timestamp_type_t;
              +const char *rd_kafka_get_debug_contexts(void);
              +typedef struct rd_kafka_s rd_kafka_t;
              +typedef struct rd_kafka_topic_s rd_kafka_topic_t;
              +typedef struct rd_kafka_conf_s rd_kafka_conf_t;
              +typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t;
              +typedef struct rd_kafka_queue_s rd_kafka_queue_t;
              +typedef struct rd_kafka_op_s rd_kafka_event_t;
              +typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t;
              +typedef struct rd_kafka_consumer_group_metadata_s rd_kafka_consumer_group_metadata_t;
              +typedef struct rd_kafka_error_s {
              +  unsigned int code;
              +  char *errstr;
              +  unsigned char fatal;
              +  unsigned char retriable;
              +  unsigned char txn_requires_abort;
              +} rd_kafka_error_t;
              +typedef struct rd_kafka_headers_s rd_kafka_headers_t;
              +typedef struct rd_kafka_group_result_s rd_kafka_group_result_t;
              +typedef struct rd_kafka_acl_result_s rd_kafka_acl_result_t;
              +typedef enum {
              +  RD_KAFKA_RESP_ERR__BEGIN = (- 200),
              +  RD_KAFKA_RESP_ERR__BAD_MSG = (- 199),
              +  RD_KAFKA_RESP_ERR__BAD_COMPRESSION = (- 198),
              +  RD_KAFKA_RESP_ERR__DESTROY = (- 197),
              +  RD_KAFKA_RESP_ERR__FAIL = (- 196),
              +  RD_KAFKA_RESP_ERR__TRANSPORT = (- 195),
              +  RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = (- 194),
              +  RD_KAFKA_RESP_ERR__RESOLVE = (- 193),
              +  RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = (- 192),
              +  RD_KAFKA_RESP_ERR__PARTITION_EOF = (- 191),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = (- 190),
              +  RD_KAFKA_RESP_ERR__FS = (- 189),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = (- 188),
              +  RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = (- 187),
              +  RD_KAFKA_RESP_ERR__INVALID_ARG = (- 186),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT = (- 185),
              +  RD_KAFKA_RESP_ERR__QUEUE_FULL = (- 184),
              +  RD_KAFKA_RESP_ERR__ISR_INSUFF = (- 183),
              +  RD_KAFKA_RESP_ERR__NODE_UPDATE = (- 182),
              +  RD_KAFKA_RESP_ERR__SSL = (- 181),
              +  RD_KAFKA_RESP_ERR__WAIT_COORD = (- 180),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = (- 179),
              +  RD_KAFKA_RESP_ERR__IN_PROGRESS = (- 178),
              +  RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = (- 177),
              +  RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = (- 176),
              +  RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = (- 175),
              +  RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = (- 174),
              +  RD_KAFKA_RESP_ERR__CONFLICT = (- 173),
              +  RD_KAFKA_RESP_ERR__STATE = (- 172),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = (- 171),
              +  RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = (- 170),
              +  RD_KAFKA_RESP_ERR__AUTHENTICATION = (- 169),
              +  RD_KAFKA_RESP_ERR__NO_OFFSET = (- 168),
              +  RD_KAFKA_RESP_ERR__OUTDATED = (- 167),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = (- 166),
              +  RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = (- 165),
              +  RD_KAFKA_RESP_ERR__WAIT_CACHE = (- 164),
              +  RD_KAFKA_RESP_ERR__INTR = (- 163),
              +  RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = (- 162),
              +  RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = (- 161),
              +  RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = (- 160),
              +  RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = (- 159),
              +  RD_KAFKA_RESP_ERR__PARTIAL = (- 158),
              +  RD_KAFKA_RESP_ERR__READ_ONLY = (- 157),
              +  RD_KAFKA_RESP_ERR__NOENT = (- 156),
              +  RD_KAFKA_RESP_ERR__UNDERFLOW = (- 155),
              +  RD_KAFKA_RESP_ERR__INVALID_TYPE = (- 154),
              +  RD_KAFKA_RESP_ERR__RETRY = (- 153),
              +  RD_KAFKA_RESP_ERR__PURGE_QUEUE = (- 152),
              +  RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = (- 151),
              +  RD_KAFKA_RESP_ERR__FATAL = (- 150),
              +  RD_KAFKA_RESP_ERR__INCONSISTENT = (- 149),
              +  RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = (- 148),
              +  RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = (- 147),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_BROKER = (- 146),
              +  RD_KAFKA_RESP_ERR__NOT_CONFIGURED = (- 145),
              +  RD_KAFKA_RESP_ERR__FENCED = (- 144),
              +  RD_KAFKA_RESP_ERR__APPLICATION = (- 143),
              +  RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST = (- 142),
              +  RD_KAFKA_RESP_ERR__NOOP = (- 141),
              +  RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET = (- 140),
              +  RD_KAFKA_RESP_ERR__END = (- 100),
              +  RD_KAFKA_RESP_ERR_UNKNOWN = (- 1),
              +  RD_KAFKA_RESP_ERR_NO_ERROR = 0,
              +  RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG = 2,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4,
              +  RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5,
              +  RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6,
              +  RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7,
              +  RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8,
              +  RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9,
              +  RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10,
              +  RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11,
              +  RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12,
              +  RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS = 14,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE = 15,
              +  RD_KAFKA_RESP_ERR_NOT_COORDINATOR = 16,
              +  RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17,
              +  RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
              +  RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25,
              +  RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26,
              +  RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27,
              +  RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
              +  RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29,
              +  RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30,
              +  RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
              +  RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35,
              +  RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36,
              +  RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39,
              +  RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40,
              +  RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
              +  RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44,
              +  RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47,
              +  RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49,
              +  RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50,
              +  RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51,
              +  RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52,
              +  RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
              +  RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54,
              +  RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55,
              +  RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56,
              +  RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57,
              +  RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59,
              +  RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66,
              +  RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67,
              +  RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68,
              +  RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69,
              +  RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70,
              +  RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71,
              +  RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72,
              +  RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73,
              +  RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76,
              +  RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77,
              +  RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78,
              +  RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79,
              +  RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80,
              +  RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81,
              +  RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID = 82,
              +  RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE = 83,
              +  RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED = 84,
              +  RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS = 85,
              +  RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC = 86,
              +  RD_KAFKA_RESP_ERR_INVALID_RECORD = 87,
              +  RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT = 88,
              +  RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED = 89,
              +  RD_KAFKA_RESP_ERR_PRODUCER_FENCED = 90,
              +  RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND = 91,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE = 92,
              +  RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL = 93,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET = 94,
              +  RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION = 95,
              +  RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED = 96,
              +  RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE = 97,
              +  RD_KAFKA_RESP_ERR_END_ALL,
              +} rd_kafka_resp_err_t;
              +struct rd_kafka_err_desc {
              +  rd_kafka_resp_err_t code;
              +  char *name;
              +  char *desc;
              +};
              +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, size_t *cntp);
              +const char *rd_kafka_err2str(rd_kafka_resp_err_t err);
              +const char *rd_kafka_err2name(rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_last_error(void);
              +rd_kafka_resp_err_t rd_kafka_errno2err(int errnox);
              +int rd_kafka_errno(void);
              +rd_kafka_resp_err_t rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason);
              +rd_kafka_resp_err_t rd_kafka_error_code(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_name(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_string(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_fatal(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_retriable(const rd_kafka_error_t *error);
              +int rd_kafka_error_txn_requires_abort(const rd_kafka_error_t *error);
              +void rd_kafka_error_destroy(rd_kafka_error_t *error);
              +rd_kafka_error_t *rd_kafka_error_new(rd_kafka_resp_err_t code, const char *fmt, ...);
              +typedef struct rd_kafka_topic_partition_s {
              +  char *topic;
              +  int32_t partition;
              +  int64_t offset;
              +  void *metadata;
              +  size_t metadata_size;
              +  void *opaque;
              +  rd_kafka_resp_err_t err;
              +  void *_private;
              +} rd_kafka_topic_partition_t;
              +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar);
              +typedef struct rd_kafka_topic_partition_list_s {
              +  int cnt;
              +  int size;
              +  rd_kafka_topic_partition_t *elems;
              +} rd_kafka_topic_partition_list_t;
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size);
              +void rd_kafka_topic_partition_list_destroy(rd_kafka_topic_partition_list_t *rkparlist);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_add_range(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t start, int32_t stop);
              +int rd_kafka_topic_partition_list_del(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +int rd_kafka_topic_partition_list_del_by_idx(rd_kafka_topic_partition_list_t *rktparlist, int idx);
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src);
              +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition, int64_t offset);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(const rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_sort(rd_kafka_topic_partition_list_t *rktparlist, int (*cmp)(const void *a, const void *b, void *cmp_opaque), void *cmp_opaque);
              +typedef enum rd_kafka_vtype_t {
              +  RD_KAFKA_VTYPE_END,
              +  RD_KAFKA_VTYPE_TOPIC,
              +  RD_KAFKA_VTYPE_RKT,
              +  RD_KAFKA_VTYPE_PARTITION,
              +  RD_KAFKA_VTYPE_VALUE,
              +  RD_KAFKA_VTYPE_KEY,
              +  RD_KAFKA_VTYPE_OPAQUE,
              +  RD_KAFKA_VTYPE_MSGFLAGS,
              +  RD_KAFKA_VTYPE_TIMESTAMP,
              +  RD_KAFKA_VTYPE_HEADER,
              +  RD_KAFKA_VTYPE_HEADERS,
              +} rd_kafka_vtype_t;
              +typedef struct rd_kafka_vu_s {
              +  rd_kafka_vtype_t vtype;
              +  union {
              +    char *cstr;
              +    rd_kafka_topic_t *rkt;
              +    int i;
              +    int32_t i32;
              +    int64_t i64;
              +    struct {
              +      void *ptr;
              +      size_t size;
              +    } mem;
              +    struct {
              +      char *name;
              +      void *val;
              +      ssize_t size;
              +    } header;
              +    rd_kafka_headers_t *headers;
              +    void *ptr;
              +    char _pad[64];
              +  } u;
              +} rd_kafka_vu_t;
              +rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count);
              +void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs);
              +rd_kafka_headers_t *rd_kafka_headers_copy(const rd_kafka_headers_t *src);
              +rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, const char *name, ssize_t name_size, const void *value, ssize_t value_size);
              +rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, const char *name);
              +rd_kafka_resp_err_t rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get(const rd_kafka_headers_t *hdrs, size_t idx, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, size_t idx, const char **namep, const void **valuep, size_t *sizep);
              +typedef struct rd_kafka_message_s {
              +  rd_kafka_resp_err_t err;
              +  rd_kafka_topic_t *rkt;
              +  int32_t partition;
              +  void *payload;
              +  size_t len;
              +  void *key;
              +  size_t key_len;
              +  int64_t offset;
              +  void *_private;
              +} rd_kafka_message_t;
              +void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);
              +const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage);
              +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, rd_kafka_timestamp_type_t *tstype);
              +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage);
              +int32_t rd_kafka_message_broker_id(const rd_kafka_message_t *rkmessage);
              +rd_kafka_resp_err_t rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +rd_kafka_resp_err_t rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t *hdrs);
              +size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs);
              +typedef enum {
              +  RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0,
              +  RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1,
              +  RD_KAFKA_MSG_STATUS_PERSISTED = 2,
              +} rd_kafka_msg_status_t;
              +rd_kafka_msg_status_t rd_kafka_message_status(const rd_kafka_message_t *rkmessage);
              +typedef enum {
              +  RD_KAFKA_CONF_UNKNOWN = (- 2),
              +  RD_KAFKA_CONF_INVALID = (- 1),
              +  RD_KAFKA_CONF_OK = 0,
              +} rd_kafka_conf_res_t;
              +rd_kafka_conf_t *rd_kafka_conf_new(void);
              +void rd_kafka_conf_destroy(rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, size_t filter_cnt, const char **filter);
              +const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk);
              +rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events);
              +void rd_kafka_conf_set_background_event_cb(rd_kafka_conf_t *conf, void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque));
              +void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, void (*dr_cb)(rd_kafka_t *rk, void *payload, size_t len, rd_kafka_resp_err_t err, void *opaque, void *msg_opaque));
              +void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, void (*dr_msg_cb)(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_consume_cb(rd_kafka_conf_t *conf, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_rebalance_cb(rd_kafka_conf_t *conf, void (*rebalance_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque));
              +void rd_kafka_conf_set_offset_commit_cb(rd_kafka_conf_t *conf, void (*offset_commit_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque));
              +void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, void (*error_cb)(rd_kafka_t *rk, int err, const char *reason, void *opaque));
              +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, void (*throttle_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque));
              +void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, void (*log_cb)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque));
              +void rd_kafka_conf_set_oauthbearer_token_refresh_cb(rd_kafka_conf_t *conf, void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, const char *oauthbearer_config, void *opaque));
              +void rd_kafka_conf_enable_sasl_queue(rd_kafka_conf_t *conf, int enable);
              +void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, int (*socket_cb)(int domain, int type, int protocol, void *opaque));
              +void rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, int (*connect_cb)(int sockfd, const struct sockaddr *addr, int addrlen, const char *id, void *opaque));
              +void rd_kafka_conf_set_closesocket_cb(rd_kafka_conf_t *conf, int (*closesocket_cb)(int sockfd, void *opaque));
              +struct addrinfo;
              +void rd_kafka_conf_set_resolve_cb(rd_kafka_conf_t *conf, int (*resolve_cb)(const char *node, const char *service, const struct addrinfo *hints, struct addrinfo **res, void *opaque));
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb(rd_kafka_conf_t *conf, int (*ssl_cert_verify_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int *x509_error, int depth, const char *buf, size_t size, char *errstr, size_t errstr_size, void *opaque));
              +typedef enum rd_kafka_cert_type_t {
              +  RD_KAFKA_CERT_PUBLIC_KEY,
              +  RD_KAFKA_CERT_PRIVATE_KEY,
              +  RD_KAFKA_CERT_CA,
              +  RD_KAFKA_CERT__CNT,
              +} rd_kafka_cert_type_t;
              +typedef enum rd_kafka_cert_enc_t {
              +  RD_KAFKA_CERT_ENC_PKCS12,
              +  RD_KAFKA_CERT_ENC_DER,
              +  RD_KAFKA_CERT_ENC_PEM,
              +  RD_KAFKA_CERT_ENC__CNT,
              +} rd_kafka_cert_enc_t;
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, rd_kafka_cert_type_t cert_type, rd_kafka_cert_enc_t cert_enc, const void *buffer, size_t size, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_engine_callback_data(rd_kafka_conf_t *conf, void *callback_data);
              +void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque);
              +void *rd_kafka_opaque(const rd_kafka_t *rk);
              +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, rd_kafka_topic_conf_t *tconf);
              +rd_kafka_topic_conf_t *rd_kafka_conf_get_default_topic_conf(rd_kafka_conf_t *conf);
              +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp);
              +const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, size_t *cntp);
              +void rd_kafka_conf_dump_free(const char **arr, size_t cnt);
              +void rd_kafka_conf_properties_show(FILE *fp);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf);
              +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk);
              +void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *rkt_opaque);
              +void rd_kafka_topic_conf_set_partitioner_cb(rd_kafka_topic_conf_t *topic_conf, int32_t (*partitioner)(const rd_kafka_topic_t *rkt, const void *keydata, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque));
              +void rd_kafka_topic_conf_set_msg_order_cmp(rd_kafka_topic_conf_t *topic_conf, int (*msg_order_cmp)(const rd_kafka_message_t *a, const rd_kafka_message_t *b));
              +int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, int32_t partition);
              +int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, char *errstr, size_t errstr_size);
              +void rd_kafka_destroy(rd_kafka_t *rk);
              +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags);
              +const char *rd_kafka_name(const rd_kafka_t *rk);
              +rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk);
              +char *rd_kafka_memberid(const rd_kafka_t *rk);
              +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms);
              +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, rd_kafka_topic_conf_t *conf);
              +void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt);
              +const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt);
              +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt);
              +int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms);
              +void rd_kafka_yield(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_pause_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_resume_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high);
              +rd_kafka_resp_err_t rd_kafka_offsets_for_times(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets, int timeout_ms);
              +void *rd_kafka_mem_calloc(rd_kafka_t *rk, size_t num, size_t size);
              +void *rd_kafka_mem_malloc(rd_kafka_t *rk, size_t size);
              +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr);
              +rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk);
              +void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu);
              +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_sasl(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_sasl_background_callbacks_enable(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_sasl_set_credentials(rd_kafka_t *rk, const char *username, const char *password);
              +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, const char *topic, int32_t partition);
              +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk);
              +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst);
              +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu);
              +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, int fd, const void *payload, size_t size);
              +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, void (*event_cb)(rd_kafka_t *rk, void *qev_opaque), void *qev_opaque);
              +void rd_kafka_queue_yield(rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
              +rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_seek_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms);
              +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, int timeout_ms);
              +ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +rd_kafka_resp_err_t rd_kafka_offsets_store(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets);
              +rd_kafka_resp_err_t rd_kafka_subscribe(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *topics);
              +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics);
              +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_consumer_close_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consumer_closed(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_incremental_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_error_t *rd_kafka_incremental_unassign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +const char *rd_kafka_rebalance_protocol(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_assignment(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **partitions);
              +int rd_kafka_assignment_lost(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_commit(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_message(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_queue(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_queue_t *rkqu, void (*cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_committed(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata(rd_kafka_t *rk);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new(const char *group_id);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new_with_genid(const char *group_id, int32_t generation_id, const char *member_id, const char *group_instance_id);
              +void rd_kafka_consumer_group_metadata_destroy(rd_kafka_consumer_group_metadata_t *);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_write(const rd_kafka_consumer_group_metadata_t *cgmd, void **bufferp, size_t *sizep);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_read(rd_kafka_consumer_group_metadata_t **cgmdp, const void *buffer, size_t size);
              +int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, void *payload, size_t len, const void *key, size_t keylen, void *msg_opaque);
              +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...);
              +rd_kafka_error_t *rd_kafka_produceva(rd_kafka_t *rk, const rd_kafka_vu_t *vus, size_t cnt);
              +int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, rd_kafka_message_t *rkmessages, int message_cnt);
              +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags);
              +typedef struct rd_kafka_metadata_broker {
              +  int32_t id;
              +  char *host;
              +  int port;
              +} rd_kafka_metadata_broker_t;
              +typedef struct rd_kafka_metadata_partition {
              +  int32_t id;
              +  rd_kafka_resp_err_t err;
              +  int32_t leader;
              +  int replica_cnt;
              +  int32_t *replicas;
              +  int isr_cnt;
              +  int32_t *isrs;
              +} rd_kafka_metadata_partition_t;
              +typedef struct rd_kafka_metadata_topic {
              +  char *topic;
              +  int partition_cnt;
              +  struct rd_kafka_metadata_partition *partitions;
              +  rd_kafka_resp_err_t err;
              +} rd_kafka_metadata_topic_t;
              +typedef struct rd_kafka_metadata {
              +  int broker_cnt;
              +  struct rd_kafka_metadata_broker *brokers;
              +  int topic_cnt;
              +  struct rd_kafka_metadata_topic *topics;
              +  int32_t orig_broker_id;
              +  char *orig_broker_name;
              +} rd_kafka_metadata_t;
              +rd_kafka_resp_err_t rd_kafka_metadata(rd_kafka_t *rk, int all_topics, rd_kafka_topic_t *only_rkt, const struct rd_kafka_metadata **metadatap, int timeout_ms);
              +void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata);
              +typedef struct rd_kafka_Node_s rd_kafka_Node_t;
              +int rd_kafka_Node_id(const rd_kafka_Node_t *node);
              +const char *rd_kafka_Node_host(const rd_kafka_Node_t *node);
              +uint16_t rd_kafka_Node_port(const rd_kafka_Node_t *node);
              +struct rd_kafka_group_member_info {
              +  char *member_id;
              +  char *client_id;
              +  char *client_host;
              +  void *member_metadata;
              +  int member_metadata_size;
              +  void *member_assignment;
              +  int member_assignment_size;
              +};
              +typedef enum {
              +  RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN = 0,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_PREPARING_REBALANCE = 1,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_COMPLETING_REBALANCE = 2,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_STABLE = 3,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_DEAD = 4,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY = 5,
              +  RD_KAFKA_CONSUMER_GROUP_STATE__CNT,
              +} rd_kafka_consumer_group_state_t;
              +struct rd_kafka_group_info {
              +  struct rd_kafka_metadata_broker broker;
              +  char *group;
              +  rd_kafka_resp_err_t err;
              +  char *state;
              +  char *protocol_type;
              +  char *protocol;
              +  struct rd_kafka_group_member_info *members;
              +  int member_cnt;
              +};
              +struct rd_kafka_group_list {
              +  struct rd_kafka_group_info *groups;
              +  int group_cnt;
              +};
              +rd_kafka_resp_err_t rd_kafka_list_groups(rd_kafka_t *rk, const char *group, const struct rd_kafka_group_list **grplistp, int timeout_ms);
              +const char *rd_kafka_consumer_group_state_name(rd_kafka_consumer_group_state_t state);
              +rd_kafka_consumer_group_state_t rd_kafka_consumer_group_state_code(const char *name);
              +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist);
              +int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist);
              +void rd_kafka_set_logger(rd_kafka_t *rk, void (*func)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_set_log_level(rd_kafka_t *rk, int level);
              +void rd_kafka_log_print(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +int rd_kafka_outq_len(rd_kafka_t *rk);
              +void rd_kafka_dump(FILE *fp, rd_kafka_t *rk);
              +int rd_kafka_thread_cnt(void);
              +typedef enum rd_kafka_thread_type_t {
              +  RD_KAFKA_THREAD_MAIN,
              +  RD_KAFKA_THREAD_BACKGROUND,
              +  RD_KAFKA_THREAD_BROKER,
              +} rd_kafka_thread_type_t;
              +int rd_kafka_wait_destroyed(int timeout_ms);
              +int rd_kafka_unittest(void);
              +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk);
              +typedef int rd_kafka_event_type_t;
              +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev);
              +void rd_kafka_event_destroy(rd_kafka_event_t *rkev);
              +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev);
              +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, const rd_kafka_message_t **rkmessages, size_t size);
              +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev);
              +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev);
              +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev);
              +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev);
              +int rd_kafka_event_log(rd_kafka_event_t *rkev, const char **fac, const char **str, int *level);
              +int rd_kafka_event_debug_contexts(rd_kafka_event_t *rkev, char *dst, size_t dstsize);
              +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_list_t *rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_t *rd_kafka_event_topic_partition(rd_kafka_event_t *rkev);
              +typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreateAcls_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeAcls_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteAcls_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteRecords_result_t;
              +typedef rd_kafka_event_t rd_kafka_ListConsumerGroups_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConsumerGroups_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteGroups_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteConsumerGroupOffsets_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConsumerGroupOffsets_result_t;
              +typedef rd_kafka_event_t rd_kafka_ListConsumerGroupOffsets_result_t;
              +const rd_kafka_CreateTopics_result_t *rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteTopics_result_t *rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreatePartitions_result_t *rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConfigs_result_t *rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConfigs_result_t *rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteRecords_result_t *rd_kafka_event_DeleteRecords_result(rd_kafka_event_t *rkev);
              +const rd_kafka_ListConsumerGroups_result_t *rd_kafka_event_ListConsumerGroups_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConsumerGroups_result_t *rd_kafka_event_DescribeConsumerGroups_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteGroups_result_t *rd_kafka_event_DeleteGroups_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteConsumerGroupOffsets_result_t *rd_kafka_event_DeleteConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreateAcls_result_t *rd_kafka_event_CreateAcls_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeAcls_result_t *rd_kafka_event_DescribeAcls_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteAcls_result_t *rd_kafka_event_DeleteAcls_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConsumerGroupOffsets_result_t *rd_kafka_event_AlterConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
              +const rd_kafka_ListConsumerGroupOffsets_result_t *rd_kafka_event_ListConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
              +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms);
              +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms);
              +typedef rd_kafka_resp_err_t (rd_kafka_plugin_f_conf_init_t)(rd_kafka_conf_t *conf, void **plug_opaquep, char *errstr, size_t errstr_size);
              +typedef rd_kafka_conf_res_t (rd_kafka_interceptor_f_on_conf_set_t)(rd_kafka_conf_t *conf, const char *name, const char *val, char *errstr, size_t errstr_size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_dup_t)(rd_kafka_conf_t *new_conf, const rd_kafka_conf_t *old_conf, size_t filter_cnt, const char **filter, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_destroy_t)(void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_new_t)(rd_kafka_t *rk, const rd_kafka_conf_t *conf, void *ic_opaque, char *errstr, size_t errstr_size);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_send_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_acknowledgement_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_consume_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_commit_t)(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_request_sent_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_response_received_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, int64_t rtt, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_start_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_exit_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_broker_state_change_t)(rd_kafka_t *rk, int32_t broker_id, const char *secproto, const char *name, int port, const char *state, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_new_t *on_new, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_destroy_t *on_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_send_t *on_send, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_consume_t *on_consume, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_commit_t *on_commit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_response_received(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_response_received_t *on_response_received, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_broker_state_change(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_broker_state_change_t *on_broker_state_change, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres);
              +const rd_kafka_error_t *rd_kafka_group_result_error(const rd_kafka_group_result_t *groupres);
              +const char *rd_kafka_group_result_name(const rd_kafka_group_result_t *groupres);
              +const rd_kafka_topic_partition_list_t *rd_kafka_group_result_partitions(const rd_kafka_group_result_t *groupres);
              +typedef enum rd_kafka_admin_op_t {
              +  RD_KAFKA_ADMIN_OP_ANY = 0,
              +  RD_KAFKA_ADMIN_OP_CREATETOPICS,
              +  RD_KAFKA_ADMIN_OP_DELETETOPICS,
              +  RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONFIGS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS,
              +  RD_KAFKA_ADMIN_OP_DELETERECORDS,
              +  RD_KAFKA_ADMIN_OP_DELETEGROUPS,
              +  RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS,
              +  RD_KAFKA_ADMIN_OP_CREATEACLS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBEACLS,
              +  RD_KAFKA_ADMIN_OP_DELETEACLS,
              +  RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS,
              +  RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS,
              +  RD_KAFKA_ADMIN_OP__CNT,
              +} rd_kafka_admin_op_t;
              +typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t;
              +rd_kafka_AdminOptions_t *rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
              +void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, int true_or_false, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, int32_t broker_id, char *errstr, size_t errstr_size);
              +rd_kafka_error_t *rd_kafka_AdminOptions_set_require_stable_offsets(rd_kafka_AdminOptions_t *options, int true_or_false);
              +rd_kafka_error_t *rd_kafka_AdminOptions_set_match_consumer_group_states(rd_kafka_AdminOptions_t *options, const rd_kafka_consumer_group_state_t *consumer_group_states, size_t consumer_group_states_cnt);
              +void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, void *ev_opaque);
              +typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t;
              +rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, int num_partitions, int replication_factor, char *errstr, size_t errstr_size);
              +void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic);
              +void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, int32_t partition, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, const char *name, const char *value);
              +void rd_kafka_CreateTopics(rd_kafka_t *rk, rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(const rd_kafka_CreateTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
              +rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic);
              +void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic);
              +void rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt);
              +void rd_kafka_DeleteTopics(rd_kafka_t *rk, rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(const rd_kafka_DeleteTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t;
              +rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new(const char *topic, size_t new_total_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts);
              +void rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment(rd_kafka_NewPartitions_t *new_parts, int32_t new_partition_idx, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_CreatePartitions(rd_kafka_t *rk, rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreatePartitions_result_topics(const rd_kafka_CreatePartitions_result_t *result, size_t *cntp);
              +typedef enum rd_kafka_ConfigSource_t {
              +  RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
              +  RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4,
              +  RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5,
              +  RD_KAFKA_CONFIG_SOURCE__CNT,
              +} rd_kafka_ConfigSource_t;
              +const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource);
              +typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t;
              +const char *rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry);
              +const char *rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry);
              +rd_kafka_ConfigSource_t rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, size_t *cntp);
              +typedef enum rd_kafka_ResourceType_t {
              +  RD_KAFKA_RESOURCE_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_ANY = 1,
              +  RD_KAFKA_RESOURCE_TOPIC = 2,
              +  RD_KAFKA_RESOURCE_GROUP = 3,
              +  RD_KAFKA_RESOURCE_BROKER = 4,
              +  RD_KAFKA_RESOURCE__CNT,
              +} rd_kafka_ResourceType_t;
              +typedef enum rd_kafka_ResourcePatternType_t {
              +  RD_KAFKA_RESOURCE_PATTERN_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_PATTERN_ANY = 1,
              +  RD_KAFKA_RESOURCE_PATTERN_MATCH = 2,
              +  RD_KAFKA_RESOURCE_PATTERN_LITERAL = 3,
              +  RD_KAFKA_RESOURCE_PATTERN_PREFIXED = 4,
              +  RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT,
              +} rd_kafka_ResourcePatternType_t;
              +const char *rd_kafka_ResourcePatternType_name(rd_kafka_ResourcePatternType_t resource_pattern_type);
              +const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype);
              +typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
              +rd_kafka_ConfigResource_t *rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, const char *resname);
              +void rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config);
              +void rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, size_t config_cnt);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, const char *name, const char *value);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, size_t *cntp);
              +rd_kafka_ResourceType_t rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config);
              +void rd_kafka_AlterConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_AlterConfigs_result_resources(const rd_kafka_AlterConfigs_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_DescribeConfigs_result_resources(const rd_kafka_DescribeConfigs_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteRecords_s rd_kafka_DeleteRecords_t;
              +rd_kafka_DeleteRecords_t *rd_kafka_DeleteRecords_new(const rd_kafka_topic_partition_list_t *before_offsets);
              +void rd_kafka_DeleteRecords_destroy(rd_kafka_DeleteRecords_t *del_records);
              +void rd_kafka_DeleteRecords_destroy_array(rd_kafka_DeleteRecords_t **del_records, size_t del_record_cnt);
              +void rd_kafka_DeleteRecords(rd_kafka_t *rk, rd_kafka_DeleteRecords_t **del_records, size_t del_record_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_partition_list_t *rd_kafka_DeleteRecords_result_offsets(const rd_kafka_DeleteRecords_result_t *result);
              +typedef struct rd_kafka_ConsumerGroupListing_s rd_kafka_ConsumerGroupListing_t;
              +typedef struct rd_kafka_ListConsumerGroupsResult_s rd_kafka_ListConsumerGroupsResult_t;
              +void rd_kafka_ListConsumerGroups(rd_kafka_t *rk, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const char *rd_kafka_ConsumerGroupListing_group_id(const rd_kafka_ConsumerGroupListing_t *grplist);
              +int rd_kafka_ConsumerGroupListing_is_simple_consumer_group(const rd_kafka_ConsumerGroupListing_t *grplist);
              +rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupListing_state(const rd_kafka_ConsumerGroupListing_t *grplist);
              +const rd_kafka_ConsumerGroupListing_t **rd_kafka_ListConsumerGroups_result_valid(const rd_kafka_ListConsumerGroups_result_t *result, size_t *cntp);
              +const rd_kafka_error_t **rd_kafka_ListConsumerGroups_result_errors(const rd_kafka_ListConsumerGroups_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_ConsumerGroupDescription_s rd_kafka_ConsumerGroupDescription_t;
              +typedef struct rd_kafka_MemberDescription_s rd_kafka_MemberDescription_t;
              +typedef struct rd_kafka_MemberAssignment_s rd_kafka_MemberAssignment_t;
              +void rd_kafka_DescribeConsumerGroups(rd_kafka_t *rk, const char **groups, size_t groups_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConsumerGroupDescription_t **rd_kafka_DescribeConsumerGroups_result_groups(const rd_kafka_DescribeConsumerGroups_result_t *result, size_t *cntp);
              +const char *rd_kafka_ConsumerGroupDescription_group_id(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +const rd_kafka_error_t *rd_kafka_ConsumerGroupDescription_error(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +int rd_kafka_ConsumerGroupDescription_is_simple_consumer_group(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +const char *rd_kafka_ConsumerGroupDescription_partition_assignor(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupDescription_state(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +const rd_kafka_Node_t *rd_kafka_ConsumerGroupDescription_coordinator(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +size_t rd_kafka_ConsumerGroupDescription_member_count(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +const rd_kafka_MemberDescription_t *rd_kafka_ConsumerGroupDescription_member(const rd_kafka_ConsumerGroupDescription_t *grpdesc, size_t idx);
              +const char *rd_kafka_MemberDescription_client_id(const rd_kafka_MemberDescription_t *member);
              +const char *rd_kafka_MemberDescription_group_instance_id(const rd_kafka_MemberDescription_t *member);
              +const char *rd_kafka_MemberDescription_consumer_id(const rd_kafka_MemberDescription_t *member);
              +const char *rd_kafka_MemberDescription_host(const rd_kafka_MemberDescription_t *member);
              +const rd_kafka_MemberAssignment_t *rd_kafka_MemberDescription_assignment(const rd_kafka_MemberDescription_t *member);
              +const rd_kafka_topic_partition_list_t *rd_kafka_MemberAssignment_partitions(const rd_kafka_MemberAssignment_t *assignment);
              +typedef struct rd_kafka_DeleteGroup_s rd_kafka_DeleteGroup_t;
              +rd_kafka_DeleteGroup_t *rd_kafka_DeleteGroup_new(const char *group);
              +void rd_kafka_DeleteGroup_destroy(rd_kafka_DeleteGroup_t *del_group);
              +void rd_kafka_DeleteGroup_destroy_array(rd_kafka_DeleteGroup_t **del_groups, size_t del_group_cnt);
              +void rd_kafka_DeleteGroups(rd_kafka_t *rk, rd_kafka_DeleteGroup_t **del_groups, size_t del_group_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_DeleteGroups_result_groups(const rd_kafka_DeleteGroups_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_ListConsumerGroupOffsets_s rd_kafka_ListConsumerGroupOffsets_t;
              +rd_kafka_ListConsumerGroupOffsets_t *rd_kafka_ListConsumerGroupOffsets_new(const char *group_id, const rd_kafka_topic_partition_list_t *partitions);
              +void rd_kafka_ListConsumerGroupOffsets_destroy(rd_kafka_ListConsumerGroupOffsets_t *list_grpoffsets);
              +void rd_kafka_ListConsumerGroupOffsets_destroy_array(rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets, size_t list_grpoffset_cnt);
              +void rd_kafka_ListConsumerGroupOffsets(rd_kafka_t *rk, rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets, size_t list_grpoffsets_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_ListConsumerGroupOffsets_result_groups(const rd_kafka_ListConsumerGroupOffsets_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_AlterConsumerGroupOffsets_s rd_kafka_AlterConsumerGroupOffsets_t;
              +rd_kafka_AlterConsumerGroupOffsets_t *rd_kafka_AlterConsumerGroupOffsets_new(const char *group_id, const rd_kafka_topic_partition_list_t *partitions);
              +void rd_kafka_AlterConsumerGroupOffsets_destroy(rd_kafka_AlterConsumerGroupOffsets_t *alter_grpoffsets);
              +void rd_kafka_AlterConsumerGroupOffsets_destroy_array(rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets, size_t alter_grpoffset_cnt);
              +void rd_kafka_AlterConsumerGroupOffsets(rd_kafka_t *rk, rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets, size_t alter_grpoffsets_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_AlterConsumerGroupOffsets_result_groups(const rd_kafka_AlterConsumerGroupOffsets_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteConsumerGroupOffsets_s rd_kafka_DeleteConsumerGroupOffsets_t;
              +rd_kafka_DeleteConsumerGroupOffsets_t *rd_kafka_DeleteConsumerGroupOffsets_new(const char *group, const rd_kafka_topic_partition_list_t *partitions);
              +void rd_kafka_DeleteConsumerGroupOffsets_destroy(rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets);
              +void rd_kafka_DeleteConsumerGroupOffsets_destroy_array(rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, size_t del_grpoffset_cnt);
              +void rd_kafka_DeleteConsumerGroupOffsets(rd_kafka_t *rk, rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, size_t del_grpoffsets_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_DeleteConsumerGroupOffsets_result_groups(const rd_kafka_DeleteConsumerGroupOffsets_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_AclBinding_s rd_kafka_AclBinding_t;
              +typedef rd_kafka_AclBinding_t rd_kafka_AclBindingFilter_t;
              +const rd_kafka_error_t *rd_kafka_acl_result_error(const rd_kafka_acl_result_t *aclres);
              +typedef enum rd_kafka_AclOperation_t {
              +  RD_KAFKA_ACL_OPERATION_UNKNOWN = 0,
              +  RD_KAFKA_ACL_OPERATION_ANY = 1,
              +  RD_KAFKA_ACL_OPERATION_ALL = 2,
              +  RD_KAFKA_ACL_OPERATION_READ = 3,
              +  RD_KAFKA_ACL_OPERATION_WRITE = 4,
              +  RD_KAFKA_ACL_OPERATION_CREATE = 5,
              +  RD_KAFKA_ACL_OPERATION_DELETE = 6,
              +  RD_KAFKA_ACL_OPERATION_ALTER = 7,
              +  RD_KAFKA_ACL_OPERATION_DESCRIBE = 8,
              +  RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION = 9,
              +  RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS = 10,
              +  RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS = 11,
              +  RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE = 12,
              +  RD_KAFKA_ACL_OPERATION__CNT,
              +} rd_kafka_AclOperation_t;
              +const char *rd_kafka_AclOperation_name(rd_kafka_AclOperation_t acl_operation);
              +typedef enum rd_kafka_AclPermissionType_t {
              +  RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN = 0,
              +  RD_KAFKA_ACL_PERMISSION_TYPE_ANY = 1,
              +  RD_KAFKA_ACL_PERMISSION_TYPE_DENY = 2,
              +  RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW = 3,
              +  RD_KAFKA_ACL_PERMISSION_TYPE__CNT,
              +} rd_kafka_AclPermissionType_t;
              +const char *rd_kafka_AclPermissionType_name(rd_kafka_AclPermissionType_t acl_permission_type);
              +rd_kafka_AclBinding_t *rd_kafka_AclBinding_new(rd_kafka_ResourceType_t restype, const char *name, rd_kafka_ResourcePatternType_t resource_pattern_type, const char *principal, const char *host, rd_kafka_AclOperation_t operation, rd_kafka_AclPermissionType_t permission_type, char *errstr, size_t errstr_size);
              +rd_kafka_AclBindingFilter_t *rd_kafka_AclBindingFilter_new(rd_kafka_ResourceType_t restype, const char *name, rd_kafka_ResourcePatternType_t resource_pattern_type, const char *principal, const char *host, rd_kafka_AclOperation_t operation, rd_kafka_AclPermissionType_t permission_type, char *errstr, size_t errstr_size);
              +rd_kafka_ResourceType_t rd_kafka_AclBinding_restype(const rd_kafka_AclBinding_t *acl);
              +const char *rd_kafka_AclBinding_name(const rd_kafka_AclBinding_t *acl);
              +const char *rd_kafka_AclBinding_principal(const rd_kafka_AclBinding_t *acl);
              +const char *rd_kafka_AclBinding_host(const rd_kafka_AclBinding_t *acl);
              +rd_kafka_AclOperation_t rd_kafka_AclBinding_operation(const rd_kafka_AclBinding_t *acl);
              +rd_kafka_AclPermissionType_t rd_kafka_AclBinding_permission_type(const rd_kafka_AclBinding_t *acl);
              +rd_kafka_ResourcePatternType_t rd_kafka_AclBinding_resource_pattern_type(const rd_kafka_AclBinding_t *acl);
              +const rd_kafka_error_t *rd_kafka_AclBinding_error(const rd_kafka_AclBinding_t *acl);
              +void rd_kafka_AclBinding_destroy(rd_kafka_AclBinding_t *acl_binding);
              +void rd_kafka_AclBinding_destroy_array(rd_kafka_AclBinding_t **acl_bindings, size_t acl_bindings_cnt);
              +const rd_kafka_acl_result_t **rd_kafka_CreateAcls_result_acls(const rd_kafka_CreateAcls_result_t *result, size_t *cntp);
              +void rd_kafka_CreateAcls(rd_kafka_t *rk, rd_kafka_AclBinding_t **new_acls, size_t new_acls_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_AclBinding_t **rd_kafka_DescribeAcls_result_acls(const rd_kafka_DescribeAcls_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeAcls(rd_kafka_t *rk, rd_kafka_AclBindingFilter_t *acl_filter, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +typedef struct rd_kafka_DeleteAcls_result_response_s rd_kafka_DeleteAcls_result_response_t;
              +const rd_kafka_DeleteAcls_result_response_t **rd_kafka_DeleteAcls_result_responses(const rd_kafka_DeleteAcls_result_t *result, size_t *cntp);
              +const rd_kafka_error_t *rd_kafka_DeleteAcls_result_response_error(const rd_kafka_DeleteAcls_result_response_t *result_response);
              +const rd_kafka_AclBinding_t **rd_kafka_DeleteAcls_result_response_matching_acls(const rd_kafka_DeleteAcls_result_response_t *result_response, size_t *matching_acls_cntp);
              +void rd_kafka_DeleteAcls(rd_kafka_t *rk, rd_kafka_AclBindingFilter_t **del_acls, size_t del_acls_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token(rd_kafka_t *rk, const char *token_value, int64_t md_lifetime_ms, const char *md_principal_name, const char **extensions, size_t extension_size, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk, const char *errstr);
              +rd_kafka_error_t *rd_kafka_init_transactions(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_send_offsets_to_transaction(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, const rd_kafka_consumer_group_metadata_t *cgmetadata, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_commit_transaction(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_abort_transaction(rd_kafka_t *rk, int timeout_ms);
              +typedef struct rd_kafka_mock_cluster_s rd_kafka_mock_cluster_t;
              +rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new(rd_kafka_t *rk, int broker_cnt);
              +void rd_kafka_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_t *rd_kafka_mock_cluster_handle(const rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_mock_cluster_t *rd_kafka_handle_mock_cluster(const rd_kafka_t *rk);
              +const char *rd_kafka_mock_cluster_bootstraps(const rd_kafka_mock_cluster_t *mcluster);
              +void rd_kafka_mock_clear_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey);
              +void rd_kafka_mock_push_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, ...);
              +void rd_kafka_mock_push_request_errors_array(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, const rd_kafka_resp_err_t *errors);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_push_request_error_rtts(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int16_t ApiKey, size_t cnt, ...);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_error_stack_cnt(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int16_t ApiKey, size_t *cntp);
              +void rd_kafka_mock_topic_set_error(rd_kafka_mock_cluster_t *mcluster, const char *topic, rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_mock_topic_create(rd_kafka_mock_cluster_t *mcluster, const char *topic, int partition_cnt, int replication_factor);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_leader(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int64_t lo, int64_t hi);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_down(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_up(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rtt(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int rtt_ms);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rack(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, const char *rack);
              +rd_kafka_resp_err_t rd_kafka_mock_coordinator_set(rd_kafka_mock_cluster_t *mcluster, const char *key_type, const char *key, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_set_apiversion(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, int16_t MinVersion, int16_t MaxVersion);
              +'
              +
              +rdkafka.h, rdkafka_mock.h

              +
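The block above lists the C declarations bundled with this client; their form (typedefs for ssize_t, FILE, fixed-width integers followed by librdkafka prototypes) suggests they are the definitions handed to PHP's FFI extension. A minimal, hypothetical sketch follows, assuming a cdef string like this is bound with FFI::cdef(); the shared-library filename is an assumption and only two declarations are repeated here:

// Sketch only: bind a tiny subset of the declarations shown above.
// 'librdkafka.so.1' is an assumed library name and may differ per platform.
$cdef = '
int rd_kafka_version(void);
const char *rd_kafka_version_str(void);
';

$kafka = FFI::cdef($cdef, 'librdkafka.so.1');

$num = $kafka->rd_kafka_version();                   // e.g. 33620223
$str = FFI::string($kafka->rd_kafka_version_str());  // e.g. "2.1.0"
printf("librdkafka %s (0x%08x)\n", $str, $num);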

              RD_KAFKA_VERSION

              +
              public RD_KAFKA_VERSION = 33620223
              +
              +

              librdkafka version

              +

              Interpreted as hex MM.mm.rr.xx:

              +
• MM = Major
• mm = minor
• rr = revision
• xx = pre-release id (0xff is the final release)
              +

              E.g.: 0x000801ff = 0.8.1

              +
              Remarks
This value should only be used at compile time; for runtime version checks, use rd_kafka_version().
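The layout described above can be verified with a few lines of plain PHP; the value 33620223 documented here is 0x020100ff, i.e. librdkafka 2.1.0 (final release):

$version = 33620223;                    // RD_KAFKA_VERSION as listed above

$major      = ($version >> 24) & 0xff;  // MM -> 2
$minor      = ($version >> 16) & 0xff;  // mm -> 1
$revision   = ($version >> 8)  & 0xff;  // rr -> 0
$prerelease = $version         & 0xff;  // xx -> 0xff (final release)

printf("%d.%d.%d\n", $major, $minor, $revision); // prints "2.1.0"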

              RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE

              +
              public RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76
              +
              +

              Unsupported compression type
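A minimal usage sketch, assuming the constant is exposed as a global PHP constant exactly as listed here; the integer error code in $err is a hypothetical value taken from a broker response:

$err = 76; // hypothetical broker error code

if ($err === RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE) {
    error_log('Broker rejected the request: unsupported compression type');
}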


              RD_KAFKA_RESP_ERR_END_ALL

              +

              public RD_KAFKA_RESP_ERR_END_ALL = 98
              +
              +enum rd_kafka_resp_err_t

              +

              RD_KAFKA_ADMIN_OP__CNT

              +
              public RD_KAFKA_ADMIN_OP__CNT = 16
              +
              +

              Number of ops defined


              RD_KAFKA_CDEF

              +

              public RD_KAFKA_CDEF = 'typedef long int ssize_t;
              +typedef struct _IO_FILE FILE;
              +typedef long int mode_t;
              +typedef signed int int16_t;
              +typedef unsigned int uint16_t;
              +typedef signed int int32_t;
              +typedef signed long int int64_t;
              +int rd_kafka_version(void);
              +const char *rd_kafka_version_str(void);
              +typedef enum rd_kafka_type_t {
              +  RD_KAFKA_PRODUCER,
              +  RD_KAFKA_CONSUMER,
              +} rd_kafka_type_t;
              +typedef enum rd_kafka_timestamp_type_t {
              +  RD_KAFKA_TIMESTAMP_NOT_AVAILABLE,
              +  RD_KAFKA_TIMESTAMP_CREATE_TIME,
              +  RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME,
              +} rd_kafka_timestamp_type_t;
              +const char *rd_kafka_get_debug_contexts(void);
              +typedef struct rd_kafka_s rd_kafka_t;
              +typedef struct rd_kafka_topic_s rd_kafka_topic_t;
              +typedef struct rd_kafka_conf_s rd_kafka_conf_t;
              +typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t;
              +typedef struct rd_kafka_queue_s rd_kafka_queue_t;
              +typedef struct rd_kafka_op_s rd_kafka_event_t;
              +typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t;
              +typedef struct rd_kafka_consumer_group_metadata_s rd_kafka_consumer_group_metadata_t;
              +typedef struct rd_kafka_error_s {
              +  unsigned int code;
              +  char *errstr;
              +  unsigned char fatal;
              +  unsigned char retriable;
              +  unsigned char txn_requires_abort;
              +} rd_kafka_error_t;
              +typedef struct rd_kafka_headers_s rd_kafka_headers_t;
              +typedef struct rd_kafka_group_result_s rd_kafka_group_result_t;
              +typedef struct rd_kafka_acl_result_s rd_kafka_acl_result_t;
              +typedef enum {
              +  RD_KAFKA_RESP_ERR__BEGIN = (- 200),
              +  RD_KAFKA_RESP_ERR__BAD_MSG = (- 199),
              +  RD_KAFKA_RESP_ERR__BAD_COMPRESSION = (- 198),
              +  RD_KAFKA_RESP_ERR__DESTROY = (- 197),
              +  RD_KAFKA_RESP_ERR__FAIL = (- 196),
              +  RD_KAFKA_RESP_ERR__TRANSPORT = (- 195),
              +  RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = (- 194),
              +  RD_KAFKA_RESP_ERR__RESOLVE = (- 193),
              +  RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = (- 192),
              +  RD_KAFKA_RESP_ERR__PARTITION_EOF = (- 191),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = (- 190),
              +  RD_KAFKA_RESP_ERR__FS = (- 189),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = (- 188),
              +  RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = (- 187),
              +  RD_KAFKA_RESP_ERR__INVALID_ARG = (- 186),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT = (- 185),
              +  RD_KAFKA_RESP_ERR__QUEUE_FULL = (- 184),
              +  RD_KAFKA_RESP_ERR__ISR_INSUFF = (- 183),
              +  RD_KAFKA_RESP_ERR__NODE_UPDATE = (- 182),
              +  RD_KAFKA_RESP_ERR__SSL = (- 181),
              +  RD_KAFKA_RESP_ERR__WAIT_COORD = (- 180),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = (- 179),
              +  RD_KAFKA_RESP_ERR__IN_PROGRESS = (- 178),
              +  RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = (- 177),
              +  RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = (- 176),
              +  RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = (- 175),
              +  RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = (- 174),
              +  RD_KAFKA_RESP_ERR__CONFLICT = (- 173),
              +  RD_KAFKA_RESP_ERR__STATE = (- 172),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = (- 171),
              +  RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = (- 170),
              +  RD_KAFKA_RESP_ERR__AUTHENTICATION = (- 169),
              +  RD_KAFKA_RESP_ERR__NO_OFFSET = (- 168),
              +  RD_KAFKA_RESP_ERR__OUTDATED = (- 167),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = (- 166),
              +  RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = (- 165),
              +  RD_KAFKA_RESP_ERR__WAIT_CACHE = (- 164),
              +  RD_KAFKA_RESP_ERR__INTR = (- 163),
              +  RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = (- 162),
              +  RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = (- 161),
              +  RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = (- 160),
              +  RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = (- 159),
              +  RD_KAFKA_RESP_ERR__PARTIAL = (- 158),
              +  RD_KAFKA_RESP_ERR__READ_ONLY = (- 157),
              +  RD_KAFKA_RESP_ERR__NOENT = (- 156),
              +  RD_KAFKA_RESP_ERR__UNDERFLOW = (- 155),
              +  RD_KAFKA_RESP_ERR__INVALID_TYPE = (- 154),
              +  RD_KAFKA_RESP_ERR__RETRY = (- 153),
              +  RD_KAFKA_RESP_ERR__PURGE_QUEUE = (- 152),
              +  RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = (- 151),
              +  RD_KAFKA_RESP_ERR__FATAL = (- 150),
              +  RD_KAFKA_RESP_ERR__INCONSISTENT = (- 149),
              +  RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = (- 148),
              +  RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = (- 147),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_BROKER = (- 146),
              +  RD_KAFKA_RESP_ERR__NOT_CONFIGURED = (- 145),
              +  RD_KAFKA_RESP_ERR__FENCED = (- 144),
              +  RD_KAFKA_RESP_ERR__APPLICATION = (- 143),
              +  RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST = (- 142),
              +  RD_KAFKA_RESP_ERR__NOOP = (- 141),
              +  RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET = (- 140),
              +  RD_KAFKA_RESP_ERR__LOG_TRUNCATION = (- 139),
              +  RD_KAFKA_RESP_ERR__END = (- 100),
              +  RD_KAFKA_RESP_ERR_UNKNOWN = (- 1),
              +  RD_KAFKA_RESP_ERR_NO_ERROR = 0,
              +  RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG = 2,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4,
              +  RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5,
              +  RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6,
              +  RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7,
              +  RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8,
              +  RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9,
              +  RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10,
              +  RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11,
              +  RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12,
              +  RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS = 14,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE = 15,
              +  RD_KAFKA_RESP_ERR_NOT_COORDINATOR = 16,
              +  RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17,
              +  RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
              +  RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25,
              +  RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26,
              +  RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27,
              +  RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
              +  RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29,
              +  RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30,
              +  RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
              +  RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35,
              +  RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36,
              +  RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39,
              +  RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40,
              +  RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
              +  RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44,
              +  RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47,
              +  RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49,
              +  RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50,
              +  RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51,
              +  RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52,
              +  RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
              +  RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54,
              +  RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55,
              +  RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56,
              +  RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57,
              +  RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59,
              +  RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66,
              +  RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67,
              +  RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68,
              +  RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69,
              +  RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70,
              +  RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71,
              +  RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72,
              +  RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73,
              +  RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76,
              +  RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77,
              +  RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78,
              +  RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79,
              +  RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80,
              +  RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81,
              +  RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID = 82,
              +  RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE = 83,
              +  RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED = 84,
              +  RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS = 85,
              +  RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC = 86,
              +  RD_KAFKA_RESP_ERR_INVALID_RECORD = 87,
              +  RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT = 88,
              +  RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED = 89,
              +  RD_KAFKA_RESP_ERR_PRODUCER_FENCED = 90,
              +  RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND = 91,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE = 92,
              +  RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL = 93,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET = 94,
              +  RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION = 95,
              +  RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED = 96,
              +  RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE = 97,
              +  RD_KAFKA_RESP_ERR_END_ALL,
              +} rd_kafka_resp_err_t;
              +struct rd_kafka_err_desc {
              +  rd_kafka_resp_err_t code;
              +  char *name;
              +  char *desc;
              +};
              +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, size_t *cntp);
              +const char *rd_kafka_err2str(rd_kafka_resp_err_t err);
              +const char *rd_kafka_err2name(rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_last_error(void);
              +rd_kafka_resp_err_t rd_kafka_errno2err(int errnox);
              +int rd_kafka_errno(void);
              +rd_kafka_resp_err_t rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason);
              +rd_kafka_resp_err_t rd_kafka_error_code(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_name(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_string(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_fatal(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_retriable(const rd_kafka_error_t *error);
              +int rd_kafka_error_txn_requires_abort(const rd_kafka_error_t *error);
              +void rd_kafka_error_destroy(rd_kafka_error_t *error);
              +rd_kafka_error_t *rd_kafka_error_new(rd_kafka_resp_err_t code, const char *fmt, ...);
              +typedef struct rd_kafka_topic_partition_s {
              +  char *topic;
              +  int32_t partition;
              +  int64_t offset;
              +  void *metadata;
              +  size_t metadata_size;
              +  void *opaque;
              +  rd_kafka_resp_err_t err;
              +  void *_private;
              +} rd_kafka_topic_partition_t;
              +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar);
              +void rd_kafka_topic_partition_set_leader_epoch(rd_kafka_topic_partition_t *rktpar, int32_t leader_epoch);
              +int32_t rd_kafka_topic_partition_get_leader_epoch(const rd_kafka_topic_partition_t *rktpar);
              +typedef struct rd_kafka_topic_partition_list_s {
              +  int cnt;
              +  int size;
              +  rd_kafka_topic_partition_t *elems;
              +} rd_kafka_topic_partition_list_t;
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size);
              +void rd_kafka_topic_partition_list_destroy(rd_kafka_topic_partition_list_t *rkparlist);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_add_range(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t start, int32_t stop);
              +int rd_kafka_topic_partition_list_del(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +int rd_kafka_topic_partition_list_del_by_idx(rd_kafka_topic_partition_list_t *rktparlist, int idx);
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src);
              +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition, int64_t offset);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(const rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_sort(rd_kafka_topic_partition_list_t *rktparlist, int (*cmp)(const void *a, const void *b, void *cmp_opaque), void *cmp_opaque);
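               +/* Var-arg tag types and argument container used by rd_kafka_producev() and rd_kafka_produceva(). */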
              +typedef enum rd_kafka_vtype_t {
              +  RD_KAFKA_VTYPE_END,
              +  RD_KAFKA_VTYPE_TOPIC,
              +  RD_KAFKA_VTYPE_RKT,
              +  RD_KAFKA_VTYPE_PARTITION,
              +  RD_KAFKA_VTYPE_VALUE,
              +  RD_KAFKA_VTYPE_KEY,
              +  RD_KAFKA_VTYPE_OPAQUE,
              +  RD_KAFKA_VTYPE_MSGFLAGS,
              +  RD_KAFKA_VTYPE_TIMESTAMP,
              +  RD_KAFKA_VTYPE_HEADER,
              +  RD_KAFKA_VTYPE_HEADERS,
              +} rd_kafka_vtype_t;
              +typedef struct rd_kafka_vu_s {
              +  rd_kafka_vtype_t vtype;
              +  union {
              +    char *cstr;
              +    rd_kafka_topic_t *rkt;
              +    int i;
              +    int32_t i32;
              +    int64_t i64;
              +    struct {
              +      void *ptr;
              +      size_t size;
              +    } mem;
              +    struct {
              +      char *name;
              +      void *val;
              +      ssize_t size;
              +    } header;
              +    rd_kafka_headers_t *headers;
              +    void *ptr;
              +    char _pad[64];
              +  } u;
              +} rd_kafka_vu_t;
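               +/* Message headers: ordered name/value pairs attached to produced and consumed messages. */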
              +rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count);
              +void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs);
              +rd_kafka_headers_t *rd_kafka_headers_copy(const rd_kafka_headers_t *src);
              +rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, const char *name, ssize_t name_size, const void *value, ssize_t value_size);
              +rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, const char *name);
              +rd_kafka_resp_err_t rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get(const rd_kafka_headers_t *hdrs, size_t idx, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, size_t idx, const char **namep, const void **valuep, size_t *sizep);
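               +/* Message object as received by consumers and passed to delivery report callbacks. */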
              +typedef struct rd_kafka_message_s {
              +  rd_kafka_resp_err_t err;
              +  rd_kafka_topic_t *rkt;
              +  int32_t partition;
              +  void *payload;
              +  size_t len;
              +  void *key;
              +  size_t key_len;
              +  int64_t offset;
              +  void *_private;
              +} rd_kafka_message_t;
              +void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);
              +const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage);
              +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, rd_kafka_timestamp_type_t *tstype);
              +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage);
              +int32_t rd_kafka_message_broker_id(const rd_kafka_message_t *rkmessage);
              +rd_kafka_resp_err_t rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +rd_kafka_resp_err_t rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t *hdrs);
              +size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs);
              +typedef enum {
              +  RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0,
              +  RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1,
              +  RD_KAFKA_MSG_STATUS_PERSISTED = 2,
              +} rd_kafka_msg_status_t;
              +rd_kafka_msg_status_t rd_kafka_message_status(const rd_kafka_message_t *rkmessage);
              +int32_t rd_kafka_message_leader_epoch(const rd_kafka_message_t *rkmessage);
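               +/* Configuration: global (rd_kafka_conf_t) and topic (rd_kafka_topic_conf_t) properties, callbacks and partitioners. */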
              +typedef enum {
              +  RD_KAFKA_CONF_UNKNOWN = (- 2),
              +  RD_KAFKA_CONF_INVALID = (- 1),
              +  RD_KAFKA_CONF_OK = 0,
              +} rd_kafka_conf_res_t;
              +rd_kafka_conf_t *rd_kafka_conf_new(void);
              +void rd_kafka_conf_destroy(rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, size_t filter_cnt, const char **filter);
              +const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk);
              +rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events);
              +void rd_kafka_conf_set_background_event_cb(rd_kafka_conf_t *conf, void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque));
              +void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, void (*dr_cb)(rd_kafka_t *rk, void *payload, size_t len, rd_kafka_resp_err_t err, void *opaque, void *msg_opaque));
              +void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, void (*dr_msg_cb)(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_consume_cb(rd_kafka_conf_t *conf, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_rebalance_cb(rd_kafka_conf_t *conf, void (*rebalance_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque));
              +void rd_kafka_conf_set_offset_commit_cb(rd_kafka_conf_t *conf, void (*offset_commit_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque));
              +void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, void (*error_cb)(rd_kafka_t *rk, int err, const char *reason, void *opaque));
              +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, void (*throttle_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque));
              +void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, void (*log_cb)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque));
              +void rd_kafka_conf_set_oauthbearer_token_refresh_cb(rd_kafka_conf_t *conf, void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, const char *oauthbearer_config, void *opaque));
              +void rd_kafka_conf_enable_sasl_queue(rd_kafka_conf_t *conf, int enable);
              +void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, int (*socket_cb)(int domain, int type, int protocol, void *opaque));
              +void rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, int (*connect_cb)(int sockfd, const struct sockaddr *addr, int addrlen, const char *id, void *opaque));
              +void rd_kafka_conf_set_closesocket_cb(rd_kafka_conf_t *conf, int (*closesocket_cb)(int sockfd, void *opaque));
              +struct addrinfo;
              +void rd_kafka_conf_set_resolve_cb(rd_kafka_conf_t *conf, int (*resolve_cb)(const char *node, const char *service, const struct addrinfo *hints, struct addrinfo **res, void *opaque));
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb(rd_kafka_conf_t *conf, int (*ssl_cert_verify_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int *x509_error, int depth, const char *buf, size_t size, char *errstr, size_t errstr_size, void *opaque));
              +typedef enum rd_kafka_cert_type_t {
              +  RD_KAFKA_CERT_PUBLIC_KEY,
              +  RD_KAFKA_CERT_PRIVATE_KEY,
              +  RD_KAFKA_CERT_CA,
              +  RD_KAFKA_CERT__CNT,
              +} rd_kafka_cert_type_t;
              +typedef enum rd_kafka_cert_enc_t {
              +  RD_KAFKA_CERT_ENC_PKCS12,
              +  RD_KAFKA_CERT_ENC_DER,
              +  RD_KAFKA_CERT_ENC_PEM,
              +  RD_KAFKA_CERT_ENC__CNT,
              +} rd_kafka_cert_enc_t;
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, rd_kafka_cert_type_t cert_type, rd_kafka_cert_enc_t cert_enc, const void *buffer, size_t size, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_engine_callback_data(rd_kafka_conf_t *conf, void *callback_data);
              +void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque);
              +void *rd_kafka_opaque(const rd_kafka_t *rk);
              +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, rd_kafka_topic_conf_t *tconf);
              +rd_kafka_topic_conf_t *rd_kafka_conf_get_default_topic_conf(rd_kafka_conf_t *conf);
              +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp);
              +const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, size_t *cntp);
              +void rd_kafka_conf_dump_free(const char **arr, size_t cnt);
              +void rd_kafka_conf_properties_show(FILE *fp);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf);
              +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk);
              +void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *rkt_opaque);
              +void rd_kafka_topic_conf_set_partitioner_cb(rd_kafka_topic_conf_t *topic_conf, int32_t (*partitioner)(const rd_kafka_topic_t *rkt, const void *keydata, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque));
              +void rd_kafka_topic_conf_set_msg_order_cmp(rd_kafka_topic_conf_t *topic_conf, int (*msg_order_cmp)(const rd_kafka_message_t *a, const rd_kafka_message_t *b));
              +int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, int32_t partition);
              +int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
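               +/* Client instance (rd_kafka_t) and topic handle (rd_kafka_topic_t) lifecycle and polling. */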
              +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, char *errstr, size_t errstr_size);
              +void rd_kafka_destroy(rd_kafka_t *rk);
              +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags);
              +const char *rd_kafka_name(const rd_kafka_t *rk);
              +rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk);
              +char *rd_kafka_memberid(const rd_kafka_t *rk);
              +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms);
              +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, rd_kafka_topic_conf_t *conf);
              +void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt);
              +const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt);
              +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt);
              +int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms);
              +void rd_kafka_yield(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_pause_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_resume_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high);
              +rd_kafka_resp_err_t rd_kafka_offsets_for_times(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets, int timeout_ms);
              +void *rd_kafka_mem_calloc(rd_kafka_t *rk, size_t num, size_t size);
              +void *rd_kafka_mem_malloc(rd_kafka_t *rk, size_t size);
              +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr);
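               +/* Queues: channels for message and event delivery. */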
              +rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk);
              +void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu);
              +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_sasl(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_sasl_background_callbacks_enable(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_sasl_set_credentials(rd_kafka_t *rk, const char *username, const char *password);
              +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, const char *topic, int32_t partition);
              +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk);
              +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst);
              +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu);
              +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, int fd, const void *payload, size_t size);
              +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, void (*event_cb)(rd_kafka_t *rk, void *qev_opaque), void *qev_opaque);
              +void rd_kafka_queue_yield(rd_kafka_queue_t *rkqu);
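               +/* Simple (legacy) per-partition consumer API. */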
              +int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
              +rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_seek_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms);
              +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, int timeout_ms);
              +ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +rd_kafka_resp_err_t rd_kafka_offsets_store(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets);
              +rd_kafka_error_t *rd_kafka_offset_store_message(rd_kafka_message_t *rkmessage);
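               +/* High-level balanced consumer: subscriptions, polling, assignment and offset commits. */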
              +rd_kafka_resp_err_t rd_kafka_subscribe(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *topics);
              +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics);
              +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_consumer_close_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consumer_closed(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_incremental_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_error_t *rd_kafka_incremental_unassign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +const char *rd_kafka_rebalance_protocol(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_assignment(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **partitions);
              +int rd_kafka_assignment_lost(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_commit(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_message(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_queue(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_queue_t *rkqu, void (*cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_committed(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata(rd_kafka_t *rk);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new(const char *group_id);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new_with_genid(const char *group_id, int32_t generation_id, const char *member_id, const char *group_instance_id);
              +void rd_kafka_consumer_group_metadata_destroy(rd_kafka_consumer_group_metadata_t *);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_write(const rd_kafka_consumer_group_metadata_t *cgmd, void **bufferp, size_t *sizep);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_read(rd_kafka_consumer_group_metadata_t **cgmdp, const void *buffer, size_t size);
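               +/* Producer API. */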
              +int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, void *payload, size_t len, const void *key, size_t keylen, void *msg_opaque);
              +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...);
              +rd_kafka_error_t *rd_kafka_produceva(rd_kafka_t *rk, const rd_kafka_vu_t *vus, size_t cnt);
              +int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, rd_kafka_message_t *rkmessages, int message_cnt);
              +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags);
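               +/* Cluster metadata. */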
              +typedef struct rd_kafka_metadata_broker {
              +  int32_t id;
              +  char *host;
              +  int port;
              +} rd_kafka_metadata_broker_t;
              +typedef struct rd_kafka_metadata_partition {
              +  int32_t id;
              +  rd_kafka_resp_err_t err;
              +  int32_t leader;
              +  int replica_cnt;
              +  int32_t *replicas;
              +  int isr_cnt;
              +  int32_t *isrs;
              +} rd_kafka_metadata_partition_t;
              +typedef struct rd_kafka_metadata_topic {
              +  char *topic;
              +  int partition_cnt;
              +  struct rd_kafka_metadata_partition *partitions;
              +  rd_kafka_resp_err_t err;
              +} rd_kafka_metadata_topic_t;
              +typedef struct rd_kafka_metadata {
              +  int broker_cnt;
              +  struct rd_kafka_metadata_broker *brokers;
              +  int topic_cnt;
              +  struct rd_kafka_metadata_topic *topics;
              +  int32_t orig_broker_id;
              +  char *orig_broker_name;
              +} rd_kafka_metadata_t;
              +rd_kafka_resp_err_t rd_kafka_metadata(rd_kafka_t *rk, int all_topics, rd_kafka_topic_t *only_rkt, const struct rd_kafka_metadata **metadatap, int timeout_ms);
              +void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata);
              +typedef struct rd_kafka_Node_s rd_kafka_Node_t;
              +int rd_kafka_Node_id(const rd_kafka_Node_t *node);
              +const char *rd_kafka_Node_host(const rd_kafka_Node_t *node);
              +uint16_t rd_kafka_Node_port(const rd_kafka_Node_t *node);
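               +/* Legacy consumer group listing (rd_kafka_list_groups). */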
              +struct rd_kafka_group_member_info {
              +  char *member_id;
              +  char *client_id;
              +  char *client_host;
              +  void *member_metadata;
              +  int member_metadata_size;
              +  void *member_assignment;
              +  int member_assignment_size;
              +};
              +typedef enum {
              +  RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN = 0,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_PREPARING_REBALANCE = 1,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_COMPLETING_REBALANCE = 2,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_STABLE = 3,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_DEAD = 4,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY = 5,
              +  RD_KAFKA_CONSUMER_GROUP_STATE__CNT,
              +} rd_kafka_consumer_group_state_t;
              +struct rd_kafka_group_info {
              +  struct rd_kafka_metadata_broker broker;
              +  char *group;
              +  rd_kafka_resp_err_t err;
              +  char *state;
              +  char *protocol_type;
              +  char *protocol;
              +  struct rd_kafka_group_member_info *members;
              +  int member_cnt;
              +};
              +struct rd_kafka_group_list {
              +  struct rd_kafka_group_info *groups;
              +  int group_cnt;
              +};
              +rd_kafka_resp_err_t rd_kafka_list_groups(rd_kafka_t *rk, const char *group, const struct rd_kafka_group_list **grplistp, int timeout_ms);
              +const char *rd_kafka_consumer_group_state_name(rd_kafka_consumer_group_state_t state);
              +rd_kafka_consumer_group_state_t rd_kafka_consumer_group_state_code(const char *name);
              +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist);
              +int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist);
              +void rd_kafka_set_logger(rd_kafka_t *rk, void (*func)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_set_log_level(rd_kafka_t *rk, int level);
              +void rd_kafka_log_print(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +int rd_kafka_outq_len(rd_kafka_t *rk);
              +void rd_kafka_dump(FILE *fp, rd_kafka_t *rk);
              +int rd_kafka_thread_cnt(void);
              +typedef enum rd_kafka_thread_type_t {
              +  RD_KAFKA_THREAD_MAIN,
              +  RD_KAFKA_THREAD_BACKGROUND,
              +  RD_KAFKA_THREAD_BROKER,
              +} rd_kafka_thread_type_t;
              +int rd_kafka_wait_destroyed(int timeout_ms);
              +int rd_kafka_unittest(void);
              +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk);
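               +/* Event API: poll-based alternative to registered callbacks. */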
              +typedef int rd_kafka_event_type_t;
              +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev);
              +void rd_kafka_event_destroy(rd_kafka_event_t *rkev);
              +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev);
              +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, const rd_kafka_message_t **rkmessages, size_t size);
              +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev);
              +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev);
              +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev);
              +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev);
              +int rd_kafka_event_log(rd_kafka_event_t *rkev, const char **fac, const char **str, int *level);
              +int rd_kafka_event_debug_contexts(rd_kafka_event_t *rkev, char *dst, size_t dstsize);
              +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_list_t *rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_t *rd_kafka_event_topic_partition(rd_kafka_event_t *rkev);
              +typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreateAcls_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeAcls_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteAcls_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteRecords_result_t;
              +typedef rd_kafka_event_t rd_kafka_ListConsumerGroups_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConsumerGroups_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteGroups_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteConsumerGroupOffsets_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConsumerGroupOffsets_result_t;
              +typedef rd_kafka_event_t rd_kafka_ListConsumerGroupOffsets_result_t;
              +const rd_kafka_CreateTopics_result_t *rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteTopics_result_t *rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreatePartitions_result_t *rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConfigs_result_t *rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConfigs_result_t *rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteRecords_result_t *rd_kafka_event_DeleteRecords_result(rd_kafka_event_t *rkev);
              +const rd_kafka_ListConsumerGroups_result_t *rd_kafka_event_ListConsumerGroups_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConsumerGroups_result_t *rd_kafka_event_DescribeConsumerGroups_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteGroups_result_t *rd_kafka_event_DeleteGroups_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteConsumerGroupOffsets_result_t *rd_kafka_event_DeleteConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreateAcls_result_t *rd_kafka_event_CreateAcls_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeAcls_result_t *rd_kafka_event_DescribeAcls_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteAcls_result_t *rd_kafka_event_DeleteAcls_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConsumerGroupOffsets_result_t *rd_kafka_event_AlterConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
              +const rd_kafka_ListConsumerGroupOffsets_result_t *rd_kafka_event_ListConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
              +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms);
              +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms);
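               +/* Plugin and interceptor interfaces. */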
              +typedef rd_kafka_resp_err_t (rd_kafka_plugin_f_conf_init_t)(rd_kafka_conf_t *conf, void **plug_opaquep, char *errstr, size_t errstr_size);
              +typedef rd_kafka_conf_res_t (rd_kafka_interceptor_f_on_conf_set_t)(rd_kafka_conf_t *conf, const char *name, const char *val, char *errstr, size_t errstr_size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_dup_t)(rd_kafka_conf_t *new_conf, const rd_kafka_conf_t *old_conf, size_t filter_cnt, const char **filter, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_destroy_t)(void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_new_t)(rd_kafka_t *rk, const rd_kafka_conf_t *conf, void *ic_opaque, char *errstr, size_t errstr_size);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_send_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_acknowledgement_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_consume_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_commit_t)(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_request_sent_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_response_received_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, int64_t rtt, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_start_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_exit_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_broker_state_change_t)(rd_kafka_t *rk, int32_t broker_id, const char *secproto, const char *name, int port, const char *state, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_new_t *on_new, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_destroy_t *on_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_send_t *on_send, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_consume_t *on_consume, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_commit_t *on_commit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_response_received(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_response_received_t *on_response_received, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_broker_state_change(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_broker_state_change_t *on_broker_state_change, void *ic_opaque);
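               +/* Admin API: topics, partitions, configs, records, consumer groups, offsets and ACLs. */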
              +rd_kafka_resp_err_t rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres);
              +const rd_kafka_error_t *rd_kafka_group_result_error(const rd_kafka_group_result_t *groupres);
              +const char *rd_kafka_group_result_name(const rd_kafka_group_result_t *groupres);
              +const rd_kafka_topic_partition_list_t *rd_kafka_group_result_partitions(const rd_kafka_group_result_t *groupres);
              +typedef enum rd_kafka_admin_op_t {
              +  RD_KAFKA_ADMIN_OP_ANY = 0,
              +  RD_KAFKA_ADMIN_OP_CREATETOPICS,
              +  RD_KAFKA_ADMIN_OP_DELETETOPICS,
              +  RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONFIGS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS,
              +  RD_KAFKA_ADMIN_OP_DELETERECORDS,
              +  RD_KAFKA_ADMIN_OP_DELETEGROUPS,
              +  RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS,
              +  RD_KAFKA_ADMIN_OP_CREATEACLS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBEACLS,
              +  RD_KAFKA_ADMIN_OP_DELETEACLS,
              +  RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS,
              +  RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS,
              +  RD_KAFKA_ADMIN_OP__CNT,
              +} rd_kafka_admin_op_t;
              +typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t;
              +rd_kafka_AdminOptions_t *rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
              +void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, int true_or_false, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, int32_t broker_id, char *errstr, size_t errstr_size);
              +rd_kafka_error_t *rd_kafka_AdminOptions_set_require_stable_offsets(rd_kafka_AdminOptions_t *options, int true_or_false);
              +rd_kafka_error_t *rd_kafka_AdminOptions_set_match_consumer_group_states(rd_kafka_AdminOptions_t *options, const rd_kafka_consumer_group_state_t *consumer_group_states, size_t consumer_group_states_cnt);
              +void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, void *ev_opaque);
              +typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t;
              +rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, int num_partitions, int replication_factor, char *errstr, size_t errstr_size);
              +void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic);
              +void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, int32_t partition, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, const char *name, const char *value);
              +void rd_kafka_CreateTopics(rd_kafka_t *rk, rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(const rd_kafka_CreateTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
              +rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic);
              +void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic);
              +void rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt);
              +void rd_kafka_DeleteTopics(rd_kafka_t *rk, rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(const rd_kafka_DeleteTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t;
              +rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new(const char *topic, size_t new_total_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts);
              +void rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment(rd_kafka_NewPartitions_t *new_parts, int32_t new_partition_idx, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_CreatePartitions(rd_kafka_t *rk, rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreatePartitions_result_topics(const rd_kafka_CreatePartitions_result_t *result, size_t *cntp);
              +typedef enum rd_kafka_ConfigSource_t {
              +  RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
              +  RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4,
              +  RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5,
              +  RD_KAFKA_CONFIG_SOURCE__CNT,
              +} rd_kafka_ConfigSource_t;
              +const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource);
              +typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t;
              +const char *rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry);
              +const char *rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry);
              +rd_kafka_ConfigSource_t rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, size_t *cntp);
              +typedef enum rd_kafka_ResourceType_t {
              +  RD_KAFKA_RESOURCE_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_ANY = 1,
              +  RD_KAFKA_RESOURCE_TOPIC = 2,
              +  RD_KAFKA_RESOURCE_GROUP = 3,
              +  RD_KAFKA_RESOURCE_BROKER = 4,
              +  RD_KAFKA_RESOURCE__CNT,
              +} rd_kafka_ResourceType_t;
              +typedef enum rd_kafka_ResourcePatternType_t {
              +  RD_KAFKA_RESOURCE_PATTERN_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_PATTERN_ANY = 1,
              +  RD_KAFKA_RESOURCE_PATTERN_MATCH = 2,
              +  RD_KAFKA_RESOURCE_PATTERN_LITERAL = 3,
              +  RD_KAFKA_RESOURCE_PATTERN_PREFIXED = 4,
              +  RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT,
              +} rd_kafka_ResourcePatternType_t;
              +const char *rd_kafka_ResourcePatternType_name(rd_kafka_ResourcePatternType_t resource_pattern_type);
              +const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype);
              +typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
              +rd_kafka_ConfigResource_t *rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, const char *resname);
              +void rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config);
              +void rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, size_t config_cnt);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, const char *name, const char *value);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, size_t *cntp);
              +rd_kafka_ResourceType_t rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config);
              +void rd_kafka_AlterConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_AlterConfigs_result_resources(const rd_kafka_AlterConfigs_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_DescribeConfigs_result_resources(const rd_kafka_DescribeConfigs_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteRecords_s rd_kafka_DeleteRecords_t;
              +rd_kafka_DeleteRecords_t *rd_kafka_DeleteRecords_new(const rd_kafka_topic_partition_list_t *before_offsets);
              +void rd_kafka_DeleteRecords_destroy(rd_kafka_DeleteRecords_t *del_records);
              +void rd_kafka_DeleteRecords_destroy_array(rd_kafka_DeleteRecords_t **del_records, size_t del_record_cnt);
              +void rd_kafka_DeleteRecords(rd_kafka_t *rk, rd_kafka_DeleteRecords_t **del_records, size_t del_record_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_partition_list_t *rd_kafka_DeleteRecords_result_offsets(const rd_kafka_DeleteRecords_result_t *result);
              +typedef struct rd_kafka_ConsumerGroupListing_s rd_kafka_ConsumerGroupListing_t;
              +typedef struct rd_kafka_ListConsumerGroupsResult_s rd_kafka_ListConsumerGroupsResult_t;
              +void rd_kafka_ListConsumerGroups(rd_kafka_t *rk, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const char *rd_kafka_ConsumerGroupListing_group_id(const rd_kafka_ConsumerGroupListing_t *grplist);
              +int rd_kafka_ConsumerGroupListing_is_simple_consumer_group(const rd_kafka_ConsumerGroupListing_t *grplist);
              +rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupListing_state(const rd_kafka_ConsumerGroupListing_t *grplist);
              +const rd_kafka_ConsumerGroupListing_t **rd_kafka_ListConsumerGroups_result_valid(const rd_kafka_ListConsumerGroups_result_t *result, size_t *cntp);
              +const rd_kafka_error_t **rd_kafka_ListConsumerGroups_result_errors(const rd_kafka_ListConsumerGroups_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_ConsumerGroupDescription_s rd_kafka_ConsumerGroupDescription_t;
              +typedef struct rd_kafka_MemberDescription_s rd_kafka_MemberDescription_t;
              +typedef struct rd_kafka_MemberAssignment_s rd_kafka_MemberAssignment_t;
              +void rd_kafka_DescribeConsumerGroups(rd_kafka_t *rk, const char **groups, size_t groups_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConsumerGroupDescription_t **rd_kafka_DescribeConsumerGroups_result_groups(const rd_kafka_DescribeConsumerGroups_result_t *result, size_t *cntp);
              +const char *rd_kafka_ConsumerGroupDescription_group_id(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +const rd_kafka_error_t *rd_kafka_ConsumerGroupDescription_error(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +int rd_kafka_ConsumerGroupDescription_is_simple_consumer_group(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +const char *rd_kafka_ConsumerGroupDescription_partition_assignor(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupDescription_state(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +const rd_kafka_Node_t *rd_kafka_ConsumerGroupDescription_coordinator(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +size_t rd_kafka_ConsumerGroupDescription_member_count(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +const rd_kafka_MemberDescription_t *rd_kafka_ConsumerGroupDescription_member(const rd_kafka_ConsumerGroupDescription_t *grpdesc, size_t idx);
              +const char *rd_kafka_MemberDescription_client_id(const rd_kafka_MemberDescription_t *member);
              +const char *rd_kafka_MemberDescription_group_instance_id(const rd_kafka_MemberDescription_t *member);
              +const char *rd_kafka_MemberDescription_consumer_id(const rd_kafka_MemberDescription_t *member);
              +const char *rd_kafka_MemberDescription_host(const rd_kafka_MemberDescription_t *member);
              +const rd_kafka_MemberAssignment_t *rd_kafka_MemberDescription_assignment(const rd_kafka_MemberDescription_t *member);
              +const rd_kafka_topic_partition_list_t *rd_kafka_MemberAssignment_partitions(const rd_kafka_MemberAssignment_t *assignment);
              +typedef struct rd_kafka_DeleteGroup_s rd_kafka_DeleteGroup_t;
              +rd_kafka_DeleteGroup_t *rd_kafka_DeleteGroup_new(const char *group);
              +void rd_kafka_DeleteGroup_destroy(rd_kafka_DeleteGroup_t *del_group);
              +void rd_kafka_DeleteGroup_destroy_array(rd_kafka_DeleteGroup_t **del_groups, size_t del_group_cnt);
              +void rd_kafka_DeleteGroups(rd_kafka_t *rk, rd_kafka_DeleteGroup_t **del_groups, size_t del_group_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_DeleteGroups_result_groups(const rd_kafka_DeleteGroups_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_ListConsumerGroupOffsets_s rd_kafka_ListConsumerGroupOffsets_t;
              +rd_kafka_ListConsumerGroupOffsets_t *rd_kafka_ListConsumerGroupOffsets_new(const char *group_id, const rd_kafka_topic_partition_list_t *partitions);
              +void rd_kafka_ListConsumerGroupOffsets_destroy(rd_kafka_ListConsumerGroupOffsets_t *list_grpoffsets);
              +void rd_kafka_ListConsumerGroupOffsets_destroy_array(rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets, size_t list_grpoffset_cnt);
              +void rd_kafka_ListConsumerGroupOffsets(rd_kafka_t *rk, rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets, size_t list_grpoffsets_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_ListConsumerGroupOffsets_result_groups(const rd_kafka_ListConsumerGroupOffsets_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_AlterConsumerGroupOffsets_s rd_kafka_AlterConsumerGroupOffsets_t;
              +rd_kafka_AlterConsumerGroupOffsets_t *rd_kafka_AlterConsumerGroupOffsets_new(const char *group_id, const rd_kafka_topic_partition_list_t *partitions);
              +void rd_kafka_AlterConsumerGroupOffsets_destroy(rd_kafka_AlterConsumerGroupOffsets_t *alter_grpoffsets);
              +void rd_kafka_AlterConsumerGroupOffsets_destroy_array(rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets, size_t alter_grpoffset_cnt);
              +void rd_kafka_AlterConsumerGroupOffsets(rd_kafka_t *rk, rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets, size_t alter_grpoffsets_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_AlterConsumerGroupOffsets_result_groups(const rd_kafka_AlterConsumerGroupOffsets_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteConsumerGroupOffsets_s rd_kafka_DeleteConsumerGroupOffsets_t;
              +rd_kafka_DeleteConsumerGroupOffsets_t *rd_kafka_DeleteConsumerGroupOffsets_new(const char *group, const rd_kafka_topic_partition_list_t *partitions);
              +void rd_kafka_DeleteConsumerGroupOffsets_destroy(rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets);
              +void rd_kafka_DeleteConsumerGroupOffsets_destroy_array(rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, size_t del_grpoffset_cnt);
              +void rd_kafka_DeleteConsumerGroupOffsets(rd_kafka_t *rk, rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, size_t del_grpoffsets_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_DeleteConsumerGroupOffsets_result_groups(const rd_kafka_DeleteConsumerGroupOffsets_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_AclBinding_s rd_kafka_AclBinding_t;
              +typedef rd_kafka_AclBinding_t rd_kafka_AclBindingFilter_t;
              +const rd_kafka_error_t *rd_kafka_acl_result_error(const rd_kafka_acl_result_t *aclres);
              +typedef enum rd_kafka_AclOperation_t {
              +  RD_KAFKA_ACL_OPERATION_UNKNOWN = 0,
              +  RD_KAFKA_ACL_OPERATION_ANY = 1,
              +  RD_KAFKA_ACL_OPERATION_ALL = 2,
              +  RD_KAFKA_ACL_OPERATION_READ = 3,
              +  RD_KAFKA_ACL_OPERATION_WRITE = 4,
              +  RD_KAFKA_ACL_OPERATION_CREATE = 5,
              +  RD_KAFKA_ACL_OPERATION_DELETE = 6,
              +  RD_KAFKA_ACL_OPERATION_ALTER = 7,
              +  RD_KAFKA_ACL_OPERATION_DESCRIBE = 8,
              +  RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION = 9,
              +  RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS = 10,
              +  RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS = 11,
              +  RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE = 12,
              +  RD_KAFKA_ACL_OPERATION__CNT,
              +} rd_kafka_AclOperation_t;
              +const char *rd_kafka_AclOperation_name(rd_kafka_AclOperation_t acl_operation);
              +typedef enum rd_kafka_AclPermissionType_t {
              +  RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN = 0,
              +  RD_KAFKA_ACL_PERMISSION_TYPE_ANY = 1,
              +  RD_KAFKA_ACL_PERMISSION_TYPE_DENY = 2,
              +  RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW = 3,
              +  RD_KAFKA_ACL_PERMISSION_TYPE__CNT,
              +} rd_kafka_AclPermissionType_t;
              +const char *rd_kafka_AclPermissionType_name(rd_kafka_AclPermissionType_t acl_permission_type);
              +rd_kafka_AclBinding_t *rd_kafka_AclBinding_new(rd_kafka_ResourceType_t restype, const char *name, rd_kafka_ResourcePatternType_t resource_pattern_type, const char *principal, const char *host, rd_kafka_AclOperation_t operation, rd_kafka_AclPermissionType_t permission_type, char *errstr, size_t errstr_size);
              +rd_kafka_AclBindingFilter_t *rd_kafka_AclBindingFilter_new(rd_kafka_ResourceType_t restype, const char *name, rd_kafka_ResourcePatternType_t resource_pattern_type, const char *principal, const char *host, rd_kafka_AclOperation_t operation, rd_kafka_AclPermissionType_t permission_type, char *errstr, size_t errstr_size);
              +rd_kafka_ResourceType_t rd_kafka_AclBinding_restype(const rd_kafka_AclBinding_t *acl);
              +const char *rd_kafka_AclBinding_name(const rd_kafka_AclBinding_t *acl);
              +const char *rd_kafka_AclBinding_principal(const rd_kafka_AclBinding_t *acl);
              +const char *rd_kafka_AclBinding_host(const rd_kafka_AclBinding_t *acl);
              +rd_kafka_AclOperation_t rd_kafka_AclBinding_operation(const rd_kafka_AclBinding_t *acl);
              +rd_kafka_AclPermissionType_t rd_kafka_AclBinding_permission_type(const rd_kafka_AclBinding_t *acl);
              +rd_kafka_ResourcePatternType_t rd_kafka_AclBinding_resource_pattern_type(const rd_kafka_AclBinding_t *acl);
              +const rd_kafka_error_t *rd_kafka_AclBinding_error(const rd_kafka_AclBinding_t *acl);
              +void rd_kafka_AclBinding_destroy(rd_kafka_AclBinding_t *acl_binding);
              +void rd_kafka_AclBinding_destroy_array(rd_kafka_AclBinding_t **acl_bindings, size_t acl_bindings_cnt);
              +const rd_kafka_acl_result_t **rd_kafka_CreateAcls_result_acls(const rd_kafka_CreateAcls_result_t *result, size_t *cntp);
              +void rd_kafka_CreateAcls(rd_kafka_t *rk, rd_kafka_AclBinding_t **new_acls, size_t new_acls_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_AclBinding_t **rd_kafka_DescribeAcls_result_acls(const rd_kafka_DescribeAcls_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeAcls(rd_kafka_t *rk, rd_kafka_AclBindingFilter_t *acl_filter, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +typedef struct rd_kafka_DeleteAcls_result_response_s rd_kafka_DeleteAcls_result_response_t;
              +const rd_kafka_DeleteAcls_result_response_t **rd_kafka_DeleteAcls_result_responses(const rd_kafka_DeleteAcls_result_t *result, size_t *cntp);
              +const rd_kafka_error_t *rd_kafka_DeleteAcls_result_response_error(const rd_kafka_DeleteAcls_result_response_t *result_response);
              +const rd_kafka_AclBinding_t **rd_kafka_DeleteAcls_result_response_matching_acls(const rd_kafka_DeleteAcls_result_response_t *result_response, size_t *matching_acls_cntp);
              +void rd_kafka_DeleteAcls(rd_kafka_t *rk, rd_kafka_AclBindingFilter_t **del_acls, size_t del_acls_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token(rd_kafka_t *rk, const char *token_value, int64_t md_lifetime_ms, const char *md_principal_name, const char **extensions, size_t extension_size, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk, const char *errstr);
              +rd_kafka_error_t *rd_kafka_init_transactions(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_send_offsets_to_transaction(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, const rd_kafka_consumer_group_metadata_t *cgmetadata, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_commit_transaction(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_abort_transaction(rd_kafka_t *rk, int timeout_ms);
              +typedef struct rd_kafka_mock_cluster_s rd_kafka_mock_cluster_t;
              +rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new(rd_kafka_t *rk, int broker_cnt);
              +void rd_kafka_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_t *rd_kafka_mock_cluster_handle(const rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_mock_cluster_t *rd_kafka_handle_mock_cluster(const rd_kafka_t *rk);
              +const char *rd_kafka_mock_cluster_bootstraps(const rd_kafka_mock_cluster_t *mcluster);
              +void rd_kafka_mock_clear_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey);
              +void rd_kafka_mock_push_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, ...);
              +void rd_kafka_mock_push_request_errors_array(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, const rd_kafka_resp_err_t *errors);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_push_request_error_rtts(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int16_t ApiKey, size_t cnt, ...);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_error_stack_cnt(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int16_t ApiKey, size_t *cntp);
              +void rd_kafka_mock_topic_set_error(rd_kafka_mock_cluster_t *mcluster, const char *topic, rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_mock_topic_create(rd_kafka_mock_cluster_t *mcluster, const char *topic, int partition_cnt, int replication_factor);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_leader(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int64_t lo, int64_t hi);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_down(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_up(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rtt(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int rtt_ms);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rack(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, const char *rack);
              +rd_kafka_resp_err_t rd_kafka_mock_coordinator_set(rd_kafka_mock_cluster_t *mcluster, const char *key_type, const char *key, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_set_apiversion(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, int16_t MinVersion, int16_t MaxVersion);
              +'
              +
              +rdkafka.h, rdkafka_mock.h
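As a purely illustrative aside (not part of the generated reference): a declaration block like the one ending above is the kind of input PHP's FFI extension consumes. The shared-library name and the way this client actually bootstraps its FFI binding are assumptions here; the sketch only shows the general mechanism.

<?php
// Hypothetical sketch: bind a librdkafka CDEF string via PHP FFI.
// $cdef is assumed to hold the declaration text documented above;
// 'librdkafka.so.1' is a platform-dependent assumption.
$kafka = \FFI::cdef($cdef, 'librdkafka.so.1');

// Declared functions become callable on the binding, e.g. the runtime version:
printf("librdkafka 0x%08x (%s)\n",
    $kafka->rd_kafka_version(),
    \FFI::string($kafka->rd_kafka_version_str())
);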

              +

              RD_KAFKA_VERSION

              +
              public RD_KAFKA_VERSION = 33620479
              +
              +

              librdkafka version

              +

              Interpreted as hex MM.mm.rr.xx:

• MM = Major
• mm = minor
• rr = revision
• xx = pre-release id (0xff is the final release)

              E.g.: 0x000801ff = 0.8.1

              +
              Remarks
This value should only be used during compile time; for runtime checks of the version, use rd_kafka_version().
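Since the layout is just fixed-width byte fields, the value above can be unpacked with plain bit shifts. This small sketch is illustrative only and assumes nothing beyond the integer documented here.

<?php
// Decode the MM.mm.rr.xx layout of RD_KAFKA_VERSION (illustrative only).
$version = 33620479; // 0x020101ff, the value documented above

$major      = ($version >> 24) & 0xff; // MM
$minor      = ($version >> 16) & 0xff; // mm
$revision   = ($version >> 8)  & 0xff; // rr
$preRelease = $version & 0xff;         // xx, 0xff marks the final release

printf("librdkafka %d.%d.%d%s\n",
    $major, $minor, $revision,
    $preRelease === 0xff ? '' : sprintf(' (pre-release %d)', $preRelease)
);
// Prints: librdkafka 2.1.1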

              RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE

              +
              public RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76
              +
              +

              Unsupported compression type
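For context only: broker error codes like this one surface on consumed messages and delivery reports, so the constant is typically compared against a message's err field. The sketch below assumes a php-rdkafka-style \RdKafka\Message object and that the constant is exposed globally as in the php-rdkafka extension; neither assumption comes from this page.

<?php
// Hedged sketch: react to an unsupported-compression error on a consumed message.
/** @var \RdKafka\Message $message */
if ($message->err === RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE) {
    // The broker rejected the compression codec used for this topic/partition.
    error_log('Unsupported compression type: ' . $message->errstr());
}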


              RD_KAFKA_RESP_ERR_END_ALL

              +

              public RD_KAFKA_RESP_ERR_END_ALL = 98
              +
End of broker error codes (enum rd_kafka_resp_err_t)

              +

              RD_KAFKA_ADMIN_OP__CNT

              +
              public RD_KAFKA_ADMIN_OP__CNT = 16
              +
              +

              Number of ops defined
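Count constants like this one mark the end of the corresponding enum, so they can serve as an exclusive upper bound when validating raw values. The global constant exposure assumed below is not stated on this page; treat it as a hedged sketch.

<?php
// Hedged sketch: use the count constant as an exclusive upper bound.
function isKnownAdminOp(int $op): bool
{
    return $op >= RD_KAFKA_ADMIN_OP_ANY && $op < RD_KAFKA_ADMIN_OP__CNT;
}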


              RD_KAFKA_CDEF

              +

              public RD_KAFKA_CDEF = 'typedef long int ssize_t;
              +typedef struct _IO_FILE FILE;
              +typedef long int mode_t;
              +typedef signed int int16_t;
              +typedef unsigned int uint16_t;
              +typedef signed int int32_t;
              +typedef signed long int int64_t;
              +int rd_kafka_version(void);
              +const char *rd_kafka_version_str(void);
              +typedef enum rd_kafka_type_t {
              +  RD_KAFKA_PRODUCER,
              +  RD_KAFKA_CONSUMER,
              +} rd_kafka_type_t;
              +typedef enum rd_kafka_timestamp_type_t {
              +  RD_KAFKA_TIMESTAMP_NOT_AVAILABLE,
              +  RD_KAFKA_TIMESTAMP_CREATE_TIME,
              +  RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME,
              +} rd_kafka_timestamp_type_t;
              +const char *rd_kafka_get_debug_contexts(void);
              +typedef struct rd_kafka_s rd_kafka_t;
              +typedef struct rd_kafka_topic_s rd_kafka_topic_t;
              +typedef struct rd_kafka_conf_s rd_kafka_conf_t;
              +typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t;
              +typedef struct rd_kafka_queue_s rd_kafka_queue_t;
              +typedef struct rd_kafka_op_s rd_kafka_event_t;
              +typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t;
              +typedef struct rd_kafka_consumer_group_metadata_s rd_kafka_consumer_group_metadata_t;
              +typedef struct rd_kafka_error_s {
              +  unsigned int code;
              +  char *errstr;
              +  unsigned char fatal;
              +  unsigned char retriable;
              +  unsigned char txn_requires_abort;
              +} rd_kafka_error_t;
              +typedef struct rd_kafka_headers_s rd_kafka_headers_t;
              +typedef struct rd_kafka_group_result_s rd_kafka_group_result_t;
              +typedef struct rd_kafka_acl_result_s rd_kafka_acl_result_t;
              +typedef enum {
              +  RD_KAFKA_RESP_ERR__BEGIN = (- 200),
              +  RD_KAFKA_RESP_ERR__BAD_MSG = (- 199),
              +  RD_KAFKA_RESP_ERR__BAD_COMPRESSION = (- 198),
              +  RD_KAFKA_RESP_ERR__DESTROY = (- 197),
              +  RD_KAFKA_RESP_ERR__FAIL = (- 196),
              +  RD_KAFKA_RESP_ERR__TRANSPORT = (- 195),
              +  RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = (- 194),
              +  RD_KAFKA_RESP_ERR__RESOLVE = (- 193),
              +  RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = (- 192),
              +  RD_KAFKA_RESP_ERR__PARTITION_EOF = (- 191),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = (- 190),
              +  RD_KAFKA_RESP_ERR__FS = (- 189),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = (- 188),
              +  RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = (- 187),
              +  RD_KAFKA_RESP_ERR__INVALID_ARG = (- 186),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT = (- 185),
              +  RD_KAFKA_RESP_ERR__QUEUE_FULL = (- 184),
              +  RD_KAFKA_RESP_ERR__ISR_INSUFF = (- 183),
              +  RD_KAFKA_RESP_ERR__NODE_UPDATE = (- 182),
              +  RD_KAFKA_RESP_ERR__SSL = (- 181),
              +  RD_KAFKA_RESP_ERR__WAIT_COORD = (- 180),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = (- 179),
              +  RD_KAFKA_RESP_ERR__IN_PROGRESS = (- 178),
              +  RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = (- 177),
              +  RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = (- 176),
              +  RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = (- 175),
              +  RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = (- 174),
              +  RD_KAFKA_RESP_ERR__CONFLICT = (- 173),
              +  RD_KAFKA_RESP_ERR__STATE = (- 172),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = (- 171),
              +  RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = (- 170),
              +  RD_KAFKA_RESP_ERR__AUTHENTICATION = (- 169),
              +  RD_KAFKA_RESP_ERR__NO_OFFSET = (- 168),
              +  RD_KAFKA_RESP_ERR__OUTDATED = (- 167),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = (- 166),
              +  RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = (- 165),
              +  RD_KAFKA_RESP_ERR__WAIT_CACHE = (- 164),
              +  RD_KAFKA_RESP_ERR__INTR = (- 163),
              +  RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = (- 162),
              +  RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = (- 161),
              +  RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = (- 160),
              +  RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = (- 159),
              +  RD_KAFKA_RESP_ERR__PARTIAL = (- 158),
              +  RD_KAFKA_RESP_ERR__READ_ONLY = (- 157),
              +  RD_KAFKA_RESP_ERR__NOENT = (- 156),
              +  RD_KAFKA_RESP_ERR__UNDERFLOW = (- 155),
              +  RD_KAFKA_RESP_ERR__INVALID_TYPE = (- 154),
              +  RD_KAFKA_RESP_ERR__RETRY = (- 153),
              +  RD_KAFKA_RESP_ERR__PURGE_QUEUE = (- 152),
              +  RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = (- 151),
              +  RD_KAFKA_RESP_ERR__FATAL = (- 150),
              +  RD_KAFKA_RESP_ERR__INCONSISTENT = (- 149),
              +  RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = (- 148),
              +  RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = (- 147),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_BROKER = (- 146),
              +  RD_KAFKA_RESP_ERR__NOT_CONFIGURED = (- 145),
              +  RD_KAFKA_RESP_ERR__FENCED = (- 144),
              +  RD_KAFKA_RESP_ERR__APPLICATION = (- 143),
              +  RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST = (- 142),
              +  RD_KAFKA_RESP_ERR__NOOP = (- 141),
              +  RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET = (- 140),
              +  RD_KAFKA_RESP_ERR__LOG_TRUNCATION = (- 139),
              +  RD_KAFKA_RESP_ERR__END = (- 100),
              +  RD_KAFKA_RESP_ERR_UNKNOWN = (- 1),
              +  RD_KAFKA_RESP_ERR_NO_ERROR = 0,
              +  RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG = 2,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4,
              +  RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5,
              +  RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6,
              +  RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7,
              +  RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8,
              +  RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9,
              +  RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10,
              +  RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11,
              +  RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12,
              +  RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS = 14,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE = 15,
              +  RD_KAFKA_RESP_ERR_NOT_COORDINATOR = 16,
              +  RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17,
              +  RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
              +  RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25,
              +  RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26,
              +  RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27,
              +  RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
              +  RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29,
              +  RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30,
              +  RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
              +  RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35,
              +  RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36,
              +  RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39,
              +  RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40,
              +  RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
              +  RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44,
              +  RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47,
              +  RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49,
              +  RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50,
              +  RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51,
              +  RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52,
              +  RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
              +  RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54,
              +  RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55,
              +  RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56,
              +  RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57,
              +  RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59,
              +  RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66,
              +  RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67,
              +  RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68,
              +  RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69,
              +  RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70,
              +  RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71,
              +  RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72,
              +  RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73,
              +  RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76,
              +  RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77,
              +  RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78,
              +  RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79,
              +  RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80,
              +  RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81,
              +  RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID = 82,
              +  RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE = 83,
              +  RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED = 84,
              +  RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS = 85,
              +  RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC = 86,
              +  RD_KAFKA_RESP_ERR_INVALID_RECORD = 87,
              +  RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT = 88,
              +  RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED = 89,
              +  RD_KAFKA_RESP_ERR_PRODUCER_FENCED = 90,
              +  RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND = 91,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE = 92,
              +  RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL = 93,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET = 94,
              +  RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION = 95,
              +  RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED = 96,
              +  RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE = 97,
              +  RD_KAFKA_RESP_ERR_END_ALL,
              +} rd_kafka_resp_err_t;
              +struct rd_kafka_err_desc {
              +  rd_kafka_resp_err_t code;
              +  char *name;
              +  char *desc;
              +};
              +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, size_t *cntp);
              +const char *rd_kafka_err2str(rd_kafka_resp_err_t err);
              +const char *rd_kafka_err2name(rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_last_error(void);
              +rd_kafka_resp_err_t rd_kafka_errno2err(int errnox);
              +int rd_kafka_errno(void);
              +rd_kafka_resp_err_t rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason);
              +rd_kafka_resp_err_t rd_kafka_error_code(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_name(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_string(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_fatal(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_retriable(const rd_kafka_error_t *error);
              +int rd_kafka_error_txn_requires_abort(const rd_kafka_error_t *error);
              +void rd_kafka_error_destroy(rd_kafka_error_t *error);
              +rd_kafka_error_t *rd_kafka_error_new(rd_kafka_resp_err_t code, const char *fmt, ...);
              +typedef struct rd_kafka_topic_partition_s {
              +  char *topic;
              +  int32_t partition;
              +  int64_t offset;
              +  void *metadata;
              +  size_t metadata_size;
              +  void *opaque;
              +  rd_kafka_resp_err_t err;
              +  void *_private;
              +} rd_kafka_topic_partition_t;
              +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar);
              +void rd_kafka_topic_partition_set_leader_epoch(rd_kafka_topic_partition_t *rktpar, int32_t leader_epoch);
              +int32_t rd_kafka_topic_partition_get_leader_epoch(const rd_kafka_topic_partition_t *rktpar);
              +typedef struct rd_kafka_topic_partition_list_s {
              +  int cnt;
              +  int size;
              +  rd_kafka_topic_partition_t *elems;
              +} rd_kafka_topic_partition_list_t;
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size);
              +void rd_kafka_topic_partition_list_destroy(rd_kafka_topic_partition_list_t *rkparlist);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_add_range(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t start, int32_t stop);
              +int rd_kafka_topic_partition_list_del(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +int rd_kafka_topic_partition_list_del_by_idx(rd_kafka_topic_partition_list_t *rktparlist, int idx);
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src);
              +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition, int64_t offset);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(const rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_sort(rd_kafka_topic_partition_list_t *rktparlist, int (*cmp)(const void *a, const void *b, void *cmp_opaque), void *cmp_opaque);
              +typedef enum rd_kafka_vtype_t {
              +  RD_KAFKA_VTYPE_END,
              +  RD_KAFKA_VTYPE_TOPIC,
              +  RD_KAFKA_VTYPE_RKT,
              +  RD_KAFKA_VTYPE_PARTITION,
              +  RD_KAFKA_VTYPE_VALUE,
              +  RD_KAFKA_VTYPE_KEY,
              +  RD_KAFKA_VTYPE_OPAQUE,
              +  RD_KAFKA_VTYPE_MSGFLAGS,
              +  RD_KAFKA_VTYPE_TIMESTAMP,
              +  RD_KAFKA_VTYPE_HEADER,
              +  RD_KAFKA_VTYPE_HEADERS,
              +} rd_kafka_vtype_t;
              +typedef struct rd_kafka_vu_s {
              +  rd_kafka_vtype_t vtype;
              +  union {
              +    char *cstr;
              +    rd_kafka_topic_t *rkt;
              +    int i;
              +    int32_t i32;
              +    int64_t i64;
              +    struct {
              +      void *ptr;
              +      size_t size;
              +    } mem;
              +    struct {
              +      char *name;
              +      void *val;
              +      ssize_t size;
              +    } header;
              +    rd_kafka_headers_t *headers;
              +    void *ptr;
              +    char _pad[64];
              +  } u;
              +} rd_kafka_vu_t;
              +rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count);
              +void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs);
              +rd_kafka_headers_t *rd_kafka_headers_copy(const rd_kafka_headers_t *src);
              +rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, const char *name, ssize_t name_size, const void *value, ssize_t value_size);
              +rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, const char *name);
              +rd_kafka_resp_err_t rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get(const rd_kafka_headers_t *hdrs, size_t idx, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, size_t idx, const char **namep, const void **valuep, size_t *sizep);
              +typedef struct rd_kafka_message_s {
              +  rd_kafka_resp_err_t err;
              +  rd_kafka_topic_t *rkt;
              +  int32_t partition;
              +  void *payload;
              +  size_t len;
              +  void *key;
              +  size_t key_len;
              +  int64_t offset;
              +  void *_private;
              +} rd_kafka_message_t;
              +void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);
              +const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage);
              +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, rd_kafka_timestamp_type_t *tstype);
              +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage);
              +int32_t rd_kafka_message_broker_id(const rd_kafka_message_t *rkmessage);
              +rd_kafka_resp_err_t rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +rd_kafka_resp_err_t rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t *hdrs);
              +size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs);
              +typedef enum {
              +  RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0,
              +  RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1,
              +  RD_KAFKA_MSG_STATUS_PERSISTED = 2,
              +} rd_kafka_msg_status_t;
              +rd_kafka_msg_status_t rd_kafka_message_status(const rd_kafka_message_t *rkmessage);
              +int32_t rd_kafka_message_leader_epoch(const rd_kafka_message_t *rkmessage);
              +typedef enum {
              +  RD_KAFKA_CONF_UNKNOWN = (- 2),
              +  RD_KAFKA_CONF_INVALID = (- 1),
              +  RD_KAFKA_CONF_OK = 0,
              +} rd_kafka_conf_res_t;
              +rd_kafka_conf_t *rd_kafka_conf_new(void);
              +void rd_kafka_conf_destroy(rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, size_t filter_cnt, const char **filter);
              +const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk);
              +rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events);
              +void rd_kafka_conf_set_background_event_cb(rd_kafka_conf_t *conf, void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque));
              +void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, void (*dr_cb)(rd_kafka_t *rk, void *payload, size_t len, rd_kafka_resp_err_t err, void *opaque, void *msg_opaque));
              +void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, void (*dr_msg_cb)(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_consume_cb(rd_kafka_conf_t *conf, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_rebalance_cb(rd_kafka_conf_t *conf, void (*rebalance_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque));
              +void rd_kafka_conf_set_offset_commit_cb(rd_kafka_conf_t *conf, void (*offset_commit_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque));
              +void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, void (*error_cb)(rd_kafka_t *rk, int err, const char *reason, void *opaque));
              +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, void (*throttle_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque));
              +void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, void (*log_cb)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque));
              +void rd_kafka_conf_set_oauthbearer_token_refresh_cb(rd_kafka_conf_t *conf, void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, const char *oauthbearer_config, void *opaque));
              +void rd_kafka_conf_enable_sasl_queue(rd_kafka_conf_t *conf, int enable);
              +void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, int (*socket_cb)(int domain, int type, int protocol, void *opaque));
              +void rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, int (*connect_cb)(int sockfd, const struct sockaddr *addr, int addrlen, const char *id, void *opaque));
              +void rd_kafka_conf_set_closesocket_cb(rd_kafka_conf_t *conf, int (*closesocket_cb)(int sockfd, void *opaque));
              +struct addrinfo;
              +void rd_kafka_conf_set_resolve_cb(rd_kafka_conf_t *conf, int (*resolve_cb)(const char *node, const char *service, const struct addrinfo *hints, struct addrinfo **res, void *opaque));
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb(rd_kafka_conf_t *conf, int (*ssl_cert_verify_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int *x509_error, int depth, const char *buf, size_t size, char *errstr, size_t errstr_size, void *opaque));
              +typedef enum rd_kafka_cert_type_t {
              +  RD_KAFKA_CERT_PUBLIC_KEY,
              +  RD_KAFKA_CERT_PRIVATE_KEY,
              +  RD_KAFKA_CERT_CA,
              +  RD_KAFKA_CERT__CNT,
              +} rd_kafka_cert_type_t;
              +typedef enum rd_kafka_cert_enc_t {
              +  RD_KAFKA_CERT_ENC_PKCS12,
              +  RD_KAFKA_CERT_ENC_DER,
              +  RD_KAFKA_CERT_ENC_PEM,
              +  RD_KAFKA_CERT_ENC__CNT,
              +} rd_kafka_cert_enc_t;
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, rd_kafka_cert_type_t cert_type, rd_kafka_cert_enc_t cert_enc, const void *buffer, size_t size, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_engine_callback_data(rd_kafka_conf_t *conf, void *callback_data);
              +void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque);
              +void *rd_kafka_opaque(const rd_kafka_t *rk);
              +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, rd_kafka_topic_conf_t *tconf);
              +rd_kafka_topic_conf_t *rd_kafka_conf_get_default_topic_conf(rd_kafka_conf_t *conf);
              +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp);
              +const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, size_t *cntp);
              +void rd_kafka_conf_dump_free(const char **arr, size_t cnt);
              +void rd_kafka_conf_properties_show(FILE *fp);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf);
              +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk);
              +void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *rkt_opaque);
              +void rd_kafka_topic_conf_set_partitioner_cb(rd_kafka_topic_conf_t *topic_conf, int32_t (*partitioner)(const rd_kafka_topic_t *rkt, const void *keydata, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque));
              +void rd_kafka_topic_conf_set_msg_order_cmp(rd_kafka_topic_conf_t *topic_conf, int (*msg_order_cmp)(const rd_kafka_message_t *a, const rd_kafka_message_t *b));
              +int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, int32_t partition);
              +int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, char *errstr, size_t errstr_size);
              +void rd_kafka_destroy(rd_kafka_t *rk);
              +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags);
              +const char *rd_kafka_name(const rd_kafka_t *rk);
              +rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk);
              +char *rd_kafka_memberid(const rd_kafka_t *rk);
              +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms);
              +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, rd_kafka_topic_conf_t *conf);
              +void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt);
              +const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt);
              +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt);
              +int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms);
              +void rd_kafka_yield(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_pause_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_resume_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high);
              +rd_kafka_resp_err_t rd_kafka_offsets_for_times(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets, int timeout_ms);
              +void *rd_kafka_mem_calloc(rd_kafka_t *rk, size_t num, size_t size);
              +void *rd_kafka_mem_malloc(rd_kafka_t *rk, size_t size);
              +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr);
              +rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk);
              +void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu);
              +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_sasl(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_sasl_background_callbacks_enable(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_sasl_set_credentials(rd_kafka_t *rk, const char *username, const char *password);
              +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, const char *topic, int32_t partition);
              +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk);
              +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst);
              +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu);
              +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, int fd, const void *payload, size_t size);
              +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, void (*event_cb)(rd_kafka_t *rk, void *qev_opaque), void *qev_opaque);
              +void rd_kafka_queue_yield(rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
              +rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_seek_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms);
              +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, int timeout_ms);
              +ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +rd_kafka_resp_err_t rd_kafka_offsets_store(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets);
              +rd_kafka_error_t *rd_kafka_offset_store_message(rd_kafka_message_t *rkmessage);
              +rd_kafka_resp_err_t rd_kafka_subscribe(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *topics);
              +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics);
              +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_consumer_close_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consumer_closed(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_incremental_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_error_t *rd_kafka_incremental_unassign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +const char *rd_kafka_rebalance_protocol(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_assignment(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **partitions);
              +int rd_kafka_assignment_lost(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_commit(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_message(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_queue(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_queue_t *rkqu, void (*cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_committed(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata(rd_kafka_t *rk);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new(const char *group_id);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new_with_genid(const char *group_id, int32_t generation_id, const char *member_id, const char *group_instance_id);
              +void rd_kafka_consumer_group_metadata_destroy(rd_kafka_consumer_group_metadata_t *);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_write(const rd_kafka_consumer_group_metadata_t *cgmd, void **bufferp, size_t *sizep);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_read(rd_kafka_consumer_group_metadata_t **cgmdp, const void *buffer, size_t size);
              +int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, void *payload, size_t len, const void *key, size_t keylen, void *msg_opaque);
              +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...);
              +rd_kafka_error_t *rd_kafka_produceva(rd_kafka_t *rk, const rd_kafka_vu_t *vus, size_t cnt);
              +int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, rd_kafka_message_t *rkmessages, int message_cnt);
              +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags);
              +typedef struct rd_kafka_metadata_broker {
              +  int32_t id;
              +  char *host;
              +  int port;
              +} rd_kafka_metadata_broker_t;
              +typedef struct rd_kafka_metadata_partition {
              +  int32_t id;
              +  rd_kafka_resp_err_t err;
              +  int32_t leader;
              +  int replica_cnt;
              +  int32_t *replicas;
              +  int isr_cnt;
              +  int32_t *isrs;
              +} rd_kafka_metadata_partition_t;
              +typedef struct rd_kafka_metadata_topic {
              +  char *topic;
              +  int partition_cnt;
              +  struct rd_kafka_metadata_partition *partitions;
              +  rd_kafka_resp_err_t err;
              +} rd_kafka_metadata_topic_t;
              +typedef struct rd_kafka_metadata {
              +  int broker_cnt;
              +  struct rd_kafka_metadata_broker *brokers;
              +  int topic_cnt;
              +  struct rd_kafka_metadata_topic *topics;
              +  int32_t orig_broker_id;
              +  char *orig_broker_name;
              +} rd_kafka_metadata_t;
              +rd_kafka_resp_err_t rd_kafka_metadata(rd_kafka_t *rk, int all_topics, rd_kafka_topic_t *only_rkt, const struct rd_kafka_metadata **metadatap, int timeout_ms);
              +void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata);
              +typedef struct rd_kafka_Node_s rd_kafka_Node_t;
              +int rd_kafka_Node_id(const rd_kafka_Node_t *node);
              +const char *rd_kafka_Node_host(const rd_kafka_Node_t *node);
              +uint16_t rd_kafka_Node_port(const rd_kafka_Node_t *node);
              +struct rd_kafka_group_member_info {
              +  char *member_id;
              +  char *client_id;
              +  char *client_host;
              +  void *member_metadata;
              +  int member_metadata_size;
              +  void *member_assignment;
              +  int member_assignment_size;
              +};
              +typedef enum {
              +  RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN = 0,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_PREPARING_REBALANCE = 1,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_COMPLETING_REBALANCE = 2,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_STABLE = 3,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_DEAD = 4,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY = 5,
              +  RD_KAFKA_CONSUMER_GROUP_STATE__CNT,
              +} rd_kafka_consumer_group_state_t;
              +struct rd_kafka_group_info {
              +  struct rd_kafka_metadata_broker broker;
              +  char *group;
              +  rd_kafka_resp_err_t err;
              +  char *state;
              +  char *protocol_type;
              +  char *protocol;
              +  struct rd_kafka_group_member_info *members;
              +  int member_cnt;
              +};
              +struct rd_kafka_group_list {
              +  struct rd_kafka_group_info *groups;
              +  int group_cnt;
              +};
              +rd_kafka_resp_err_t rd_kafka_list_groups(rd_kafka_t *rk, const char *group, const struct rd_kafka_group_list **grplistp, int timeout_ms);
              +const char *rd_kafka_consumer_group_state_name(rd_kafka_consumer_group_state_t state);
              +rd_kafka_consumer_group_state_t rd_kafka_consumer_group_state_code(const char *name);
              +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist);
              +int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist);
              +void rd_kafka_set_logger(rd_kafka_t *rk, void (*func)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_set_log_level(rd_kafka_t *rk, int level);
              +void rd_kafka_log_print(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +int rd_kafka_outq_len(rd_kafka_t *rk);
              +void rd_kafka_dump(FILE *fp, rd_kafka_t *rk);
              +int rd_kafka_thread_cnt(void);
              +typedef enum rd_kafka_thread_type_t {
              +  RD_KAFKA_THREAD_MAIN,
              +  RD_KAFKA_THREAD_BACKGROUND,
              +  RD_KAFKA_THREAD_BROKER,
              +} rd_kafka_thread_type_t;
              +int rd_kafka_wait_destroyed(int timeout_ms);
              +int rd_kafka_unittest(void);
              +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk);
              +typedef int rd_kafka_event_type_t;
              +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev);
              +void rd_kafka_event_destroy(rd_kafka_event_t *rkev);
              +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev);
              +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, const rd_kafka_message_t **rkmessages, size_t size);
              +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev);
              +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev);
              +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev);
              +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev);
              +int rd_kafka_event_log(rd_kafka_event_t *rkev, const char **fac, const char **str, int *level);
              +int rd_kafka_event_debug_contexts(rd_kafka_event_t *rkev, char *dst, size_t dstsize);
              +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_list_t *rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_t *rd_kafka_event_topic_partition(rd_kafka_event_t *rkev);
              +typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreateAcls_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeAcls_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteAcls_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteRecords_result_t;
              +typedef rd_kafka_event_t rd_kafka_ListConsumerGroups_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConsumerGroups_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteGroups_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteConsumerGroupOffsets_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConsumerGroupOffsets_result_t;
              +typedef rd_kafka_event_t rd_kafka_ListConsumerGroupOffsets_result_t;
              +const rd_kafka_CreateTopics_result_t *rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteTopics_result_t *rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreatePartitions_result_t *rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConfigs_result_t *rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConfigs_result_t *rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteRecords_result_t *rd_kafka_event_DeleteRecords_result(rd_kafka_event_t *rkev);
              +const rd_kafka_ListConsumerGroups_result_t *rd_kafka_event_ListConsumerGroups_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConsumerGroups_result_t *rd_kafka_event_DescribeConsumerGroups_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteGroups_result_t *rd_kafka_event_DeleteGroups_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteConsumerGroupOffsets_result_t *rd_kafka_event_DeleteConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreateAcls_result_t *rd_kafka_event_CreateAcls_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeAcls_result_t *rd_kafka_event_DescribeAcls_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteAcls_result_t *rd_kafka_event_DeleteAcls_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConsumerGroupOffsets_result_t *rd_kafka_event_AlterConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
              +const rd_kafka_ListConsumerGroupOffsets_result_t *rd_kafka_event_ListConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
              +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms);
              +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms);
              +typedef rd_kafka_resp_err_t (rd_kafka_plugin_f_conf_init_t)(rd_kafka_conf_t *conf, void **plug_opaquep, char *errstr, size_t errstr_size);
              +typedef rd_kafka_conf_res_t (rd_kafka_interceptor_f_on_conf_set_t)(rd_kafka_conf_t *conf, const char *name, const char *val, char *errstr, size_t errstr_size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_dup_t)(rd_kafka_conf_t *new_conf, const rd_kafka_conf_t *old_conf, size_t filter_cnt, const char **filter, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_destroy_t)(void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_new_t)(rd_kafka_t *rk, const rd_kafka_conf_t *conf, void *ic_opaque, char *errstr, size_t errstr_size);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_send_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_acknowledgement_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_consume_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_commit_t)(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_request_sent_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_response_received_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, int64_t rtt, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_start_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_exit_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_broker_state_change_t)(rd_kafka_t *rk, int32_t broker_id, const char *secproto, const char *name, int port, const char *state, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_new_t *on_new, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_destroy_t *on_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_send_t *on_send, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_consume_t *on_consume, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_commit_t *on_commit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_response_received(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_response_received_t *on_response_received, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_broker_state_change(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_broker_state_change_t *on_broker_state_change, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres);
              +const rd_kafka_error_t *rd_kafka_group_result_error(const rd_kafka_group_result_t *groupres);
              +const char *rd_kafka_group_result_name(const rd_kafka_group_result_t *groupres);
              +const rd_kafka_topic_partition_list_t *rd_kafka_group_result_partitions(const rd_kafka_group_result_t *groupres);
              +typedef enum rd_kafka_admin_op_t {
              +  RD_KAFKA_ADMIN_OP_ANY = 0,
              +  RD_KAFKA_ADMIN_OP_CREATETOPICS,
              +  RD_KAFKA_ADMIN_OP_DELETETOPICS,
              +  RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONFIGS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS,
              +  RD_KAFKA_ADMIN_OP_DELETERECORDS,
              +  RD_KAFKA_ADMIN_OP_DELETEGROUPS,
              +  RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS,
              +  RD_KAFKA_ADMIN_OP_CREATEACLS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBEACLS,
              +  RD_KAFKA_ADMIN_OP_DELETEACLS,
              +  RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS,
              +  RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS,
              +  RD_KAFKA_ADMIN_OP__CNT,
              +} rd_kafka_admin_op_t;
              +typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t;
              +rd_kafka_AdminOptions_t *rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
              +void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, int true_or_false, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, int32_t broker_id, char *errstr, size_t errstr_size);
              +rd_kafka_error_t *rd_kafka_AdminOptions_set_require_stable_offsets(rd_kafka_AdminOptions_t *options, int true_or_false);
              +rd_kafka_error_t *rd_kafka_AdminOptions_set_match_consumer_group_states(rd_kafka_AdminOptions_t *options, const rd_kafka_consumer_group_state_t *consumer_group_states, size_t consumer_group_states_cnt);
              +void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, void *ev_opaque);
              +typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t;
              +rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, int num_partitions, int replication_factor, char *errstr, size_t errstr_size);
              +void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic);
              +void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, int32_t partition, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, const char *name, const char *value);
              +void rd_kafka_CreateTopics(rd_kafka_t *rk, rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(const rd_kafka_CreateTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
              +rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic);
              +void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic);
              +void rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt);
              +void rd_kafka_DeleteTopics(rd_kafka_t *rk, rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(const rd_kafka_DeleteTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t;
              +rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new(const char *topic, size_t new_total_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts);
              +void rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment(rd_kafka_NewPartitions_t *new_parts, int32_t new_partition_idx, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_CreatePartitions(rd_kafka_t *rk, rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreatePartitions_result_topics(const rd_kafka_CreatePartitions_result_t *result, size_t *cntp);
              +typedef enum rd_kafka_ConfigSource_t {
              +  RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
              +  RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4,
              +  RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5,
              +  RD_KAFKA_CONFIG_SOURCE__CNT,
              +} rd_kafka_ConfigSource_t;
              +const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource);
              +typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t;
              +const char *rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry);
              +const char *rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry);
              +rd_kafka_ConfigSource_t rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, size_t *cntp);
              +typedef enum rd_kafka_ResourceType_t {
              +  RD_KAFKA_RESOURCE_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_ANY = 1,
              +  RD_KAFKA_RESOURCE_TOPIC = 2,
              +  RD_KAFKA_RESOURCE_GROUP = 3,
              +  RD_KAFKA_RESOURCE_BROKER = 4,
              +  RD_KAFKA_RESOURCE__CNT,
              +} rd_kafka_ResourceType_t;
              +typedef enum rd_kafka_ResourcePatternType_t {
              +  RD_KAFKA_RESOURCE_PATTERN_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_PATTERN_ANY = 1,
              +  RD_KAFKA_RESOURCE_PATTERN_MATCH = 2,
              +  RD_KAFKA_RESOURCE_PATTERN_LITERAL = 3,
              +  RD_KAFKA_RESOURCE_PATTERN_PREFIXED = 4,
              +  RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT,
              +} rd_kafka_ResourcePatternType_t;
              +const char *rd_kafka_ResourcePatternType_name(rd_kafka_ResourcePatternType_t resource_pattern_type);
              +const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype);
              +typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
              +rd_kafka_ConfigResource_t *rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, const char *resname);
              +void rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config);
              +void rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, size_t config_cnt);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, const char *name, const char *value);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, size_t *cntp);
              +rd_kafka_ResourceType_t rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config);
              +void rd_kafka_AlterConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_AlterConfigs_result_resources(const rd_kafka_AlterConfigs_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_DescribeConfigs_result_resources(const rd_kafka_DescribeConfigs_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteRecords_s rd_kafka_DeleteRecords_t;
              +rd_kafka_DeleteRecords_t *rd_kafka_DeleteRecords_new(const rd_kafka_topic_partition_list_t *before_offsets);
              +void rd_kafka_DeleteRecords_destroy(rd_kafka_DeleteRecords_t *del_records);
              +void rd_kafka_DeleteRecords_destroy_array(rd_kafka_DeleteRecords_t **del_records, size_t del_record_cnt);
              +void rd_kafka_DeleteRecords(rd_kafka_t *rk, rd_kafka_DeleteRecords_t **del_records, size_t del_record_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_partition_list_t *rd_kafka_DeleteRecords_result_offsets(const rd_kafka_DeleteRecords_result_t *result);
              +typedef struct rd_kafka_ConsumerGroupListing_s rd_kafka_ConsumerGroupListing_t;
              +typedef struct rd_kafka_ListConsumerGroupsResult_s rd_kafka_ListConsumerGroupsResult_t;
              +void rd_kafka_ListConsumerGroups(rd_kafka_t *rk, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const char *rd_kafka_ConsumerGroupListing_group_id(const rd_kafka_ConsumerGroupListing_t *grplist);
              +int rd_kafka_ConsumerGroupListing_is_simple_consumer_group(const rd_kafka_ConsumerGroupListing_t *grplist);
              +rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupListing_state(const rd_kafka_ConsumerGroupListing_t *grplist);
              +const rd_kafka_ConsumerGroupListing_t **rd_kafka_ListConsumerGroups_result_valid(const rd_kafka_ListConsumerGroups_result_t *result, size_t *cntp);
              +const rd_kafka_error_t **rd_kafka_ListConsumerGroups_result_errors(const rd_kafka_ListConsumerGroups_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_ConsumerGroupDescription_s rd_kafka_ConsumerGroupDescription_t;
              +typedef struct rd_kafka_MemberDescription_s rd_kafka_MemberDescription_t;
              +typedef struct rd_kafka_MemberAssignment_s rd_kafka_MemberAssignment_t;
              +void rd_kafka_DescribeConsumerGroups(rd_kafka_t *rk, const char **groups, size_t groups_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConsumerGroupDescription_t **rd_kafka_DescribeConsumerGroups_result_groups(const rd_kafka_DescribeConsumerGroups_result_t *result, size_t *cntp);
              +const char *rd_kafka_ConsumerGroupDescription_group_id(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +const rd_kafka_error_t *rd_kafka_ConsumerGroupDescription_error(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +int rd_kafka_ConsumerGroupDescription_is_simple_consumer_group(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +const char *rd_kafka_ConsumerGroupDescription_partition_assignor(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupDescription_state(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +const rd_kafka_Node_t *rd_kafka_ConsumerGroupDescription_coordinator(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +size_t rd_kafka_ConsumerGroupDescription_member_count(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +const rd_kafka_MemberDescription_t *rd_kafka_ConsumerGroupDescription_member(const rd_kafka_ConsumerGroupDescription_t *grpdesc, size_t idx);
              +const char *rd_kafka_MemberDescription_client_id(const rd_kafka_MemberDescription_t *member);
              +const char *rd_kafka_MemberDescription_group_instance_id(const rd_kafka_MemberDescription_t *member);
              +const char *rd_kafka_MemberDescription_consumer_id(const rd_kafka_MemberDescription_t *member);
              +const char *rd_kafka_MemberDescription_host(const rd_kafka_MemberDescription_t *member);
              +const rd_kafka_MemberAssignment_t *rd_kafka_MemberDescription_assignment(const rd_kafka_MemberDescription_t *member);
              +const rd_kafka_topic_partition_list_t *rd_kafka_MemberAssignment_partitions(const rd_kafka_MemberAssignment_t *assignment);
              +typedef struct rd_kafka_DeleteGroup_s rd_kafka_DeleteGroup_t;
              +rd_kafka_DeleteGroup_t *rd_kafka_DeleteGroup_new(const char *group);
              +void rd_kafka_DeleteGroup_destroy(rd_kafka_DeleteGroup_t *del_group);
              +void rd_kafka_DeleteGroup_destroy_array(rd_kafka_DeleteGroup_t **del_groups, size_t del_group_cnt);
              +void rd_kafka_DeleteGroups(rd_kafka_t *rk, rd_kafka_DeleteGroup_t **del_groups, size_t del_group_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_DeleteGroups_result_groups(const rd_kafka_DeleteGroups_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_ListConsumerGroupOffsets_s rd_kafka_ListConsumerGroupOffsets_t;
              +rd_kafka_ListConsumerGroupOffsets_t *rd_kafka_ListConsumerGroupOffsets_new(const char *group_id, const rd_kafka_topic_partition_list_t *partitions);
              +void rd_kafka_ListConsumerGroupOffsets_destroy(rd_kafka_ListConsumerGroupOffsets_t *list_grpoffsets);
              +void rd_kafka_ListConsumerGroupOffsets_destroy_array(rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets, size_t list_grpoffset_cnt);
              +void rd_kafka_ListConsumerGroupOffsets(rd_kafka_t *rk, rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets, size_t list_grpoffsets_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_ListConsumerGroupOffsets_result_groups(const rd_kafka_ListConsumerGroupOffsets_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_AlterConsumerGroupOffsets_s rd_kafka_AlterConsumerGroupOffsets_t;
              +rd_kafka_AlterConsumerGroupOffsets_t *rd_kafka_AlterConsumerGroupOffsets_new(const char *group_id, const rd_kafka_topic_partition_list_t *partitions);
              +void rd_kafka_AlterConsumerGroupOffsets_destroy(rd_kafka_AlterConsumerGroupOffsets_t *alter_grpoffsets);
              +void rd_kafka_AlterConsumerGroupOffsets_destroy_array(rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets, size_t alter_grpoffset_cnt);
              +void rd_kafka_AlterConsumerGroupOffsets(rd_kafka_t *rk, rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets, size_t alter_grpoffsets_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_AlterConsumerGroupOffsets_result_groups(const rd_kafka_AlterConsumerGroupOffsets_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteConsumerGroupOffsets_s rd_kafka_DeleteConsumerGroupOffsets_t;
              +rd_kafka_DeleteConsumerGroupOffsets_t *rd_kafka_DeleteConsumerGroupOffsets_new(const char *group, const rd_kafka_topic_partition_list_t *partitions);
              +void rd_kafka_DeleteConsumerGroupOffsets_destroy(rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets);
              +void rd_kafka_DeleteConsumerGroupOffsets_destroy_array(rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, size_t del_grpoffset_cnt);
              +void rd_kafka_DeleteConsumerGroupOffsets(rd_kafka_t *rk, rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, size_t del_grpoffsets_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_DeleteConsumerGroupOffsets_result_groups(const rd_kafka_DeleteConsumerGroupOffsets_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_AclBinding_s rd_kafka_AclBinding_t;
              +typedef rd_kafka_AclBinding_t rd_kafka_AclBindingFilter_t;
              +const rd_kafka_error_t *rd_kafka_acl_result_error(const rd_kafka_acl_result_t *aclres);
              +typedef enum rd_kafka_AclOperation_t {
              +  RD_KAFKA_ACL_OPERATION_UNKNOWN = 0,
              +  RD_KAFKA_ACL_OPERATION_ANY = 1,
              +  RD_KAFKA_ACL_OPERATION_ALL = 2,
              +  RD_KAFKA_ACL_OPERATION_READ = 3,
              +  RD_KAFKA_ACL_OPERATION_WRITE = 4,
              +  RD_KAFKA_ACL_OPERATION_CREATE = 5,
              +  RD_KAFKA_ACL_OPERATION_DELETE = 6,
              +  RD_KAFKA_ACL_OPERATION_ALTER = 7,
              +  RD_KAFKA_ACL_OPERATION_DESCRIBE = 8,
              +  RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION = 9,
              +  RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS = 10,
              +  RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS = 11,
              +  RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE = 12,
              +  RD_KAFKA_ACL_OPERATION__CNT,
              +} rd_kafka_AclOperation_t;
              +const char *rd_kafka_AclOperation_name(rd_kafka_AclOperation_t acl_operation);
              +typedef enum rd_kafka_AclPermissionType_t {
              +  RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN = 0,
              +  RD_KAFKA_ACL_PERMISSION_TYPE_ANY = 1,
              +  RD_KAFKA_ACL_PERMISSION_TYPE_DENY = 2,
              +  RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW = 3,
              +  RD_KAFKA_ACL_PERMISSION_TYPE__CNT,
              +} rd_kafka_AclPermissionType_t;
              +const char *rd_kafka_AclPermissionType_name(rd_kafka_AclPermissionType_t acl_permission_type);
              +rd_kafka_AclBinding_t *rd_kafka_AclBinding_new(rd_kafka_ResourceType_t restype, const char *name, rd_kafka_ResourcePatternType_t resource_pattern_type, const char *principal, const char *host, rd_kafka_AclOperation_t operation, rd_kafka_AclPermissionType_t permission_type, char *errstr, size_t errstr_size);
              +rd_kafka_AclBindingFilter_t *rd_kafka_AclBindingFilter_new(rd_kafka_ResourceType_t restype, const char *name, rd_kafka_ResourcePatternType_t resource_pattern_type, const char *principal, const char *host, rd_kafka_AclOperation_t operation, rd_kafka_AclPermissionType_t permission_type, char *errstr, size_t errstr_size);
              +rd_kafka_ResourceType_t rd_kafka_AclBinding_restype(const rd_kafka_AclBinding_t *acl);
              +const char *rd_kafka_AclBinding_name(const rd_kafka_AclBinding_t *acl);
              +const char *rd_kafka_AclBinding_principal(const rd_kafka_AclBinding_t *acl);
              +const char *rd_kafka_AclBinding_host(const rd_kafka_AclBinding_t *acl);
              +rd_kafka_AclOperation_t rd_kafka_AclBinding_operation(const rd_kafka_AclBinding_t *acl);
              +rd_kafka_AclPermissionType_t rd_kafka_AclBinding_permission_type(const rd_kafka_AclBinding_t *acl);
              +rd_kafka_ResourcePatternType_t rd_kafka_AclBinding_resource_pattern_type(const rd_kafka_AclBinding_t *acl);
              +const rd_kafka_error_t *rd_kafka_AclBinding_error(const rd_kafka_AclBinding_t *acl);
              +void rd_kafka_AclBinding_destroy(rd_kafka_AclBinding_t *acl_binding);
              +void rd_kafka_AclBinding_destroy_array(rd_kafka_AclBinding_t **acl_bindings, size_t acl_bindings_cnt);
              +const rd_kafka_acl_result_t **rd_kafka_CreateAcls_result_acls(const rd_kafka_CreateAcls_result_t *result, size_t *cntp);
              +void rd_kafka_CreateAcls(rd_kafka_t *rk, rd_kafka_AclBinding_t **new_acls, size_t new_acls_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_AclBinding_t **rd_kafka_DescribeAcls_result_acls(const rd_kafka_DescribeAcls_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeAcls(rd_kafka_t *rk, rd_kafka_AclBindingFilter_t *acl_filter, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +typedef struct rd_kafka_DeleteAcls_result_response_s rd_kafka_DeleteAcls_result_response_t;
              +const rd_kafka_DeleteAcls_result_response_t **rd_kafka_DeleteAcls_result_responses(const rd_kafka_DeleteAcls_result_t *result, size_t *cntp);
              +const rd_kafka_error_t *rd_kafka_DeleteAcls_result_response_error(const rd_kafka_DeleteAcls_result_response_t *result_response);
              +const rd_kafka_AclBinding_t **rd_kafka_DeleteAcls_result_response_matching_acls(const rd_kafka_DeleteAcls_result_response_t *result_response, size_t *matching_acls_cntp);
              +void rd_kafka_DeleteAcls(rd_kafka_t *rk, rd_kafka_AclBindingFilter_t **del_acls, size_t del_acls_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token(rd_kafka_t *rk, const char *token_value, int64_t md_lifetime_ms, const char *md_principal_name, const char **extensions, size_t extension_size, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk, const char *errstr);
              +rd_kafka_error_t *rd_kafka_init_transactions(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_send_offsets_to_transaction(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, const rd_kafka_consumer_group_metadata_t *cgmetadata, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_commit_transaction(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_abort_transaction(rd_kafka_t *rk, int timeout_ms);
              +typedef struct rd_kafka_mock_cluster_s rd_kafka_mock_cluster_t;
              +rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new(rd_kafka_t *rk, int broker_cnt);
              +void rd_kafka_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_t *rd_kafka_mock_cluster_handle(const rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_mock_cluster_t *rd_kafka_handle_mock_cluster(const rd_kafka_t *rk);
              +const char *rd_kafka_mock_cluster_bootstraps(const rd_kafka_mock_cluster_t *mcluster);
              +void rd_kafka_mock_clear_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey);
              +void rd_kafka_mock_push_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, ...);
              +void rd_kafka_mock_push_request_errors_array(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, const rd_kafka_resp_err_t *errors);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_push_request_error_rtts(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int16_t ApiKey, size_t cnt, ...);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_error_stack_cnt(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int16_t ApiKey, size_t *cntp);
              +void rd_kafka_mock_topic_set_error(rd_kafka_mock_cluster_t *mcluster, const char *topic, rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_mock_topic_create(rd_kafka_mock_cluster_t *mcluster, const char *topic, int partition_cnt, int replication_factor);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_leader(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int64_t lo, int64_t hi);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_down(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_up(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rtt(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int rtt_ms);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rack(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, const char *rack);
              +rd_kafka_resp_err_t rd_kafka_mock_coordinator_set(rd_kafka_mock_cluster_t *mcluster, const char *key_type, const char *key, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_set_apiversion(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, int16_t MinVersion, int16_t MaxVersion);
              +'
              +
              +rdkafka.h, rdkafka_mock.h

              +
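              The cdef block ending above is the set of librdkafka C declarations that the binding hands to PHP's FFI extension. As a rough sketch of how such a definition string is typically consumed — the constant holder \RdKafka\FFI\Library::RD_KAFKA_CDEF and the shared-library name are assumptions for illustration, not the library's documented bootstrap code:

              <?php
              // Minimal sketch: bind a librdkafka cdef string with PHP FFI.
              // \RdKafka\FFI\Library::RD_KAFKA_CDEF and 'librdkafka.so.1' are assumed names.
              $ffi = \FFI::cdef(\RdKafka\FFI\Library::RD_KAFKA_CDEF, 'librdkafka.so.1');

              // Any declaration from the cdef is now callable, e.g. the runtime version string:
              echo \FFI::string($ffi->rd_kafka_version_str()), PHP_EOL;

              FFI::cdef() parses the declarations once and binds them against the named shared object; every rd_kafka_* function listed in the constant then becomes a method on the returned handle.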

              RD_KAFKA_VERSION

              +
              public RD_KAFKA_VERSION = 33685759
              +
              +

              librdkafka version

              +

              Interpreted as hex MM.mm.rr.xx:

              +
              • MM = Major
              • mm = minor
              • rr = revision
              • xx = pre-release id (0xff is the final release)
              +

              E.g.: 0x000801ff = 0.8.1

              +
              Remarks
              This value should only be used at compile time; for runtime version checks, use rd_kafka_version()
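              To make the hex layout concrete, here is a small decoding sketch (illustrative only; the bit layout follows the MM.mm.rr.xx description above):

              <?php
              // Decode RD_KAFKA_VERSION (33685759 = 0x020200ff) into its MM.mm.rr.xx parts.
              $version = 33685759;

              $major      = ($version >> 24) & 0xff; // MM -> 2
              $minor      = ($version >> 16) & 0xff; // mm -> 2
              $revision   = ($version >> 8)  & 0xff; // rr -> 0
              $prerelease = $version & 0xff;         // xx -> 0xff (final release)

              printf("librdkafka %d.%d.%d\n", $major, $minor, $revision);

              As the remark says, this constant reflects the headers used at build time; rd_kafka_version() reports the version actually loaded at runtime.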

              RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE

              +
              public RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76
              +
              +

              Unsupported compression type
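              Error constants like this one can be turned back into symbolic names through rd_kafka_err2name() and rd_kafka_err2str(), both declared in the cdef above. A hedged sketch, reusing the assumed FFI bootstrap from the earlier example:

              <?php
              // Sketch: resolve a numeric error code to its name and description.
              // The constant holder and library name are assumptions (see earlier sketch).
              $ffi = \FFI::cdef(\RdKafka\FFI\Library::RD_KAFKA_CDEF, 'librdkafka.so.1');

              $code = 76; // RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE
              echo \FFI::string($ffi->rd_kafka_err2name($code)), PHP_EOL; // e.g. "UNSUPPORTED_COMPRESSION_TYPE"
              echo \FFI::string($ffi->rd_kafka_err2str($code)), PHP_EOL;  // short human-readable description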


              RD_KAFKA_RESP_ERR_END_ALL

              +

              public RD_KAFKA_RESP_ERR_END_ALL = 98
              +
              +enum rd_kafka_resp_err_t

              +

              RD_KAFKA_ADMIN_OP__CNT

              +
              public RD_KAFKA_ADMIN_OP__CNT = 19
              +
              +

              Number of ops defined


              RD_KAFKA_CDEF

              +

              public RD_KAFKA_CDEF = 'typedef long int ssize_t;
              +typedef struct _IO_FILE FILE;
              +typedef long int mode_t;
              +typedef signed int int16_t;
              +typedef unsigned int uint16_t;
              +typedef signed int int32_t;
              +typedef signed long int int64_t;
              +int rd_kafka_version(void);
              +const char *rd_kafka_version_str(void);
              +typedef enum rd_kafka_type_t {
              +  RD_KAFKA_PRODUCER,
              +  RD_KAFKA_CONSUMER,
              +} rd_kafka_type_t;
              +typedef enum rd_kafka_timestamp_type_t {
              +  RD_KAFKA_TIMESTAMP_NOT_AVAILABLE,
              +  RD_KAFKA_TIMESTAMP_CREATE_TIME,
              +  RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME,
              +} rd_kafka_timestamp_type_t;
              +const char *rd_kafka_get_debug_contexts(void);
              +typedef struct rd_kafka_s rd_kafka_t;
              +typedef struct rd_kafka_topic_s rd_kafka_topic_t;
              +typedef struct rd_kafka_conf_s rd_kafka_conf_t;
              +typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t;
              +typedef struct rd_kafka_queue_s rd_kafka_queue_t;
              +typedef struct rd_kafka_op_s rd_kafka_event_t;
              +typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t;
              +typedef struct rd_kafka_consumer_group_metadata_s rd_kafka_consumer_group_metadata_t;
              +typedef struct rd_kafka_error_s {
              +  unsigned int code;
              +  char *errstr;
              +  unsigned char fatal;
              +  unsigned char retriable;
              +  unsigned char txn_requires_abort;
              +} rd_kafka_error_t;
              +typedef struct rd_kafka_headers_s rd_kafka_headers_t;
              +typedef struct rd_kafka_group_result_s rd_kafka_group_result_t;
              +typedef struct rd_kafka_acl_result_s rd_kafka_acl_result_t;
              +typedef enum {
              +  RD_KAFKA_RESP_ERR__BEGIN = (- 200),
              +  RD_KAFKA_RESP_ERR__BAD_MSG = (- 199),
              +  RD_KAFKA_RESP_ERR__BAD_COMPRESSION = (- 198),
              +  RD_KAFKA_RESP_ERR__DESTROY = (- 197),
              +  RD_KAFKA_RESP_ERR__FAIL = (- 196),
              +  RD_KAFKA_RESP_ERR__TRANSPORT = (- 195),
              +  RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = (- 194),
              +  RD_KAFKA_RESP_ERR__RESOLVE = (- 193),
              +  RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = (- 192),
              +  RD_KAFKA_RESP_ERR__PARTITION_EOF = (- 191),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = (- 190),
              +  RD_KAFKA_RESP_ERR__FS = (- 189),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = (- 188),
              +  RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = (- 187),
              +  RD_KAFKA_RESP_ERR__INVALID_ARG = (- 186),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT = (- 185),
              +  RD_KAFKA_RESP_ERR__QUEUE_FULL = (- 184),
              +  RD_KAFKA_RESP_ERR__ISR_INSUFF = (- 183),
              +  RD_KAFKA_RESP_ERR__NODE_UPDATE = (- 182),
              +  RD_KAFKA_RESP_ERR__SSL = (- 181),
              +  RD_KAFKA_RESP_ERR__WAIT_COORD = (- 180),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = (- 179),
              +  RD_KAFKA_RESP_ERR__IN_PROGRESS = (- 178),
              +  RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = (- 177),
              +  RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = (- 176),
              +  RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = (- 175),
              +  RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = (- 174),
              +  RD_KAFKA_RESP_ERR__CONFLICT = (- 173),
              +  RD_KAFKA_RESP_ERR__STATE = (- 172),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = (- 171),
              +  RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = (- 170),
              +  RD_KAFKA_RESP_ERR__AUTHENTICATION = (- 169),
              +  RD_KAFKA_RESP_ERR__NO_OFFSET = (- 168),
              +  RD_KAFKA_RESP_ERR__OUTDATED = (- 167),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = (- 166),
              +  RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = (- 165),
              +  RD_KAFKA_RESP_ERR__WAIT_CACHE = (- 164),
              +  RD_KAFKA_RESP_ERR__INTR = (- 163),
              +  RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = (- 162),
              +  RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = (- 161),
              +  RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = (- 160),
              +  RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = (- 159),
              +  RD_KAFKA_RESP_ERR__PARTIAL = (- 158),
              +  RD_KAFKA_RESP_ERR__READ_ONLY = (- 157),
              +  RD_KAFKA_RESP_ERR__NOENT = (- 156),
              +  RD_KAFKA_RESP_ERR__UNDERFLOW = (- 155),
              +  RD_KAFKA_RESP_ERR__INVALID_TYPE = (- 154),
              +  RD_KAFKA_RESP_ERR__RETRY = (- 153),
              +  RD_KAFKA_RESP_ERR__PURGE_QUEUE = (- 152),
              +  RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = (- 151),
              +  RD_KAFKA_RESP_ERR__FATAL = (- 150),
              +  RD_KAFKA_RESP_ERR__INCONSISTENT = (- 149),
              +  RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = (- 148),
              +  RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = (- 147),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_BROKER = (- 146),
              +  RD_KAFKA_RESP_ERR__NOT_CONFIGURED = (- 145),
              +  RD_KAFKA_RESP_ERR__FENCED = (- 144),
              +  RD_KAFKA_RESP_ERR__APPLICATION = (- 143),
              +  RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST = (- 142),
              +  RD_KAFKA_RESP_ERR__NOOP = (- 141),
              +  RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET = (- 140),
              +  RD_KAFKA_RESP_ERR__LOG_TRUNCATION = (- 139),
              +  RD_KAFKA_RESP_ERR__END = (- 100),
              +  RD_KAFKA_RESP_ERR_UNKNOWN = (- 1),
              +  RD_KAFKA_RESP_ERR_NO_ERROR = 0,
              +  RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG = 2,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4,
              +  RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5,
              +  RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6,
              +  RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7,
              +  RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8,
              +  RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9,
              +  RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10,
              +  RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11,
              +  RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12,
              +  RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS = 14,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE = 15,
              +  RD_KAFKA_RESP_ERR_NOT_COORDINATOR = 16,
              +  RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17,
              +  RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
              +  RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25,
              +  RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26,
              +  RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27,
              +  RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
              +  RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29,
              +  RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30,
              +  RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
              +  RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35,
              +  RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36,
              +  RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39,
              +  RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40,
              +  RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
              +  RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44,
              +  RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47,
              +  RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49,
              +  RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50,
              +  RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51,
              +  RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52,
              +  RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
              +  RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54,
              +  RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55,
              +  RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56,
              +  RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57,
              +  RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59,
              +  RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66,
              +  RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67,
              +  RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68,
              +  RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69,
              +  RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70,
              +  RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71,
              +  RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72,
              +  RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73,
              +  RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76,
              +  RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77,
              +  RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78,
              +  RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79,
              +  RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80,
              +  RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81,
              +  RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID = 82,
              +  RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE = 83,
              +  RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED = 84,
              +  RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS = 85,
              +  RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC = 86,
              +  RD_KAFKA_RESP_ERR_INVALID_RECORD = 87,
              +  RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT = 88,
              +  RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED = 89,
              +  RD_KAFKA_RESP_ERR_PRODUCER_FENCED = 90,
              +  RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND = 91,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE = 92,
              +  RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL = 93,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET = 94,
              +  RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION = 95,
              +  RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED = 96,
              +  RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE = 97,
              +  RD_KAFKA_RESP_ERR_END_ALL,
              +} rd_kafka_resp_err_t;
              +struct rd_kafka_err_desc {
              +  rd_kafka_resp_err_t code;
              +  char *name;
              +  char *desc;
              +};
              +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, size_t *cntp);
              +const char *rd_kafka_err2str(rd_kafka_resp_err_t err);
              +const char *rd_kafka_err2name(rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_last_error(void);
              +rd_kafka_resp_err_t rd_kafka_errno2err(int errnox);
              +int rd_kafka_errno(void);
              +rd_kafka_resp_err_t rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason);
              +rd_kafka_resp_err_t rd_kafka_error_code(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_name(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_string(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_fatal(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_retriable(const rd_kafka_error_t *error);
              +int rd_kafka_error_txn_requires_abort(const rd_kafka_error_t *error);
              +void rd_kafka_error_destroy(rd_kafka_error_t *error);
              +rd_kafka_error_t *rd_kafka_error_new(rd_kafka_resp_err_t code, const char *fmt, ...);
              +typedef struct rd_kafka_topic_partition_s {
              +  char *topic;
              +  int32_t partition;
              +  int64_t offset;
              +  void *metadata;
              +  size_t metadata_size;
              +  void *opaque;
              +  rd_kafka_resp_err_t err;
              +  void *_private;
              +} rd_kafka_topic_partition_t;
              +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar);
              +void rd_kafka_topic_partition_set_leader_epoch(rd_kafka_topic_partition_t *rktpar, int32_t leader_epoch);
              +int32_t rd_kafka_topic_partition_get_leader_epoch(const rd_kafka_topic_partition_t *rktpar);
              +typedef struct rd_kafka_topic_partition_list_s {
              +  int cnt;
              +  int size;
              +  rd_kafka_topic_partition_t *elems;
              +} rd_kafka_topic_partition_list_t;
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size);
              +void rd_kafka_topic_partition_list_destroy(rd_kafka_topic_partition_list_t *rkparlist);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_add_range(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t start, int32_t stop);
              +int rd_kafka_topic_partition_list_del(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +int rd_kafka_topic_partition_list_del_by_idx(rd_kafka_topic_partition_list_t *rktparlist, int idx);
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src);
              +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition, int64_t offset);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(const rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_sort(rd_kafka_topic_partition_list_t *rktparlist, int (*cmp)(const void *a, const void *b, void *cmp_opaque), void *cmp_opaque);
              +typedef enum rd_kafka_vtype_t {
              +  RD_KAFKA_VTYPE_END,
              +  RD_KAFKA_VTYPE_TOPIC,
              +  RD_KAFKA_VTYPE_RKT,
              +  RD_KAFKA_VTYPE_PARTITION,
              +  RD_KAFKA_VTYPE_VALUE,
              +  RD_KAFKA_VTYPE_KEY,
              +  RD_KAFKA_VTYPE_OPAQUE,
              +  RD_KAFKA_VTYPE_MSGFLAGS,
              +  RD_KAFKA_VTYPE_TIMESTAMP,
              +  RD_KAFKA_VTYPE_HEADER,
              +  RD_KAFKA_VTYPE_HEADERS,
              +} rd_kafka_vtype_t;
              +typedef struct rd_kafka_vu_s {
              +  rd_kafka_vtype_t vtype;
              +  union {
              +    char *cstr;
              +    rd_kafka_topic_t *rkt;
              +    int i;
              +    int32_t i32;
              +    int64_t i64;
              +    struct {
              +      void *ptr;
              +      size_t size;
              +    } mem;
              +    struct {
              +      char *name;
              +      void *val;
              +      ssize_t size;
              +    } header;
              +    rd_kafka_headers_t *headers;
              +    void *ptr;
              +    char _pad[64];
              +  } u;
              +} rd_kafka_vu_t;
              +rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count);
              +void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs);
              +rd_kafka_headers_t *rd_kafka_headers_copy(const rd_kafka_headers_t *src);
              +rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, const char *name, ssize_t name_size, const void *value, ssize_t value_size);
              +rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, const char *name);
              +rd_kafka_resp_err_t rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get(const rd_kafka_headers_t *hdrs, size_t idx, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, size_t idx, const char **namep, const void **valuep, size_t *sizep);
              +typedef struct rd_kafka_message_s {
              +  rd_kafka_resp_err_t err;
              +  rd_kafka_topic_t *rkt;
              +  int32_t partition;
              +  void *payload;
              +  size_t len;
              +  void *key;
              +  size_t key_len;
              +  int64_t offset;
              +  void *_private;
              +} rd_kafka_message_t;
              +void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);
              +const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage);
              +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, rd_kafka_timestamp_type_t *tstype);
              +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage);
              +int32_t rd_kafka_message_broker_id(const rd_kafka_message_t *rkmessage);
              +rd_kafka_resp_err_t rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +rd_kafka_resp_err_t rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t *hdrs);
              +size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs);
              +typedef enum {
              +  RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0,
              +  RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1,
              +  RD_KAFKA_MSG_STATUS_PERSISTED = 2,
              +} rd_kafka_msg_status_t;
              +rd_kafka_msg_status_t rd_kafka_message_status(const rd_kafka_message_t *rkmessage);
              +int32_t rd_kafka_message_leader_epoch(const rd_kafka_message_t *rkmessage);
              +typedef enum {
              +  RD_KAFKA_CONF_UNKNOWN = (- 2),
              +  RD_KAFKA_CONF_INVALID = (- 1),
              +  RD_KAFKA_CONF_OK = 0,
              +} rd_kafka_conf_res_t;
              +rd_kafka_conf_t *rd_kafka_conf_new(void);
              +void rd_kafka_conf_destroy(rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, size_t filter_cnt, const char **filter);
              +const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk);
              +rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events);
              +void rd_kafka_conf_set_background_event_cb(rd_kafka_conf_t *conf, void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque));
              +void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, void (*dr_cb)(rd_kafka_t *rk, void *payload, size_t len, rd_kafka_resp_err_t err, void *opaque, void *msg_opaque));
              +void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, void (*dr_msg_cb)(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_consume_cb(rd_kafka_conf_t *conf, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_rebalance_cb(rd_kafka_conf_t *conf, void (*rebalance_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque));
              +void rd_kafka_conf_set_offset_commit_cb(rd_kafka_conf_t *conf, void (*offset_commit_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque));
              +void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, void (*error_cb)(rd_kafka_t *rk, int err, const char *reason, void *opaque));
              +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, void (*throttle_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque));
              +void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, void (*log_cb)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque));
              +void rd_kafka_conf_set_oauthbearer_token_refresh_cb(rd_kafka_conf_t *conf, void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, const char *oauthbearer_config, void *opaque));
              +void rd_kafka_conf_enable_sasl_queue(rd_kafka_conf_t *conf, int enable);
              +void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, int (*socket_cb)(int domain, int type, int protocol, void *opaque));
              +void rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, int (*connect_cb)(int sockfd, const struct sockaddr *addr, int addrlen, const char *id, void *opaque));
              +void rd_kafka_conf_set_closesocket_cb(rd_kafka_conf_t *conf, int (*closesocket_cb)(int sockfd, void *opaque));
              +struct addrinfo;
              +void rd_kafka_conf_set_resolve_cb(rd_kafka_conf_t *conf, int (*resolve_cb)(const char *node, const char *service, const struct addrinfo *hints, struct addrinfo **res, void *opaque));
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb(rd_kafka_conf_t *conf, int (*ssl_cert_verify_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int *x509_error, int depth, const char *buf, size_t size, char *errstr, size_t errstr_size, void *opaque));
              +typedef enum rd_kafka_cert_type_t {
              +  RD_KAFKA_CERT_PUBLIC_KEY,
              +  RD_KAFKA_CERT_PRIVATE_KEY,
              +  RD_KAFKA_CERT_CA,
              +  RD_KAFKA_CERT__CNT,
              +} rd_kafka_cert_type_t;
              +typedef enum rd_kafka_cert_enc_t {
              +  RD_KAFKA_CERT_ENC_PKCS12,
              +  RD_KAFKA_CERT_ENC_DER,
              +  RD_KAFKA_CERT_ENC_PEM,
              +  RD_KAFKA_CERT_ENC__CNT,
              +} rd_kafka_cert_enc_t;
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, rd_kafka_cert_type_t cert_type, rd_kafka_cert_enc_t cert_enc, const void *buffer, size_t size, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_engine_callback_data(rd_kafka_conf_t *conf, void *callback_data);
              +void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque);
              +void *rd_kafka_opaque(const rd_kafka_t *rk);
              +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, rd_kafka_topic_conf_t *tconf);
              +rd_kafka_topic_conf_t *rd_kafka_conf_get_default_topic_conf(rd_kafka_conf_t *conf);
              +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp);
              +const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, size_t *cntp);
              +void rd_kafka_conf_dump_free(const char **arr, size_t cnt);
              +void rd_kafka_conf_properties_show(FILE *fp);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf);
              +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk);
              +void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *rkt_opaque);
              +void rd_kafka_topic_conf_set_partitioner_cb(rd_kafka_topic_conf_t *topic_conf, int32_t (*partitioner)(const rd_kafka_topic_t *rkt, const void *keydata, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque));
              +void rd_kafka_topic_conf_set_msg_order_cmp(rd_kafka_topic_conf_t *topic_conf, int (*msg_order_cmp)(const rd_kafka_message_t *a, const rd_kafka_message_t *b));
              +int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, int32_t partition);
              +int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, char *errstr, size_t errstr_size);
              +void rd_kafka_destroy(rd_kafka_t *rk);
              +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags);
              +const char *rd_kafka_name(const rd_kafka_t *rk);
              +rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk);
              +char *rd_kafka_memberid(const rd_kafka_t *rk);
              +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms);
              +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, rd_kafka_topic_conf_t *conf);
              +void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt);
              +const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt);
              +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt);
              +int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms);
              +void rd_kafka_yield(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_pause_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_resume_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high);
              +rd_kafka_resp_err_t rd_kafka_offsets_for_times(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets, int timeout_ms);
              +void *rd_kafka_mem_calloc(rd_kafka_t *rk, size_t num, size_t size);
              +void *rd_kafka_mem_malloc(rd_kafka_t *rk, size_t size);
              +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr);
              +rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk);
              +void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu);
              +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_sasl(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_sasl_background_callbacks_enable(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_sasl_set_credentials(rd_kafka_t *rk, const char *username, const char *password);
              +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, const char *topic, int32_t partition);
              +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk);
              +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst);
              +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu);
              +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, int fd, const void *payload, size_t size);
              +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, void (*event_cb)(rd_kafka_t *rk, void *qev_opaque), void *qev_opaque);
              +void rd_kafka_queue_yield(rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
              +rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_seek_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms);
              +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, int timeout_ms);
              +ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +rd_kafka_resp_err_t rd_kafka_offsets_store(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets);
              +rd_kafka_error_t *rd_kafka_offset_store_message(rd_kafka_message_t *rkmessage);
              +rd_kafka_resp_err_t rd_kafka_subscribe(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *topics);
              +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics);
              +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_consumer_close_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consumer_closed(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_incremental_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_error_t *rd_kafka_incremental_unassign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +const char *rd_kafka_rebalance_protocol(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_assignment(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **partitions);
              +int rd_kafka_assignment_lost(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_commit(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_message(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_queue(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_queue_t *rkqu, void (*cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_committed(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata(rd_kafka_t *rk);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new(const char *group_id);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new_with_genid(const char *group_id, int32_t generation_id, const char *member_id, const char *group_instance_id);
              +void rd_kafka_consumer_group_metadata_destroy(rd_kafka_consumer_group_metadata_t *);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_write(const rd_kafka_consumer_group_metadata_t *cgmd, void **bufferp, size_t *sizep);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_read(rd_kafka_consumer_group_metadata_t **cgmdp, const void *buffer, size_t size);
              +int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, void *payload, size_t len, const void *key, size_t keylen, void *msg_opaque);
              +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...);
              +rd_kafka_error_t *rd_kafka_produceva(rd_kafka_t *rk, const rd_kafka_vu_t *vus, size_t cnt);
              +int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, rd_kafka_message_t *rkmessages, int message_cnt);
              +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags);
              +typedef struct rd_kafka_metadata_broker {
              +  int32_t id;
              +  char *host;
              +  int port;
              +} rd_kafka_metadata_broker_t;
              +typedef struct rd_kafka_metadata_partition {
              +  int32_t id;
              +  rd_kafka_resp_err_t err;
              +  int32_t leader;
              +  int replica_cnt;
              +  int32_t *replicas;
              +  int isr_cnt;
              +  int32_t *isrs;
              +} rd_kafka_metadata_partition_t;
              +typedef struct rd_kafka_metadata_topic {
              +  char *topic;
              +  int partition_cnt;
              +  struct rd_kafka_metadata_partition *partitions;
              +  rd_kafka_resp_err_t err;
              +} rd_kafka_metadata_topic_t;
              +typedef struct rd_kafka_metadata {
              +  int broker_cnt;
              +  struct rd_kafka_metadata_broker *brokers;
              +  int topic_cnt;
              +  struct rd_kafka_metadata_topic *topics;
              +  int32_t orig_broker_id;
              +  char *orig_broker_name;
              +} rd_kafka_metadata_t;
              +rd_kafka_resp_err_t rd_kafka_metadata(rd_kafka_t *rk, int all_topics, rd_kafka_topic_t *only_rkt, const struct rd_kafka_metadata **metadatap, int timeout_ms);
              +void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata);
              +typedef struct rd_kafka_Node_s rd_kafka_Node_t;
              +int rd_kafka_Node_id(const rd_kafka_Node_t *node);
              +const char *rd_kafka_Node_host(const rd_kafka_Node_t *node);
              +uint16_t rd_kafka_Node_port(const rd_kafka_Node_t *node);
              +struct rd_kafka_group_member_info {
              +  char *member_id;
              +  char *client_id;
              +  char *client_host;
              +  void *member_metadata;
              +  int member_metadata_size;
              +  void *member_assignment;
              +  int member_assignment_size;
              +};
              +typedef enum {
              +  RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN = 0,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_PREPARING_REBALANCE = 1,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_COMPLETING_REBALANCE = 2,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_STABLE = 3,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_DEAD = 4,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY = 5,
              +  RD_KAFKA_CONSUMER_GROUP_STATE__CNT,
              +} rd_kafka_consumer_group_state_t;
              +struct rd_kafka_group_info {
              +  struct rd_kafka_metadata_broker broker;
              +  char *group;
              +  rd_kafka_resp_err_t err;
              +  char *state;
              +  char *protocol_type;
              +  char *protocol;
              +  struct rd_kafka_group_member_info *members;
              +  int member_cnt;
              +};
              +struct rd_kafka_group_list {
              +  struct rd_kafka_group_info *groups;
              +  int group_cnt;
              +};
              +rd_kafka_resp_err_t rd_kafka_list_groups(rd_kafka_t *rk, const char *group, const struct rd_kafka_group_list **grplistp, int timeout_ms);
              +const char *rd_kafka_consumer_group_state_name(rd_kafka_consumer_group_state_t state);
              +rd_kafka_consumer_group_state_t rd_kafka_consumer_group_state_code(const char *name);
              +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist);
              +int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist);
              +void rd_kafka_set_logger(rd_kafka_t *rk, void (*func)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_set_log_level(rd_kafka_t *rk, int level);
              +void rd_kafka_log_print(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +int rd_kafka_outq_len(rd_kafka_t *rk);
              +void rd_kafka_dump(FILE *fp, rd_kafka_t *rk);
              +int rd_kafka_thread_cnt(void);
              +typedef enum rd_kafka_thread_type_t {
              +  RD_KAFKA_THREAD_MAIN,
              +  RD_KAFKA_THREAD_BACKGROUND,
              +  RD_KAFKA_THREAD_BROKER,
              +} rd_kafka_thread_type_t;
              +int rd_kafka_wait_destroyed(int timeout_ms);
              +int rd_kafka_unittest(void);
              +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk);
              +typedef int rd_kafka_event_type_t;
              +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev);
              +void rd_kafka_event_destroy(rd_kafka_event_t *rkev);
              +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev);
              +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, const rd_kafka_message_t **rkmessages, size_t size);
              +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev);
              +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev);
              +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev);
              +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev);
              +int rd_kafka_event_log(rd_kafka_event_t *rkev, const char **fac, const char **str, int *level);
              +int rd_kafka_event_debug_contexts(rd_kafka_event_t *rkev, char *dst, size_t dstsize);
              +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_list_t *rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_t *rd_kafka_event_topic_partition(rd_kafka_event_t *rkev);
              +typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreateAcls_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeAcls_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteAcls_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_IncrementalAlterConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteRecords_result_t;
              +typedef rd_kafka_event_t rd_kafka_ListConsumerGroups_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConsumerGroups_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteGroups_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteConsumerGroupOffsets_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConsumerGroupOffsets_result_t;
              +typedef rd_kafka_event_t rd_kafka_ListConsumerGroupOffsets_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeUserScramCredentials_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterUserScramCredentials_result_t;
              +const rd_kafka_CreateTopics_result_t *rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteTopics_result_t *rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreatePartitions_result_t *rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConfigs_result_t *rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_IncrementalAlterConfigs_result_t *rd_kafka_event_IncrementalAlterConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConfigs_result_t *rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteRecords_result_t *rd_kafka_event_DeleteRecords_result(rd_kafka_event_t *rkev);
              +const rd_kafka_ListConsumerGroups_result_t *rd_kafka_event_ListConsumerGroups_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConsumerGroups_result_t *rd_kafka_event_DescribeConsumerGroups_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteGroups_result_t *rd_kafka_event_DeleteGroups_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteConsumerGroupOffsets_result_t *rd_kafka_event_DeleteConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreateAcls_result_t *rd_kafka_event_CreateAcls_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeAcls_result_t *rd_kafka_event_DescribeAcls_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteAcls_result_t *rd_kafka_event_DeleteAcls_result(rd_kafka_event_t *rkev);
              +const rd_kafka_ListConsumerGroupOffsets_result_t *rd_kafka_event_ListConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConsumerGroupOffsets_result_t *rd_kafka_event_AlterConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeUserScramCredentials_result_t *rd_kafka_event_DescribeUserScramCredentials_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterUserScramCredentials_result_t *rd_kafka_event_AlterUserScramCredentials_result(rd_kafka_event_t *rkev);
              +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms);
              +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms);
              +typedef rd_kafka_resp_err_t (rd_kafka_plugin_f_conf_init_t)(rd_kafka_conf_t *conf, void **plug_opaquep, char *errstr, size_t errstr_size);
              +typedef rd_kafka_conf_res_t (rd_kafka_interceptor_f_on_conf_set_t)(rd_kafka_conf_t *conf, const char *name, const char *val, char *errstr, size_t errstr_size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_dup_t)(rd_kafka_conf_t *new_conf, const rd_kafka_conf_t *old_conf, size_t filter_cnt, const char **filter, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_destroy_t)(void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_new_t)(rd_kafka_t *rk, const rd_kafka_conf_t *conf, void *ic_opaque, char *errstr, size_t errstr_size);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_send_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_acknowledgement_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_consume_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_commit_t)(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_request_sent_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_response_received_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, int64_t rtt, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_start_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_exit_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_broker_state_change_t)(rd_kafka_t *rk, int32_t broker_id, const char *secproto, const char *name, int port, const char *state, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_new_t *on_new, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_destroy_t *on_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_send_t *on_send, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_consume_t *on_consume, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_commit_t *on_commit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_response_received(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_response_received_t *on_response_received, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_broker_state_change(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_broker_state_change_t *on_broker_state_change, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres);
              +const rd_kafka_error_t *rd_kafka_group_result_error(const rd_kafka_group_result_t *groupres);
              +const char *rd_kafka_group_result_name(const rd_kafka_group_result_t *groupres);
              +const rd_kafka_topic_partition_list_t *rd_kafka_group_result_partitions(const rd_kafka_group_result_t *groupres);
              +typedef enum rd_kafka_admin_op_t {
              +  RD_KAFKA_ADMIN_OP_ANY = 0,
              +  RD_KAFKA_ADMIN_OP_CREATETOPICS,
              +  RD_KAFKA_ADMIN_OP_DELETETOPICS,
              +  RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONFIGS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS,
              +  RD_KAFKA_ADMIN_OP_DELETERECORDS,
              +  RD_KAFKA_ADMIN_OP_DELETEGROUPS,
              +  RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS,
              +  RD_KAFKA_ADMIN_OP_CREATEACLS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBEACLS,
              +  RD_KAFKA_ADMIN_OP_DELETEACLS,
              +  RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS,
              +  RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS,
              +  RD_KAFKA_ADMIN_OP_INCREMENTALALTERCONFIGS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBEUSERSCRAMCREDENTIALS,
              +  RD_KAFKA_ADMIN_OP_ALTERUSERSCRAMCREDENTIALS,
              +  RD_KAFKA_ADMIN_OP__CNT,
              +} rd_kafka_admin_op_t;
              +typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t;
              +rd_kafka_AdminOptions_t *rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
              +void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, int true_or_false, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, int32_t broker_id, char *errstr, size_t errstr_size);
              +rd_kafka_error_t *rd_kafka_AdminOptions_set_require_stable_offsets(rd_kafka_AdminOptions_t *options, int true_or_false);
              +rd_kafka_error_t *rd_kafka_AdminOptions_set_match_consumer_group_states(rd_kafka_AdminOptions_t *options, const rd_kafka_consumer_group_state_t *consumer_group_states, size_t consumer_group_states_cnt);
              +void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, void *ev_opaque);
              +typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t;
              +rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, int num_partitions, int replication_factor, char *errstr, size_t errstr_size);
              +void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic);
              +void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, int32_t partition, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, const char *name, const char *value);
              +void rd_kafka_CreateTopics(rd_kafka_t *rk, rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(const rd_kafka_CreateTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
              +rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic);
              +void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic);
              +void rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt);
              +void rd_kafka_DeleteTopics(rd_kafka_t *rk, rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(const rd_kafka_DeleteTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t;
              +rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new(const char *topic, size_t new_total_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts);
              +void rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment(rd_kafka_NewPartitions_t *new_parts, int32_t new_partition_idx, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_CreatePartitions(rd_kafka_t *rk, rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreatePartitions_result_topics(const rd_kafka_CreatePartitions_result_t *result, size_t *cntp);
              +typedef enum rd_kafka_ConfigSource_t {
              +  RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
              +  RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4,
              +  RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5,
              +  RD_KAFKA_CONFIG_SOURCE__CNT,
              +} rd_kafka_ConfigSource_t;
              +const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource);
              +typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t;
              +const char *rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry);
              +const char *rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry);
              +rd_kafka_ConfigSource_t rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, size_t *cntp);
              +typedef enum rd_kafka_ResourceType_t {
              +  RD_KAFKA_RESOURCE_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_ANY = 1,
              +  RD_KAFKA_RESOURCE_TOPIC = 2,
              +  RD_KAFKA_RESOURCE_GROUP = 3,
              +  RD_KAFKA_RESOURCE_BROKER = 4,
              +  RD_KAFKA_RESOURCE__CNT,
              +} rd_kafka_ResourceType_t;
              +typedef enum rd_kafka_ResourcePatternType_t {
              +  RD_KAFKA_RESOURCE_PATTERN_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_PATTERN_ANY = 1,
              +  RD_KAFKA_RESOURCE_PATTERN_MATCH = 2,
              +  RD_KAFKA_RESOURCE_PATTERN_LITERAL = 3,
              +  RD_KAFKA_RESOURCE_PATTERN_PREFIXED = 4,
              +  RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT,
              +} rd_kafka_ResourcePatternType_t;
              +typedef enum rd_kafka_AlterConfigOpType_t {
              +  RD_KAFKA_ALTER_CONFIG_OP_TYPE_SET = 0,
              +  RD_KAFKA_ALTER_CONFIG_OP_TYPE_DELETE = 1,
              +  RD_KAFKA_ALTER_CONFIG_OP_TYPE_APPEND = 2,
              +  RD_KAFKA_ALTER_CONFIG_OP_TYPE_SUBTRACT = 3,
              +  RD_KAFKA_ALTER_CONFIG_OP_TYPE__CNT,
              +} rd_kafka_AlterConfigOpType_t;
              +const char *rd_kafka_ResourcePatternType_name(rd_kafka_ResourcePatternType_t resource_pattern_type);
              +const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype);
              +typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
              +rd_kafka_ConfigResource_t *rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, const char *resname);
              +void rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config);
              +void rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, size_t config_cnt);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, const char *name, const char *value);
              +rd_kafka_error_t *rd_kafka_ConfigResource_add_incremental_config(rd_kafka_ConfigResource_t *config, const char *name, rd_kafka_AlterConfigOpType_t op_type, const char *value);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, size_t *cntp);
              +rd_kafka_ResourceType_t rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config);
              +void rd_kafka_AlterConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_AlterConfigs_result_resources(const rd_kafka_AlterConfigs_result_t *result, size_t *cntp);
              +void rd_kafka_IncrementalAlterConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_IncrementalAlterConfigs_result_resources(const rd_kafka_IncrementalAlterConfigs_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_DescribeConfigs_result_resources(const rd_kafka_DescribeConfigs_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteRecords_s rd_kafka_DeleteRecords_t;
              +rd_kafka_DeleteRecords_t *rd_kafka_DeleteRecords_new(const rd_kafka_topic_partition_list_t *before_offsets);
              +void rd_kafka_DeleteRecords_destroy(rd_kafka_DeleteRecords_t *del_records);
              +void rd_kafka_DeleteRecords_destroy_array(rd_kafka_DeleteRecords_t **del_records, size_t del_record_cnt);
              +void rd_kafka_DeleteRecords(rd_kafka_t *rk, rd_kafka_DeleteRecords_t **del_records, size_t del_record_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_partition_list_t *rd_kafka_DeleteRecords_result_offsets(const rd_kafka_DeleteRecords_result_t *result);
              +typedef struct rd_kafka_ConsumerGroupListing_s rd_kafka_ConsumerGroupListing_t;
              +typedef struct rd_kafka_ListConsumerGroupsResult_s rd_kafka_ListConsumerGroupsResult_t;
              +void rd_kafka_ListConsumerGroups(rd_kafka_t *rk, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const char *rd_kafka_ConsumerGroupListing_group_id(const rd_kafka_ConsumerGroupListing_t *grplist);
              +int rd_kafka_ConsumerGroupListing_is_simple_consumer_group(const rd_kafka_ConsumerGroupListing_t *grplist);
              +rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupListing_state(const rd_kafka_ConsumerGroupListing_t *grplist);
              +const rd_kafka_ConsumerGroupListing_t **rd_kafka_ListConsumerGroups_result_valid(const rd_kafka_ListConsumerGroups_result_t *result, size_t *cntp);
              +const rd_kafka_error_t **rd_kafka_ListConsumerGroups_result_errors(const rd_kafka_ListConsumerGroups_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_ConsumerGroupDescription_s rd_kafka_ConsumerGroupDescription_t;
              +typedef struct rd_kafka_MemberDescription_s rd_kafka_MemberDescription_t;
              +typedef struct rd_kafka_MemberAssignment_s rd_kafka_MemberAssignment_t;
              +void rd_kafka_DescribeConsumerGroups(rd_kafka_t *rk, const char **groups, size_t groups_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConsumerGroupDescription_t **rd_kafka_DescribeConsumerGroups_result_groups(const rd_kafka_DescribeConsumerGroups_result_t *result, size_t *cntp);
              +const char *rd_kafka_ConsumerGroupDescription_group_id(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +const rd_kafka_error_t *rd_kafka_ConsumerGroupDescription_error(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +int rd_kafka_ConsumerGroupDescription_is_simple_consumer_group(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +const char *rd_kafka_ConsumerGroupDescription_partition_assignor(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupDescription_state(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +const rd_kafka_Node_t *rd_kafka_ConsumerGroupDescription_coordinator(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +size_t rd_kafka_ConsumerGroupDescription_member_count(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +const rd_kafka_MemberDescription_t *rd_kafka_ConsumerGroupDescription_member(const rd_kafka_ConsumerGroupDescription_t *grpdesc, size_t idx);
              +const char *rd_kafka_MemberDescription_client_id(const rd_kafka_MemberDescription_t *member);
              +const char *rd_kafka_MemberDescription_group_instance_id(const rd_kafka_MemberDescription_t *member);
              +const char *rd_kafka_MemberDescription_consumer_id(const rd_kafka_MemberDescription_t *member);
              +const char *rd_kafka_MemberDescription_host(const rd_kafka_MemberDescription_t *member);
              +const rd_kafka_MemberAssignment_t *rd_kafka_MemberDescription_assignment(const rd_kafka_MemberDescription_t *member);
              +const rd_kafka_topic_partition_list_t *rd_kafka_MemberAssignment_partitions(const rd_kafka_MemberAssignment_t *assignment);
              +typedef struct rd_kafka_DeleteGroup_s rd_kafka_DeleteGroup_t;
              +rd_kafka_DeleteGroup_t *rd_kafka_DeleteGroup_new(const char *group);
              +void rd_kafka_DeleteGroup_destroy(rd_kafka_DeleteGroup_t *del_group);
              +void rd_kafka_DeleteGroup_destroy_array(rd_kafka_DeleteGroup_t **del_groups, size_t del_group_cnt);
              +void rd_kafka_DeleteGroups(rd_kafka_t *rk, rd_kafka_DeleteGroup_t **del_groups, size_t del_group_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_DeleteGroups_result_groups(const rd_kafka_DeleteGroups_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_ListConsumerGroupOffsets_s rd_kafka_ListConsumerGroupOffsets_t;
              +rd_kafka_ListConsumerGroupOffsets_t *rd_kafka_ListConsumerGroupOffsets_new(const char *group_id, const rd_kafka_topic_partition_list_t *partitions);
              +void rd_kafka_ListConsumerGroupOffsets_destroy(rd_kafka_ListConsumerGroupOffsets_t *list_grpoffsets);
              +void rd_kafka_ListConsumerGroupOffsets_destroy_array(rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets, size_t list_grpoffset_cnt);
              +void rd_kafka_ListConsumerGroupOffsets(rd_kafka_t *rk, rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets, size_t list_grpoffsets_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_ListConsumerGroupOffsets_result_groups(const rd_kafka_ListConsumerGroupOffsets_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_AlterConsumerGroupOffsets_s rd_kafka_AlterConsumerGroupOffsets_t;
              +rd_kafka_AlterConsumerGroupOffsets_t *rd_kafka_AlterConsumerGroupOffsets_new(const char *group_id, const rd_kafka_topic_partition_list_t *partitions);
              +void rd_kafka_AlterConsumerGroupOffsets_destroy(rd_kafka_AlterConsumerGroupOffsets_t *alter_grpoffsets);
              +void rd_kafka_AlterConsumerGroupOffsets_destroy_array(rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets, size_t alter_grpoffset_cnt);
              +void rd_kafka_AlterConsumerGroupOffsets(rd_kafka_t *rk, rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets, size_t alter_grpoffsets_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_AlterConsumerGroupOffsets_result_groups(const rd_kafka_AlterConsumerGroupOffsets_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteConsumerGroupOffsets_s rd_kafka_DeleteConsumerGroupOffsets_t;
              +rd_kafka_DeleteConsumerGroupOffsets_t *rd_kafka_DeleteConsumerGroupOffsets_new(const char *group, const rd_kafka_topic_partition_list_t *partitions);
              +void rd_kafka_DeleteConsumerGroupOffsets_destroy(rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets);
              +void rd_kafka_DeleteConsumerGroupOffsets_destroy_array(rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, size_t del_grpoffset_cnt);
              +void rd_kafka_DeleteConsumerGroupOffsets(rd_kafka_t *rk, rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, size_t del_grpoffsets_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_DeleteConsumerGroupOffsets_result_groups(const rd_kafka_DeleteConsumerGroupOffsets_result_t *result, size_t *cntp);
              +typedef enum rd_kafka_ScramMechanism_t {
              +  RD_KAFKA_SCRAM_MECHANISM_UNKNOWN = 0,
              +  RD_KAFKA_SCRAM_MECHANISM_SHA_256 = 1,
              +  RD_KAFKA_SCRAM_MECHANISM_SHA_512 = 2,
              +  RD_KAFKA_SCRAM_MECHANISM__CNT,
              +} rd_kafka_ScramMechanism_t;
              +typedef struct rd_kafka_ScramCredentialInfo_s rd_kafka_ScramCredentialInfo_t;
              +rd_kafka_ScramMechanism_t rd_kafka_ScramCredentialInfo_mechanism(const rd_kafka_ScramCredentialInfo_t *scram_credential_info);
              +int32_t rd_kafka_ScramCredentialInfo_iterations(const rd_kafka_ScramCredentialInfo_t *scram_credential_info);
              +typedef struct rd_kafka_UserScramCredentialsDescription_s rd_kafka_UserScramCredentialsDescription_t;
              +const char *rd_kafka_UserScramCredentialsDescription_user(const rd_kafka_UserScramCredentialsDescription_t *description);
              +const rd_kafka_error_t *rd_kafka_UserScramCredentialsDescription_error(const rd_kafka_UserScramCredentialsDescription_t *description);
              +size_t rd_kafka_UserScramCredentialsDescription_scramcredentialinfo_count(const rd_kafka_UserScramCredentialsDescription_t *description);
              +const rd_kafka_ScramCredentialInfo_t *rd_kafka_UserScramCredentialsDescription_scramcredentialinfo(const rd_kafka_UserScramCredentialsDescription_t *description, size_t idx);
              +const rd_kafka_UserScramCredentialsDescription_t **rd_kafka_DescribeUserScramCredentials_result_descriptions(const rd_kafka_DescribeUserScramCredentials_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeUserScramCredentials(rd_kafka_t *rk, const char **users, size_t user_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +typedef struct rd_kafka_UserScramCredentialAlteration_s rd_kafka_UserScramCredentialAlteration_t;
              +rd_kafka_UserScramCredentialAlteration_t *rd_kafka_UserScramCredentialUpsertion_new(const char *username, rd_kafka_ScramMechanism_t mechanism, int32_t iterations, const unsigned char *password, size_t password_size, const unsigned char *salt, size_t salt_size);
              +rd_kafka_UserScramCredentialAlteration_t *rd_kafka_UserScramCredentialDeletion_new(const char *username, rd_kafka_ScramMechanism_t mechanism);
              +void rd_kafka_UserScramCredentialAlteration_destroy(rd_kafka_UserScramCredentialAlteration_t *alteration);
              +void rd_kafka_UserScramCredentialAlteration_destroy_array(rd_kafka_UserScramCredentialAlteration_t **alterations, size_t alteration_cnt);
              +typedef struct rd_kafka_AlterUserScramCredentials_result_response_s rd_kafka_AlterUserScramCredentials_result_response_t;
              +const char *rd_kafka_AlterUserScramCredentials_result_response_user(const rd_kafka_AlterUserScramCredentials_result_response_t *response);
              +const rd_kafka_error_t *rd_kafka_AlterUserScramCredentials_result_response_error(const rd_kafka_AlterUserScramCredentials_result_response_t *response);
              +const rd_kafka_AlterUserScramCredentials_result_response_t **rd_kafka_AlterUserScramCredentials_result_responses(const rd_kafka_AlterUserScramCredentials_result_t *result, size_t *cntp);
              +void rd_kafka_AlterUserScramCredentials(rd_kafka_t *rk, rd_kafka_UserScramCredentialAlteration_t **alterations, size_t alteration_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +typedef struct rd_kafka_AclBinding_s rd_kafka_AclBinding_t;
              +typedef rd_kafka_AclBinding_t rd_kafka_AclBindingFilter_t;
              +const rd_kafka_error_t *rd_kafka_acl_result_error(const rd_kafka_acl_result_t *aclres);
              +typedef enum rd_kafka_AclOperation_t {
              +  RD_KAFKA_ACL_OPERATION_UNKNOWN = 0,
              +  RD_KAFKA_ACL_OPERATION_ANY = 1,
              +  RD_KAFKA_ACL_OPERATION_ALL = 2,
              +  RD_KAFKA_ACL_OPERATION_READ = 3,
              +  RD_KAFKA_ACL_OPERATION_WRITE = 4,
              +  RD_KAFKA_ACL_OPERATION_CREATE = 5,
              +  RD_KAFKA_ACL_OPERATION_DELETE = 6,
              +  RD_KAFKA_ACL_OPERATION_ALTER = 7,
              +  RD_KAFKA_ACL_OPERATION_DESCRIBE = 8,
              +  RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION = 9,
              +  RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS = 10,
              +  RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS = 11,
              +  RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE = 12,
              +  RD_KAFKA_ACL_OPERATION__CNT,
              +} rd_kafka_AclOperation_t;
              +const char *rd_kafka_AclOperation_name(rd_kafka_AclOperation_t acl_operation);
              +typedef enum rd_kafka_AclPermissionType_t {
              +  RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN = 0,
              +  RD_KAFKA_ACL_PERMISSION_TYPE_ANY = 1,
              +  RD_KAFKA_ACL_PERMISSION_TYPE_DENY = 2,
              +  RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW = 3,
              +  RD_KAFKA_ACL_PERMISSION_TYPE__CNT,
              +} rd_kafka_AclPermissionType_t;
              +const char *rd_kafka_AclPermissionType_name(rd_kafka_AclPermissionType_t acl_permission_type);
              +rd_kafka_AclBinding_t *rd_kafka_AclBinding_new(rd_kafka_ResourceType_t restype, const char *name, rd_kafka_ResourcePatternType_t resource_pattern_type, const char *principal, const char *host, rd_kafka_AclOperation_t operation, rd_kafka_AclPermissionType_t permission_type, char *errstr, size_t errstr_size);
              +rd_kafka_AclBindingFilter_t *rd_kafka_AclBindingFilter_new(rd_kafka_ResourceType_t restype, const char *name, rd_kafka_ResourcePatternType_t resource_pattern_type, const char *principal, const char *host, rd_kafka_AclOperation_t operation, rd_kafka_AclPermissionType_t permission_type, char *errstr, size_t errstr_size);
              +rd_kafka_ResourceType_t rd_kafka_AclBinding_restype(const rd_kafka_AclBinding_t *acl);
              +const char *rd_kafka_AclBinding_name(const rd_kafka_AclBinding_t *acl);
              +const char *rd_kafka_AclBinding_principal(const rd_kafka_AclBinding_t *acl);
              +const char *rd_kafka_AclBinding_host(const rd_kafka_AclBinding_t *acl);
              +rd_kafka_AclOperation_t rd_kafka_AclBinding_operation(const rd_kafka_AclBinding_t *acl);
              +rd_kafka_AclPermissionType_t rd_kafka_AclBinding_permission_type(const rd_kafka_AclBinding_t *acl);
              +rd_kafka_ResourcePatternType_t rd_kafka_AclBinding_resource_pattern_type(const rd_kafka_AclBinding_t *acl);
              +const rd_kafka_error_t *rd_kafka_AclBinding_error(const rd_kafka_AclBinding_t *acl);
              +void rd_kafka_AclBinding_destroy(rd_kafka_AclBinding_t *acl_binding);
              +void rd_kafka_AclBinding_destroy_array(rd_kafka_AclBinding_t **acl_bindings, size_t acl_bindings_cnt);
              +const rd_kafka_acl_result_t **rd_kafka_CreateAcls_result_acls(const rd_kafka_CreateAcls_result_t *result, size_t *cntp);
              +void rd_kafka_CreateAcls(rd_kafka_t *rk, rd_kafka_AclBinding_t **new_acls, size_t new_acls_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_AclBinding_t **rd_kafka_DescribeAcls_result_acls(const rd_kafka_DescribeAcls_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeAcls(rd_kafka_t *rk, rd_kafka_AclBindingFilter_t *acl_filter, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +typedef struct rd_kafka_DeleteAcls_result_response_s rd_kafka_DeleteAcls_result_response_t;
              +const rd_kafka_DeleteAcls_result_response_t **rd_kafka_DeleteAcls_result_responses(const rd_kafka_DeleteAcls_result_t *result, size_t *cntp);
              +const rd_kafka_error_t *rd_kafka_DeleteAcls_result_response_error(const rd_kafka_DeleteAcls_result_response_t *result_response);
              +const rd_kafka_AclBinding_t **rd_kafka_DeleteAcls_result_response_matching_acls(const rd_kafka_DeleteAcls_result_response_t *result_response, size_t *matching_acls_cntp);
              +void rd_kafka_DeleteAcls(rd_kafka_t *rk, rd_kafka_AclBindingFilter_t **del_acls, size_t del_acls_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token(rd_kafka_t *rk, const char *token_value, int64_t md_lifetime_ms, const char *md_principal_name, const char **extensions, size_t extension_size, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk, const char *errstr);
              +rd_kafka_error_t *rd_kafka_init_transactions(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_send_offsets_to_transaction(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, const rd_kafka_consumer_group_metadata_t *cgmetadata, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_commit_transaction(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_abort_transaction(rd_kafka_t *rk, int timeout_ms);
              +typedef struct rd_kafka_mock_cluster_s rd_kafka_mock_cluster_t;
              +rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new(rd_kafka_t *rk, int broker_cnt);
              +void rd_kafka_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_t *rd_kafka_mock_cluster_handle(const rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_mock_cluster_t *rd_kafka_handle_mock_cluster(const rd_kafka_t *rk);
              +const char *rd_kafka_mock_cluster_bootstraps(const rd_kafka_mock_cluster_t *mcluster);
              +void rd_kafka_mock_clear_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey);
              +void rd_kafka_mock_push_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, ...);
              +void rd_kafka_mock_push_request_errors_array(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, const rd_kafka_resp_err_t *errors);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_push_request_error_rtts(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int16_t ApiKey, size_t cnt, ...);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_error_stack_cnt(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int16_t ApiKey, size_t *cntp);
              +void rd_kafka_mock_topic_set_error(rd_kafka_mock_cluster_t *mcluster, const char *topic, rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_mock_topic_create(rd_kafka_mock_cluster_t *mcluster, const char *topic, int partition_cnt, int replication_factor);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_leader(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int64_t lo, int64_t hi);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_down(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_up(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rtt(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int rtt_ms);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rack(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, const char *rack);
              +rd_kafka_resp_err_t rd_kafka_mock_coordinator_set(rd_kafka_mock_cluster_t *mcluster, const char *key_type, const char *key, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_set_apiversion(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, int16_t MinVersion, int16_t MaxVersion);
              +'
              +
              +rdkafka.h, rdkafka_mock.h
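The string above holds the C declarations (taken from rdkafka.h and rdkafka_mock.h) that the FFI binding needs. As a minimal sketch only — not the library's own loader — and assuming the RD_KAFKA_CDEF constant is available and librdkafka is installed as librdkafka.so.1, the declarations could be bound with PHP's FFI like this:

<?php
// Illustrative only: bind the declarations above to a locally installed librdkafka.
// The constant name and the shared-library file name are assumptions; adjust for your setup.
$ffi = \FFI::cdef(RD_KAFKA_CDEF, 'librdkafka.so.1');

// rd_kafka_version() and rd_kafka_version_str() are declared in the cdef string.
printf(
    "runtime librdkafka %s (0x%08x)\n",
    \FFI::string($ffi->rd_kafka_version_str()),
    $ffi->rd_kafka_version()
);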

              +

              RD_KAFKA_VERSION

              +
              public RD_KAFKA_VERSION = 33751295
              +
              +

              librdkafka version

              +

              Interpreted as hex MM.mm.rr.xx:

• MM = Major
• mm = minor
• rr = revision
• xx = pre-release id (0xff is the final release)

              E.g.: 0x000801ff = 0.8.1

              +
              Remarks
This value should only be used at compile time; for runtime version checks, use rd_kafka_version().
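As a small illustration of the hex layout described above (a sketch only; the value is hard-coded from the documented constant rather than read from the library):

<?php
// Decode the compile-time version constant using the MM.mm.rr.xx hex layout.
$version = 33751295; // = 0x020300ff, the documented RD_KAFKA_VERSION value

$major      = ($version >> 24) & 0xff; // MM
$minor      = ($version >> 16) & 0xff; // mm
$revision   = ($version >> 8)  & 0xff; // rr
$preRelease = $version & 0xff;         // xx, 0xff means final release

printf("librdkafka %d.%d.%d\n", $major, $minor, $revision); // librdkafka 2.3.0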

              RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE

              +
              public RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76
              +
              +

              Unsupported compression type


              RD_KAFKA_RESP_ERR_END_ALL

              +

              public RD_KAFKA_RESP_ERR_END_ALL = 98
              +
              +enum rd_kafka_resp_err_t

              +

              RD_KAFKA_ADMIN_OP__CNT

              +
              public RD_KAFKA_ADMIN_OP__CNT = 22
              +
              +

              Number of ops defined


              RD_KAFKA_CDEF

              +

              public RD_KAFKA_CDEF = 'typedef long int ssize_t;
              +typedef struct _IO_FILE FILE;
              +typedef long int mode_t;
              +typedef signed int int16_t;
              +typedef unsigned int uint16_t;
              +typedef signed int int32_t;
              +typedef signed long int int64_t;
              +int rd_kafka_version(void);
              +const char *rd_kafka_version_str(void);
              +typedef enum rd_kafka_type_t {
              +  RD_KAFKA_PRODUCER,
              +  RD_KAFKA_CONSUMER,
              +} rd_kafka_type_t;
              +typedef enum rd_kafka_timestamp_type_t {
              +  RD_KAFKA_TIMESTAMP_NOT_AVAILABLE,
              +  RD_KAFKA_TIMESTAMP_CREATE_TIME,
              +  RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME,
              +} rd_kafka_timestamp_type_t;
              +const char *rd_kafka_get_debug_contexts(void);
              +typedef struct rd_kafka_s rd_kafka_t;
              +typedef struct rd_kafka_topic_s rd_kafka_topic_t;
              +typedef struct rd_kafka_conf_s rd_kafka_conf_t;
              +typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t;
              +typedef struct rd_kafka_queue_s rd_kafka_queue_t;
              +typedef struct rd_kafka_op_s rd_kafka_event_t;
              +typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t;
              +typedef struct rd_kafka_consumer_group_metadata_s rd_kafka_consumer_group_metadata_t;
              +typedef struct rd_kafka_error_s {
              +  unsigned int code;
              +  char *errstr;
              +  unsigned char fatal;
              +  unsigned char retriable;
              +  unsigned char txn_requires_abort;
              +} rd_kafka_error_t;
              +typedef struct rd_kafka_headers_s rd_kafka_headers_t;
              +typedef struct rd_kafka_group_result_s rd_kafka_group_result_t;
              +typedef struct rd_kafka_acl_result_s rd_kafka_acl_result_t;
              +typedef struct rd_kafka_Uuid_s rd_kafka_Uuid_t;
              +typedef enum {
              +  RD_KAFKA_RESP_ERR__BEGIN = (- 200),
              +  RD_KAFKA_RESP_ERR__BAD_MSG = (- 199),
              +  RD_KAFKA_RESP_ERR__BAD_COMPRESSION = (- 198),
              +  RD_KAFKA_RESP_ERR__DESTROY = (- 197),
              +  RD_KAFKA_RESP_ERR__FAIL = (- 196),
              +  RD_KAFKA_RESP_ERR__TRANSPORT = (- 195),
              +  RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = (- 194),
              +  RD_KAFKA_RESP_ERR__RESOLVE = (- 193),
              +  RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = (- 192),
              +  RD_KAFKA_RESP_ERR__PARTITION_EOF = (- 191),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = (- 190),
              +  RD_KAFKA_RESP_ERR__FS = (- 189),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = (- 188),
              +  RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = (- 187),
              +  RD_KAFKA_RESP_ERR__INVALID_ARG = (- 186),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT = (- 185),
              +  RD_KAFKA_RESP_ERR__QUEUE_FULL = (- 184),
              +  RD_KAFKA_RESP_ERR__ISR_INSUFF = (- 183),
              +  RD_KAFKA_RESP_ERR__NODE_UPDATE = (- 182),
              +  RD_KAFKA_RESP_ERR__SSL = (- 181),
              +  RD_KAFKA_RESP_ERR__WAIT_COORD = (- 180),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = (- 179),
              +  RD_KAFKA_RESP_ERR__IN_PROGRESS = (- 178),
              +  RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = (- 177),
              +  RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = (- 176),
              +  RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = (- 175),
              +  RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = (- 174),
              +  RD_KAFKA_RESP_ERR__CONFLICT = (- 173),
              +  RD_KAFKA_RESP_ERR__STATE = (- 172),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = (- 171),
              +  RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = (- 170),
              +  RD_KAFKA_RESP_ERR__AUTHENTICATION = (- 169),
              +  RD_KAFKA_RESP_ERR__NO_OFFSET = (- 168),
              +  RD_KAFKA_RESP_ERR__OUTDATED = (- 167),
              +  RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = (- 166),
              +  RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = (- 165),
              +  RD_KAFKA_RESP_ERR__WAIT_CACHE = (- 164),
              +  RD_KAFKA_RESP_ERR__INTR = (- 163),
              +  RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = (- 162),
              +  RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = (- 161),
              +  RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = (- 160),
              +  RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = (- 159),
              +  RD_KAFKA_RESP_ERR__PARTIAL = (- 158),
              +  RD_KAFKA_RESP_ERR__READ_ONLY = (- 157),
              +  RD_KAFKA_RESP_ERR__NOENT = (- 156),
              +  RD_KAFKA_RESP_ERR__UNDERFLOW = (- 155),
              +  RD_KAFKA_RESP_ERR__INVALID_TYPE = (- 154),
              +  RD_KAFKA_RESP_ERR__RETRY = (- 153),
              +  RD_KAFKA_RESP_ERR__PURGE_QUEUE = (- 152),
              +  RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = (- 151),
              +  RD_KAFKA_RESP_ERR__FATAL = (- 150),
              +  RD_KAFKA_RESP_ERR__INCONSISTENT = (- 149),
              +  RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = (- 148),
              +  RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = (- 147),
              +  RD_KAFKA_RESP_ERR__UNKNOWN_BROKER = (- 146),
              +  RD_KAFKA_RESP_ERR__NOT_CONFIGURED = (- 145),
              +  RD_KAFKA_RESP_ERR__FENCED = (- 144),
              +  RD_KAFKA_RESP_ERR__APPLICATION = (- 143),
              +  RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST = (- 142),
              +  RD_KAFKA_RESP_ERR__NOOP = (- 141),
              +  RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET = (- 140),
              +  RD_KAFKA_RESP_ERR__LOG_TRUNCATION = (- 139),
              +  RD_KAFKA_RESP_ERR__END = (- 100),
              +  RD_KAFKA_RESP_ERR_UNKNOWN = (- 1),
              +  RD_KAFKA_RESP_ERR_NO_ERROR = 0,
              +  RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG = 2,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3,
              +  RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4,
              +  RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5,
              +  RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6,
              +  RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7,
              +  RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8,
              +  RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9,
              +  RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10,
              +  RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11,
              +  RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12,
              +  RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS = 14,
              +  RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE = 15,
              +  RD_KAFKA_RESP_ERR_NOT_COORDINATOR = 16,
              +  RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17,
              +  RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19,
              +  RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
              +  RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25,
              +  RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26,
              +  RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27,
              +  RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
              +  RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29,
              +  RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30,
              +  RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
              +  RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33,
              +  RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35,
              +  RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36,
              +  RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38,
              +  RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39,
              +  RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40,
              +  RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41,
              +  RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
              +  RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44,
              +  RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47,
              +  RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48,
              +  RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49,
              +  RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50,
              +  RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51,
              +  RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52,
              +  RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
              +  RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54,
              +  RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55,
              +  RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56,
              +  RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57,
              +  RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59,
              +  RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65,
              +  RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66,
              +  RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67,
              +  RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68,
              +  RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69,
              +  RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70,
              +  RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71,
              +  RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72,
              +  RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73,
              +  RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74,
              +  RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75,
              +  RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76,
              +  RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77,
              +  RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78,
              +  RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79,
              +  RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80,
              +  RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81,
              +  RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID = 82,
              +  RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE = 83,
              +  RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED = 84,
              +  RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS = 85,
              +  RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC = 86,
              +  RD_KAFKA_RESP_ERR_INVALID_RECORD = 87,
              +  RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT = 88,
              +  RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED = 89,
              +  RD_KAFKA_RESP_ERR_PRODUCER_FENCED = 90,
              +  RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND = 91,
              +  RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE = 92,
              +  RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL = 93,
              +  RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET = 94,
              +  RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION = 95,
              +  RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED = 96,
              +  RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE = 97,
              +  RD_KAFKA_RESP_ERR_END_ALL,
              +} rd_kafka_resp_err_t;
              +struct rd_kafka_err_desc {
              +  rd_kafka_resp_err_t code;
              +  char *name;
              +  char *desc;
              +};
              +void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs, size_t *cntp);
              +const char *rd_kafka_err2str(rd_kafka_resp_err_t err);
              +const char *rd_kafka_err2name(rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_last_error(void);
              +rd_kafka_resp_err_t rd_kafka_errno2err(int errnox);
              +int rd_kafka_errno(void);
              +rd_kafka_resp_err_t rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason);
              +rd_kafka_resp_err_t rd_kafka_error_code(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_name(const rd_kafka_error_t *error);
              +const char *rd_kafka_error_string(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_fatal(const rd_kafka_error_t *error);
              +int rd_kafka_error_is_retriable(const rd_kafka_error_t *error);
              +int rd_kafka_error_txn_requires_abort(const rd_kafka_error_t *error);
              +void rd_kafka_error_destroy(rd_kafka_error_t *error);
              +rd_kafka_error_t *rd_kafka_error_new(rd_kafka_resp_err_t code, const char *fmt, ...);
              +typedef struct rd_kafka_topic_partition_s {
              +  char *topic;
              +  int32_t partition;
              +  int64_t offset;
              +  void *metadata;
              +  size_t metadata_size;
              +  void *opaque;
              +  rd_kafka_resp_err_t err;
              +  void *_private;
              +} rd_kafka_topic_partition_t;
              +void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar);
              +void rd_kafka_topic_partition_set_leader_epoch(rd_kafka_topic_partition_t *rktpar, int32_t leader_epoch);
              +int32_t rd_kafka_topic_partition_get_leader_epoch(const rd_kafka_topic_partition_t *rktpar);
              +typedef struct rd_kafka_topic_partition_list_s {
              +  int cnt;
              +  int size;
              +  rd_kafka_topic_partition_t *elems;
              +} rd_kafka_topic_partition_list_t;
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size);
              +void rd_kafka_topic_partition_list_destroy(rd_kafka_topic_partition_list_t *rkparlist);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_add_range(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t start, int32_t stop);
              +int rd_kafka_topic_partition_list_del(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +int rd_kafka_topic_partition_list_del_by_idx(rd_kafka_topic_partition_list_t *rktparlist, int idx);
              +rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src);
              +rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition, int64_t offset);
              +rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(const rd_kafka_topic_partition_list_t *rktparlist, const char *topic, int32_t partition);
              +void rd_kafka_topic_partition_list_sort(rd_kafka_topic_partition_list_t *rktparlist, int (*cmp)(const void *a, const void *b, void *cmp_opaque), void *cmp_opaque);
              +typedef enum rd_kafka_vtype_t {
              +  RD_KAFKA_VTYPE_END,
              +  RD_KAFKA_VTYPE_TOPIC,
              +  RD_KAFKA_VTYPE_RKT,
              +  RD_KAFKA_VTYPE_PARTITION,
              +  RD_KAFKA_VTYPE_VALUE,
              +  RD_KAFKA_VTYPE_KEY,
              +  RD_KAFKA_VTYPE_OPAQUE,
              +  RD_KAFKA_VTYPE_MSGFLAGS,
              +  RD_KAFKA_VTYPE_TIMESTAMP,
              +  RD_KAFKA_VTYPE_HEADER,
              +  RD_KAFKA_VTYPE_HEADERS,
              +} rd_kafka_vtype_t;
              +typedef struct rd_kafka_vu_s {
              +  rd_kafka_vtype_t vtype;
              +  union {
              +    char *cstr;
              +    rd_kafka_topic_t *rkt;
              +    int i;
              +    int32_t i32;
              +    int64_t i64;
              +    struct {
              +      void *ptr;
              +      size_t size;
              +    } mem;
              +    struct {
              +      char *name;
              +      void *val;
              +      ssize_t size;
              +    } header;
              +    rd_kafka_headers_t *headers;
              +    void *ptr;
              +    char _pad[64];
              +  } u;
              +} rd_kafka_vu_t;
              +rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count);
              +void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs);
              +rd_kafka_headers_t *rd_kafka_headers_copy(const rd_kafka_headers_t *src);
              +rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs, const char *name, ssize_t name_size, const void *value, ssize_t value_size);
              +rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs, const char *name);
              +rd_kafka_resp_err_t rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get(const rd_kafka_headers_t *hdrs, size_t idx, const char *name, const void **valuep, size_t *sizep);
              +rd_kafka_resp_err_t rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs, size_t idx, const char **namep, const void **valuep, size_t *sizep);
              +typedef struct rd_kafka_message_s {
              +  rd_kafka_resp_err_t err;
              +  rd_kafka_topic_t *rkt;
              +  int32_t partition;
              +  void *payload;
              +  size_t len;
              +  void *key;
              +  size_t key_len;
              +  int64_t offset;
              +  void *_private;
              +} rd_kafka_message_t;
              +void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);
              +const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage);
              +int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage, rd_kafka_timestamp_type_t *tstype);
              +int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage);
              +int32_t rd_kafka_message_broker_id(const rd_kafka_message_t *rkmessage);
              +rd_kafka_resp_err_t rd_kafka_message_headers(const rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +rd_kafka_resp_err_t rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t **hdrsp);
              +void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage, rd_kafka_headers_t *hdrs);
              +size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs);
              +typedef enum {
              +  RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0,
              +  RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1,
              +  RD_KAFKA_MSG_STATUS_PERSISTED = 2,
              +} rd_kafka_msg_status_t;
              +rd_kafka_msg_status_t rd_kafka_message_status(const rd_kafka_message_t *rkmessage);
              +int32_t rd_kafka_message_leader_epoch(const rd_kafka_message_t *rkmessage);
              +const char *rd_kafka_Uuid_base64str(const rd_kafka_Uuid_t *uuid);
              +int64_t rd_kafka_Uuid_least_significant_bits(const rd_kafka_Uuid_t *uuid);
              +int64_t rd_kafka_Uuid_most_significant_bits(const rd_kafka_Uuid_t *uuid);
              +rd_kafka_Uuid_t *rd_kafka_Uuid_new(int64_t most_significant_bits, int64_t least_significant_bits);
              +rd_kafka_Uuid_t *rd_kafka_Uuid_copy(const rd_kafka_Uuid_t *uuid);
              +void rd_kafka_Uuid_destroy(rd_kafka_Uuid_t *uuid);
              +typedef enum {
              +  RD_KAFKA_CONF_UNKNOWN = (- 2),
              +  RD_KAFKA_CONF_INVALID = (- 1),
              +  RD_KAFKA_CONF_OK = 0,
              +} rd_kafka_conf_res_t;
              +rd_kafka_conf_t *rd_kafka_conf_new(void);
              +void rd_kafka_conf_destroy(rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf);
              +rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf, size_t filter_cnt, const char **filter);
              +const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk);
              +rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events);
              +void rd_kafka_conf_set_background_event_cb(rd_kafka_conf_t *conf, void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque));
              +void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf, void (*dr_cb)(rd_kafka_t *rk, void *payload, size_t len, rd_kafka_resp_err_t err, void *opaque, void *msg_opaque));
              +void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, void (*dr_msg_cb)(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_consume_cb(rd_kafka_conf_t *conf, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque));
              +void rd_kafka_conf_set_rebalance_cb(rd_kafka_conf_t *conf, void (*rebalance_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *partitions, void *opaque));
              +void rd_kafka_conf_set_offset_commit_cb(rd_kafka_conf_t *conf, void (*offset_commit_cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *opaque));
              +void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf, void (*error_cb)(rd_kafka_t *rk, int err, const char *reason, void *opaque));
              +void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf, void (*throttle_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int throttle_time_ms, void *opaque));
              +void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, void (*log_cb)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf, int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque));
              +void rd_kafka_conf_set_oauthbearer_token_refresh_cb(rd_kafka_conf_t *conf, void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk, const char *oauthbearer_config, void *opaque));
              +void rd_kafka_conf_enable_sasl_queue(rd_kafka_conf_t *conf, int enable);
              +void rd_kafka_conf_set_socket_cb(rd_kafka_conf_t *conf, int (*socket_cb)(int domain, int type, int protocol, void *opaque));
              +void rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf, int (*connect_cb)(int sockfd, const struct sockaddr *addr, int addrlen, const char *id, void *opaque));
              +void rd_kafka_conf_set_closesocket_cb(rd_kafka_conf_t *conf, int (*closesocket_cb)(int sockfd, void *opaque));
              +struct addrinfo;
              +void rd_kafka_conf_set_resolve_cb(rd_kafka_conf_t *conf, int (*resolve_cb)(const char *node, const char *service, const struct addrinfo *hints, struct addrinfo **res, void *opaque));
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb(rd_kafka_conf_t *conf, int (*ssl_cert_verify_cb)(rd_kafka_t *rk, const char *broker_name, int32_t broker_id, int *x509_error, int depth, const char *buf, size_t size, char *errstr, size_t errstr_size, void *opaque));
              +typedef enum rd_kafka_cert_type_t {
              +  RD_KAFKA_CERT_PUBLIC_KEY,
              +  RD_KAFKA_CERT_PRIVATE_KEY,
              +  RD_KAFKA_CERT_CA,
              +  RD_KAFKA_CERT__CNT,
              +} rd_kafka_cert_type_t;
              +typedef enum rd_kafka_cert_enc_t {
              +  RD_KAFKA_CERT_ENC_PKCS12,
              +  RD_KAFKA_CERT_ENC_DER,
              +  RD_KAFKA_CERT_ENC_PEM,
              +  RD_KAFKA_CERT_ENC__CNT,
              +} rd_kafka_cert_enc_t;
              +rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, rd_kafka_cert_type_t cert_type, rd_kafka_cert_enc_t cert_enc, const void *buffer, size_t size, char *errstr, size_t errstr_size);
              +void rd_kafka_conf_set_engine_callback_data(rd_kafka_conf_t *conf, void *callback_data);
              +void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque);
              +void *rd_kafka_opaque(const rd_kafka_t *rk);
              +void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf, rd_kafka_topic_conf_t *tconf);
              +rd_kafka_topic_conf_t *rd_kafka_conf_get_default_topic_conf(rd_kafka_conf_t *conf);
              +rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf, const char *name, char *dest, size_t *dest_size);
              +const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp);
              +const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf, size_t *cntp);
              +void rd_kafka_conf_dump_free(const char **arr, size_t cnt);
              +void rd_kafka_conf_properties_show(FILE *fp);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void);
              +rd_kafka_topic_conf_t *rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf);
              +rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk);
              +void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf);
              +rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf, const char *name, const char *value, char *errstr, size_t errstr_size);
              +void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf, void *rkt_opaque);
              +void rd_kafka_topic_conf_set_partitioner_cb(rd_kafka_topic_conf_t *topic_conf, int32_t (*partitioner)(const rd_kafka_topic_t *rkt, const void *keydata, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque));
              +void rd_kafka_topic_conf_set_msg_order_cmp(rd_kafka_topic_conf_t *topic_conf, int (*msg_order_cmp)(const rd_kafka_message_t *a, const rd_kafka_message_t *b));
              +int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt, int32_t partition);
              +int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt, const void *key, size_t keylen, int32_t partition_cnt, void *rkt_opaque, void *msg_opaque);
              +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, char *errstr, size_t errstr_size);
              +void rd_kafka_destroy(rd_kafka_t *rk);
              +void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags);
              +const char *rd_kafka_name(const rd_kafka_t *rk);
              +rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk);
              +char *rd_kafka_memberid(const rd_kafka_t *rk);
              +char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms);
              +int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, rd_kafka_topic_conf_t *conf);
              +void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt);
              +const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt);
              +void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt);
              +int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms);
              +void rd_kafka_yield(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_pause_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_resume_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk, const char *topic, int32_t partition, int64_t *low, int64_t *high);
              +rd_kafka_resp_err_t rd_kafka_offsets_for_times(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets, int timeout_ms);
              +void *rd_kafka_mem_calloc(rd_kafka_t *rk, size_t num, size_t size);
              +void *rd_kafka_mem_malloc(rd_kafka_t *rk, size_t size);
              +void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr);
              +rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk);
              +void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu);
              +rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_sasl(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_sasl_background_callbacks_enable(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_sasl_set_credentials(rd_kafka_t *rk, const char *username, const char *password);
              +rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk);
              +rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk, const char *topic, int32_t partition);
              +rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk);
              +void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst);
              +rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu);
              +void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu, int fd, const void *payload, size_t size);
              +void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu, void (*event_cb)(rd_kafka_t *rk, void *qev_opaque), void *qev_opaque);
              +void rd_kafka_queue_yield(rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_start(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
              +rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_seek_partitions(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_message_t *rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms);
              +ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu, int timeout_ms);
              +ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu, int timeout_ms, rd_kafka_message_t **rkmessages, size_t rkmessages_size);
              +int rd_kafka_consume_callback_queue(rd_kafka_queue_t *rkqu, int timeout_ms, void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
              +rd_kafka_resp_err_t rd_kafka_offsets_store(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets);
              +rd_kafka_error_t *rd_kafka_offset_store_message(rd_kafka_message_t *rkmessage);
              +rd_kafka_resp_err_t rd_kafka_subscribe(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *topics);
              +rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics);
              +rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_consumer_close_queue(rd_kafka_t *rk, rd_kafka_queue_t *rkqu);
              +int rd_kafka_consumer_closed(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_incremental_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_error_t *rd_kafka_incremental_unassign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +const char *rd_kafka_rebalance_protocol(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_assign(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_resp_err_t rd_kafka_assignment(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **partitions);
              +int rd_kafka_assignment_lost(rd_kafka_t *rk);
              +rd_kafka_resp_err_t rd_kafka_commit(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_message(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, int async);
              +rd_kafka_resp_err_t rd_kafka_commit_queue(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_queue_t *rkqu, void (*cb)(rd_kafka_t *rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t *offsets, void *commit_opaque), void *commit_opaque);
              +rd_kafka_resp_err_t rd_kafka_committed(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata(rd_kafka_t *rk);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new(const char *group_id);
              +rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_new_with_genid(const char *group_id, int32_t generation_id, const char *member_id, const char *group_instance_id);
              +void rd_kafka_consumer_group_metadata_destroy(rd_kafka_consumer_group_metadata_t *);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_write(const rd_kafka_consumer_group_metadata_t *cgmd, void **bufferp, size_t *sizep);
              +rd_kafka_error_t *rd_kafka_consumer_group_metadata_read(rd_kafka_consumer_group_metadata_t **cgmdp, const void *buffer, size_t size);
              +int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, void *payload, size_t len, const void *key, size_t keylen, void *msg_opaque);
              +rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...);
              +rd_kafka_error_t *rd_kafka_produceva(rd_kafka_t *rk, const rd_kafka_vu_t *vus, size_t cnt);
              +int rd_kafka_produce_batch(rd_kafka_topic_t *rkt, int32_t partition, int msgflags, rd_kafka_message_t *rkmessages, int message_cnt);
              +rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags);
              +typedef struct rd_kafka_metadata_broker {
              +  int32_t id;
              +  char *host;
              +  int port;
              +} rd_kafka_metadata_broker_t;
              +typedef struct rd_kafka_metadata_partition {
              +  int32_t id;
              +  rd_kafka_resp_err_t err;
              +  int32_t leader;
              +  int replica_cnt;
              +  int32_t *replicas;
              +  int isr_cnt;
              +  int32_t *isrs;
              +} rd_kafka_metadata_partition_t;
              +typedef struct rd_kafka_metadata_topic {
              +  char *topic;
              +  int partition_cnt;
              +  struct rd_kafka_metadata_partition *partitions;
              +  rd_kafka_resp_err_t err;
              +} rd_kafka_metadata_topic_t;
              +typedef struct rd_kafka_metadata {
              +  int broker_cnt;
              +  struct rd_kafka_metadata_broker *brokers;
              +  int topic_cnt;
              +  struct rd_kafka_metadata_topic *topics;
              +  int32_t orig_broker_id;
              +  char *orig_broker_name;
              +} rd_kafka_metadata_t;
              +rd_kafka_resp_err_t rd_kafka_metadata(rd_kafka_t *rk, int all_topics, rd_kafka_topic_t *only_rkt, const struct rd_kafka_metadata **metadatap, int timeout_ms);
              +void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata);
              +typedef struct rd_kafka_Node_s rd_kafka_Node_t;
              +int rd_kafka_Node_id(const rd_kafka_Node_t *node);
              +const char *rd_kafka_Node_host(const rd_kafka_Node_t *node);
              +uint16_t rd_kafka_Node_port(const rd_kafka_Node_t *node);
              +const char *rd_kafka_Node_rack(const rd_kafka_Node_t *node);
              +struct rd_kafka_group_member_info {
              +  char *member_id;
              +  char *client_id;
              +  char *client_host;
              +  void *member_metadata;
              +  int member_metadata_size;
              +  void *member_assignment;
              +  int member_assignment_size;
              +};
              +typedef enum {
              +  RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN = 0,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_PREPARING_REBALANCE = 1,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_COMPLETING_REBALANCE = 2,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_STABLE = 3,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_DEAD = 4,
              +  RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY = 5,
              +  RD_KAFKA_CONSUMER_GROUP_STATE__CNT,
              +} rd_kafka_consumer_group_state_t;
              +struct rd_kafka_group_info {
              +  struct rd_kafka_metadata_broker broker;
              +  char *group;
              +  rd_kafka_resp_err_t err;
              +  char *state;
              +  char *protocol_type;
              +  char *protocol;
              +  struct rd_kafka_group_member_info *members;
              +  int member_cnt;
              +};
              +struct rd_kafka_group_list {
              +  struct rd_kafka_group_info *groups;
              +  int group_cnt;
              +};
              +rd_kafka_resp_err_t rd_kafka_list_groups(rd_kafka_t *rk, const char *group, const struct rd_kafka_group_list **grplistp, int timeout_ms);
              +const char *rd_kafka_consumer_group_state_name(rd_kafka_consumer_group_state_t state);
              +rd_kafka_consumer_group_state_t rd_kafka_consumer_group_state_code(const char *name);
              +void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist);
              +int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist);
              +void rd_kafka_set_logger(rd_kafka_t *rk, void (*func)(const rd_kafka_t *rk, int level, const char *fac, const char *buf));
              +void rd_kafka_set_log_level(rd_kafka_t *rk, int level);
              +void rd_kafka_log_print(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +void rd_kafka_log_syslog(const rd_kafka_t *rk, int level, const char *fac, const char *buf);
              +int rd_kafka_outq_len(rd_kafka_t *rk);
              +void rd_kafka_dump(FILE *fp, rd_kafka_t *rk);
              +int rd_kafka_thread_cnt(void);
              +typedef enum rd_kafka_thread_type_t {
              +  RD_KAFKA_THREAD_MAIN,
              +  RD_KAFKA_THREAD_BACKGROUND,
              +  RD_KAFKA_THREAD_BROKER,
              +} rd_kafka_thread_type_t;
              +int rd_kafka_wait_destroyed(int timeout_ms);
              +int rd_kafka_unittest(void);
              +rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk);
              +typedef int rd_kafka_event_type_t;
              +rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_name(const rd_kafka_event_t *rkev);
              +void rd_kafka_event_destroy(rd_kafka_event_t *rkev);
              +const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev);
              +size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev, const rd_kafka_message_t **rkmessages, size_t size);
              +size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev);
              +rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev);
              +const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev);
              +int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev);
              +void *rd_kafka_event_opaque(rd_kafka_event_t *rkev);
              +int rd_kafka_event_log(rd_kafka_event_t *rkev, const char **fac, const char **str, int *level);
              +int rd_kafka_event_debug_contexts(rd_kafka_event_t *rkev, char *dst, size_t dstsize);
              +const char *rd_kafka_event_stats(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_list_t *rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev);
              +rd_kafka_topic_partition_t *rd_kafka_event_topic_partition(rd_kafka_event_t *rkev);
              +typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreateAcls_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeAcls_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteAcls_result_t;
              +typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_IncrementalAlterConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteRecords_result_t;
              +typedef rd_kafka_event_t rd_kafka_ListConsumerGroups_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeConsumerGroups_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteGroups_result_t;
              +typedef rd_kafka_event_t rd_kafka_DeleteConsumerGroupOffsets_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterConsumerGroupOffsets_result_t;
              +typedef rd_kafka_event_t rd_kafka_ListConsumerGroupOffsets_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeTopics_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeCluster_result_t;
              +typedef rd_kafka_event_t rd_kafka_DescribeUserScramCredentials_result_t;
              +typedef rd_kafka_event_t rd_kafka_AlterUserScramCredentials_result_t;
              +typedef rd_kafka_event_t rd_kafka_ListOffsets_result_t;
              +const rd_kafka_CreateTopics_result_t *rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteTopics_result_t *rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreatePartitions_result_t *rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConfigs_result_t *rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_IncrementalAlterConfigs_result_t *rd_kafka_event_IncrementalAlterConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConfigs_result_t *rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteRecords_result_t *rd_kafka_event_DeleteRecords_result(rd_kafka_event_t *rkev);
              +const rd_kafka_ListConsumerGroups_result_t *rd_kafka_event_ListConsumerGroups_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeConsumerGroups_result_t *rd_kafka_event_DescribeConsumerGroups_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeTopics_result_t *rd_kafka_event_DescribeTopics_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeCluster_result_t *rd_kafka_event_DescribeCluster_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteGroups_result_t *rd_kafka_event_DeleteGroups_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteConsumerGroupOffsets_result_t *rd_kafka_event_DeleteConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
              +const rd_kafka_CreateAcls_result_t *rd_kafka_event_CreateAcls_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeAcls_result_t *rd_kafka_event_DescribeAcls_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DeleteAcls_result_t *rd_kafka_event_DeleteAcls_result(rd_kafka_event_t *rkev);
              +const rd_kafka_ListConsumerGroupOffsets_result_t *rd_kafka_event_ListConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterConsumerGroupOffsets_result_t *rd_kafka_event_AlterConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
              +const rd_kafka_ListOffsets_result_t *rd_kafka_event_ListOffsets_result(rd_kafka_event_t *rkev);
              +const rd_kafka_DescribeUserScramCredentials_result_t *rd_kafka_event_DescribeUserScramCredentials_result(rd_kafka_event_t *rkev);
              +const rd_kafka_AlterUserScramCredentials_result_t *rd_kafka_event_AlterUserScramCredentials_result(rd_kafka_event_t *rkev);
              +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms);
              +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms);
              +typedef rd_kafka_resp_err_t (rd_kafka_plugin_f_conf_init_t)(rd_kafka_conf_t *conf, void **plug_opaquep, char *errstr, size_t errstr_size);
              +typedef rd_kafka_conf_res_t (rd_kafka_interceptor_f_on_conf_set_t)(rd_kafka_conf_t *conf, const char *name, const char *val, char *errstr, size_t errstr_size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_dup_t)(rd_kafka_conf_t *new_conf, const rd_kafka_conf_t *old_conf, size_t filter_cnt, const char **filter, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_conf_destroy_t)(void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_new_t)(rd_kafka_t *rk, const rd_kafka_conf_t *conf, void *ic_opaque, char *errstr, size_t errstr_size);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_send_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_acknowledgement_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_consume_t)(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_commit_t)(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_request_sent_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_response_received_t)(rd_kafka_t *rk, int sockfd, const char *brokername, int32_t brokerid, int16_t ApiKey, int16_t ApiVersion, int32_t CorrId, size_t size, int64_t rtt, rd_kafka_resp_err_t err, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_start_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_thread_exit_t)(rd_kafka_t *rk, rd_kafka_thread_type_t thread_type, const char *thread_name, void *ic_opaque);
              +typedef rd_kafka_resp_err_t (rd_kafka_interceptor_f_on_broker_state_change_t)(rd_kafka_t *rk, int32_t broker_id, const char *secproto, const char *name, int port, const char *state, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_set_t *on_conf_set, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, const char *ic_name, rd_kafka_interceptor_f_on_new_t *on_new, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_destroy_t *on_destroy, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_send_t *on_send, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_consume_t *on_consume, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_commit_t *on_commit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_request_sent_t *on_request_sent, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_response_received(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_response_received_t *on_response_received, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_start_t *on_thread_start, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_broker_state_change(rd_kafka_t *rk, const char *ic_name, rd_kafka_interceptor_f_on_broker_state_change_t *on_broker_state_change, void *ic_opaque);
              +rd_kafka_resp_err_t rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres);
              +const char *rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres);
              +const rd_kafka_error_t *rd_kafka_group_result_error(const rd_kafka_group_result_t *groupres);
              +const char *rd_kafka_group_result_name(const rd_kafka_group_result_t *groupres);
              +const rd_kafka_topic_partition_list_t *rd_kafka_group_result_partitions(const rd_kafka_group_result_t *groupres);
              +typedef enum rd_kafka_admin_op_t {
              +  RD_KAFKA_ADMIN_OP_ANY = 0,
              +  RD_KAFKA_ADMIN_OP_CREATETOPICS,
              +  RD_KAFKA_ADMIN_OP_DELETETOPICS,
              +  RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONFIGS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS,
              +  RD_KAFKA_ADMIN_OP_DELETERECORDS,
              +  RD_KAFKA_ADMIN_OP_DELETEGROUPS,
              +  RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS,
              +  RD_KAFKA_ADMIN_OP_CREATEACLS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBEACLS,
              +  RD_KAFKA_ADMIN_OP_DELETEACLS,
              +  RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS,
              +  RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS,
              +  RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS,
              +  RD_KAFKA_ADMIN_OP_INCREMENTALALTERCONFIGS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBEUSERSCRAMCREDENTIALS,
              +  RD_KAFKA_ADMIN_OP_ALTERUSERSCRAMCREDENTIALS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBETOPICS,
              +  RD_KAFKA_ADMIN_OP_DESCRIBECLUSTER,
              +  RD_KAFKA_ADMIN_OP_LISTOFFSETS,
              +  RD_KAFKA_ADMIN_OP__CNT,
              +} rd_kafka_admin_op_t;
              +typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t;
              +typedef enum rd_kafka_IsolationLevel_t {
              +  RD_KAFKA_ISOLATION_LEVEL_READ_UNCOMMITTED = 0,
              +  RD_KAFKA_ISOLATION_LEVEL_READ_COMMITTED = 1,
              +} rd_kafka_IsolationLevel_t;
              +rd_kafka_AdminOptions_t *rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
              +void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options, int timeout_ms, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, int true_or_false, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, int32_t broker_id, char *errstr, size_t errstr_size);
              +rd_kafka_error_t *rd_kafka_AdminOptions_set_require_stable_offsets(rd_kafka_AdminOptions_t *options, int true_or_false);
              +rd_kafka_error_t *rd_kafka_AdminOptions_set_include_authorized_operations(rd_kafka_AdminOptions_t *options, int true_or_false);
              +rd_kafka_error_t *rd_kafka_AdminOptions_set_match_consumer_group_states(rd_kafka_AdminOptions_t *options, const rd_kafka_consumer_group_state_t *consumer_group_states, size_t consumer_group_states_cnt);
              +rd_kafka_error_t *rd_kafka_AdminOptions_set_isolation_level(rd_kafka_AdminOptions_t *options, rd_kafka_IsolationLevel_t value);
              +void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, void *ev_opaque);
              +typedef enum rd_kafka_AclOperation_t {
              +  RD_KAFKA_ACL_OPERATION_UNKNOWN = 0,
              +  RD_KAFKA_ACL_OPERATION_ANY = 1,
              +  RD_KAFKA_ACL_OPERATION_ALL = 2,
              +  RD_KAFKA_ACL_OPERATION_READ = 3,
              +  RD_KAFKA_ACL_OPERATION_WRITE = 4,
              +  RD_KAFKA_ACL_OPERATION_CREATE = 5,
              +  RD_KAFKA_ACL_OPERATION_DELETE = 6,
              +  RD_KAFKA_ACL_OPERATION_ALTER = 7,
              +  RD_KAFKA_ACL_OPERATION_DESCRIBE = 8,
              +  RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION = 9,
              +  RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS = 10,
              +  RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS = 11,
              +  RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE = 12,
              +  RD_KAFKA_ACL_OPERATION__CNT,
              +} rd_kafka_AclOperation_t;
              +typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t;
              +rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic, int num_partitions, int replication_factor, char *errstr, size_t errstr_size);
              +void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic);
              +void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic, int32_t partition, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic, const char *name, const char *value);
              +void rd_kafka_CreateTopics(rd_kafka_t *rk, rd_kafka_NewTopic_t **new_topics, size_t new_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(const rd_kafka_CreateTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
              +rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic);
              +void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic);
              +void rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt);
              +void rd_kafka_DeleteTopics(rd_kafka_t *rk, rd_kafka_DeleteTopic_t **del_topics, size_t del_topic_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(const rd_kafka_DeleteTopics_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t;
              +rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new(const char *topic, size_t new_total_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts);
              +void rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt);
              +rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment(rd_kafka_NewPartitions_t *new_parts, int32_t new_partition_idx, int32_t *broker_ids, size_t broker_id_cnt, char *errstr, size_t errstr_size);
              +void rd_kafka_CreatePartitions(rd_kafka_t *rk, rd_kafka_NewPartitions_t **new_parts, size_t new_parts_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_result_t **rd_kafka_CreatePartitions_result_topics(const rd_kafka_CreatePartitions_result_t *result, size_t *cntp);
              +typedef enum rd_kafka_ConfigSource_t {
              +  RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
              +  RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
              +  RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4,
              +  RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5,
              +  RD_KAFKA_CONFIG_SOURCE__CNT,
              +} rd_kafka_ConfigSource_t;
              +const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource);
              +typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t;
              +const char *rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry);
              +const char *rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry);
              +rd_kafka_ConfigSource_t rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry);
              +int rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, size_t *cntp);
              +typedef enum rd_kafka_ResourceType_t {
              +  RD_KAFKA_RESOURCE_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_ANY = 1,
              +  RD_KAFKA_RESOURCE_TOPIC = 2,
              +  RD_KAFKA_RESOURCE_GROUP = 3,
              +  RD_KAFKA_RESOURCE_BROKER = 4,
              +  RD_KAFKA_RESOURCE__CNT,
              +} rd_kafka_ResourceType_t;
              +typedef enum rd_kafka_ResourcePatternType_t {
              +  RD_KAFKA_RESOURCE_PATTERN_UNKNOWN = 0,
              +  RD_KAFKA_RESOURCE_PATTERN_ANY = 1,
              +  RD_KAFKA_RESOURCE_PATTERN_MATCH = 2,
              +  RD_KAFKA_RESOURCE_PATTERN_LITERAL = 3,
              +  RD_KAFKA_RESOURCE_PATTERN_PREFIXED = 4,
              +  RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT,
              +} rd_kafka_ResourcePatternType_t;
              +typedef enum rd_kafka_AlterConfigOpType_t {
              +  RD_KAFKA_ALTER_CONFIG_OP_TYPE_SET = 0,
              +  RD_KAFKA_ALTER_CONFIG_OP_TYPE_DELETE = 1,
              +  RD_KAFKA_ALTER_CONFIG_OP_TYPE_APPEND = 2,
              +  RD_KAFKA_ALTER_CONFIG_OP_TYPE_SUBTRACT = 3,
              +  RD_KAFKA_ALTER_CONFIG_OP_TYPE__CNT,
              +} rd_kafka_AlterConfigOpType_t;
              +const char *rd_kafka_ResourcePatternType_name(rd_kafka_ResourcePatternType_t resource_pattern_type);
              +const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype);
              +typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
              +rd_kafka_ConfigResource_t *rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype, const char *resname);
              +void rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config);
              +void rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config, size_t config_cnt);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, const char *name, const char *value);
              +rd_kafka_error_t *rd_kafka_ConfigResource_add_incremental_config(rd_kafka_ConfigResource_t *config, const char *name, rd_kafka_AlterConfigOpType_t op_type, const char *value);
              +const rd_kafka_ConfigEntry_t **rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config, size_t *cntp);
              +rd_kafka_ResourceType_t rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config);
              +rd_kafka_resp_err_t rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config);
              +const char *rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config);
              +void rd_kafka_AlterConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_AlterConfigs_result_resources(const rd_kafka_AlterConfigs_result_t *result, size_t *cntp);
              +void rd_kafka_IncrementalAlterConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_IncrementalAlterConfigs_result_resources(const rd_kafka_IncrementalAlterConfigs_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeConfigs(rd_kafka_t *rk, rd_kafka_ConfigResource_t **configs, size_t config_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConfigResource_t **rd_kafka_DescribeConfigs_result_resources(const rd_kafka_DescribeConfigs_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteRecords_s rd_kafka_DeleteRecords_t;
              +rd_kafka_DeleteRecords_t *rd_kafka_DeleteRecords_new(const rd_kafka_topic_partition_list_t *before_offsets);
              +void rd_kafka_DeleteRecords_destroy(rd_kafka_DeleteRecords_t *del_records);
              +void rd_kafka_DeleteRecords_destroy_array(rd_kafka_DeleteRecords_t **del_records, size_t del_record_cnt);
              +void rd_kafka_DeleteRecords(rd_kafka_t *rk, rd_kafka_DeleteRecords_t **del_records, size_t del_record_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_topic_partition_list_t *rd_kafka_DeleteRecords_result_offsets(const rd_kafka_DeleteRecords_result_t *result);
              +typedef struct rd_kafka_TopicCollection_s rd_kafka_TopicCollection_t;
              +typedef struct rd_kafka_TopicPartitionInfo_s rd_kafka_TopicPartitionInfo_t;
              +typedef struct rd_kafka_TopicDescription_s rd_kafka_TopicDescription_t;
              +rd_kafka_TopicCollection_t *rd_kafka_TopicCollection_of_topic_names(const char **topics, size_t topics_cnt);
              +void rd_kafka_TopicCollection_destroy(rd_kafka_TopicCollection_t *topics);
              +void rd_kafka_DescribeTopics(rd_kafka_t *rk, const rd_kafka_TopicCollection_t *topics, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_TopicDescription_t **rd_kafka_DescribeTopics_result_topics(const rd_kafka_DescribeTopics_result_t *result, size_t *cntp);
              +const rd_kafka_TopicPartitionInfo_t **rd_kafka_TopicDescription_partitions(const rd_kafka_TopicDescription_t *topicdesc, size_t *cntp);
              +const int rd_kafka_TopicPartitionInfo_partition(const rd_kafka_TopicPartitionInfo_t *partition);
              +const rd_kafka_Node_t *rd_kafka_TopicPartitionInfo_leader(const rd_kafka_TopicPartitionInfo_t *partition);
              +const rd_kafka_Node_t **rd_kafka_TopicPartitionInfo_isr(const rd_kafka_TopicPartitionInfo_t *partition, size_t *cntp);
              +const rd_kafka_Node_t **rd_kafka_TopicPartitionInfo_replicas(const rd_kafka_TopicPartitionInfo_t *partition, size_t *cntp);
              +const rd_kafka_AclOperation_t *rd_kafka_TopicDescription_authorized_operations(const rd_kafka_TopicDescription_t *topicdesc, size_t *cntp);
              +const char *rd_kafka_TopicDescription_name(const rd_kafka_TopicDescription_t *topicdesc);
              +const rd_kafka_Uuid_t *rd_kafka_TopicDescription_topic_id(const rd_kafka_TopicDescription_t *topicdesc);
              +int rd_kafka_TopicDescription_is_internal(const rd_kafka_TopicDescription_t *topicdesc);
              +const rd_kafka_error_t *rd_kafka_TopicDescription_error(const rd_kafka_TopicDescription_t *topicdesc);
              +void rd_kafka_DescribeCluster(rd_kafka_t *rk, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_Node_t **rd_kafka_DescribeCluster_result_nodes(const rd_kafka_DescribeCluster_result_t *result, size_t *cntp);
              +const rd_kafka_AclOperation_t *rd_kafka_DescribeCluster_result_authorized_operations(const rd_kafka_DescribeCluster_result_t *result, size_t *cntp);
              +const rd_kafka_Node_t *rd_kafka_DescribeCluster_result_controller(const rd_kafka_DescribeCluster_result_t *result);
              +const char *rd_kafka_DescribeCluster_result_cluster_id(const rd_kafka_DescribeCluster_result_t *result);
              +typedef struct rd_kafka_ConsumerGroupListing_s rd_kafka_ConsumerGroupListing_t;
              +typedef struct rd_kafka_ListConsumerGroupsResult_s rd_kafka_ListConsumerGroupsResult_t;
              +void rd_kafka_ListConsumerGroups(rd_kafka_t *rk, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const char *rd_kafka_ConsumerGroupListing_group_id(const rd_kafka_ConsumerGroupListing_t *grplist);
              +int rd_kafka_ConsumerGroupListing_is_simple_consumer_group(const rd_kafka_ConsumerGroupListing_t *grplist);
              +rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupListing_state(const rd_kafka_ConsumerGroupListing_t *grplist);
              +const rd_kafka_ConsumerGroupListing_t **rd_kafka_ListConsumerGroups_result_valid(const rd_kafka_ListConsumerGroups_result_t *result, size_t *cntp);
              +const rd_kafka_error_t **rd_kafka_ListConsumerGroups_result_errors(const rd_kafka_ListConsumerGroups_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_ConsumerGroupDescription_s rd_kafka_ConsumerGroupDescription_t;
              +typedef struct rd_kafka_MemberDescription_s rd_kafka_MemberDescription_t;
              +typedef struct rd_kafka_MemberAssignment_s rd_kafka_MemberAssignment_t;
              +void rd_kafka_DescribeConsumerGroups(rd_kafka_t *rk, const char **groups, size_t groups_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_ConsumerGroupDescription_t **rd_kafka_DescribeConsumerGroups_result_groups(const rd_kafka_DescribeConsumerGroups_result_t *result, size_t *cntp);
              +const char *rd_kafka_ConsumerGroupDescription_group_id(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +const rd_kafka_error_t *rd_kafka_ConsumerGroupDescription_error(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +int rd_kafka_ConsumerGroupDescription_is_simple_consumer_group(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +const char *rd_kafka_ConsumerGroupDescription_partition_assignor(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +const rd_kafka_AclOperation_t *rd_kafka_ConsumerGroupDescription_authorized_operations(const rd_kafka_ConsumerGroupDescription_t *grpdesc, size_t *cntp);
              +rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupDescription_state(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +const rd_kafka_Node_t *rd_kafka_ConsumerGroupDescription_coordinator(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +size_t rd_kafka_ConsumerGroupDescription_member_count(const rd_kafka_ConsumerGroupDescription_t *grpdesc);
              +const rd_kafka_MemberDescription_t *rd_kafka_ConsumerGroupDescription_member(const rd_kafka_ConsumerGroupDescription_t *grpdesc, size_t idx);
              +const char *rd_kafka_MemberDescription_client_id(const rd_kafka_MemberDescription_t *member);
              +const char *rd_kafka_MemberDescription_group_instance_id(const rd_kafka_MemberDescription_t *member);
              +const char *rd_kafka_MemberDescription_consumer_id(const rd_kafka_MemberDescription_t *member);
              +const char *rd_kafka_MemberDescription_host(const rd_kafka_MemberDescription_t *member);
              +const rd_kafka_MemberAssignment_t *rd_kafka_MemberDescription_assignment(const rd_kafka_MemberDescription_t *member);
              +const rd_kafka_topic_partition_list_t *rd_kafka_MemberAssignment_partitions(const rd_kafka_MemberAssignment_t *assignment);
              +typedef struct rd_kafka_DeleteGroup_s rd_kafka_DeleteGroup_t;
              +rd_kafka_DeleteGroup_t *rd_kafka_DeleteGroup_new(const char *group);
              +void rd_kafka_DeleteGroup_destroy(rd_kafka_DeleteGroup_t *del_group);
              +void rd_kafka_DeleteGroup_destroy_array(rd_kafka_DeleteGroup_t **del_groups, size_t del_group_cnt);
              +void rd_kafka_DeleteGroups(rd_kafka_t *rk, rd_kafka_DeleteGroup_t **del_groups, size_t del_group_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_DeleteGroups_result_groups(const rd_kafka_DeleteGroups_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_ListConsumerGroupOffsets_s rd_kafka_ListConsumerGroupOffsets_t;
              +rd_kafka_ListConsumerGroupOffsets_t *rd_kafka_ListConsumerGroupOffsets_new(const char *group_id, const rd_kafka_topic_partition_list_t *partitions);
              +void rd_kafka_ListConsumerGroupOffsets_destroy(rd_kafka_ListConsumerGroupOffsets_t *list_grpoffsets);
              +void rd_kafka_ListConsumerGroupOffsets_destroy_array(rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets, size_t list_grpoffset_cnt);
              +void rd_kafka_ListConsumerGroupOffsets(rd_kafka_t *rk, rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets, size_t list_grpoffsets_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_ListConsumerGroupOffsets_result_groups(const rd_kafka_ListConsumerGroupOffsets_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_AlterConsumerGroupOffsets_s rd_kafka_AlterConsumerGroupOffsets_t;
              +rd_kafka_AlterConsumerGroupOffsets_t *rd_kafka_AlterConsumerGroupOffsets_new(const char *group_id, const rd_kafka_topic_partition_list_t *partitions);
              +void rd_kafka_AlterConsumerGroupOffsets_destroy(rd_kafka_AlterConsumerGroupOffsets_t *alter_grpoffsets);
              +void rd_kafka_AlterConsumerGroupOffsets_destroy_array(rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets, size_t alter_grpoffset_cnt);
              +void rd_kafka_AlterConsumerGroupOffsets(rd_kafka_t *rk, rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets, size_t alter_grpoffsets_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_AlterConsumerGroupOffsets_result_groups(const rd_kafka_AlterConsumerGroupOffsets_result_t *result, size_t *cntp);
              +typedef struct rd_kafka_DeleteConsumerGroupOffsets_s rd_kafka_DeleteConsumerGroupOffsets_t;
              +rd_kafka_DeleteConsumerGroupOffsets_t *rd_kafka_DeleteConsumerGroupOffsets_new(const char *group, const rd_kafka_topic_partition_list_t *partitions);
              +void rd_kafka_DeleteConsumerGroupOffsets_destroy(rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets);
              +void rd_kafka_DeleteConsumerGroupOffsets_destroy_array(rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, size_t del_grpoffset_cnt);
              +void rd_kafka_DeleteConsumerGroupOffsets(rd_kafka_t *rk, rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets, size_t del_grpoffsets_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_group_result_t **rd_kafka_DeleteConsumerGroupOffsets_result_groups(const rd_kafka_DeleteConsumerGroupOffsets_result_t *result, size_t *cntp);
              +typedef enum rd_kafka_OffsetSpec_t {
              +  RD_KAFKA_OFFSET_SPEC_MAX_TIMESTAMP = (- 3),
              +  RD_KAFKA_OFFSET_SPEC_EARLIEST = (- 2),
              +  RD_KAFKA_OFFSET_SPEC_LATEST = (- 1),
              +} rd_kafka_OffsetSpec_t;
              +typedef struct rd_kafka_ListOffsetsResultInfo_s rd_kafka_ListOffsetsResultInfo_t;
              +const rd_kafka_topic_partition_t *rd_kafka_ListOffsetsResultInfo_topic_partition(const rd_kafka_ListOffsetsResultInfo_t *result_info);
              +int64_t rd_kafka_ListOffsetsResultInfo_timestamp(const rd_kafka_ListOffsetsResultInfo_t *result_info);
              +const rd_kafka_ListOffsetsResultInfo_t **rd_kafka_ListOffsets_result_infos(const rd_kafka_ListOffsets_result_t *result, size_t *cntp);
              +void rd_kafka_ListOffsets(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *topic_partitions, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +typedef enum rd_kafka_ScramMechanism_t {
              +  RD_KAFKA_SCRAM_MECHANISM_UNKNOWN = 0,
              +  RD_KAFKA_SCRAM_MECHANISM_SHA_256 = 1,
              +  RD_KAFKA_SCRAM_MECHANISM_SHA_512 = 2,
              +  RD_KAFKA_SCRAM_MECHANISM__CNT,
              +} rd_kafka_ScramMechanism_t;
              +typedef struct rd_kafka_ScramCredentialInfo_s rd_kafka_ScramCredentialInfo_t;
              +rd_kafka_ScramMechanism_t rd_kafka_ScramCredentialInfo_mechanism(const rd_kafka_ScramCredentialInfo_t *scram_credential_info);
              +int32_t rd_kafka_ScramCredentialInfo_iterations(const rd_kafka_ScramCredentialInfo_t *scram_credential_info);
              +typedef struct rd_kafka_UserScramCredentialsDescription_s rd_kafka_UserScramCredentialsDescription_t;
              +const char *rd_kafka_UserScramCredentialsDescription_user(const rd_kafka_UserScramCredentialsDescription_t *description);
              +const rd_kafka_error_t *rd_kafka_UserScramCredentialsDescription_error(const rd_kafka_UserScramCredentialsDescription_t *description);
              +size_t rd_kafka_UserScramCredentialsDescription_scramcredentialinfo_count(const rd_kafka_UserScramCredentialsDescription_t *description);
              +const rd_kafka_ScramCredentialInfo_t *rd_kafka_UserScramCredentialsDescription_scramcredentialinfo(const rd_kafka_UserScramCredentialsDescription_t *description, size_t idx);
              +const rd_kafka_UserScramCredentialsDescription_t **rd_kafka_DescribeUserScramCredentials_result_descriptions(const rd_kafka_DescribeUserScramCredentials_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeUserScramCredentials(rd_kafka_t *rk, const char **users, size_t user_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +typedef struct rd_kafka_UserScramCredentialAlteration_s rd_kafka_UserScramCredentialAlteration_t;
              +rd_kafka_UserScramCredentialAlteration_t *rd_kafka_UserScramCredentialUpsertion_new(const char *username, rd_kafka_ScramMechanism_t mechanism, int32_t iterations, const unsigned char *password, size_t password_size, const unsigned char *salt, size_t salt_size);
              +rd_kafka_UserScramCredentialAlteration_t *rd_kafka_UserScramCredentialDeletion_new(const char *username, rd_kafka_ScramMechanism_t mechanism);
              +void rd_kafka_UserScramCredentialAlteration_destroy(rd_kafka_UserScramCredentialAlteration_t *alteration);
              +void rd_kafka_UserScramCredentialAlteration_destroy_array(rd_kafka_UserScramCredentialAlteration_t **alterations, size_t alteration_cnt);
              +typedef struct rd_kafka_AlterUserScramCredentials_result_response_s rd_kafka_AlterUserScramCredentials_result_response_t;
              +const char *rd_kafka_AlterUserScramCredentials_result_response_user(const rd_kafka_AlterUserScramCredentials_result_response_t *response);
              +const rd_kafka_error_t *rd_kafka_AlterUserScramCredentials_result_response_error(const rd_kafka_AlterUserScramCredentials_result_response_t *response);
              +const rd_kafka_AlterUserScramCredentials_result_response_t **rd_kafka_AlterUserScramCredentials_result_responses(const rd_kafka_AlterUserScramCredentials_result_t *result, size_t *cntp);
              +void rd_kafka_AlterUserScramCredentials(rd_kafka_t *rk, rd_kafka_UserScramCredentialAlteration_t **alterations, size_t alteration_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +typedef struct rd_kafka_AclBinding_s rd_kafka_AclBinding_t;
              +typedef rd_kafka_AclBinding_t rd_kafka_AclBindingFilter_t;
              +const rd_kafka_error_t *rd_kafka_acl_result_error(const rd_kafka_acl_result_t *aclres);
              +const char *rd_kafka_AclOperation_name(rd_kafka_AclOperation_t acl_operation);
              +typedef enum rd_kafka_AclPermissionType_t {
              +  RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN = 0,
              +  RD_KAFKA_ACL_PERMISSION_TYPE_ANY = 1,
              +  RD_KAFKA_ACL_PERMISSION_TYPE_DENY = 2,
              +  RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW = 3,
              +  RD_KAFKA_ACL_PERMISSION_TYPE__CNT,
              +} rd_kafka_AclPermissionType_t;
              +const char *rd_kafka_AclPermissionType_name(rd_kafka_AclPermissionType_t acl_permission_type);
              +rd_kafka_AclBinding_t *rd_kafka_AclBinding_new(rd_kafka_ResourceType_t restype, const char *name, rd_kafka_ResourcePatternType_t resource_pattern_type, const char *principal, const char *host, rd_kafka_AclOperation_t operation, rd_kafka_AclPermissionType_t permission_type, char *errstr, size_t errstr_size);
              +rd_kafka_AclBindingFilter_t *rd_kafka_AclBindingFilter_new(rd_kafka_ResourceType_t restype, const char *name, rd_kafka_ResourcePatternType_t resource_pattern_type, const char *principal, const char *host, rd_kafka_AclOperation_t operation, rd_kafka_AclPermissionType_t permission_type, char *errstr, size_t errstr_size);
              +rd_kafka_ResourceType_t rd_kafka_AclBinding_restype(const rd_kafka_AclBinding_t *acl);
              +const char *rd_kafka_AclBinding_name(const rd_kafka_AclBinding_t *acl);
              +const char *rd_kafka_AclBinding_principal(const rd_kafka_AclBinding_t *acl);
              +const char *rd_kafka_AclBinding_host(const rd_kafka_AclBinding_t *acl);
              +rd_kafka_AclOperation_t rd_kafka_AclBinding_operation(const rd_kafka_AclBinding_t *acl);
              +rd_kafka_AclPermissionType_t rd_kafka_AclBinding_permission_type(const rd_kafka_AclBinding_t *acl);
              +rd_kafka_ResourcePatternType_t rd_kafka_AclBinding_resource_pattern_type(const rd_kafka_AclBinding_t *acl);
              +const rd_kafka_error_t *rd_kafka_AclBinding_error(const rd_kafka_AclBinding_t *acl);
              +void rd_kafka_AclBinding_destroy(rd_kafka_AclBinding_t *acl_binding);
              +void rd_kafka_AclBinding_destroy_array(rd_kafka_AclBinding_t **acl_bindings, size_t acl_bindings_cnt);
              +const rd_kafka_acl_result_t **rd_kafka_CreateAcls_result_acls(const rd_kafka_CreateAcls_result_t *result, size_t *cntp);
              +void rd_kafka_CreateAcls(rd_kafka_t *rk, rd_kafka_AclBinding_t **new_acls, size_t new_acls_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +const rd_kafka_AclBinding_t **rd_kafka_DescribeAcls_result_acls(const rd_kafka_DescribeAcls_result_t *result, size_t *cntp);
              +void rd_kafka_DescribeAcls(rd_kafka_t *rk, rd_kafka_AclBindingFilter_t *acl_filter, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +typedef struct rd_kafka_DeleteAcls_result_response_s rd_kafka_DeleteAcls_result_response_t;
              +const rd_kafka_DeleteAcls_result_response_t **rd_kafka_DeleteAcls_result_responses(const rd_kafka_DeleteAcls_result_t *result, size_t *cntp);
              +const rd_kafka_error_t *rd_kafka_DeleteAcls_result_response_error(const rd_kafka_DeleteAcls_result_response_t *result_response);
              +const rd_kafka_AclBinding_t **rd_kafka_DeleteAcls_result_response_matching_acls(const rd_kafka_DeleteAcls_result_response_t *result_response, size_t *matching_acls_cntp);
              +void rd_kafka_DeleteAcls(rd_kafka_t *rk, rd_kafka_AclBindingFilter_t **del_acls, size_t del_acls_cnt, const rd_kafka_AdminOptions_t *options, rd_kafka_queue_t *rkqu);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token(rd_kafka_t *rk, const char *token_value, int64_t md_lifetime_ms, const char *md_principal_name, const char **extensions, size_t extension_size, char *errstr, size_t errstr_size);
              +rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk, const char *errstr);
              +rd_kafka_error_t *rd_kafka_init_transactions(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk);
              +rd_kafka_error_t *rd_kafka_send_offsets_to_transaction(rd_kafka_t *rk, const rd_kafka_topic_partition_list_t *offsets, const rd_kafka_consumer_group_metadata_t *cgmetadata, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_commit_transaction(rd_kafka_t *rk, int timeout_ms);
              +rd_kafka_error_t *rd_kafka_abort_transaction(rd_kafka_t *rk, int timeout_ms);
              +typedef struct rd_kafka_mock_cluster_s rd_kafka_mock_cluster_t;
              +rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new(rd_kafka_t *rk, int broker_cnt);
              +void rd_kafka_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_t *rd_kafka_mock_cluster_handle(const rd_kafka_mock_cluster_t *mcluster);
              +rd_kafka_mock_cluster_t *rd_kafka_handle_mock_cluster(const rd_kafka_t *rk);
              +const char *rd_kafka_mock_cluster_bootstraps(const rd_kafka_mock_cluster_t *mcluster);
              +void rd_kafka_mock_clear_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey);
              +void rd_kafka_mock_push_request_errors(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, ...);
              +void rd_kafka_mock_push_request_errors_array(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, size_t cnt, const rd_kafka_resp_err_t *errors);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_push_request_error_rtts(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int16_t ApiKey, size_t cnt, ...);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_error_stack_cnt(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int16_t ApiKey, size_t *cntp);
              +void rd_kafka_mock_topic_set_error(rd_kafka_mock_cluster_t *mcluster, const char *topic, rd_kafka_resp_err_t err);
              +rd_kafka_resp_err_t rd_kafka_mock_topic_create(rd_kafka_mock_cluster_t *mcluster, const char *topic, int partition_cnt, int replication_factor);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_leader(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster, const char *topic, int32_t partition, int64_t lo, int64_t hi);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_down(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_up(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rtt(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, int rtt_ms);
              +rd_kafka_resp_err_t rd_kafka_mock_broker_set_rack(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id, const char *rack);
              +rd_kafka_resp_err_t rd_kafka_mock_coordinator_set(rd_kafka_mock_cluster_t *mcluster, const char *key_type, const char *key, int32_t broker_id);
              +rd_kafka_resp_err_t rd_kafka_mock_set_apiversion(rd_kafka_mock_cluster_t *mcluster, int16_t ApiKey, int16_t MinVersion, int16_t MaxVersion);
              +void rd_kafka_mock_start_request_tracking(rd_kafka_mock_cluster_t *mcluster);
              +void rd_kafka_mock_stop_request_tracking(rd_kafka_mock_cluster_t *mcluster);
              +typedef struct rd_kafka_mock_request_s rd_kafka_mock_request_t;
              +void rd_kafka_mock_request_destroy(rd_kafka_mock_request_t *mreq);
              +int32_t rd_kafka_mock_request_id(rd_kafka_mock_request_t *mreq);
              +int16_t rd_kafka_mock_request_api_key(rd_kafka_mock_request_t *mreq);
              +int64_t rd_kafka_mock_request_timestamp(rd_kafka_mock_request_t *mreq);
              +rd_kafka_mock_request_t **rd_kafka_mock_get_requests(rd_kafka_mock_cluster_t *mcluster, size_t *cntp);
              +void rd_kafka_mock_clear_requests(rd_kafka_mock_cluster_t *mcluster);
              +'
              +
              +rdkafka.h, rdkafka_mock.h
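The block above appears to be the verbatim C declaration set (cdef) that the client feeds to PHP's FFI layer, taken from rdkafka.h and rdkafka_mock.h. As a purely illustrative sketch, a small declaration can be bound against an installed librdkafka by hand; the library name and the chosen prototype are assumptions, not the client's actual bootstrap code:

<?php
// Illustrative only: bind a single rdkafka.h prototype (declared earlier in
// the full cdef) against the installed librdkafka via PHP's FFI extension.
$ffi = \FFI::cdef(
    'int rd_kafka_thread_cnt(void);',
    'librdkafka.so.1' // assumed library name; platform dependent
);

// Calls straight into librdkafka: number of threads currently in use.
var_dump($ffi->rd_kafka_thread_cnt());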

\ No newline at end of file
diff --git a/api/functions/index.html b/api/functions/index.html
new file mode 100644
index 00000000..e7862f61
--- /dev/null
+++ b/api/functions/index.html
@@ -0,0 +1,2993 @@
+Functions - PHP Kafka Client

              Functions

              +

              rd_kafka_err2name

              +
              \rd_kafka_err2name() ( 
              +    int $err
              + ): string
              +
              +
              +
              Parameters
              +
              err int Error code
              +
              Returns
              +
              string The error name as a string.
              +
              +

              rd_kafka_err2str

              +
              \rd_kafka_err2str() ( 
              +    int $err
              + ): string
              +
              +
              +
              Parameters
              +
              err int Error code
              +
              Returns
              +
string The error description as a string.
              +
              +
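Taken together, these two helpers turn a numeric librdkafka error code into log-friendly output. A small sketch; the RD_KAFKA_RESP_ERR__PARTITION_EOF constant is assumed to be exposed by the client (as in php-rdkafka), and any integer error code works just as well:

<?php
// Assumed constant; substitute any librdkafka error code (int).
$err = RD_KAFKA_RESP_ERR__PARTITION_EOF;

// Symbolic name of the error, e.g. "_PARTITION_EOF".
echo rd_kafka_err2name($err), PHP_EOL;

// Human-readable description, e.g. "Broker: No more messages".
echo rd_kafka_err2str($err), PHP_EOL;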

              rd_kafka_errno2err

              +
              \rd_kafka_errno2err() ( 
              +    int $errnox
              + ): int
              +
              +
              +
              Parameters
              +
              errnox int A system errno
              +
              Returns
              +
int A Kafka error code as an integer.
              +
              +
              +

              Deprecated

              +
              +

              rd_kafka_errno

              +
              \rd_kafka_errno() (  ): int
              +
              +
              +
              Returns
              +
              int The system errno as an integer.
              +
              +
              +

              Deprecated

              +
              +

              rd_kafka_offset_tail

              +
              \rd_kafka_offset_tail() ( 
              +    int $cnt
              + ): int
              +
              +
              +
              Parameters
              +
cnt int Number of messages to count back from the end of the partition
              +
              Returns
              +
int The special tail offset value as an integer.
              +
              +
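In practice the returned value is handed to a low-level consumer so it starts reading a fixed number of messages back from the end of a partition. A sketch under the assumption that $topic is a php-rdkafka-style \RdKafka\ConsumerTopic obtained elsewhere (hypothetical variable and API):

<?php
// Special offset meaning "the last 100 messages of the partition".
$offset = rd_kafka_offset_tail(100);

// Assumed low-level consumer API: start consuming partition 0 at that offset.
$topic->consumeStart(0, $offset);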

              rd_kafka_thread_cnt

              +
              \rd_kafka_thread_cnt() (  ): int
              +
              +
              +
              Returns
              +
int The current number of threads in use by librdkafka.
              +
              +

              rd_kafka_version

              +
              \rd_kafka_version() (  ): string
              +
              +
              +
              Returns
              +
              string The librdkafka version.
              +
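Both introspection helpers take no arguments, so a usage sketch is a one-liner:

<?php
// Report the bundled librdkafka version and its current thread usage.
printf(
    "librdkafka %s, %d thread(s) in use\n",
    rd_kafka_version(),
    rd_kafka_thread_cnt()
);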
\ No newline at end of file
diff --git a/api/index.html b/api/index.html
new file mode 100644
index 00000000..5e34022e
--- /dev/null
+++ b/api/index.html
@@ -0,0 +1,3369 @@
+Overview - PHP Kafka Client

              Overview

              +

Traits

+

Classes

+

Functions

+

Constants

+

              Test Coverage 💛

              +
                +
• 💛 Lines: 84.37% (1263 / 1497)
• ❤️ Classes: 44.64% (25 / 56)
• 🧡 Methods: 65.66% (195 / 297)
              + + + + + + + + + + \ No newline at end of file diff --git a/assets/images/favicon.png b/assets/images/favicon.png new file mode 100644 index 00000000..1cf13b9f Binary files /dev/null and b/assets/images/favicon.png differ diff --git a/assets/javascripts/bundle.e1c3ead8.min.js b/assets/javascripts/bundle.e1c3ead8.min.js new file mode 100644 index 00000000..bfc23392 --- /dev/null +++ b/assets/javascripts/bundle.e1c3ead8.min.js @@ -0,0 +1,29 @@ +"use strict";(()=>{var _i=Object.create;var br=Object.defineProperty;var Ai=Object.getOwnPropertyDescriptor;var Ci=Object.getOwnPropertyNames,Ft=Object.getOwnPropertySymbols,ki=Object.getPrototypeOf,vr=Object.prototype.hasOwnProperty,eo=Object.prototype.propertyIsEnumerable;var Zr=(e,t,r)=>t in e?br(e,t,{enumerable:!0,configurable:!0,writable:!0,value:r}):e[t]=r,F=(e,t)=>{for(var r in t||(t={}))vr.call(t,r)&&Zr(e,r,t[r]);if(Ft)for(var r of Ft(t))eo.call(t,r)&&Zr(e,r,t[r]);return e};var to=(e,t)=>{var r={};for(var o in e)vr.call(e,o)&&t.indexOf(o)<0&&(r[o]=e[o]);if(e!=null&&Ft)for(var o of Ft(e))t.indexOf(o)<0&&eo.call(e,o)&&(r[o]=e[o]);return r};var gr=(e,t)=>()=>(t||e((t={exports:{}}).exports,t),t.exports);var Hi=(e,t,r,o)=>{if(t&&typeof t=="object"||typeof t=="function")for(let n of Ci(t))!vr.call(e,n)&&n!==r&&br(e,n,{get:()=>t[n],enumerable:!(o=Ai(t,n))||o.enumerable});return e};var jt=(e,t,r)=>(r=e!=null?_i(ki(e)):{},Hi(t||!e||!e.__esModule?br(r,"default",{value:e,enumerable:!0}):r,e));var ro=(e,t,r)=>new Promise((o,n)=>{var i=c=>{try{a(r.next(c))}catch(p){n(p)}},s=c=>{try{a(r.throw(c))}catch(p){n(p)}},a=c=>c.done?o(c.value):Promise.resolve(c.value).then(i,s);a((r=r.apply(e,t)).next())});var no=gr((xr,oo)=>{(function(e,t){typeof xr=="object"&&typeof oo!="undefined"?t():typeof define=="function"&&define.amd?define(t):t()})(xr,function(){"use strict";function e(r){var o=!0,n=!1,i=null,s={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function a(C){return!!(C&&C!==document&&C.nodeName!=="HTML"&&C.nodeName!=="BODY"&&"classList"in C&&"contains"in C.classList)}function c(C){var ct=C.type,Ne=C.tagName;return!!(Ne==="INPUT"&&s[ct]&&!C.readOnly||Ne==="TEXTAREA"&&!C.readOnly||C.isContentEditable)}function p(C){C.classList.contains("focus-visible")||(C.classList.add("focus-visible"),C.setAttribute("data-focus-visible-added",""))}function l(C){C.hasAttribute("data-focus-visible-added")&&(C.classList.remove("focus-visible"),C.removeAttribute("data-focus-visible-added"))}function f(C){C.metaKey||C.altKey||C.ctrlKey||(a(r.activeElement)&&p(r.activeElement),o=!0)}function u(C){o=!1}function h(C){a(C.target)&&(o||c(C.target))&&p(C.target)}function w(C){a(C.target)&&(C.target.classList.contains("focus-visible")||C.target.hasAttribute("data-focus-visible-added"))&&(n=!0,window.clearTimeout(i),i=window.setTimeout(function(){n=!1},100),l(C.target))}function A(C){document.visibilityState==="hidden"&&(n&&(o=!0),Z())}function Z(){document.addEventListener("mousemove",J),document.addEventListener("mousedown",J),document.addEventListener("mouseup",J),document.addEventListener("pointermove",J),document.addEventListener("pointerdown",J),document.addEventListener("pointerup",J),document.addEventListener("touchmove",J),document.addEventListener("touchstart",J),document.addEventListener("touchend",J)}function 
te(){document.removeEventListener("mousemove",J),document.removeEventListener("mousedown",J),document.removeEventListener("mouseup",J),document.removeEventListener("pointermove",J),document.removeEventListener("pointerdown",J),document.removeEventListener("pointerup",J),document.removeEventListener("touchmove",J),document.removeEventListener("touchstart",J),document.removeEventListener("touchend",J)}function J(C){C.target.nodeName&&C.target.nodeName.toLowerCase()==="html"||(o=!1,te())}document.addEventListener("keydown",f,!0),document.addEventListener("mousedown",u,!0),document.addEventListener("pointerdown",u,!0),document.addEventListener("touchstart",u,!0),document.addEventListener("visibilitychange",A,!0),Z(),r.addEventListener("focus",h,!0),r.addEventListener("blur",w,!0),r.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&r.host?r.host.setAttribute("data-js-focus-visible",""):r.nodeType===Node.DOCUMENT_NODE&&(document.documentElement.classList.add("js-focus-visible"),document.documentElement.setAttribute("data-js-focus-visible",""))}if(typeof window!="undefined"&&typeof document!="undefined"){window.applyFocusVisiblePolyfill=e;var t;try{t=new CustomEvent("focus-visible-polyfill-ready")}catch(r){t=document.createEvent("CustomEvent"),t.initCustomEvent("focus-visible-polyfill-ready",!1,!1,{})}window.dispatchEvent(t)}typeof document!="undefined"&&e(document)})});var zr=gr((kt,Vr)=>{/*! + * clipboard.js v2.0.11 + * https://clipboardjs.com/ + * + * Licensed MIT © Zeno Rocha + */(function(t,r){typeof kt=="object"&&typeof Vr=="object"?Vr.exports=r():typeof define=="function"&&define.amd?define([],r):typeof kt=="object"?kt.ClipboardJS=r():t.ClipboardJS=r()})(kt,function(){return function(){var e={686:function(o,n,i){"use strict";i.d(n,{default:function(){return Li}});var s=i(279),a=i.n(s),c=i(370),p=i.n(c),l=i(817),f=i.n(l);function u(D){try{return document.execCommand(D)}catch(M){return!1}}var h=function(M){var O=f()(M);return u("cut"),O},w=h;function A(D){var M=document.documentElement.getAttribute("dir")==="rtl",O=document.createElement("textarea");O.style.fontSize="12pt",O.style.border="0",O.style.padding="0",O.style.margin="0",O.style.position="absolute",O.style[M?"right":"left"]="-9999px";var I=window.pageYOffset||document.documentElement.scrollTop;return O.style.top="".concat(I,"px"),O.setAttribute("readonly",""),O.value=D,O}var Z=function(M,O){var I=A(M);O.container.appendChild(I);var W=f()(I);return u("copy"),I.remove(),W},te=function(M){var O=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body},I="";return typeof M=="string"?I=Z(M,O):M instanceof HTMLInputElement&&!["text","search","url","tel","password"].includes(M==null?void 0:M.type)?I=Z(M.value,O):(I=f()(M),u("copy")),I},J=te;function C(D){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?C=function(O){return typeof O}:C=function(O){return O&&typeof Symbol=="function"&&O.constructor===Symbol&&O!==Symbol.prototype?"symbol":typeof O},C(D)}var ct=function(){var M=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},O=M.action,I=O===void 0?"copy":O,W=M.container,K=M.target,Ce=M.text;if(I!=="copy"&&I!=="cut")throw new Error('Invalid "action" value, use either "copy" or "cut"');if(K!==void 0)if(K&&C(K)==="object"&&K.nodeType===1){if(I==="copy"&&K.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. 
Please use "readonly" instead of "disabled" attribute');if(I==="cut"&&(K.hasAttribute("readonly")||K.hasAttribute("disabled")))throw new Error(`Invalid "target" attribute. You can't cut text from elements with "readonly" or "disabled" attributes`)}else throw new Error('Invalid "target" value, use a valid Element');if(Ce)return J(Ce,{container:W});if(K)return I==="cut"?w(K):J(K,{container:W})},Ne=ct;function Pe(D){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?Pe=function(O){return typeof O}:Pe=function(O){return O&&typeof Symbol=="function"&&O.constructor===Symbol&&O!==Symbol.prototype?"symbol":typeof O},Pe(D)}function xi(D,M){if(!(D instanceof M))throw new TypeError("Cannot call a class as a function")}function Xr(D,M){for(var O=0;O0&&arguments[0]!==void 0?arguments[0]:{};this.action=typeof W.action=="function"?W.action:this.defaultAction,this.target=typeof W.target=="function"?W.target:this.defaultTarget,this.text=typeof W.text=="function"?W.text:this.defaultText,this.container=Pe(W.container)==="object"?W.container:document.body}},{key:"listenClick",value:function(W){var K=this;this.listener=p()(W,"click",function(Ce){return K.onClick(Ce)})}},{key:"onClick",value:function(W){var K=W.delegateTarget||W.currentTarget,Ce=this.action(K)||"copy",It=Ne({action:Ce,container:this.container,target:this.target(K),text:this.text(K)});this.emit(It?"success":"error",{action:Ce,text:It,trigger:K,clearSelection:function(){K&&K.focus(),window.getSelection().removeAllRanges()}})}},{key:"defaultAction",value:function(W){return hr("action",W)}},{key:"defaultTarget",value:function(W){var K=hr("target",W);if(K)return document.querySelector(K)}},{key:"defaultText",value:function(W){return hr("text",W)}},{key:"destroy",value:function(){this.listener.destroy()}}],[{key:"copy",value:function(W){var K=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body};return J(W,K)}},{key:"cut",value:function(W){return w(W)}},{key:"isSupported",value:function(){var W=arguments.length>0&&arguments[0]!==void 0?arguments[0]:["copy","cut"],K=typeof W=="string"?[W]:W,Ce=!!document.queryCommandSupported;return K.forEach(function(It){Ce=Ce&&!!document.queryCommandSupported(It)}),Ce}}]),O}(a()),Li=Mi},828:function(o){var n=9;if(typeof Element!="undefined"&&!Element.prototype.matches){var i=Element.prototype;i.matches=i.matchesSelector||i.mozMatchesSelector||i.msMatchesSelector||i.oMatchesSelector||i.webkitMatchesSelector}function s(a,c){for(;a&&a.nodeType!==n;){if(typeof a.matches=="function"&&a.matches(c))return a;a=a.parentNode}}o.exports=s},438:function(o,n,i){var s=i(828);function a(l,f,u,h,w){var A=p.apply(this,arguments);return l.addEventListener(u,A,w),{destroy:function(){l.removeEventListener(u,A,w)}}}function c(l,f,u,h,w){return typeof l.addEventListener=="function"?a.apply(null,arguments):typeof u=="function"?a.bind(null,document).apply(null,arguments):(typeof l=="string"&&(l=document.querySelectorAll(l)),Array.prototype.map.call(l,function(A){return a(A,f,u,h,w)}))}function p(l,f,u,h){return function(w){w.delegateTarget=s(w.target,f),w.delegateTarget&&h.call(l,w)}}o.exports=c},879:function(o,n){n.node=function(i){return i!==void 0&&i instanceof HTMLElement&&i.nodeType===1},n.nodeList=function(i){var s=Object.prototype.toString.call(i);return i!==void 0&&(s==="[object NodeList]"||s==="[object HTMLCollection]")&&"length"in i&&(i.length===0||n.node(i[0]))},n.string=function(i){return typeof i=="string"||i instanceof String},n.fn=function(i){var 
s=Object.prototype.toString.call(i);return s==="[object Function]"}},370:function(o,n,i){var s=i(879),a=i(438);function c(u,h,w){if(!u&&!h&&!w)throw new Error("Missing required arguments");if(!s.string(h))throw new TypeError("Second argument must be a String");if(!s.fn(w))throw new TypeError("Third argument must be a Function");if(s.node(u))return p(u,h,w);if(s.nodeList(u))return l(u,h,w);if(s.string(u))return f(u,h,w);throw new TypeError("First argument must be a String, HTMLElement, HTMLCollection, or NodeList")}function p(u,h,w){return u.addEventListener(h,w),{destroy:function(){u.removeEventListener(h,w)}}}function l(u,h,w){return Array.prototype.forEach.call(u,function(A){A.addEventListener(h,w)}),{destroy:function(){Array.prototype.forEach.call(u,function(A){A.removeEventListener(h,w)})}}}function f(u,h,w){return a(document.body,u,h,w)}o.exports=c},817:function(o){function n(i){var s;if(i.nodeName==="SELECT")i.focus(),s=i.value;else if(i.nodeName==="INPUT"||i.nodeName==="TEXTAREA"){var a=i.hasAttribute("readonly");a||i.setAttribute("readonly",""),i.select(),i.setSelectionRange(0,i.value.length),a||i.removeAttribute("readonly"),s=i.value}else{i.hasAttribute("contenteditable")&&i.focus();var c=window.getSelection(),p=document.createRange();p.selectNodeContents(i),c.removeAllRanges(),c.addRange(p),s=c.toString()}return s}o.exports=n},279:function(o){function n(){}n.prototype={on:function(i,s,a){var c=this.e||(this.e={});return(c[i]||(c[i]=[])).push({fn:s,ctx:a}),this},once:function(i,s,a){var c=this;function p(){c.off(i,p),s.apply(a,arguments)}return p._=s,this.on(i,p,a)},emit:function(i){var s=[].slice.call(arguments,1),a=((this.e||(this.e={}))[i]||[]).slice(),c=0,p=a.length;for(c;c{"use strict";/*! + * escape-html + * Copyright(c) 2012-2013 TJ Holowaychuk + * Copyright(c) 2015 Andreas Lubbe + * Copyright(c) 2015 Tiancheng "Timothy" Gu + * MIT Licensed + */var Va=/["'&<>]/;qn.exports=za;function za(e){var t=""+e,r=Va.exec(t);if(!r)return t;var o,n="",i=0,s=0;for(i=r.index;i0&&i[i.length-1])&&(p[0]===6||p[0]===2)){r=0;continue}if(p[0]===3&&(!i||p[1]>i[0]&&p[1]=e.length&&(e=void 0),{value:e&&e[o++],done:!e}}};throw new TypeError(t?"Object is not iterable.":"Symbol.iterator is not defined.")}function V(e,t){var r=typeof Symbol=="function"&&e[Symbol.iterator];if(!r)return e;var o=r.call(e),n,i=[],s;try{for(;(t===void 0||t-- >0)&&!(n=o.next()).done;)i.push(n.value)}catch(a){s={error:a}}finally{try{n&&!n.done&&(r=o.return)&&r.call(o)}finally{if(s)throw s.error}}return i}function z(e,t,r){if(r||arguments.length===2)for(var o=0,n=t.length,i;o1||a(u,h)})})}function a(u,h){try{c(o[u](h))}catch(w){f(i[0][3],w)}}function c(u){u.value instanceof ot?Promise.resolve(u.value.v).then(p,l):f(i[0][2],u)}function p(u){a("next",u)}function l(u){a("throw",u)}function f(u,h){u(h),i.shift(),i.length&&a(i[0][0],i[0][1])}}function so(e){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not defined.");var t=e[Symbol.asyncIterator],r;return t?t.call(e):(e=typeof ue=="function"?ue(e):e[Symbol.iterator](),r={},o("next"),o("throw"),o("return"),r[Symbol.asyncIterator]=function(){return this},r);function o(i){r[i]=e[i]&&function(s){return new Promise(function(a,c){s=e[i](s),n(a,c,s.done,s.value)})}}function n(i,s,a,c){Promise.resolve(c).then(function(p){i({value:p,done:a})},s)}}function k(e){return typeof e=="function"}function pt(e){var t=function(o){Error.call(o),o.stack=new Error().stack},r=e(t);return r.prototype=Object.create(Error.prototype),r.prototype.constructor=r,r}var 
Wt=pt(function(e){return function(r){e(this),this.message=r?r.length+` errors occurred during unsubscription: +`+r.map(function(o,n){return n+1+") "+o.toString()}).join(` + `):"",this.name="UnsubscriptionError",this.errors=r}});function Ve(e,t){if(e){var r=e.indexOf(t);0<=r&&e.splice(r,1)}}var Ie=function(){function e(t){this.initialTeardown=t,this.closed=!1,this._parentage=null,this._finalizers=null}return e.prototype.unsubscribe=function(){var t,r,o,n,i;if(!this.closed){this.closed=!0;var s=this._parentage;if(s)if(this._parentage=null,Array.isArray(s))try{for(var a=ue(s),c=a.next();!c.done;c=a.next()){var p=c.value;p.remove(this)}}catch(A){t={error:A}}finally{try{c&&!c.done&&(r=a.return)&&r.call(a)}finally{if(t)throw t.error}}else s.remove(this);var l=this.initialTeardown;if(k(l))try{l()}catch(A){i=A instanceof Wt?A.errors:[A]}var f=this._finalizers;if(f){this._finalizers=null;try{for(var u=ue(f),h=u.next();!h.done;h=u.next()){var w=h.value;try{co(w)}catch(A){i=i!=null?i:[],A instanceof Wt?i=z(z([],V(i)),V(A.errors)):i.push(A)}}}catch(A){o={error:A}}finally{try{h&&!h.done&&(n=u.return)&&n.call(u)}finally{if(o)throw o.error}}}if(i)throw new Wt(i)}},e.prototype.add=function(t){var r;if(t&&t!==this)if(this.closed)co(t);else{if(t instanceof e){if(t.closed||t._hasParent(this))return;t._addParent(this)}(this._finalizers=(r=this._finalizers)!==null&&r!==void 0?r:[]).push(t)}},e.prototype._hasParent=function(t){var r=this._parentage;return r===t||Array.isArray(r)&&r.includes(t)},e.prototype._addParent=function(t){var r=this._parentage;this._parentage=Array.isArray(r)?(r.push(t),r):r?[r,t]:t},e.prototype._removeParent=function(t){var r=this._parentage;r===t?this._parentage=null:Array.isArray(r)&&Ve(r,t)},e.prototype.remove=function(t){var r=this._finalizers;r&&Ve(r,t),t instanceof e&&t._removeParent(this)},e.EMPTY=function(){var t=new e;return t.closed=!0,t}(),e}();var Er=Ie.EMPTY;function Dt(e){return e instanceof Ie||e&&"closed"in e&&k(e.remove)&&k(e.add)&&k(e.unsubscribe)}function co(e){k(e)?e():e.unsubscribe()}var ke={onUnhandledError:null,onStoppedNotification:null,Promise:void 0,useDeprecatedSynchronousErrorHandling:!1,useDeprecatedNextContext:!1};var lt={setTimeout:function(e,t){for(var r=[],o=2;o0},enumerable:!1,configurable:!0}),t.prototype._trySubscribe=function(r){return this._throwIfClosed(),e.prototype._trySubscribe.call(this,r)},t.prototype._subscribe=function(r){return this._throwIfClosed(),this._checkFinalizedStatuses(r),this._innerSubscribe(r)},t.prototype._innerSubscribe=function(r){var o=this,n=this,i=n.hasError,s=n.isStopped,a=n.observers;return i||s?Er:(this.currentObservers=null,a.push(r),new Ie(function(){o.currentObservers=null,Ve(a,r)}))},t.prototype._checkFinalizedStatuses=function(r){var o=this,n=o.hasError,i=o.thrownError,s=o.isStopped;n?r.error(i):s&&r.complete()},t.prototype.asObservable=function(){var r=new j;return r.source=this,r},t.create=function(r,o){return new vo(r,o)},t}(j);var vo=function(e){se(t,e);function t(r,o){var n=e.call(this)||this;return n.destination=r,n.source=o,n}return t.prototype.next=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.next)===null||n===void 0||n.call(o,r)},t.prototype.error=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.error)===null||n===void 0||n.call(o,r)},t.prototype.complete=function(){var r,o;(o=(r=this.destination)===null||r===void 0?void 0:r.complete)===null||o===void 0||o.call(r)},t.prototype._subscribe=function(r){var o,n;return(n=(o=this.source)===null||o===void 0?void 
0:o.subscribe(r))!==null&&n!==void 0?n:Er},t}(v);var St={now:function(){return(St.delegate||Date).now()},delegate:void 0};var Ot=function(e){se(t,e);function t(r,o,n){r===void 0&&(r=1/0),o===void 0&&(o=1/0),n===void 0&&(n=St);var i=e.call(this)||this;return i._bufferSize=r,i._windowTime=o,i._timestampProvider=n,i._buffer=[],i._infiniteTimeWindow=!0,i._infiniteTimeWindow=o===1/0,i._bufferSize=Math.max(1,r),i._windowTime=Math.max(1,o),i}return t.prototype.next=function(r){var o=this,n=o.isStopped,i=o._buffer,s=o._infiniteTimeWindow,a=o._timestampProvider,c=o._windowTime;n||(i.push(r),!s&&i.push(a.now()+c)),this._trimBuffer(),e.prototype.next.call(this,r)},t.prototype._subscribe=function(r){this._throwIfClosed(),this._trimBuffer();for(var o=this._innerSubscribe(r),n=this,i=n._infiniteTimeWindow,s=n._buffer,a=s.slice(),c=0;c0?e.prototype.requestAsyncId.call(this,r,o,n):(r.actions.push(this),r._scheduled||(r._scheduled=ut.requestAnimationFrame(function(){return r.flush(void 0)})))},t.prototype.recycleAsyncId=function(r,o,n){var i;if(n===void 0&&(n=0),n!=null?n>0:this.delay>0)return e.prototype.recycleAsyncId.call(this,r,o,n);var s=r.actions;o!=null&&((i=s[s.length-1])===null||i===void 0?void 0:i.id)!==o&&(ut.cancelAnimationFrame(o),r._scheduled=void 0)},t}(zt);var yo=function(e){se(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t.prototype.flush=function(r){this._active=!0;var o=this._scheduled;this._scheduled=void 0;var n=this.actions,i;r=r||n.shift();do if(i=r.execute(r.state,r.delay))break;while((r=n[0])&&r.id===o&&n.shift());if(this._active=!1,i){for(;(r=n[0])&&r.id===o&&n.shift();)r.unsubscribe();throw i}},t}(qt);var de=new yo(xo);var L=new j(function(e){return e.complete()});function Kt(e){return e&&k(e.schedule)}function _r(e){return e[e.length-1]}function Je(e){return k(_r(e))?e.pop():void 0}function Ae(e){return Kt(_r(e))?e.pop():void 0}function Qt(e,t){return typeof _r(e)=="number"?e.pop():t}var dt=function(e){return e&&typeof e.length=="number"&&typeof e!="function"};function Yt(e){return k(e==null?void 0:e.then)}function Bt(e){return k(e[ft])}function Gt(e){return Symbol.asyncIterator&&k(e==null?void 0:e[Symbol.asyncIterator])}function Jt(e){return new TypeError("You provided "+(e!==null&&typeof e=="object"?"an invalid object":"'"+e+"'")+" where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.")}function Di(){return typeof Symbol!="function"||!Symbol.iterator?"@@iterator":Symbol.iterator}var Xt=Di();function Zt(e){return k(e==null?void 0:e[Xt])}function er(e){return ao(this,arguments,function(){var r,o,n,i;return Ut(this,function(s){switch(s.label){case 0:r=e.getReader(),s.label=1;case 1:s.trys.push([1,,9,10]),s.label=2;case 2:return[4,ot(r.read())];case 3:return o=s.sent(),n=o.value,i=o.done,i?[4,ot(void 0)]:[3,5];case 4:return[2,s.sent()];case 5:return[4,ot(n)];case 6:return[4,s.sent()];case 7:return s.sent(),[3,2];case 8:return[3,10];case 9:return r.releaseLock(),[7];case 10:return[2]}})})}function tr(e){return k(e==null?void 0:e.getReader)}function N(e){if(e instanceof j)return e;if(e!=null){if(Bt(e))return Ni(e);if(dt(e))return Vi(e);if(Yt(e))return zi(e);if(Gt(e))return Eo(e);if(Zt(e))return qi(e);if(tr(e))return Ki(e)}throw Jt(e)}function Ni(e){return new j(function(t){var r=e[ft]();if(k(r.subscribe))return r.subscribe(t);throw new TypeError("Provided object does not correctly implement Symbol.observable")})}function Vi(e){return new j(function(t){for(var r=0;r=2;return function(o){return o.pipe(e?g(function(n,i){return e(n,i,o)}):ce,ye(1),r?Qe(t):jo(function(){return new or}))}}function $r(e){return e<=0?function(){return L}:x(function(t,r){var o=[];t.subscribe(S(r,function(n){o.push(n),e=2,!0))}function le(e){e===void 0&&(e={});var t=e.connector,r=t===void 0?function(){return new v}:t,o=e.resetOnError,n=o===void 0?!0:o,i=e.resetOnComplete,s=i===void 0?!0:i,a=e.resetOnRefCountZero,c=a===void 0?!0:a;return function(p){var l,f,u,h=0,w=!1,A=!1,Z=function(){f==null||f.unsubscribe(),f=void 0},te=function(){Z(),l=u=void 0,w=A=!1},J=function(){var C=l;te(),C==null||C.unsubscribe()};return x(function(C,ct){h++,!A&&!w&&Z();var Ne=u=u!=null?u:r();ct.add(function(){h--,h===0&&!A&&!w&&(f=Pr(J,c))}),Ne.subscribe(ct),!l&&h>0&&(l=new it({next:function(Pe){return Ne.next(Pe)},error:function(Pe){A=!0,Z(),f=Pr(te,n,Pe),Ne.error(Pe)},complete:function(){w=!0,Z(),f=Pr(te,s),Ne.complete()}}),N(C).subscribe(l))})(p)}}function Pr(e,t){for(var r=[],o=2;oe.next(document)),e}function R(e,t=document){return Array.from(t.querySelectorAll(e))}function P(e,t=document){let r=me(e,t);if(typeof r=="undefined")throw new ReferenceError(`Missing element: expected "${e}" to be present`);return r}function me(e,t=document){return t.querySelector(e)||void 0}function Re(){var e,t,r,o;return(o=(r=(t=(e=document.activeElement)==null?void 0:e.shadowRoot)==null?void 0:t.activeElement)!=null?r:document.activeElement)!=null?o:void 0}var la=T(d(document.body,"focusin"),d(document.body,"focusout")).pipe(be(1),q(void 0),m(()=>Re()||document.body),B(1));function vt(e){return la.pipe(m(t=>e.contains(t)),Y())}function Vo(e,t){return T(d(e,"mouseenter").pipe(m(()=>!0)),d(e,"mouseleave").pipe(m(()=>!1))).pipe(t?be(t):ce,q(!1))}function Ue(e){return{x:e.offsetLeft,y:e.offsetTop}}function zo(e){return T(d(window,"load"),d(window,"resize")).pipe(Me(0,de),m(()=>Ue(e)),q(Ue(e)))}function ir(e){return{x:e.scrollLeft,y:e.scrollTop}}function et(e){return T(d(e,"scroll"),d(window,"resize")).pipe(Me(0,de),m(()=>ir(e)),q(ir(e)))}function qo(e,t){if(typeof t=="string"||typeof t=="number")e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(let r of t)qo(e,r)}function E(e,t,...r){let o=document.createElement(e);if(t)for(let n of Object.keys(t))typeof t[n]!="undefined"&&(typeof 
t[n]!="boolean"?o.setAttribute(n,t[n]):o.setAttribute(n,""));for(let n of r)qo(o,n);return o}function ar(e){if(e>999){let t=+((e-950)%1e3>99);return`${((e+1e-6)/1e3).toFixed(t)}k`}else return e.toString()}function gt(e){let t=E("script",{src:e});return H(()=>(document.head.appendChild(t),T(d(t,"load"),d(t,"error").pipe(b(()=>Ar(()=>new ReferenceError(`Invalid script: ${e}`))))).pipe(m(()=>{}),_(()=>document.head.removeChild(t)),ye(1))))}var Ko=new v,ma=H(()=>typeof ResizeObserver=="undefined"?gt("https://unpkg.com/resize-observer-polyfill"):$(void 0)).pipe(m(()=>new ResizeObserver(e=>{for(let t of e)Ko.next(t)})),b(e=>T(qe,$(e)).pipe(_(()=>e.disconnect()))),B(1));function pe(e){return{width:e.offsetWidth,height:e.offsetHeight}}function Ee(e){return ma.pipe(y(t=>t.observe(e)),b(t=>Ko.pipe(g(({target:r})=>r===e),_(()=>t.unobserve(e)),m(()=>pe(e)))),q(pe(e)))}function xt(e){return{width:e.scrollWidth,height:e.scrollHeight}}function sr(e){let t=e.parentElement;for(;t&&(e.scrollWidth<=t.scrollWidth&&e.scrollHeight<=t.scrollHeight);)t=(e=t).parentElement;return t?e:void 0}var Qo=new v,fa=H(()=>$(new IntersectionObserver(e=>{for(let t of e)Qo.next(t)},{threshold:0}))).pipe(b(e=>T(qe,$(e)).pipe(_(()=>e.disconnect()))),B(1));function yt(e){return fa.pipe(y(t=>t.observe(e)),b(t=>Qo.pipe(g(({target:r})=>r===e),_(()=>t.unobserve(e)),m(({isIntersecting:r})=>r))))}function Yo(e,t=16){return et(e).pipe(m(({y:r})=>{let o=pe(e),n=xt(e);return r>=n.height-o.height-t}),Y())}var cr={drawer:P("[data-md-toggle=drawer]"),search:P("[data-md-toggle=search]")};function Bo(e){return cr[e].checked}function Be(e,t){cr[e].checked!==t&&cr[e].click()}function We(e){let t=cr[e];return d(t,"change").pipe(m(()=>t.checked),q(t.checked))}function ua(e,t){switch(e.constructor){case HTMLInputElement:return e.type==="radio"?/^Arrow/.test(t):!0;case HTMLSelectElement:case HTMLTextAreaElement:return!0;default:return e.isContentEditable}}function da(){return T(d(window,"compositionstart").pipe(m(()=>!0)),d(window,"compositionend").pipe(m(()=>!1))).pipe(q(!1))}function Go(){let e=d(window,"keydown").pipe(g(t=>!(t.metaKey||t.ctrlKey)),m(t=>({mode:Bo("search")?"search":"global",type:t.key,claim(){t.preventDefault(),t.stopPropagation()}})),g(({mode:t,type:r})=>{if(t==="global"){let o=Re();if(typeof o!="undefined")return!ua(o,r)}return!0}),le());return da().pipe(b(t=>t?L:e))}function ve(){return new URL(location.href)}function st(e,t=!1){if(G("navigation.instant")&&!t){let r=E("a",{href:e.href});document.body.appendChild(r),r.click(),r.remove()}else location.href=e.href}function Jo(){return new v}function Xo(){return location.hash.slice(1)}function Zo(e){let t=E("a",{href:e});t.addEventListener("click",r=>r.stopPropagation()),t.click()}function ha(e){return T(d(window,"hashchange"),e).pipe(m(Xo),q(Xo()),g(t=>t.length>0),B(1))}function en(e){return ha(e).pipe(m(t=>me(`[id="${t}"]`)),g(t=>typeof t!="undefined"))}function At(e){let t=matchMedia(e);return nr(r=>t.addListener(()=>r(t.matches))).pipe(q(t.matches))}function tn(){let e=matchMedia("print");return T(d(window,"beforeprint").pipe(m(()=>!0)),d(window,"afterprint").pipe(m(()=>!1))).pipe(q(e.matches))}function Ur(e,t){return e.pipe(b(r=>r?t():L))}function Wr(e,t){return new j(r=>{let o=new XMLHttpRequest;return o.open("GET",`${e}`),o.responseType="blob",o.addEventListener("load",()=>{o.status>=200&&o.status<300?(r.next(o.response),r.complete()):r.error(new Error(o.statusText))}),o.addEventListener("error",()=>{r.error(new Error("Network 
error"))}),o.addEventListener("abort",()=>{r.complete()}),typeof(t==null?void 0:t.progress$)!="undefined"&&(o.addEventListener("progress",n=>{var i;if(n.lengthComputable)t.progress$.next(n.loaded/n.total*100);else{let s=(i=o.getResponseHeader("Content-Length"))!=null?i:0;t.progress$.next(n.loaded/+s*100)}}),t.progress$.next(5)),o.send(),()=>o.abort()})}function De(e,t){return Wr(e,t).pipe(b(r=>r.text()),m(r=>JSON.parse(r)),B(1))}function rn(e,t){let r=new DOMParser;return Wr(e,t).pipe(b(o=>o.text()),m(o=>r.parseFromString(o,"text/html")),B(1))}function on(e,t){let r=new DOMParser;return Wr(e,t).pipe(b(o=>o.text()),m(o=>r.parseFromString(o,"text/xml")),B(1))}function nn(){return{x:Math.max(0,scrollX),y:Math.max(0,scrollY)}}function an(){return T(d(window,"scroll",{passive:!0}),d(window,"resize",{passive:!0})).pipe(m(nn),q(nn()))}function sn(){return{width:innerWidth,height:innerHeight}}function cn(){return d(window,"resize",{passive:!0}).pipe(m(sn),q(sn()))}function pn(){return Q([an(),cn()]).pipe(m(([e,t])=>({offset:e,size:t})),B(1))}function pr(e,{viewport$:t,header$:r}){let o=t.pipe(X("size")),n=Q([o,r]).pipe(m(()=>Ue(e)));return Q([r,t,n]).pipe(m(([{height:i},{offset:s,size:a},{x:c,y:p}])=>({offset:{x:s.x-c,y:s.y-p+i},size:a})))}function ba(e){return d(e,"message",t=>t.data)}function va(e){let t=new v;return t.subscribe(r=>e.postMessage(r)),t}function ln(e,t=new Worker(e)){let r=ba(t),o=va(t),n=new v;n.subscribe(o);let i=o.pipe(ee(),oe(!0));return n.pipe(ee(),$e(r.pipe(U(i))),le())}var ga=P("#__config"),Et=JSON.parse(ga.textContent);Et.base=`${new URL(Et.base,ve())}`;function we(){return Et}function G(e){return Et.features.includes(e)}function ge(e,t){return typeof t!="undefined"?Et.translations[e].replace("#",t.toString()):Et.translations[e]}function Te(e,t=document){return P(`[data-md-component=${e}]`,t)}function ne(e,t=document){return R(`[data-md-component=${e}]`,t)}function xa(e){let t=P(".md-typeset > :first-child",e);return d(t,"click",{once:!0}).pipe(m(()=>P(".md-typeset",e)),m(r=>({hash:__md_hash(r.innerHTML)})))}function mn(e){if(!G("announce.dismiss")||!e.childElementCount)return L;if(!e.hidden){let t=P(".md-typeset",e);__md_hash(t.innerHTML)===__md_get("__announce")&&(e.hidden=!0)}return H(()=>{let t=new v;return t.subscribe(({hash:r})=>{e.hidden=!0,__md_set("__announce",r)}),xa(e).pipe(y(r=>t.next(r)),_(()=>t.complete()),m(r=>F({ref:e},r)))})}function ya(e,{target$:t}){return t.pipe(m(r=>({hidden:r!==e})))}function fn(e,t){let r=new v;return r.subscribe(({hidden:o})=>{e.hidden=o}),ya(e,t).pipe(y(o=>r.next(o)),_(()=>r.complete()),m(o=>F({ref:e},o)))}function Ct(e,t){return t==="inline"?E("div",{class:"md-tooltip md-tooltip--inline",id:e,role:"tooltip"},E("div",{class:"md-tooltip__inner md-typeset"})):E("div",{class:"md-tooltip",id:e,role:"tooltip"},E("div",{class:"md-tooltip__inner md-typeset"}))}function un(e,t){if(t=t?`${t}_annotation_${e}`:void 0,t){let r=t?`#${t}`:void 0;return E("aside",{class:"md-annotation",tabIndex:0},Ct(t),E("a",{href:r,class:"md-annotation__index",tabIndex:-1},E("span",{"data-md-annotation-id":e})))}else return E("aside",{class:"md-annotation",tabIndex:0},Ct(t),E("span",{class:"md-annotation__index",tabIndex:-1},E("span",{"data-md-annotation-id":e})))}function dn(e){return E("button",{class:"md-clipboard md-icon",title:ge("clipboard.copy"),"data-clipboard-target":`#${e} > code`})}function Dr(e,t){let r=t&2,o=t&1,n=Object.keys(e.terms).filter(c=>!e.terms[c]).reduce((c,p)=>[...c,E("del",null,p)," "],[]).slice(0,-1),i=we(),s=new 
URL(e.location,i.base);G("search.highlight")&&s.searchParams.set("h",Object.entries(e.terms).filter(([,c])=>c).reduce((c,[p])=>`${c} ${p}`.trim(),""));let{tags:a}=we();return E("a",{href:`${s}`,class:"md-search-result__link",tabIndex:-1},E("article",{class:"md-search-result__article md-typeset","data-md-score":e.score.toFixed(2)},r>0&&E("div",{class:"md-search-result__icon md-icon"}),r>0&&E("h1",null,e.title),r<=0&&E("h2",null,e.title),o>0&&e.text.length>0&&e.text,e.tags&&e.tags.map(c=>{let p=a?c in a?`md-tag-icon md-tag--${a[c]}`:"md-tag-icon":"";return E("span",{class:`md-tag ${p}`},c)}),o>0&&n.length>0&&E("p",{class:"md-search-result__terms"},ge("search.result.term.missing"),": ",...n)))}function hn(e){let t=e[0].score,r=[...e],o=we(),n=r.findIndex(l=>!`${new URL(l.location,o.base)}`.includes("#")),[i]=r.splice(n,1),s=r.findIndex(l=>l.scoreDr(l,1)),...c.length?[E("details",{class:"md-search-result__more"},E("summary",{tabIndex:-1},E("div",null,c.length>0&&c.length===1?ge("search.result.more.one"):ge("search.result.more.other",c.length))),...c.map(l=>Dr(l,1)))]:[]];return E("li",{class:"md-search-result__item"},p)}function bn(e){return E("ul",{class:"md-source__facts"},Object.entries(e).map(([t,r])=>E("li",{class:`md-source__fact md-source__fact--${t}`},typeof r=="number"?ar(r):r)))}function Nr(e){let t=`tabbed-control tabbed-control--${e}`;return E("div",{class:t,hidden:!0},E("button",{class:"tabbed-button",tabIndex:-1,"aria-hidden":"true"}))}function vn(e){return E("div",{class:"md-typeset__scrollwrap"},E("div",{class:"md-typeset__table"},e))}function Ea(e){let t=we(),r=new URL(`../${e.version}/`,t.base);return E("li",{class:"md-version__item"},E("a",{href:`${r}`,class:"md-version__link"},e.title))}function gn(e,t){return E("div",{class:"md-version"},E("button",{class:"md-version__current","aria-label":ge("select.version")},t.title),E("ul",{class:"md-version__list"},e.map(Ea)))}var wa=0;function Ta(e,t){document.body.append(e);let{width:r}=pe(e);e.style.setProperty("--md-tooltip-width",`${r}px`),e.remove();let o=sr(t),n=typeof o!="undefined"?et(o):$({x:0,y:0}),i=T(vt(t),Vo(t)).pipe(Y());return Q([i,n]).pipe(m(([s,a])=>{let{x:c,y:p}=Ue(t),l=pe(t),f=t.closest("table");return f&&t.parentElement&&(c+=f.offsetLeft+t.parentElement.offsetLeft,p+=f.offsetTop+t.parentElement.offsetTop),{active:s,offset:{x:c-a.x+l.width/2-r/2,y:p-a.y+l.height+8}}}))}function Ge(e){let t=e.title;if(!t.length)return L;let r=`__tooltip_${wa++}`,o=Ct(r,"inline"),n=P(".md-typeset",o);return n.innerHTML=t,H(()=>{let i=new v;return 
i.subscribe({next({offset:s}){o.style.setProperty("--md-tooltip-x",`${s.x}px`),o.style.setProperty("--md-tooltip-y",`${s.y}px`)},complete(){o.style.removeProperty("--md-tooltip-x"),o.style.removeProperty("--md-tooltip-y")}}),T(i.pipe(g(({active:s})=>s)),i.pipe(be(250),g(({active:s})=>!s))).subscribe({next({active:s}){s?(e.insertAdjacentElement("afterend",o),e.setAttribute("aria-describedby",r),e.removeAttribute("title")):(o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t))},complete(){o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t)}}),i.pipe(Me(16,de)).subscribe(({active:s})=>{o.classList.toggle("md-tooltip--active",s)}),i.pipe(_t(125,de),g(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:s})=>s)).subscribe({next(s){s?o.style.setProperty("--md-tooltip-0",`${-s}px`):o.style.removeProperty("--md-tooltip-0")},complete(){o.style.removeProperty("--md-tooltip-0")}}),Ta(o,e).pipe(y(s=>i.next(s)),_(()=>i.complete()),m(s=>F({ref:e},s)))}).pipe(ze(ie))}function Sa(e,t){let r=H(()=>Q([zo(e),et(t)])).pipe(m(([{x:o,y:n},i])=>{let{width:s,height:a}=pe(e);return{x:o-i.x+s/2,y:n-i.y+a/2}}));return vt(e).pipe(b(o=>r.pipe(m(n=>({active:o,offset:n})),ye(+!o||1/0))))}function xn(e,t,{target$:r}){let[o,n]=Array.from(e.children);return H(()=>{let i=new v,s=i.pipe(ee(),oe(!0));return i.subscribe({next({offset:a}){e.style.setProperty("--md-tooltip-x",`${a.x}px`),e.style.setProperty("--md-tooltip-y",`${a.y}px`)},complete(){e.style.removeProperty("--md-tooltip-x"),e.style.removeProperty("--md-tooltip-y")}}),yt(e).pipe(U(s)).subscribe(a=>{e.toggleAttribute("data-md-visible",a)}),T(i.pipe(g(({active:a})=>a)),i.pipe(be(250),g(({active:a})=>!a))).subscribe({next({active:a}){a?e.prepend(o):o.remove()},complete(){e.prepend(o)}}),i.pipe(Me(16,de)).subscribe(({active:a})=>{o.classList.toggle("md-tooltip--active",a)}),i.pipe(_t(125,de),g(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:a})=>a)).subscribe({next(a){a?e.style.setProperty("--md-tooltip-0",`${-a}px`):e.style.removeProperty("--md-tooltip-0")},complete(){e.style.removeProperty("--md-tooltip-0")}}),d(n,"click").pipe(U(s),g(a=>!(a.metaKey||a.ctrlKey))).subscribe(a=>{a.stopPropagation(),a.preventDefault()}),d(n,"mousedown").pipe(U(s),ae(i)).subscribe(([a,{active:c}])=>{var p;if(a.button!==0||a.metaKey||a.ctrlKey)a.preventDefault();else if(c){a.preventDefault();let l=e.parentElement.closest(".md-annotation");l instanceof HTMLElement?l.focus():(p=Re())==null||p.blur()}}),r.pipe(U(s),g(a=>a===o),Ye(125)).subscribe(()=>e.focus()),Sa(e,t).pipe(y(a=>i.next(a)),_(()=>i.complete()),m(a=>F({ref:e},a)))})}function Oa(e){return e.tagName==="CODE"?R(".c, .c1, .cm",e):[e]}function Ma(e){let t=[];for(let r of Oa(e)){let o=[],n=document.createNodeIterator(r,NodeFilter.SHOW_TEXT);for(let i=n.nextNode();i;i=n.nextNode())o.push(i);for(let i of o){let s;for(;s=/(\(\d+\))(!)?/.exec(i.textContent);){let[,a,c]=s;if(typeof c=="undefined"){let p=i.splitText(s.index);i=p.splitText(a.length),t.push(p)}else{i.textContent=a,t.push(i);break}}}}return t}function yn(e,t){t.append(...Array.from(e.childNodes))}function lr(e,t,{target$:r,print$:o}){let n=t.closest("[id]"),i=n==null?void 0:n.id,s=new Map;for(let a of Ma(t)){let[,c]=a.textContent.match(/\((\d+)\)/);me(`:scope > li:nth-child(${c})`,e)&&(s.set(c,un(c,i)),a.replaceWith(s.get(c)))}return s.size===0?L:H(()=>{let a=new v,c=a.pipe(ee(),oe(!0)),p=[];for(let[l,f]of s)p.push([P(".md-typeset",f),P(`:scope > li:nth-child(${l})`,e)]);return 
o.pipe(U(c)).subscribe(l=>{e.hidden=!l,e.classList.toggle("md-annotation-list",l);for(let[f,u]of p)l?yn(f,u):yn(u,f)}),T(...[...s].map(([,l])=>xn(l,t,{target$:r}))).pipe(_(()=>a.complete()),le())})}function En(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return En(t)}}function wn(e,t){return H(()=>{let r=En(e);return typeof r!="undefined"?lr(r,e,t):L})}var Tn=jt(zr());var La=0;function Sn(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return Sn(t)}}function _a(e){return Ee(e).pipe(m(({width:t})=>({scrollable:xt(e).width>t})),X("scrollable"))}function On(e,t){let{matches:r}=matchMedia("(hover)"),o=H(()=>{let n=new v,i=n.pipe($r(1));n.subscribe(({scrollable:c})=>{c&&r?e.setAttribute("tabindex","0"):e.removeAttribute("tabindex")});let s=[];if(Tn.default.isSupported()&&(e.closest(".copy")||G("content.code.copy")&&!e.closest(".no-copy"))){let c=e.closest("pre");c.id=`__code_${La++}`;let p=dn(c.id);c.insertBefore(p,e),G("content.tooltips")&&s.push(Ge(p))}let a=e.closest(".highlight");if(a instanceof HTMLElement){let c=Sn(a);if(typeof c!="undefined"&&(a.classList.contains("annotate")||G("content.code.annotate"))){let p=lr(c,e,t);s.push(Ee(a).pipe(U(i),m(({width:l,height:f})=>l&&f),Y(),b(l=>l?p:L)))}}return _a(e).pipe(y(c=>n.next(c)),_(()=>n.complete()),m(c=>F({ref:e},c)),$e(...s))});return G("content.lazy")?yt(e).pipe(g(n=>n),ye(1),b(()=>o)):o}function Aa(e,{target$:t,print$:r}){let o=!0;return T(t.pipe(m(n=>n.closest("details:not([open])")),g(n=>e===n),m(()=>({action:"open",reveal:!0}))),r.pipe(g(n=>n||!o),y(()=>o=e.open),m(n=>({action:n?"open":"close"}))))}function Mn(e,t){return H(()=>{let r=new v;return r.subscribe(({action:o,reveal:n})=>{e.toggleAttribute("open",o==="open"),n&&e.scrollIntoView()}),Aa(e,t).pipe(y(o=>r.next(o)),_(()=>r.complete()),m(o=>F({ref:e},o)))})}var Ln=".node circle,.node ellipse,.node path,.node polygon,.node rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}marker{fill:var(--md-mermaid-edge-color)!important}.edgeLabel .label rect{fill:#0000}.label{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.label foreignObject{line-height:normal;overflow:visible}.label div .edgeLabel{color:var(--md-mermaid-label-fg-color)}.edgeLabel,.edgeLabel rect,.label div .edgeLabel{background-color:var(--md-mermaid-label-bg-color)}.edgeLabel,.edgeLabel rect{fill:var(--md-mermaid-label-bg-color);color:var(--md-mermaid-edge-color)}.edgePath .path,.flowchart-link{stroke:var(--md-mermaid-edge-color);stroke-width:.05rem}.edgePath .arrowheadPath{fill:var(--md-mermaid-edge-color);stroke:none}.cluster rect{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}.cluster span{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}g #flowchart-circleEnd,g #flowchart-circleStart,g #flowchart-crossEnd,g #flowchart-crossStart,g #flowchart-pointEnd,g #flowchart-pointStart{stroke:none}g.classGroup line,g.classGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.classGroup text{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.classLabel .box{fill:var(--md-mermaid-label-bg-color);background-color:var(--md-mermaid-label-bg-color);opacity:1}.classLabel .label{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.node 
.divider{stroke:var(--md-mermaid-node-fg-color)}.relation{stroke:var(--md-mermaid-edge-color)}.cardinality{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.cardinality text{fill:inherit!important}defs #classDiagram-compositionEnd,defs #classDiagram-compositionStart,defs #classDiagram-dependencyEnd,defs #classDiagram-dependencyStart,defs #classDiagram-extensionEnd,defs #classDiagram-extensionStart{fill:var(--md-mermaid-edge-color)!important;stroke:var(--md-mermaid-edge-color)!important}defs #classDiagram-aggregationEnd,defs #classDiagram-aggregationStart{fill:var(--md-mermaid-label-bg-color)!important;stroke:var(--md-mermaid-edge-color)!important}g.stateGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.stateGroup .state-title{fill:var(--md-mermaid-label-fg-color)!important;font-family:var(--md-mermaid-font-family)}g.stateGroup .composit{fill:var(--md-mermaid-label-bg-color)}.nodeLabel,.nodeLabel p{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.node circle.state-end,.node circle.state-start,.start-state{fill:var(--md-mermaid-edge-color);stroke:none}.end-state-inner,.end-state-outer{fill:var(--md-mermaid-edge-color)}.end-state-inner,.node circle.state-end{stroke:var(--md-mermaid-label-bg-color)}.transition{stroke:var(--md-mermaid-edge-color)}[id^=state-fork] rect,[id^=state-join] rect{fill:var(--md-mermaid-edge-color)!important;stroke:none!important}.statediagram-cluster.statediagram-cluster .inner{fill:var(--md-default-bg-color)}.statediagram-cluster rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.statediagram-state rect.divider{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}defs #statediagram-barbEnd{stroke:var(--md-mermaid-edge-color)}.attributeBoxEven,.attributeBoxOdd{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityBox{fill:var(--md-mermaid-label-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityLabel{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.relationshipLabelBox{fill:var(--md-mermaid-label-bg-color);fill-opacity:1;background-color:var(--md-mermaid-label-bg-color);opacity:1}.relationshipLabel{fill:var(--md-mermaid-label-fg-color)}.relationshipLine{stroke:var(--md-mermaid-edge-color)}defs #ONE_OR_MORE_END *,defs #ONE_OR_MORE_START *,defs #ONLY_ONE_END *,defs #ONLY_ONE_START *,defs #ZERO_OR_MORE_END *,defs #ZERO_OR_MORE_START *,defs #ZERO_OR_ONE_END *,defs #ZERO_OR_ONE_START *{stroke:var(--md-mermaid-edge-color)!important}defs #ZERO_OR_MORE_END circle,defs #ZERO_OR_MORE_START circle{fill:var(--md-mermaid-label-bg-color)}.actor{fill:var(--md-mermaid-sequence-actor-bg-color);stroke:var(--md-mermaid-sequence-actor-border-color)}text.actor>tspan{fill:var(--md-mermaid-sequence-actor-fg-color);font-family:var(--md-mermaid-font-family)}line{stroke:var(--md-mermaid-sequence-actor-line-color)}.actor-man circle,.actor-man 
line{fill:var(--md-mermaid-sequence-actorman-bg-color);stroke:var(--md-mermaid-sequence-actorman-line-color)}.messageLine0,.messageLine1{stroke:var(--md-mermaid-sequence-message-line-color)}.note{fill:var(--md-mermaid-sequence-note-bg-color);stroke:var(--md-mermaid-sequence-note-border-color)}.loopText,.loopText>tspan,.messageText,.noteText>tspan{stroke:none;font-family:var(--md-mermaid-font-family)!important}.messageText{fill:var(--md-mermaid-sequence-message-fg-color)}.loopText,.loopText>tspan{fill:var(--md-mermaid-sequence-loop-fg-color)}.noteText>tspan{fill:var(--md-mermaid-sequence-note-fg-color)}#arrowhead path{fill:var(--md-mermaid-sequence-message-line-color);stroke:none}.loopLine{fill:var(--md-mermaid-sequence-loop-bg-color);stroke:var(--md-mermaid-sequence-loop-border-color)}.labelBox{fill:var(--md-mermaid-sequence-label-bg-color);stroke:none}.labelText,.labelText>span{fill:var(--md-mermaid-sequence-label-fg-color);font-family:var(--md-mermaid-font-family)}.sequenceNumber{fill:var(--md-mermaid-sequence-number-fg-color)}rect.rect{fill:var(--md-mermaid-sequence-box-bg-color);stroke:none}rect.rect+text.text{fill:var(--md-mermaid-sequence-box-fg-color)}defs #sequencenumber{fill:var(--md-mermaid-sequence-number-bg-color)!important}";var qr,ka=0;function Ha(){return typeof mermaid=="undefined"||mermaid instanceof Element?gt("https://unpkg.com/mermaid@10.7.0/dist/mermaid.min.js"):$(void 0)}function _n(e){return e.classList.remove("mermaid"),qr||(qr=Ha().pipe(y(()=>mermaid.initialize({startOnLoad:!1,themeCSS:Ln,sequence:{actorFontSize:"16px",messageFontSize:"16px",noteFontSize:"16px"}})),m(()=>{}),B(1))),qr.subscribe(()=>ro(this,null,function*(){e.classList.add("mermaid");let t=`__mermaid_${ka++}`,r=E("div",{class:"mermaid"}),o=e.textContent,{svg:n,fn:i}=yield mermaid.render(t,o),s=r.attachShadow({mode:"closed"});s.innerHTML=n,e.replaceWith(r),i==null||i(s)})),qr.pipe(m(()=>({ref:e})))}var An=E("table");function Cn(e){return e.replaceWith(An),An.replaceWith(vn(e)),$({ref:e})}function $a(e){let t=e.find(r=>r.checked)||e[0];return T(...e.map(r=>d(r,"change").pipe(m(()=>P(`label[for="${r.id}"]`))))).pipe(q(P(`label[for="${t.id}"]`)),m(r=>({active:r})))}function kn(e,{viewport$:t,target$:r}){let o=P(".tabbed-labels",e),n=R(":scope > input",e),i=Nr("prev");e.append(i);let s=Nr("next");return e.append(s),H(()=>{let a=new v,c=a.pipe(ee(),oe(!0));Q([a,Ee(e)]).pipe(U(c),Me(1,de)).subscribe({next([{active:p},l]){let f=Ue(p),{width:u}=pe(p);e.style.setProperty("--md-indicator-x",`${f.x}px`),e.style.setProperty("--md-indicator-width",`${u}px`);let h=ir(o);(f.xh.x+l.width)&&o.scrollTo({left:Math.max(0,f.x-16),behavior:"smooth"})},complete(){e.style.removeProperty("--md-indicator-x"),e.style.removeProperty("--md-indicator-width")}}),Q([et(o),Ee(o)]).pipe(U(c)).subscribe(([p,l])=>{let f=xt(o);i.hidden=p.x<16,s.hidden=p.x>f.width-l.width-16}),T(d(i,"click").pipe(m(()=>-1)),d(s,"click").pipe(m(()=>1))).pipe(U(c)).subscribe(p=>{let{width:l}=pe(o);o.scrollBy({left:l*p,behavior:"smooth"})}),r.pipe(U(c),g(p=>n.includes(p))).subscribe(p=>p.click()),o.classList.add("tabbed-labels--linked");for(let p of n){let l=P(`label[for="${p.id}"]`);l.replaceChildren(E("a",{href:`#${l.htmlFor}`,tabIndex:-1},...Array.from(l.childNodes))),d(l.firstElementChild,"click").pipe(U(c),g(f=>!(f.metaKey||f.ctrlKey)),y(f=>{f.preventDefault(),f.stopPropagation()})).subscribe(()=>{history.replaceState({},"",`#${l.htmlFor}`),l.click()})}return G("content.tabs.link")&&a.pipe(Le(1),ae(t)).subscribe(([{active:p},{offset:l}])=>{let 
f=p.innerText.trim();if(p.hasAttribute("data-md-switching"))p.removeAttribute("data-md-switching");else{let u=e.offsetTop-l.y;for(let w of R("[data-tabs]"))for(let A of R(":scope > input",w)){let Z=P(`label[for="${A.id}"]`);if(Z!==p&&Z.innerText.trim()===f){Z.setAttribute("data-md-switching",""),A.click();break}}window.scrollTo({top:e.offsetTop-u});let h=__md_get("__tabs")||[];__md_set("__tabs",[...new Set([f,...h])])}}),a.pipe(U(c)).subscribe(()=>{for(let p of R("audio, video",e))p.pause()}),$a(n).pipe(y(p=>a.next(p)),_(()=>a.complete()),m(p=>F({ref:e},p)))}).pipe(ze(ie))}function Hn(e,{viewport$:t,target$:r,print$:o}){return T(...R(".annotate:not(.highlight)",e).map(n=>wn(n,{target$:r,print$:o})),...R("pre:not(.mermaid) > code",e).map(n=>On(n,{target$:r,print$:o})),...R("pre.mermaid",e).map(n=>_n(n)),...R("table:not([class])",e).map(n=>Cn(n)),...R("details",e).map(n=>Mn(n,{target$:r,print$:o})),...R("[data-tabs]",e).map(n=>kn(n,{viewport$:t,target$:r})),...R("[title]",e).filter(()=>G("content.tooltips")).map(n=>Ge(n)))}function Ra(e,{alert$:t}){return t.pipe(b(r=>T($(!0),$(!1).pipe(Ye(2e3))).pipe(m(o=>({message:r,active:o})))))}function $n(e,t){let r=P(".md-typeset",e);return H(()=>{let o=new v;return o.subscribe(({message:n,active:i})=>{e.classList.toggle("md-dialog--active",i),r.textContent=n}),Ra(e,t).pipe(y(n=>o.next(n)),_(()=>o.complete()),m(n=>F({ref:e},n)))})}function Pa({viewport$:e}){if(!G("header.autohide"))return $(!1);let t=e.pipe(m(({offset:{y:n}})=>n),Ke(2,1),m(([n,i])=>[nMath.abs(i-n.y)>100),m(([,[n]])=>n),Y()),o=We("search");return Q([e,o]).pipe(m(([{offset:n},i])=>n.y>400&&!i),Y(),b(n=>n?r:$(!1)),q(!1))}function Rn(e,t){return H(()=>Q([Ee(e),Pa(t)])).pipe(m(([{height:r},o])=>({height:r,hidden:o})),Y((r,o)=>r.height===o.height&&r.hidden===o.hidden),B(1))}function Pn(e,{header$:t,main$:r}){return H(()=>{let o=new v,n=o.pipe(ee(),oe(!0));o.pipe(X("active"),je(t)).subscribe(([{active:s},{hidden:a}])=>{e.classList.toggle("md-header--shadow",s&&!a),e.hidden=a});let i=fe(R("[title]",e)).pipe(g(()=>G("content.tooltips")),re(s=>Ge(s)));return r.subscribe(o),t.pipe(U(n),m(s=>F({ref:e},s)),$e(i.pipe(U(n))))})}function Ia(e,{viewport$:t,header$:r}){return pr(e,{viewport$:t,header$:r}).pipe(m(({offset:{y:o}})=>{let{height:n}=pe(e);return{active:o>=n}}),X("active"))}function In(e,t){return H(()=>{let r=new v;r.subscribe({next({active:n}){e.classList.toggle("md-header__title--active",n)},complete(){e.classList.remove("md-header__title--active")}});let o=me(".md-content h1");return typeof o=="undefined"?L:Ia(o,t).pipe(y(n=>r.next(n)),_(()=>r.complete()),m(n=>F({ref:e},n)))})}function Fn(e,{viewport$:t,header$:r}){let o=r.pipe(m(({height:i})=>i),Y()),n=o.pipe(b(()=>Ee(e).pipe(m(({height:i})=>({top:e.offsetTop,bottom:e.offsetTop+i})),X("bottom"))));return Q([o,n,t]).pipe(m(([i,{top:s,bottom:a},{offset:{y:c},size:{height:p}}])=>(p=Math.max(0,p-Math.max(0,s-c,i)-Math.max(0,p+c-a)),{offset:s-i,height:p,active:s-i<=c})),Y((i,s)=>i.offset===s.offset&&i.height===s.height&&i.active===s.active))}function Fa(e){let t=__md_get("__palette")||{index:e.findIndex(o=>matchMedia(o.getAttribute("data-md-color-media")).matches)},r=Math.max(0,Math.min(t.index,e.length-1));return $(...e).pipe(re(o=>d(o,"change").pipe(m(()=>o))),q(e[r]),m(o=>({index:e.indexOf(o),color:{media:o.getAttribute("data-md-color-media"),scheme:o.getAttribute("data-md-color-scheme"),primary:o.getAttribute("data-md-color-primary"),accent:o.getAttribute("data-md-color-accent")}})),B(1))}function jn(e){let 
t=R("input",e),r=E("meta",{name:"theme-color"});document.head.appendChild(r);let o=E("meta",{name:"color-scheme"});document.head.appendChild(o);let n=At("(prefers-color-scheme: light)");return H(()=>{let i=new v;return i.subscribe(s=>{if(document.body.setAttribute("data-md-color-switching",""),s.color.media==="(prefers-color-scheme)"){let a=matchMedia("(prefers-color-scheme: light)"),c=document.querySelector(a.matches?"[data-md-color-media='(prefers-color-scheme: light)']":"[data-md-color-media='(prefers-color-scheme: dark)']");s.color.scheme=c.getAttribute("data-md-color-scheme"),s.color.primary=c.getAttribute("data-md-color-primary"),s.color.accent=c.getAttribute("data-md-color-accent")}for(let[a,c]of Object.entries(s.color))document.body.setAttribute(`data-md-color-${a}`,c);for(let a=0;a{let s=Te("header"),a=window.getComputedStyle(s);return o.content=a.colorScheme,a.backgroundColor.match(/\d+/g).map(c=>(+c).toString(16).padStart(2,"0")).join("")})).subscribe(s=>r.content=`#${s}`),i.pipe(Oe(ie)).subscribe(()=>{document.body.removeAttribute("data-md-color-switching")}),Fa(t).pipe(U(n.pipe(Le(1))),at(),y(s=>i.next(s)),_(()=>i.complete()),m(s=>F({ref:e},s)))})}function Un(e,{progress$:t}){return H(()=>{let r=new v;return r.subscribe(({value:o})=>{e.style.setProperty("--md-progress-value",`${o}`)}),t.pipe(y(o=>r.next({value:o})),_(()=>r.complete()),m(o=>({ref:e,value:o})))})}var Kr=jt(zr());function ja(e){e.setAttribute("data-md-copying","");let t=e.closest("[data-copy]"),r=t?t.getAttribute("data-copy"):e.innerText;return e.removeAttribute("data-md-copying"),r.trimEnd()}function Wn({alert$:e}){Kr.default.isSupported()&&new j(t=>{new Kr.default("[data-clipboard-target], [data-clipboard-text]",{text:r=>r.getAttribute("data-clipboard-text")||ja(P(r.getAttribute("data-clipboard-target")))}).on("success",r=>t.next(r))}).pipe(y(t=>{t.trigger.focus()}),m(()=>ge("clipboard.copied"))).subscribe(e)}function Dn(e,t){return e.protocol=t.protocol,e.hostname=t.hostname,e}function Ua(e,t){let r=new Map;for(let o of R("url",e)){let n=P("loc",o),i=[Dn(new URL(n.textContent),t)];r.set(`${i[0]}`,i);for(let s of R("[rel=alternate]",o)){let a=s.getAttribute("href");a!=null&&i.push(Dn(new URL(a),t))}}return r}function mr(e){return on(new URL("sitemap.xml",e)).pipe(m(t=>Ua(t,new URL(e))),he(()=>$(new Map)))}function Wa(e,t){if(!(e.target instanceof Element))return L;let r=e.target.closest("a");if(r===null)return L;if(r.target||e.metaKey||e.ctrlKey)return L;let o=new URL(r.href);return o.search=o.hash="",t.has(`${o}`)?(e.preventDefault(),$(new URL(r.href))):L}function Nn(e){let t=new Map;for(let r of R(":scope > *",e.head))t.set(r.outerHTML,r);return t}function Vn(e){for(let t of R("[href], [src]",e))for(let r in["href","src"]){let o=t.getAttribute(r);/^(?:[a-z]+:)?\/\//i.test(o)||(t.href=t.href)}return $(e)}function Da(e){for(let o of["[data-md-component=announce]","[data-md-component=container]","[data-md-component=header-topic]","[data-md-component=outdated]","[data-md-component=logo]","[data-md-component=skip]",...G("navigation.tabs.sticky")?["[data-md-component=tabs]"]:[]]){let n=me(o),i=me(o,e);typeof n!="undefined"&&typeof i!="undefined"&&n.replaceWith(i)}let t=Nn(document);for(let[o,n]of Nn(e))t.has(o)?t.delete(o):document.head.appendChild(n);for(let o of t.values())o.remove();let r=Te("container");return Fe(R("script",r)).pipe(b(o=>{let n=e.createElement("script");if(o.src){for(let i of o.getAttributeNames())n.setAttribute(i,o.getAttribute(i));return o.replaceWith(n),new 
j(i=>{n.onload=()=>i.complete()})}else return n.textContent=o.textContent,o.replaceWith(n),L}),ee(),oe(e))}function zn({location$:e,viewport$:t,progress$:r}){let o=we();if(location.protocol==="file:")return L;let n=mr(o.base);$(document).subscribe(Vn);let i=d(document.body,"click").pipe(je(n),b(([c,p])=>Wa(c,p)),le()),s=d(window,"popstate").pipe(m(ve),le());i.pipe(ae(t)).subscribe(([c,{offset:p}])=>{history.replaceState(p,""),history.pushState(null,"",c)}),T(i,s).subscribe(e);let a=e.pipe(X("pathname"),b(c=>rn(c,{progress$:r}).pipe(he(()=>(st(c,!0),L)))),b(Vn),b(Da),le());return T(a.pipe(ae(e,(c,p)=>p)),e.pipe(X("pathname"),b(()=>e),X("hash")),e.pipe(Y((c,p)=>c.pathname===p.pathname&&c.hash===p.hash),b(()=>i),y(()=>history.back()))).subscribe(c=>{var p,l;history.state!==null||!c.hash?window.scrollTo(0,(l=(p=history.state)==null?void 0:p.y)!=null?l:0):(history.scrollRestoration="auto",Zo(c.hash),history.scrollRestoration="manual")}),e.subscribe(()=>{history.scrollRestoration="manual"}),d(window,"beforeunload").subscribe(()=>{history.scrollRestoration="auto"}),t.pipe(X("offset"),be(100)).subscribe(({offset:c})=>{history.replaceState(c,"")}),a}var Qn=jt(Kn());function Yn(e){let t=e.separator.split("|").map(n=>n.replace(/(\(\?[!=<][^)]+\))/g,"").length===0?"\uFFFD":n).join("|"),r=new RegExp(t,"img"),o=(n,i,s)=>`${i}${s}`;return n=>{n=n.replace(/[\s*+\-:~^]+/g," ").trim();let i=new RegExp(`(^|${e.separator}|)(${n.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(r,"|")})`,"img");return s=>(0,Qn.default)(s).replace(i,o).replace(/<\/mark>(\s+)]*>/img,"$1")}}function Ht(e){return e.type===1}function fr(e){return e.type===3}function Bn(e,t){let r=ln(e);return T($(location.protocol!=="file:"),We("search")).pipe(He(o=>o),b(()=>t)).subscribe(({config:o,docs:n})=>r.next({type:0,data:{config:o,docs:n,options:{suggest:G("search.suggest")}}})),r}function Gn({document$:e}){let t=we(),r=De(new URL("../versions.json",t.base)).pipe(he(()=>L)),o=r.pipe(m(n=>{let[,i]=t.base.match(/([^/]+)\/?$/);return n.find(({version:s,aliases:a})=>s===i||a.includes(i))||n[0]}));r.pipe(m(n=>new Map(n.map(i=>[`${new URL(`../${i.version}/`,t.base)}`,i]))),b(n=>d(document.body,"click").pipe(g(i=>!i.metaKey&&!i.ctrlKey),ae(o),b(([i,s])=>{if(i.target instanceof Element){let a=i.target.closest("a");if(a&&!a.target&&n.has(a.href)){let c=a.href;return!i.target.closest(".md-version")&&n.get(c)===s?L:(i.preventDefault(),$(c))}}return L}),b(i=>{let{version:s}=n.get(i);return mr(new URL(i)).pipe(m(a=>{let p=ve().href.replace(t.base,"");return a.has(p.split("#")[0])?new URL(`../${s}/${p}`,t.base):new URL(i)}))})))).subscribe(n=>st(n,!0)),Q([r,o]).subscribe(([n,i])=>{P(".md-header__topic").appendChild(gn(n,i))}),e.pipe(b(()=>o)).subscribe(n=>{var s;let i=__md_get("__outdated",sessionStorage);if(i===null){i=!0;let a=((s=t.version)==null?void 0:s.default)||"latest";Array.isArray(a)||(a=[a]);e:for(let c of a)for(let p of n.aliases.concat(n.version))if(new RegExp(c,"i").test(p)){i=!1;break e}__md_set("__outdated",i,sessionStorage)}if(i)for(let a of ne("outdated"))a.hidden=!1})}function Ka(e,{worker$:t}){let{searchParams:r}=ve();r.has("q")&&(Be("search",!0),e.value=r.get("q"),e.focus(),We("search").pipe(He(i=>!i)).subscribe(()=>{let i=ve();i.searchParams.delete("q"),history.replaceState({},"",`${i}`)}));let o=vt(e),n=T(t.pipe(He(Ht)),d(e,"keyup"),o).pipe(m(()=>e.value),Y());return Q([n,o]).pipe(m(([i,s])=>({value:i,focus:s})),B(1))}function Jn(e,{worker$:t}){let r=new 
v,o=r.pipe(ee(),oe(!0));Q([t.pipe(He(Ht)),r],(i,s)=>s).pipe(X("value")).subscribe(({value:i})=>t.next({type:2,data:i})),r.pipe(X("focus")).subscribe(({focus:i})=>{i&&Be("search",i)}),d(e.form,"reset").pipe(U(o)).subscribe(()=>e.focus());let n=P("header [for=__search]");return d(n,"click").subscribe(()=>e.focus()),Ka(e,{worker$:t}).pipe(y(i=>r.next(i)),_(()=>r.complete()),m(i=>F({ref:e},i)),B(1))}function Xn(e,{worker$:t,query$:r}){let o=new v,n=Yo(e.parentElement).pipe(g(Boolean)),i=e.parentElement,s=P(":scope > :first-child",e),a=P(":scope > :last-child",e);We("search").subscribe(l=>a.setAttribute("role",l?"list":"presentation")),o.pipe(ae(r),Ir(t.pipe(He(Ht)))).subscribe(([{items:l},{value:f}])=>{switch(l.length){case 0:s.textContent=f.length?ge("search.result.none"):ge("search.result.placeholder");break;case 1:s.textContent=ge("search.result.one");break;default:let u=ar(l.length);s.textContent=ge("search.result.other",u)}});let c=o.pipe(y(()=>a.innerHTML=""),b(({items:l})=>T($(...l.slice(0,10)),$(...l.slice(10)).pipe(Ke(4),jr(n),b(([f])=>f)))),m(hn),le());return c.subscribe(l=>a.appendChild(l)),c.pipe(re(l=>{let f=me("details",l);return typeof f=="undefined"?L:d(f,"toggle").pipe(U(o),m(()=>f))})).subscribe(l=>{l.open===!1&&l.offsetTop<=i.scrollTop&&i.scrollTo({top:l.offsetTop})}),t.pipe(g(fr),m(({data:l})=>l)).pipe(y(l=>o.next(l)),_(()=>o.complete()),m(l=>F({ref:e},l)))}function Qa(e,{query$:t}){return t.pipe(m(({value:r})=>{let o=ve();return o.hash="",r=r.replace(/\s+/g,"+").replace(/&/g,"%26").replace(/=/g,"%3D"),o.search=`q=${r}`,{url:o}}))}function Zn(e,t){let r=new v,o=r.pipe(ee(),oe(!0));return r.subscribe(({url:n})=>{e.setAttribute("data-clipboard-text",e.href),e.href=`${n}`}),d(e,"click").pipe(U(o)).subscribe(n=>n.preventDefault()),Qa(e,t).pipe(y(n=>r.next(n)),_(()=>r.complete()),m(n=>F({ref:e},n)))}function ei(e,{worker$:t,keyboard$:r}){let o=new v,n=Te("search-query"),i=T(d(n,"keydown"),d(n,"focus")).pipe(Oe(ie),m(()=>n.value),Y());return o.pipe(je(i),m(([{suggest:a},c])=>{let p=c.split(/([\s-]+)/);if(a!=null&&a.length&&p[p.length-1]){let l=a[a.length-1];l.startsWith(p[p.length-1])&&(p[p.length-1]=l)}else p.length=0;return p})).subscribe(a=>e.innerHTML=a.join("").replace(/\s/g," ")),r.pipe(g(({mode:a})=>a==="search")).subscribe(a=>{switch(a.type){case"ArrowRight":e.innerText.length&&n.selectionStart===n.value.length&&(n.value=e.innerText);break}}),t.pipe(g(fr),m(({data:a})=>a)).pipe(y(a=>o.next(a)),_(()=>o.complete()),m(()=>({ref:e})))}function ti(e,{index$:t,keyboard$:r}){let o=we();try{let n=Bn(o.search,t),i=Te("search-query",e),s=Te("search-result",e);d(e,"click").pipe(g(({target:c})=>c instanceof Element&&!!c.closest("a"))).subscribe(()=>Be("search",!1)),r.pipe(g(({mode:c})=>c==="search")).subscribe(c=>{let p=Re();switch(c.type){case"Enter":if(p===i){let l=new Map;for(let f of R(":first-child [href]",s)){let u=f.firstElementChild;l.set(f,parseFloat(u.getAttribute("data-md-score")))}if(l.size){let[[f]]=[...l].sort(([,u],[,h])=>h-u);f.click()}c.claim()}break;case"Escape":case"Tab":Be("search",!1),i.blur();break;case"ArrowUp":case"ArrowDown":if(typeof p=="undefined")i.focus();else{let l=[i,...R(":not(details) > [href], summary, details[open] [href]",s)],f=Math.max(0,(Math.max(0,l.indexOf(p))+l.length+(c.type==="ArrowUp"?-1:1))%l.length);l[f].focus()}c.claim();break;default:i!==Re()&&i.focus()}}),r.pipe(g(({mode:c})=>c==="global")).subscribe(c=>{switch(c.type){case"f":case"s":case"/":i.focus(),i.select(),c.claim();break}});let a=Jn(i,{worker$:n});return 
T(a,Xn(s,{worker$:n,query$:a})).pipe($e(...ne("search-share",e).map(c=>Zn(c,{query$:a})),...ne("search-suggest",e).map(c=>ei(c,{worker$:n,keyboard$:r}))))}catch(n){return e.hidden=!0,qe}}function ri(e,{index$:t,location$:r}){return Q([t,r.pipe(q(ve()),g(o=>!!o.searchParams.get("h")))]).pipe(m(([o,n])=>Yn(o.config)(n.searchParams.get("h"))),m(o=>{var s;let n=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let a=i.nextNode();a;a=i.nextNode())if((s=a.parentElement)!=null&&s.offsetHeight){let c=a.textContent,p=o(c);p.length>c.length&&n.set(a,p)}for(let[a,c]of n){let{childNodes:p}=E("span",null,c);a.replaceWith(...Array.from(p))}return{ref:e,nodes:n}}))}function Ya(e,{viewport$:t,main$:r}){let o=e.closest(".md-grid"),n=o.offsetTop-o.parentElement.offsetTop;return Q([r,t]).pipe(m(([{offset:i,height:s},{offset:{y:a}}])=>(s=s+Math.min(n,Math.max(0,a-i))-n,{height:s,locked:a>=i+n})),Y((i,s)=>i.height===s.height&&i.locked===s.locked))}function Qr(e,o){var n=o,{header$:t}=n,r=to(n,["header$"]);let i=P(".md-sidebar__scrollwrap",e),{y:s}=Ue(i);return H(()=>{let a=new v,c=a.pipe(ee(),oe(!0)),p=a.pipe(Me(0,de));return p.pipe(ae(t)).subscribe({next([{height:l},{height:f}]){i.style.height=`${l-2*s}px`,e.style.top=`${f}px`},complete(){i.style.height="",e.style.top=""}}),p.pipe(He()).subscribe(()=>{for(let l of R(".md-nav__link--active[href]",e)){if(!l.clientHeight)continue;let f=l.closest(".md-sidebar__scrollwrap");if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:h}=pe(f);f.scrollTo({top:u-h/2})}}}),fe(R("label[tabindex]",e)).pipe(re(l=>d(l,"click").pipe(Oe(ie),m(()=>l),U(c)))).subscribe(l=>{let f=P(`[id="${l.htmlFor}"]`);P(`[aria-labelledby="${l.id}"]`).setAttribute("aria-expanded",`${f.checked}`)}),Ya(e,r).pipe(y(l=>a.next(l)),_(()=>a.complete()),m(l=>F({ref:e},l)))})}function oi(e,t){if(typeof t!="undefined"){let r=`https://api.github.com/repos/${e}/${t}`;return Lt(De(`${r}/releases/latest`).pipe(he(()=>L),m(o=>({version:o.tag_name})),Qe({})),De(r).pipe(he(()=>L),m(o=>({stars:o.stargazers_count,forks:o.forks_count})),Qe({}))).pipe(m(([o,n])=>F(F({},o),n)))}else{let r=`https://api.github.com/users/${e}`;return De(r).pipe(m(o=>({repositories:o.public_repos})),Qe({}))}}function ni(e,t){let r=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return De(r).pipe(he(()=>L),m(({star_count:o,forks_count:n})=>({stars:o,forks:n})),Qe({}))}function ii(e){let t=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);if(t){let[,r,o]=t;return oi(r,o)}if(t=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i),t){let[,r,o]=t;return ni(r,o)}return L}var Ba;function Ga(e){return Ba||(Ba=H(()=>{let t=__md_get("__source",sessionStorage);if(t)return $(t);if(ne("consent").length){let o=__md_get("__consent");if(!(o&&o.github))return L}return ii(e.href).pipe(y(o=>__md_set("__source",o,sessionStorage)))}).pipe(he(()=>L),g(t=>Object.keys(t).length>0),m(t=>({facts:t})),B(1)))}function ai(e){let t=P(":scope > :last-child",e);return H(()=>{let r=new v;return r.subscribe(({facts:o})=>{t.appendChild(bn(o)),t.classList.add("md-source__repository--active")}),Ga(e).pipe(y(o=>r.next(o)),_(()=>r.complete()),m(o=>F({ref:e},o)))})}function Ja(e,{viewport$:t,header$:r}){return Ee(document.body).pipe(b(()=>pr(e,{header$:r,viewport$:t})),m(({offset:{y:o}})=>({hidden:o>=10})),X("hidden"))}function si(e,t){return H(()=>{let r=new v;return 
r.subscribe({next({hidden:o}){e.hidden=o},complete(){e.hidden=!1}}),(G("navigation.tabs.sticky")?$({hidden:!1}):Ja(e,t)).pipe(y(o=>r.next(o)),_(()=>r.complete()),m(o=>F({ref:e},o)))})}function Xa(e,{viewport$:t,header$:r}){let o=new Map,n=R("[href^=\\#]",e);for(let a of n){let c=decodeURIComponent(a.hash.substring(1)),p=me(`[id="${c}"]`);typeof p!="undefined"&&o.set(a,p)}let i=r.pipe(X("height"),m(({height:a})=>{let c=Te("main"),p=P(":scope > :first-child",c);return a+.8*(p.offsetTop-c.offsetTop)}),le());return Ee(document.body).pipe(X("height"),b(a=>H(()=>{let c=[];return $([...o].reduce((p,[l,f])=>{for(;c.length&&o.get(c[c.length-1]).tagName>=f.tagName;)c.pop();let u=f.offsetTop;for(;!u&&f.parentElement;)f=f.parentElement,u=f.offsetTop;let h=f.offsetParent;for(;h;h=h.offsetParent)u+=h.offsetTop;return p.set([...c=[...c,l]].reverse(),u)},new Map))}).pipe(m(c=>new Map([...c].sort(([,p],[,l])=>p-l))),je(i),b(([c,p])=>t.pipe(Rr(([l,f],{offset:{y:u},size:h})=>{let w=u+h.height>=Math.floor(a.height);for(;f.length;){let[,A]=f[0];if(A-p=u&&!w)f=[l.pop(),...f];else break}return[l,f]},[[],[...c]]),Y((l,f)=>l[0]===f[0]&&l[1]===f[1])))))).pipe(m(([a,c])=>({prev:a.map(([p])=>p),next:c.map(([p])=>p)})),q({prev:[],next:[]}),Ke(2,1),m(([a,c])=>a.prev.length{let i=new v,s=i.pipe(ee(),oe(!0));if(i.subscribe(({prev:a,next:c})=>{for(let[p]of c)p.classList.remove("md-nav__link--passed"),p.classList.remove("md-nav__link--active");for(let[p,[l]]of a.entries())l.classList.add("md-nav__link--passed"),l.classList.toggle("md-nav__link--active",p===a.length-1)}),G("toc.follow")){let a=T(t.pipe(be(1),m(()=>{})),t.pipe(be(250),m(()=>"smooth")));i.pipe(g(({prev:c})=>c.length>0),je(o.pipe(Oe(ie))),ae(a)).subscribe(([[{prev:c}],p])=>{let[l]=c[c.length-1];if(l.offsetHeight){let f=sr(l);if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:h}=pe(f);f.scrollTo({top:u-h/2,behavior:p})}}})}return G("navigation.tracking")&&t.pipe(U(s),X("offset"),be(250),Le(1),U(n.pipe(Le(1))),at({delay:250}),ae(i)).subscribe(([,{prev:a}])=>{let c=ve(),p=a[a.length-1];if(p&&p.length){let[l]=p,{hash:f}=new URL(l.href);c.hash!==f&&(c.hash=f,history.replaceState({},"",`${c}`))}else c.hash="",history.replaceState({},"",`${c}`)}),Xa(e,{viewport$:t,header$:r}).pipe(y(a=>i.next(a)),_(()=>i.complete()),m(a=>F({ref:e},a)))})}function Za(e,{viewport$:t,main$:r,target$:o}){let n=t.pipe(m(({offset:{y:s}})=>s),Ke(2,1),m(([s,a])=>s>a&&a>0),Y()),i=r.pipe(m(({active:s})=>s));return Q([i,n]).pipe(m(([s,a])=>!(s&&a)),Y(),U(o.pipe(Le(1))),oe(!0),at({delay:250}),m(s=>({hidden:s})))}function pi(e,{viewport$:t,header$:r,main$:o,target$:n}){let i=new v,s=i.pipe(ee(),oe(!0));return i.subscribe({next({hidden:a}){e.hidden=a,a?(e.setAttribute("tabindex","-1"),e.blur()):e.removeAttribute("tabindex")},complete(){e.style.top="",e.hidden=!0,e.removeAttribute("tabindex")}}),r.pipe(U(s),X("height")).subscribe(({height:a})=>{e.style.top=`${a+16}px`}),d(e,"click").subscribe(a=>{a.preventDefault(),window.scrollTo({top:0})}),Za(e,{viewport$:t,main$:o,target$:n}).pipe(y(a=>i.next(a)),_(()=>i.complete()),m(a=>F({ref:e},a)))}function li({document$:e}){e.pipe(b(()=>R(".md-ellipsis")),re(t=>yt(t).pipe(U(e.pipe(Le(1))),g(r=>r),m(()=>t),ye(1))),g(t=>t.offsetWidth{let r=t.innerText,o=t.closest("a")||t;return o.title=r,Ge(o).pipe(U(e.pipe(Le(1))),_(()=>o.removeAttribute("title")))})).subscribe(),e.pipe(b(()=>R(".md-status")),re(t=>Ge(t))).subscribe()}function 
mi({document$:e,tablet$:t}){e.pipe(b(()=>R(".md-toggle--indeterminate")),y(r=>{r.indeterminate=!0,r.checked=!1}),re(r=>d(r,"change").pipe(Fr(()=>r.classList.contains("md-toggle--indeterminate")),m(()=>r))),ae(t)).subscribe(([r,o])=>{r.classList.remove("md-toggle--indeterminate"),o&&(r.checked=!1)})}function es(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function fi({document$:e}){e.pipe(b(()=>R("[data-md-scrollfix]")),y(t=>t.removeAttribute("data-md-scrollfix")),g(es),re(t=>d(t,"touchstart").pipe(m(()=>t)))).subscribe(t=>{let r=t.scrollTop;r===0?t.scrollTop=1:r+t.offsetHeight===t.scrollHeight&&(t.scrollTop=r-1)})}function ui({viewport$:e,tablet$:t}){Q([We("search"),t]).pipe(m(([r,o])=>r&&!o),b(r=>$(r).pipe(Ye(r?400:100))),ae(e)).subscribe(([r,{offset:{y:o}}])=>{if(r)document.body.setAttribute("data-md-scrolllock",""),document.body.style.top=`-${o}px`;else{let n=-1*parseInt(document.body.style.top,10);document.body.removeAttribute("data-md-scrolllock"),document.body.style.top="",n&&window.scrollTo(0,n)}})}Object.entries||(Object.entries=function(e){let t=[];for(let r of Object.keys(e))t.push([r,e[r]]);return t});Object.values||(Object.values=function(e){let t=[];for(let r of Object.keys(e))t.push(e[r]);return t});typeof Element!="undefined"&&(Element.prototype.scrollTo||(Element.prototype.scrollTo=function(e,t){typeof e=="object"?(this.scrollLeft=e.left,this.scrollTop=e.top):(this.scrollLeft=e,this.scrollTop=t)}),Element.prototype.replaceWith||(Element.prototype.replaceWith=function(...e){let t=this.parentNode;if(t){e.length===0&&t.removeChild(this);for(let r=e.length-1;r>=0;r--){let o=e[r];typeof o=="string"?o=document.createTextNode(o):o.parentNode&&o.parentNode.removeChild(o),r?t.insertBefore(this.previousSibling,o):t.replaceChild(o,this)}}}));function ts(){return location.protocol==="file:"?gt(`${new URL("search/search_index.js",Yr.base)}`).pipe(m(()=>__index),B(1)):De(new URL("search/search_index.json",Yr.base))}document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var rt=No(),Rt=Jo(),wt=en(Rt),Br=Go(),_e=pn(),ur=At("(min-width: 960px)"),hi=At("(min-width: 1220px)"),bi=tn(),Yr=we(),vi=document.forms.namedItem("search")?ts():qe,Gr=new v;Wn({alert$:Gr});var Jr=new v;G("navigation.instant")&&zn({location$:Rt,viewport$:_e,progress$:Jr}).subscribe(rt);var di;((di=Yr.version)==null?void 0:di.provider)==="mike"&&Gn({document$:rt});T(Rt,wt).pipe(Ye(125)).subscribe(()=>{Be("drawer",!1),Be("search",!1)});Br.pipe(g(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=me("link[rel=prev]");typeof t!="undefined"&&st(t);break;case"n":case".":let r=me("link[rel=next]");typeof r!="undefined"&&st(r);break;case"Enter":let o=Re();o instanceof HTMLLabelElement&&o.click()}});li({document$:rt});mi({document$:rt,tablet$:ur});fi({document$:rt});ui({viewport$:_e,tablet$:ur});var 
tt=Rn(Te("header"),{viewport$:_e}),$t=rt.pipe(m(()=>Te("main")),b(e=>Fn(e,{viewport$:_e,header$:tt})),B(1)),rs=T(...ne("consent").map(e=>fn(e,{target$:wt})),...ne("dialog").map(e=>$n(e,{alert$:Gr})),...ne("header").map(e=>Pn(e,{viewport$:_e,header$:tt,main$:$t})),...ne("palette").map(e=>jn(e)),...ne("progress").map(e=>Un(e,{progress$:Jr})),...ne("search").map(e=>ti(e,{index$:vi,keyboard$:Br})),...ne("source").map(e=>ai(e))),os=H(()=>T(...ne("announce").map(e=>mn(e)),...ne("content").map(e=>Hn(e,{viewport$:_e,target$:wt,print$:bi})),...ne("content").map(e=>G("search.highlight")?ri(e,{index$:vi,location$:Rt}):L),...ne("header-title").map(e=>In(e,{viewport$:_e,header$:tt})),...ne("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?Ur(hi,()=>Qr(e,{viewport$:_e,header$:tt,main$:$t})):Ur(ur,()=>Qr(e,{viewport$:_e,header$:tt,main$:$t}))),...ne("tabs").map(e=>si(e,{viewport$:_e,header$:tt})),...ne("toc").map(e=>ci(e,{viewport$:_e,header$:tt,main$:$t,target$:wt})),...ne("top").map(e=>pi(e,{viewport$:_e,header$:tt,main$:$t,target$:wt})))),gi=rt.pipe(b(()=>os),$e(rs),B(1));gi.subscribe();window.document$=rt;window.location$=Rt;window.target$=wt;window.keyboard$=Br;window.viewport$=_e;window.tablet$=ur;window.screen$=hi;window.print$=bi;window.alert$=Gr;window.progress$=Jr;window.component$=gi;})(); +//# sourceMappingURL=bundle.e1c3ead8.min.js.map + diff --git a/assets/javascripts/bundle.e1c3ead8.min.js.map b/assets/javascripts/bundle.e1c3ead8.min.js.map new file mode 100644 index 00000000..5449148c --- /dev/null +++ b/assets/javascripts/bundle.e1c3ead8.min.js.map @@ -0,0 +1,7 @@ +{ + "version": 3, + "sources": ["node_modules/focus-visible/dist/focus-visible.js", "node_modules/clipboard/dist/clipboard.js", "node_modules/escape-html/index.js", "src/templates/assets/javascripts/bundle.ts", "node_modules/rxjs/node_modules/tslib/tslib.es6.js", "node_modules/rxjs/src/internal/util/isFunction.ts", "node_modules/rxjs/src/internal/util/createErrorClass.ts", "node_modules/rxjs/src/internal/util/UnsubscriptionError.ts", "node_modules/rxjs/src/internal/util/arrRemove.ts", "node_modules/rxjs/src/internal/Subscription.ts", "node_modules/rxjs/src/internal/config.ts", "node_modules/rxjs/src/internal/scheduler/timeoutProvider.ts", "node_modules/rxjs/src/internal/util/reportUnhandledError.ts", "node_modules/rxjs/src/internal/util/noop.ts", "node_modules/rxjs/src/internal/NotificationFactories.ts", "node_modules/rxjs/src/internal/util/errorContext.ts", "node_modules/rxjs/src/internal/Subscriber.ts", "node_modules/rxjs/src/internal/symbol/observable.ts", "node_modules/rxjs/src/internal/util/identity.ts", "node_modules/rxjs/src/internal/util/pipe.ts", "node_modules/rxjs/src/internal/Observable.ts", "node_modules/rxjs/src/internal/util/lift.ts", "node_modules/rxjs/src/internal/operators/OperatorSubscriber.ts", "node_modules/rxjs/src/internal/scheduler/animationFrameProvider.ts", "node_modules/rxjs/src/internal/util/ObjectUnsubscribedError.ts", "node_modules/rxjs/src/internal/Subject.ts", "node_modules/rxjs/src/internal/scheduler/dateTimestampProvider.ts", "node_modules/rxjs/src/internal/ReplaySubject.ts", "node_modules/rxjs/src/internal/scheduler/Action.ts", "node_modules/rxjs/src/internal/scheduler/intervalProvider.ts", "node_modules/rxjs/src/internal/scheduler/AsyncAction.ts", "node_modules/rxjs/src/internal/Scheduler.ts", "node_modules/rxjs/src/internal/scheduler/AsyncScheduler.ts", "node_modules/rxjs/src/internal/scheduler/async.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameAction.ts", 
"node_modules/rxjs/src/internal/scheduler/AnimationFrameScheduler.ts", "node_modules/rxjs/src/internal/scheduler/animationFrame.ts", "node_modules/rxjs/src/internal/observable/empty.ts", "node_modules/rxjs/src/internal/util/isScheduler.ts", "node_modules/rxjs/src/internal/util/args.ts", "node_modules/rxjs/src/internal/util/isArrayLike.ts", "node_modules/rxjs/src/internal/util/isPromise.ts", "node_modules/rxjs/src/internal/util/isInteropObservable.ts", "node_modules/rxjs/src/internal/util/isAsyncIterable.ts", "node_modules/rxjs/src/internal/util/throwUnobservableError.ts", "node_modules/rxjs/src/internal/symbol/iterator.ts", "node_modules/rxjs/src/internal/util/isIterable.ts", "node_modules/rxjs/src/internal/util/isReadableStreamLike.ts", "node_modules/rxjs/src/internal/observable/innerFrom.ts", "node_modules/rxjs/src/internal/util/executeSchedule.ts", "node_modules/rxjs/src/internal/operators/observeOn.ts", "node_modules/rxjs/src/internal/operators/subscribeOn.ts", "node_modules/rxjs/src/internal/scheduled/scheduleObservable.ts", "node_modules/rxjs/src/internal/scheduled/schedulePromise.ts", "node_modules/rxjs/src/internal/scheduled/scheduleArray.ts", "node_modules/rxjs/src/internal/scheduled/scheduleIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleAsyncIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleReadableStreamLike.ts", "node_modules/rxjs/src/internal/scheduled/scheduled.ts", "node_modules/rxjs/src/internal/observable/from.ts", "node_modules/rxjs/src/internal/observable/of.ts", "node_modules/rxjs/src/internal/observable/throwError.ts", "node_modules/rxjs/src/internal/util/EmptyError.ts", "node_modules/rxjs/src/internal/util/isDate.ts", "node_modules/rxjs/src/internal/operators/map.ts", "node_modules/rxjs/src/internal/util/mapOneOrManyArgs.ts", "node_modules/rxjs/src/internal/util/argsArgArrayOrObject.ts", "node_modules/rxjs/src/internal/util/createObject.ts", "node_modules/rxjs/src/internal/observable/combineLatest.ts", "node_modules/rxjs/src/internal/operators/mergeInternals.ts", "node_modules/rxjs/src/internal/operators/mergeMap.ts", "node_modules/rxjs/src/internal/operators/mergeAll.ts", "node_modules/rxjs/src/internal/operators/concatAll.ts", "node_modules/rxjs/src/internal/observable/concat.ts", "node_modules/rxjs/src/internal/observable/defer.ts", "node_modules/rxjs/src/internal/observable/fromEvent.ts", "node_modules/rxjs/src/internal/observable/fromEventPattern.ts", "node_modules/rxjs/src/internal/observable/timer.ts", "node_modules/rxjs/src/internal/observable/merge.ts", "node_modules/rxjs/src/internal/observable/never.ts", "node_modules/rxjs/src/internal/util/argsOrArgArray.ts", "node_modules/rxjs/src/internal/operators/filter.ts", "node_modules/rxjs/src/internal/observable/zip.ts", "node_modules/rxjs/src/internal/operators/audit.ts", "node_modules/rxjs/src/internal/operators/auditTime.ts", "node_modules/rxjs/src/internal/operators/bufferCount.ts", "node_modules/rxjs/src/internal/operators/catchError.ts", "node_modules/rxjs/src/internal/operators/scanInternals.ts", "node_modules/rxjs/src/internal/operators/combineLatest.ts", "node_modules/rxjs/src/internal/operators/combineLatestWith.ts", "node_modules/rxjs/src/internal/operators/debounceTime.ts", "node_modules/rxjs/src/internal/operators/defaultIfEmpty.ts", "node_modules/rxjs/src/internal/operators/take.ts", "node_modules/rxjs/src/internal/operators/ignoreElements.ts", "node_modules/rxjs/src/internal/operators/mapTo.ts", "node_modules/rxjs/src/internal/operators/delayWhen.ts", 
"node_modules/rxjs/src/internal/operators/delay.ts", "node_modules/rxjs/src/internal/operators/distinctUntilChanged.ts", "node_modules/rxjs/src/internal/operators/distinctUntilKeyChanged.ts", "node_modules/rxjs/src/internal/operators/throwIfEmpty.ts", "node_modules/rxjs/src/internal/operators/endWith.ts", "node_modules/rxjs/src/internal/operators/finalize.ts", "node_modules/rxjs/src/internal/operators/first.ts", "node_modules/rxjs/src/internal/operators/takeLast.ts", "node_modules/rxjs/src/internal/operators/merge.ts", "node_modules/rxjs/src/internal/operators/mergeWith.ts", "node_modules/rxjs/src/internal/operators/repeat.ts", "node_modules/rxjs/src/internal/operators/scan.ts", "node_modules/rxjs/src/internal/operators/share.ts", "node_modules/rxjs/src/internal/operators/shareReplay.ts", "node_modules/rxjs/src/internal/operators/skip.ts", "node_modules/rxjs/src/internal/operators/skipUntil.ts", "node_modules/rxjs/src/internal/operators/startWith.ts", "node_modules/rxjs/src/internal/operators/switchMap.ts", "node_modules/rxjs/src/internal/operators/takeUntil.ts", "node_modules/rxjs/src/internal/operators/takeWhile.ts", "node_modules/rxjs/src/internal/operators/tap.ts", "node_modules/rxjs/src/internal/operators/throttle.ts", "node_modules/rxjs/src/internal/operators/throttleTime.ts", "node_modules/rxjs/src/internal/operators/withLatestFrom.ts", "node_modules/rxjs/src/internal/operators/zip.ts", "node_modules/rxjs/src/internal/operators/zipWith.ts", "src/templates/assets/javascripts/browser/document/index.ts", "src/templates/assets/javascripts/browser/element/_/index.ts", "src/templates/assets/javascripts/browser/element/focus/index.ts", "src/templates/assets/javascripts/browser/element/hover/index.ts", "src/templates/assets/javascripts/browser/element/offset/_/index.ts", "src/templates/assets/javascripts/browser/element/offset/content/index.ts", "src/templates/assets/javascripts/utilities/h/index.ts", "src/templates/assets/javascripts/utilities/round/index.ts", "src/templates/assets/javascripts/browser/script/index.ts", "src/templates/assets/javascripts/browser/element/size/_/index.ts", "src/templates/assets/javascripts/browser/element/size/content/index.ts", "src/templates/assets/javascripts/browser/element/visibility/index.ts", "src/templates/assets/javascripts/browser/toggle/index.ts", "src/templates/assets/javascripts/browser/keyboard/index.ts", "src/templates/assets/javascripts/browser/location/_/index.ts", "src/templates/assets/javascripts/browser/location/hash/index.ts", "src/templates/assets/javascripts/browser/media/index.ts", "src/templates/assets/javascripts/browser/request/index.ts", "src/templates/assets/javascripts/browser/viewport/offset/index.ts", "src/templates/assets/javascripts/browser/viewport/size/index.ts", "src/templates/assets/javascripts/browser/viewport/_/index.ts", "src/templates/assets/javascripts/browser/viewport/at/index.ts", "src/templates/assets/javascripts/browser/worker/index.ts", "src/templates/assets/javascripts/_/index.ts", "src/templates/assets/javascripts/components/_/index.ts", "src/templates/assets/javascripts/components/announce/index.ts", "src/templates/assets/javascripts/components/consent/index.ts", "src/templates/assets/javascripts/templates/tooltip/index.tsx", "src/templates/assets/javascripts/templates/annotation/index.tsx", "src/templates/assets/javascripts/templates/clipboard/index.tsx", "src/templates/assets/javascripts/templates/search/index.tsx", "src/templates/assets/javascripts/templates/source/index.tsx", 
"src/templates/assets/javascripts/templates/tabbed/index.tsx", "src/templates/assets/javascripts/templates/table/index.tsx", "src/templates/assets/javascripts/templates/version/index.tsx", "src/templates/assets/javascripts/components/tooltip/index.ts", "src/templates/assets/javascripts/components/content/annotation/_/index.ts", "src/templates/assets/javascripts/components/content/annotation/list/index.ts", "src/templates/assets/javascripts/components/content/annotation/block/index.ts", "src/templates/assets/javascripts/components/content/code/_/index.ts", "src/templates/assets/javascripts/components/content/details/index.ts", "src/templates/assets/javascripts/components/content/mermaid/index.css", "src/templates/assets/javascripts/components/content/mermaid/index.ts", "src/templates/assets/javascripts/components/content/table/index.ts", "src/templates/assets/javascripts/components/content/tabs/index.ts", "src/templates/assets/javascripts/components/content/_/index.ts", "src/templates/assets/javascripts/components/dialog/index.ts", "src/templates/assets/javascripts/components/header/_/index.ts", "src/templates/assets/javascripts/components/header/title/index.ts", "src/templates/assets/javascripts/components/main/index.ts", "src/templates/assets/javascripts/components/palette/index.ts", "src/templates/assets/javascripts/components/progress/index.ts", "src/templates/assets/javascripts/integrations/clipboard/index.ts", "src/templates/assets/javascripts/integrations/sitemap/index.ts", "src/templates/assets/javascripts/integrations/instant/index.ts", "src/templates/assets/javascripts/integrations/search/highlighter/index.ts", "src/templates/assets/javascripts/integrations/search/worker/message/index.ts", "src/templates/assets/javascripts/integrations/search/worker/_/index.ts", "src/templates/assets/javascripts/integrations/version/index.ts", "src/templates/assets/javascripts/components/search/query/index.ts", "src/templates/assets/javascripts/components/search/result/index.ts", "src/templates/assets/javascripts/components/search/share/index.ts", "src/templates/assets/javascripts/components/search/suggest/index.ts", "src/templates/assets/javascripts/components/search/_/index.ts", "src/templates/assets/javascripts/components/search/highlight/index.ts", "src/templates/assets/javascripts/components/sidebar/index.ts", "src/templates/assets/javascripts/components/source/facts/github/index.ts", "src/templates/assets/javascripts/components/source/facts/gitlab/index.ts", "src/templates/assets/javascripts/components/source/facts/_/index.ts", "src/templates/assets/javascripts/components/source/_/index.ts", "src/templates/assets/javascripts/components/tabs/index.ts", "src/templates/assets/javascripts/components/toc/index.ts", "src/templates/assets/javascripts/components/top/index.ts", "src/templates/assets/javascripts/patches/ellipsis/index.ts", "src/templates/assets/javascripts/patches/indeterminate/index.ts", "src/templates/assets/javascripts/patches/scrollfix/index.ts", "src/templates/assets/javascripts/patches/scrolllock/index.ts", "src/templates/assets/javascripts/polyfills/index.ts"], + "sourcesContent": ["(function (global, factory) {\n typeof exports === 'object' && typeof module !== 'undefined' ? factory() :\n typeof define === 'function' && define.amd ? 
define(factory) :\n (factory());\n}(this, (function () { 'use strict';\n\n /**\n * Applies the :focus-visible polyfill at the given scope.\n * A scope in this case is either the top-level Document or a Shadow Root.\n *\n * @param {(Document|ShadowRoot)} scope\n * @see https://github.com/WICG/focus-visible\n */\n function applyFocusVisiblePolyfill(scope) {\n var hadKeyboardEvent = true;\n var hadFocusVisibleRecently = false;\n var hadFocusVisibleRecentlyTimeout = null;\n\n var inputTypesAllowlist = {\n text: true,\n search: true,\n url: true,\n tel: true,\n email: true,\n password: true,\n number: true,\n date: true,\n month: true,\n week: true,\n time: true,\n datetime: true,\n 'datetime-local': true\n };\n\n /**\n * Helper function for legacy browsers and iframes which sometimes focus\n * elements like document, body, and non-interactive SVG.\n * @param {Element} el\n */\n function isValidFocusTarget(el) {\n if (\n el &&\n el !== document &&\n el.nodeName !== 'HTML' &&\n el.nodeName !== 'BODY' &&\n 'classList' in el &&\n 'contains' in el.classList\n ) {\n return true;\n }\n return false;\n }\n\n /**\n * Computes whether the given element should automatically trigger the\n * `focus-visible` class being added, i.e. whether it should always match\n * `:focus-visible` when focused.\n * @param {Element} el\n * @return {boolean}\n */\n function focusTriggersKeyboardModality(el) {\n var type = el.type;\n var tagName = el.tagName;\n\n if (tagName === 'INPUT' && inputTypesAllowlist[type] && !el.readOnly) {\n return true;\n }\n\n if (tagName === 'TEXTAREA' && !el.readOnly) {\n return true;\n }\n\n if (el.isContentEditable) {\n return true;\n }\n\n return false;\n }\n\n /**\n * Add the `focus-visible` class to the given element if it was not added by\n * the author.\n * @param {Element} el\n */\n function addFocusVisibleClass(el) {\n if (el.classList.contains('focus-visible')) {\n return;\n }\n el.classList.add('focus-visible');\n el.setAttribute('data-focus-visible-added', '');\n }\n\n /**\n * Remove the `focus-visible` class from the given element if it was not\n * originally added by the author.\n * @param {Element} el\n */\n function removeFocusVisibleClass(el) {\n if (!el.hasAttribute('data-focus-visible-added')) {\n return;\n }\n el.classList.remove('focus-visible');\n el.removeAttribute('data-focus-visible-added');\n }\n\n /**\n * If the most recent user interaction was via the keyboard;\n * and the key press did not include a meta, alt/option, or control key;\n * then the modality is keyboard. 
Otherwise, the modality is not keyboard.\n * Apply `focus-visible` to any current active element and keep track\n * of our keyboard modality state with `hadKeyboardEvent`.\n * @param {KeyboardEvent} e\n */\n function onKeyDown(e) {\n if (e.metaKey || e.altKey || e.ctrlKey) {\n return;\n }\n\n if (isValidFocusTarget(scope.activeElement)) {\n addFocusVisibleClass(scope.activeElement);\n }\n\n hadKeyboardEvent = true;\n }\n\n /**\n * If at any point a user clicks with a pointing device, ensure that we change\n * the modality away from keyboard.\n * This avoids the situation where a user presses a key on an already focused\n * element, and then clicks on a different element, focusing it with a\n * pointing device, while we still think we're in keyboard modality.\n * @param {Event} e\n */\n function onPointerDown(e) {\n hadKeyboardEvent = false;\n }\n\n /**\n * On `focus`, add the `focus-visible` class to the target if:\n * - the target received focus as a result of keyboard navigation, or\n * - the event target is an element that will likely require interaction\n * via the keyboard (e.g. a text box)\n * @param {Event} e\n */\n function onFocus(e) {\n // Prevent IE from focusing the document or HTML element.\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (hadKeyboardEvent || focusTriggersKeyboardModality(e.target)) {\n addFocusVisibleClass(e.target);\n }\n }\n\n /**\n * On `blur`, remove the `focus-visible` class from the target.\n * @param {Event} e\n */\n function onBlur(e) {\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (\n e.target.classList.contains('focus-visible') ||\n e.target.hasAttribute('data-focus-visible-added')\n ) {\n // To detect a tab/window switch, we look for a blur event followed\n // rapidly by a visibility change.\n // If we don't see a visibility change within 100ms, it's probably a\n // regular focus change.\n hadFocusVisibleRecently = true;\n window.clearTimeout(hadFocusVisibleRecentlyTimeout);\n hadFocusVisibleRecentlyTimeout = window.setTimeout(function() {\n hadFocusVisibleRecently = false;\n }, 100);\n removeFocusVisibleClass(e.target);\n }\n }\n\n /**\n * If the user changes tabs, keep track of whether or not the previously\n * focused element had .focus-visible.\n * @param {Event} e\n */\n function onVisibilityChange(e) {\n if (document.visibilityState === 'hidden') {\n // If the tab becomes active again, the browser will handle calling focus\n // on the element (Safari actually calls it twice).\n // If this tab change caused a blur on an element with focus-visible,\n // re-apply the class when the user switches back to the tab.\n if (hadFocusVisibleRecently) {\n hadKeyboardEvent = true;\n }\n addInitialPointerMoveListeners();\n }\n }\n\n /**\n * Add a group of listeners to detect usage of any pointing devices.\n * These listeners will be added when the polyfill first loads, and anytime\n * the window is blurred, so that they are active when the window regains\n * focus.\n */\n function addInitialPointerMoveListeners() {\n document.addEventListener('mousemove', onInitialPointerMove);\n document.addEventListener('mousedown', onInitialPointerMove);\n document.addEventListener('mouseup', onInitialPointerMove);\n document.addEventListener('pointermove', onInitialPointerMove);\n document.addEventListener('pointerdown', onInitialPointerMove);\n document.addEventListener('pointerup', onInitialPointerMove);\n document.addEventListener('touchmove', onInitialPointerMove);\n document.addEventListener('touchstart', onInitialPointerMove);\n 
document.addEventListener('touchend', onInitialPointerMove);\n }\n\n function removeInitialPointerMoveListeners() {\n document.removeEventListener('mousemove', onInitialPointerMove);\n document.removeEventListener('mousedown', onInitialPointerMove);\n document.removeEventListener('mouseup', onInitialPointerMove);\n document.removeEventListener('pointermove', onInitialPointerMove);\n document.removeEventListener('pointerdown', onInitialPointerMove);\n document.removeEventListener('pointerup', onInitialPointerMove);\n document.removeEventListener('touchmove', onInitialPointerMove);\n document.removeEventListener('touchstart', onInitialPointerMove);\n document.removeEventListener('touchend', onInitialPointerMove);\n }\n\n /**\n * When the polfyill first loads, assume the user is in keyboard modality.\n * If any event is received from a pointing device (e.g. mouse, pointer,\n * touch), turn off keyboard modality.\n * This accounts for situations where focus enters the page from the URL bar.\n * @param {Event} e\n */\n function onInitialPointerMove(e) {\n // Work around a Safari quirk that fires a mousemove on whenever the\n // window blurs, even if you're tabbing out of the page. \u00AF\\_(\u30C4)_/\u00AF\n if (e.target.nodeName && e.target.nodeName.toLowerCase() === 'html') {\n return;\n }\n\n hadKeyboardEvent = false;\n removeInitialPointerMoveListeners();\n }\n\n // For some kinds of state, we are interested in changes at the global scope\n // only. For example, global pointer input, global key presses and global\n // visibility change should affect the state at every scope:\n document.addEventListener('keydown', onKeyDown, true);\n document.addEventListener('mousedown', onPointerDown, true);\n document.addEventListener('pointerdown', onPointerDown, true);\n document.addEventListener('touchstart', onPointerDown, true);\n document.addEventListener('visibilitychange', onVisibilityChange, true);\n\n addInitialPointerMoveListeners();\n\n // For focus and blur, we specifically care about state changes in the local\n // scope. This is because focus / blur events that originate from within a\n // shadow root are not re-dispatched from the host element if it was already\n // the active element in its own scope:\n scope.addEventListener('focus', onFocus, true);\n scope.addEventListener('blur', onBlur, true);\n\n // We detect that a node is a ShadowRoot by ensuring that it is a\n // DocumentFragment and also has a host property. This check covers native\n // implementation and polyfill implementation transparently. If we only cared\n // about the native implementation, we could just check if the scope was\n // an instance of a ShadowRoot.\n if (scope.nodeType === Node.DOCUMENT_FRAGMENT_NODE && scope.host) {\n // Since a ShadowRoot is a special kind of DocumentFragment, it does not\n // have a root element to add a class to. So, we add this attribute to the\n // host element instead:\n scope.host.setAttribute('data-js-focus-visible', '');\n } else if (scope.nodeType === Node.DOCUMENT_NODE) {\n document.documentElement.classList.add('js-focus-visible');\n document.documentElement.setAttribute('data-js-focus-visible', '');\n }\n }\n\n // It is important to wrap all references to global window and document in\n // these checks to support server-side rendering use cases\n // @see https://github.com/WICG/focus-visible/issues/199\n if (typeof window !== 'undefined' && typeof document !== 'undefined') {\n // Make the polyfill helper globally available. 
This can be used as a signal\n // to interested libraries that wish to coordinate with the polyfill for e.g.,\n // applying the polyfill to a shadow root:\n window.applyFocusVisiblePolyfill = applyFocusVisiblePolyfill;\n\n // Notify interested libraries of the polyfill's presence, in case the\n // polyfill was loaded lazily:\n var event;\n\n try {\n event = new CustomEvent('focus-visible-polyfill-ready');\n } catch (error) {\n // IE11 does not support using CustomEvent as a constructor directly:\n event = document.createEvent('CustomEvent');\n event.initCustomEvent('focus-visible-polyfill-ready', false, false, {});\n }\n\n window.dispatchEvent(event);\n }\n\n if (typeof document !== 'undefined') {\n // Apply the polyfill to the global document, so that no JavaScript\n // coordination is required to use the polyfill in the top-level document:\n applyFocusVisiblePolyfill(document);\n }\n\n})));\n", "/*!\n * clipboard.js v2.0.11\n * https://clipboardjs.com/\n *\n * Licensed MIT \u00A9 Zeno Rocha\n */\n(function webpackUniversalModuleDefinition(root, factory) {\n\tif(typeof exports === 'object' && typeof module === 'object')\n\t\tmodule.exports = factory();\n\telse if(typeof define === 'function' && define.amd)\n\t\tdefine([], factory);\n\telse if(typeof exports === 'object')\n\t\texports[\"ClipboardJS\"] = factory();\n\telse\n\t\troot[\"ClipboardJS\"] = factory();\n})(this, function() {\nreturn /******/ (function() { // webpackBootstrap\n/******/ \tvar __webpack_modules__ = ({\n\n/***/ 686:\n/***/ (function(__unused_webpack_module, __webpack_exports__, __webpack_require__) {\n\n\"use strict\";\n\n// EXPORTS\n__webpack_require__.d(__webpack_exports__, {\n \"default\": function() { return /* binding */ clipboard; }\n});\n\n// EXTERNAL MODULE: ./node_modules/tiny-emitter/index.js\nvar tiny_emitter = __webpack_require__(279);\nvar tiny_emitter_default = /*#__PURE__*/__webpack_require__.n(tiny_emitter);\n// EXTERNAL MODULE: ./node_modules/good-listener/src/listen.js\nvar listen = __webpack_require__(370);\nvar listen_default = /*#__PURE__*/__webpack_require__.n(listen);\n// EXTERNAL MODULE: ./node_modules/select/src/select.js\nvar src_select = __webpack_require__(817);\nvar select_default = /*#__PURE__*/__webpack_require__.n(src_select);\n;// CONCATENATED MODULE: ./src/common/command.js\n/**\n * Executes a given operation type.\n * @param {String} type\n * @return {Boolean}\n */\nfunction command(type) {\n try {\n return document.execCommand(type);\n } catch (err) {\n return false;\n }\n}\n;// CONCATENATED MODULE: ./src/actions/cut.js\n\n\n/**\n * Cut action wrapper.\n * @param {String|HTMLElement} target\n * @return {String}\n */\n\nvar ClipboardActionCut = function ClipboardActionCut(target) {\n var selectedText = select_default()(target);\n command('cut');\n return selectedText;\n};\n\n/* harmony default export */ var actions_cut = (ClipboardActionCut);\n;// CONCATENATED MODULE: ./src/common/create-fake-element.js\n/**\n * Creates a fake textarea element with a value.\n * @param {String} value\n * @return {HTMLElement}\n */\nfunction createFakeElement(value) {\n var isRTL = document.documentElement.getAttribute('dir') === 'rtl';\n var fakeElement = document.createElement('textarea'); // Prevent zooming on iOS\n\n fakeElement.style.fontSize = '12pt'; // Reset box model\n\n fakeElement.style.border = '0';\n fakeElement.style.padding = '0';\n fakeElement.style.margin = '0'; // Move element out of screen horizontally\n\n fakeElement.style.position = 'absolute';\n fakeElement.style[isRTL ? 
'right' : 'left'] = '-9999px'; // Move element to the same position vertically\n\n var yPosition = window.pageYOffset || document.documentElement.scrollTop;\n fakeElement.style.top = \"\".concat(yPosition, \"px\");\n fakeElement.setAttribute('readonly', '');\n fakeElement.value = value;\n return fakeElement;\n}\n;// CONCATENATED MODULE: ./src/actions/copy.js\n\n\n\n/**\n * Create fake copy action wrapper using a fake element.\n * @param {String} target\n * @param {Object} options\n * @return {String}\n */\n\nvar fakeCopyAction = function fakeCopyAction(value, options) {\n var fakeElement = createFakeElement(value);\n options.container.appendChild(fakeElement);\n var selectedText = select_default()(fakeElement);\n command('copy');\n fakeElement.remove();\n return selectedText;\n};\n/**\n * Copy action wrapper.\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @return {String}\n */\n\n\nvar ClipboardActionCopy = function ClipboardActionCopy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n var selectedText = '';\n\n if (typeof target === 'string') {\n selectedText = fakeCopyAction(target, options);\n } else if (target instanceof HTMLInputElement && !['text', 'search', 'url', 'tel', 'password'].includes(target === null || target === void 0 ? void 0 : target.type)) {\n // If input type doesn't support `setSelectionRange`. Simulate it. https://developer.mozilla.org/en-US/docs/Web/API/HTMLInputElement/setSelectionRange\n selectedText = fakeCopyAction(target.value, options);\n } else {\n selectedText = select_default()(target);\n command('copy');\n }\n\n return selectedText;\n};\n\n/* harmony default export */ var actions_copy = (ClipboardActionCopy);\n;// CONCATENATED MODULE: ./src/actions/default.js\nfunction _typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return _typeof(obj); }\n\n\n\n/**\n * Inner function which performs selection from either `text` or `target`\n * properties and then executes copy or cut operations.\n * @param {Object} options\n */\n\nvar ClipboardActionDefault = function ClipboardActionDefault() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n // Defines base properties passed from constructor.\n var _options$action = options.action,\n action = _options$action === void 0 ? 'copy' : _options$action,\n container = options.container,\n target = options.target,\n text = options.text; // Sets the `action` to be performed which can be either 'copy' or 'cut'.\n\n if (action !== 'copy' && action !== 'cut') {\n throw new Error('Invalid \"action\" value, use either \"copy\" or \"cut\"');\n } // Sets the `target` property using an element that will be have its content copied.\n\n\n if (target !== undefined) {\n if (target && _typeof(target) === 'object' && target.nodeType === 1) {\n if (action === 'copy' && target.hasAttribute('disabled')) {\n throw new Error('Invalid \"target\" attribute. Please use \"readonly\" instead of \"disabled\" attribute');\n }\n\n if (action === 'cut' && (target.hasAttribute('readonly') || target.hasAttribute('disabled'))) {\n throw new Error('Invalid \"target\" attribute. 
You can\\'t cut text from elements with \"readonly\" or \"disabled\" attributes');\n }\n } else {\n throw new Error('Invalid \"target\" value, use a valid Element');\n }\n } // Define selection strategy based on `text` property.\n\n\n if (text) {\n return actions_copy(text, {\n container: container\n });\n } // Defines which selection strategy based on `target` property.\n\n\n if (target) {\n return action === 'cut' ? actions_cut(target) : actions_copy(target, {\n container: container\n });\n }\n};\n\n/* harmony default export */ var actions_default = (ClipboardActionDefault);\n;// CONCATENATED MODULE: ./src/clipboard.js\nfunction clipboard_typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { clipboard_typeof = function _typeof(obj) { return typeof obj; }; } else { clipboard_typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return clipboard_typeof(obj); }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }\n\nfunction _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function\"); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, writable: true, configurable: true } }); if (superClass) _setPrototypeOf(subClass, superClass); }\n\nfunction _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); }\n\nfunction _createSuper(Derived) { var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = _getPrototypeOf(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = _getPrototypeOf(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return _possibleConstructorReturn(this, result); }; }\n\nfunction _possibleConstructorReturn(self, call) { if (call && (clipboard_typeof(call) === \"object\" || typeof call === \"function\")) { return call; } return _assertThisInitialized(self); }\n\nfunction _assertThisInitialized(self) { if (self === void 0) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return self; }\n\nfunction _isNativeReflectConstruct() { if (typeof Reflect === \"undefined\" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if (typeof Proxy === \"function\") return true; try { Date.prototype.toString.call(Reflect.construct(Date, [], function () {})); return true; } catch (e) { return false; } }\n\nfunction _getPrototypeOf(o) { _getPrototypeOf = Object.setPrototypeOf ? 
Object.getPrototypeOf : function _getPrototypeOf(o) { return o.__proto__ || Object.getPrototypeOf(o); }; return _getPrototypeOf(o); }\n\n\n\n\n\n\n/**\n * Helper function to retrieve attribute value.\n * @param {String} suffix\n * @param {Element} element\n */\n\nfunction getAttributeValue(suffix, element) {\n var attribute = \"data-clipboard-\".concat(suffix);\n\n if (!element.hasAttribute(attribute)) {\n return;\n }\n\n return element.getAttribute(attribute);\n}\n/**\n * Base class which takes one or more elements, adds event listeners to them,\n * and instantiates a new `ClipboardAction` on each click.\n */\n\n\nvar Clipboard = /*#__PURE__*/function (_Emitter) {\n _inherits(Clipboard, _Emitter);\n\n var _super = _createSuper(Clipboard);\n\n /**\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n * @param {Object} options\n */\n function Clipboard(trigger, options) {\n var _this;\n\n _classCallCheck(this, Clipboard);\n\n _this = _super.call(this);\n\n _this.resolveOptions(options);\n\n _this.listenClick(trigger);\n\n return _this;\n }\n /**\n * Defines if attributes would be resolved using internal setter functions\n * or custom functions that were passed in the constructor.\n * @param {Object} options\n */\n\n\n _createClass(Clipboard, [{\n key: \"resolveOptions\",\n value: function resolveOptions() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n this.action = typeof options.action === 'function' ? options.action : this.defaultAction;\n this.target = typeof options.target === 'function' ? options.target : this.defaultTarget;\n this.text = typeof options.text === 'function' ? options.text : this.defaultText;\n this.container = clipboard_typeof(options.container) === 'object' ? options.container : document.body;\n }\n /**\n * Adds a click event listener to the passed trigger.\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n */\n\n }, {\n key: \"listenClick\",\n value: function listenClick(trigger) {\n var _this2 = this;\n\n this.listener = listen_default()(trigger, 'click', function (e) {\n return _this2.onClick(e);\n });\n }\n /**\n * Defines a new `ClipboardAction` on each click event.\n * @param {Event} e\n */\n\n }, {\n key: \"onClick\",\n value: function onClick(e) {\n var trigger = e.delegateTarget || e.currentTarget;\n var action = this.action(trigger) || 'copy';\n var text = actions_default({\n action: action,\n container: this.container,\n target: this.target(trigger),\n text: this.text(trigger)\n }); // Fires an event based on the copy operation result.\n\n this.emit(text ? 
'success' : 'error', {\n action: action,\n text: text,\n trigger: trigger,\n clearSelection: function clearSelection() {\n if (trigger) {\n trigger.focus();\n }\n\n window.getSelection().removeAllRanges();\n }\n });\n }\n /**\n * Default `action` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultAction\",\n value: function defaultAction(trigger) {\n return getAttributeValue('action', trigger);\n }\n /**\n * Default `target` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultTarget\",\n value: function defaultTarget(trigger) {\n var selector = getAttributeValue('target', trigger);\n\n if (selector) {\n return document.querySelector(selector);\n }\n }\n /**\n * Allow fire programmatically a copy action\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @returns Text copied.\n */\n\n }, {\n key: \"defaultText\",\n\n /**\n * Default `text` lookup function.\n * @param {Element} trigger\n */\n value: function defaultText(trigger) {\n return getAttributeValue('text', trigger);\n }\n /**\n * Destroy lifecycle.\n */\n\n }, {\n key: \"destroy\",\n value: function destroy() {\n this.listener.destroy();\n }\n }], [{\n key: \"copy\",\n value: function copy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n return actions_copy(target, options);\n }\n /**\n * Allow fire programmatically a cut action\n * @param {String|HTMLElement} target\n * @returns Text cutted.\n */\n\n }, {\n key: \"cut\",\n value: function cut(target) {\n return actions_cut(target);\n }\n /**\n * Returns the support of the given action, or all actions if no action is\n * given.\n * @param {String} [action]\n */\n\n }, {\n key: \"isSupported\",\n value: function isSupported() {\n var action = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : ['copy', 'cut'];\n var actions = typeof action === 'string' ? 
[action] : action;\n var support = !!document.queryCommandSupported;\n actions.forEach(function (action) {\n support = support && !!document.queryCommandSupported(action);\n });\n return support;\n }\n }]);\n\n return Clipboard;\n}((tiny_emitter_default()));\n\n/* harmony default export */ var clipboard = (Clipboard);\n\n/***/ }),\n\n/***/ 828:\n/***/ (function(module) {\n\nvar DOCUMENT_NODE_TYPE = 9;\n\n/**\n * A polyfill for Element.matches()\n */\nif (typeof Element !== 'undefined' && !Element.prototype.matches) {\n var proto = Element.prototype;\n\n proto.matches = proto.matchesSelector ||\n proto.mozMatchesSelector ||\n proto.msMatchesSelector ||\n proto.oMatchesSelector ||\n proto.webkitMatchesSelector;\n}\n\n/**\n * Finds the closest parent that matches a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @return {Function}\n */\nfunction closest (element, selector) {\n while (element && element.nodeType !== DOCUMENT_NODE_TYPE) {\n if (typeof element.matches === 'function' &&\n element.matches(selector)) {\n return element;\n }\n element = element.parentNode;\n }\n}\n\nmodule.exports = closest;\n\n\n/***/ }),\n\n/***/ 438:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar closest = __webpack_require__(828);\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction _delegate(element, selector, type, callback, useCapture) {\n var listenerFn = listener.apply(this, arguments);\n\n element.addEventListener(type, listenerFn, useCapture);\n\n return {\n destroy: function() {\n element.removeEventListener(type, listenerFn, useCapture);\n }\n }\n}\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element|String|Array} [elements]\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction delegate(elements, selector, type, callback, useCapture) {\n // Handle the regular Element usage\n if (typeof elements.addEventListener === 'function') {\n return _delegate.apply(null, arguments);\n }\n\n // Handle Element-less usage, it defaults to global delegation\n if (typeof type === 'function') {\n // Use `document` as the first parameter, then apply arguments\n // This is a short way to .unshift `arguments` without running into deoptimizations\n return _delegate.bind(null, document).apply(null, arguments);\n }\n\n // Handle Selector-based usage\n if (typeof elements === 'string') {\n elements = document.querySelectorAll(elements);\n }\n\n // Handle Array-like based usage\n return Array.prototype.map.call(elements, function (element) {\n return _delegate(element, selector, type, callback, useCapture);\n });\n}\n\n/**\n * Finds closest match and invokes callback.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Function}\n */\nfunction listener(element, selector, type, callback) {\n return function(e) {\n e.delegateTarget = closest(e.target, selector);\n\n if (e.delegateTarget) {\n callback.call(element, e);\n }\n }\n}\n\nmodule.exports = delegate;\n\n\n/***/ }),\n\n/***/ 879:\n/***/ (function(__unused_webpack_module, exports) {\n\n/**\n * Check if argument is a HTML element.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.node = function(value) {\n return value !== undefined\n && 
value instanceof HTMLElement\n && value.nodeType === 1;\n};\n\n/**\n * Check if argument is a list of HTML elements.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.nodeList = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return value !== undefined\n && (type === '[object NodeList]' || type === '[object HTMLCollection]')\n && ('length' in value)\n && (value.length === 0 || exports.node(value[0]));\n};\n\n/**\n * Check if argument is a string.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.string = function(value) {\n return typeof value === 'string'\n || value instanceof String;\n};\n\n/**\n * Check if argument is a function.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.fn = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return type === '[object Function]';\n};\n\n\n/***/ }),\n\n/***/ 370:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar is = __webpack_require__(879);\nvar delegate = __webpack_require__(438);\n\n/**\n * Validates all params and calls the right\n * listener function based on its target type.\n *\n * @param {String|HTMLElement|HTMLCollection|NodeList} target\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listen(target, type, callback) {\n if (!target && !type && !callback) {\n throw new Error('Missing required arguments');\n }\n\n if (!is.string(type)) {\n throw new TypeError('Second argument must be a String');\n }\n\n if (!is.fn(callback)) {\n throw new TypeError('Third argument must be a Function');\n }\n\n if (is.node(target)) {\n return listenNode(target, type, callback);\n }\n else if (is.nodeList(target)) {\n return listenNodeList(target, type, callback);\n }\n else if (is.string(target)) {\n return listenSelector(target, type, callback);\n }\n else {\n throw new TypeError('First argument must be a String, HTMLElement, HTMLCollection, or NodeList');\n }\n}\n\n/**\n * Adds an event listener to a HTML element\n * and returns a remove listener function.\n *\n * @param {HTMLElement} node\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNode(node, type, callback) {\n node.addEventListener(type, callback);\n\n return {\n destroy: function() {\n node.removeEventListener(type, callback);\n }\n }\n}\n\n/**\n * Add an event listener to a list of HTML elements\n * and returns a remove listener function.\n *\n * @param {NodeList|HTMLCollection} nodeList\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNodeList(nodeList, type, callback) {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.addEventListener(type, callback);\n });\n\n return {\n destroy: function() {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.removeEventListener(type, callback);\n });\n }\n }\n}\n\n/**\n * Add an event listener to a selector\n * and returns a remove listener function.\n *\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenSelector(selector, type, callback) {\n return delegate(document.body, selector, type, callback);\n}\n\nmodule.exports = listen;\n\n\n/***/ }),\n\n/***/ 817:\n/***/ (function(module) {\n\nfunction select(element) {\n var selectedText;\n\n if (element.nodeName === 'SELECT') {\n element.focus();\n\n selectedText = element.value;\n }\n else if (element.nodeName === 'INPUT' || element.nodeName 
=== 'TEXTAREA') {\n var isReadOnly = element.hasAttribute('readonly');\n\n if (!isReadOnly) {\n element.setAttribute('readonly', '');\n }\n\n element.select();\n element.setSelectionRange(0, element.value.length);\n\n if (!isReadOnly) {\n element.removeAttribute('readonly');\n }\n\n selectedText = element.value;\n }\n else {\n if (element.hasAttribute('contenteditable')) {\n element.focus();\n }\n\n var selection = window.getSelection();\n var range = document.createRange();\n\n range.selectNodeContents(element);\n selection.removeAllRanges();\n selection.addRange(range);\n\n selectedText = selection.toString();\n }\n\n return selectedText;\n}\n\nmodule.exports = select;\n\n\n/***/ }),\n\n/***/ 279:\n/***/ (function(module) {\n\nfunction E () {\n // Keep this empty so it's easier to inherit from\n // (via https://github.com/lipsmack from https://github.com/scottcorgan/tiny-emitter/issues/3)\n}\n\nE.prototype = {\n on: function (name, callback, ctx) {\n var e = this.e || (this.e = {});\n\n (e[name] || (e[name] = [])).push({\n fn: callback,\n ctx: ctx\n });\n\n return this;\n },\n\n once: function (name, callback, ctx) {\n var self = this;\n function listener () {\n self.off(name, listener);\n callback.apply(ctx, arguments);\n };\n\n listener._ = callback\n return this.on(name, listener, ctx);\n },\n\n emit: function (name) {\n var data = [].slice.call(arguments, 1);\n var evtArr = ((this.e || (this.e = {}))[name] || []).slice();\n var i = 0;\n var len = evtArr.length;\n\n for (i; i < len; i++) {\n evtArr[i].fn.apply(evtArr[i].ctx, data);\n }\n\n return this;\n },\n\n off: function (name, callback) {\n var e = this.e || (this.e = {});\n var evts = e[name];\n var liveEvents = [];\n\n if (evts && callback) {\n for (var i = 0, len = evts.length; i < len; i++) {\n if (evts[i].fn !== callback && evts[i].fn._ !== callback)\n liveEvents.push(evts[i]);\n }\n }\n\n // Remove event from queue to prevent memory leak\n // Suggested by https://github.com/lazd\n // Ref: https://github.com/scottcorgan/tiny-emitter/commit/c6ebfaa9bc973b33d110a84a307742b7cf94c953#commitcomment-5024910\n\n (liveEvents.length)\n ? 
e[name] = liveEvents\n : delete e[name];\n\n return this;\n }\n};\n\nmodule.exports = E;\nmodule.exports.TinyEmitter = E;\n\n\n/***/ })\n\n/******/ \t});\n/************************************************************************/\n/******/ \t// The module cache\n/******/ \tvar __webpack_module_cache__ = {};\n/******/ \t\n/******/ \t// The require function\n/******/ \tfunction __webpack_require__(moduleId) {\n/******/ \t\t// Check if module is in cache\n/******/ \t\tif(__webpack_module_cache__[moduleId]) {\n/******/ \t\t\treturn __webpack_module_cache__[moduleId].exports;\n/******/ \t\t}\n/******/ \t\t// Create a new module (and put it into the cache)\n/******/ \t\tvar module = __webpack_module_cache__[moduleId] = {\n/******/ \t\t\t// no module.id needed\n/******/ \t\t\t// no module.loaded needed\n/******/ \t\t\texports: {}\n/******/ \t\t};\n/******/ \t\n/******/ \t\t// Execute the module function\n/******/ \t\t__webpack_modules__[moduleId](module, module.exports, __webpack_require__);\n/******/ \t\n/******/ \t\t// Return the exports of the module\n/******/ \t\treturn module.exports;\n/******/ \t}\n/******/ \t\n/************************************************************************/\n/******/ \t/* webpack/runtime/compat get default export */\n/******/ \t!function() {\n/******/ \t\t// getDefaultExport function for compatibility with non-harmony modules\n/******/ \t\t__webpack_require__.n = function(module) {\n/******/ \t\t\tvar getter = module && module.__esModule ?\n/******/ \t\t\t\tfunction() { return module['default']; } :\n/******/ \t\t\t\tfunction() { return module; };\n/******/ \t\t\t__webpack_require__.d(getter, { a: getter });\n/******/ \t\t\treturn getter;\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/define property getters */\n/******/ \t!function() {\n/******/ \t\t// define getter functions for harmony exports\n/******/ \t\t__webpack_require__.d = function(exports, definition) {\n/******/ \t\t\tfor(var key in definition) {\n/******/ \t\t\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n/******/ \t\t\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n/******/ \t\t\t\t}\n/******/ \t\t\t}\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/hasOwnProperty shorthand */\n/******/ \t!function() {\n/******/ \t\t__webpack_require__.o = function(obj, prop) { return Object.prototype.hasOwnProperty.call(obj, prop); }\n/******/ \t}();\n/******/ \t\n/************************************************************************/\n/******/ \t// module exports must be returned from runtime so entry inlining is disabled\n/******/ \t// startup\n/******/ \t// Load entry module and return exports\n/******/ \treturn __webpack_require__(686);\n/******/ })()\n.default;\n});", "/*!\n * escape-html\n * Copyright(c) 2012-2013 TJ Holowaychuk\n * Copyright(c) 2015 Andreas Lubbe\n * Copyright(c) 2015 Tiancheng \"Timothy\" Gu\n * MIT Licensed\n */\n\n'use strict';\n\n/**\n * Module variables.\n * @private\n */\n\nvar matchHtmlRegExp = /[\"'&<>]/;\n\n/**\n * Module exports.\n * @public\n */\n\nmodule.exports = escapeHtml;\n\n/**\n * Escape special characters in the given string of html.\n *\n * @param {string} string The string to escape for inserting into HTML\n * @return {string}\n * @public\n */\n\nfunction escapeHtml(string) {\n var str = '' + string;\n var match = matchHtmlRegExp.exec(str);\n\n if (!match) {\n return str;\n }\n\n var escape;\n var html = '';\n var index = 0;\n 
var lastIndex = 0;\n\n for (index = match.index; index < str.length; index++) {\n switch (str.charCodeAt(index)) {\n case 34: // \"\n escape = '"';\n break;\n case 38: // &\n escape = '&';\n break;\n case 39: // '\n escape = ''';\n break;\n case 60: // <\n escape = '<';\n break;\n case 62: // >\n escape = '>';\n break;\n default:\n continue;\n }\n\n if (lastIndex !== index) {\n html += str.substring(lastIndex, index);\n }\n\n lastIndex = index + 1;\n html += escape;\n }\n\n return lastIndex !== index\n ? html + str.substring(lastIndex, index)\n : html;\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport \"focus-visible\"\n\nimport {\n EMPTY,\n NEVER,\n Observable,\n Subject,\n defer,\n delay,\n filter,\n map,\n merge,\n mergeWith,\n shareReplay,\n switchMap\n} from \"rxjs\"\n\nimport { configuration, feature } from \"./_\"\nimport {\n at,\n getActiveElement,\n getOptionalElement,\n requestJSON,\n setLocation,\n setToggle,\n watchDocument,\n watchKeyboard,\n watchLocation,\n watchLocationTarget,\n watchMedia,\n watchPrint,\n watchScript,\n watchViewport\n} from \"./browser\"\nimport {\n getComponentElement,\n getComponentElements,\n mountAnnounce,\n mountBackToTop,\n mountConsent,\n mountContent,\n mountDialog,\n mountHeader,\n mountHeaderTitle,\n mountPalette,\n mountProgress,\n mountSearch,\n mountSearchHiglight,\n mountSidebar,\n mountSource,\n mountTableOfContents,\n mountTabs,\n watchHeader,\n watchMain\n} from \"./components\"\nimport {\n SearchIndex,\n setupClipboardJS,\n setupInstantNavigation,\n setupVersionSelector\n} from \"./integrations\"\nimport {\n patchEllipsis,\n patchIndeterminate,\n patchScrollfix,\n patchScrolllock\n} from \"./patches\"\nimport \"./polyfills\"\n\n/* ----------------------------------------------------------------------------\n * Functions - @todo refactor\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch search index\n *\n * @returns Search index observable\n */\nfunction fetchSearchIndex(): Observable {\n if (location.protocol === \"file:\") {\n return watchScript(\n `${new URL(\"search/search_index.js\", config.base)}`\n )\n .pipe(\n // @ts-ignore - @todo fix typings\n map(() => __index),\n shareReplay(1)\n )\n } else {\n return requestJSON(\n new URL(\"search/search_index.json\", config.base)\n )\n }\n}\n\n/* ----------------------------------------------------------------------------\n * Application\n * 
------------------------------------------------------------------------- */\n\n/* Yay, JavaScript is available */\ndocument.documentElement.classList.remove(\"no-js\")\ndocument.documentElement.classList.add(\"js\")\n\n/* Set up navigation observables and subjects */\nconst document$ = watchDocument()\nconst location$ = watchLocation()\nconst target$ = watchLocationTarget(location$)\nconst keyboard$ = watchKeyboard()\n\n/* Set up media observables */\nconst viewport$ = watchViewport()\nconst tablet$ = watchMedia(\"(min-width: 960px)\")\nconst screen$ = watchMedia(\"(min-width: 1220px)\")\nconst print$ = watchPrint()\n\n/* Retrieve search index, if search is enabled */\nconst config = configuration()\nconst index$ = document.forms.namedItem(\"search\")\n ? fetchSearchIndex()\n : NEVER\n\n/* Set up Clipboard.js integration */\nconst alert$ = new Subject()\nsetupClipboardJS({ alert$ })\n\n/* Set up progress indicator */\nconst progress$ = new Subject()\n\n/* Set up instant navigation, if enabled */\nif (feature(\"navigation.instant\"))\n setupInstantNavigation({ location$, viewport$, progress$ })\n .subscribe(document$)\n\n/* Set up version selector */\nif (config.version?.provider === \"mike\")\n setupVersionSelector({ document$ })\n\n/* Always close drawer and search on navigation */\nmerge(location$, target$)\n .pipe(\n delay(125)\n )\n .subscribe(() => {\n setToggle(\"drawer\", false)\n setToggle(\"search\", false)\n })\n\n/* Set up global keyboard handlers */\nkeyboard$\n .pipe(\n filter(({ mode }) => mode === \"global\")\n )\n .subscribe(key => {\n switch (key.type) {\n\n /* Go to previous page */\n case \"p\":\n case \",\":\n const prev = getOptionalElement(\"link[rel=prev]\")\n if (typeof prev !== \"undefined\")\n setLocation(prev)\n break\n\n /* Go to next page */\n case \"n\":\n case \".\":\n const next = getOptionalElement(\"link[rel=next]\")\n if (typeof next !== \"undefined\")\n setLocation(next)\n break\n\n /* Expand navigation, see https://bit.ly/3ZjG5io */\n case \"Enter\":\n const active = getActiveElement()\n if (active instanceof HTMLLabelElement)\n active.click()\n }\n })\n\n/* Set up patches */\npatchEllipsis({ document$ })\npatchIndeterminate({ document$, tablet$ })\npatchScrollfix({ document$ })\npatchScrolllock({ viewport$, tablet$ })\n\n/* Set up header and main area observable */\nconst header$ = watchHeader(getComponentElement(\"header\"), { viewport$ })\nconst main$ = document$\n .pipe(\n map(() => getComponentElement(\"main\")),\n switchMap(el => watchMain(el, { viewport$, header$ })),\n shareReplay(1)\n )\n\n/* Set up control component observables */\nconst control$ = merge(\n\n /* Consent */\n ...getComponentElements(\"consent\")\n .map(el => mountConsent(el, { target$ })),\n\n /* Dialog */\n ...getComponentElements(\"dialog\")\n .map(el => mountDialog(el, { alert$ })),\n\n /* Header */\n ...getComponentElements(\"header\")\n .map(el => mountHeader(el, { viewport$, header$, main$ })),\n\n /* Color palette */\n ...getComponentElements(\"palette\")\n .map(el => mountPalette(el)),\n\n /* Progress bar */\n ...getComponentElements(\"progress\")\n .map(el => mountProgress(el, { progress$ })),\n\n /* Search */\n ...getComponentElements(\"search\")\n .map(el => mountSearch(el, { index$, keyboard$ })),\n\n /* Repository information */\n ...getComponentElements(\"source\")\n .map(el => mountSource(el))\n)\n\n/* Set up content component observables */\nconst content$ = defer(() => merge(\n\n /* Announcement bar */\n ...getComponentElements(\"announce\")\n .map(el => 
mountAnnounce(el)),\n\n /* Content */\n ...getComponentElements(\"content\")\n .map(el => mountContent(el, { viewport$, target$, print$ })),\n\n /* Search highlighting */\n ...getComponentElements(\"content\")\n .map(el => feature(\"search.highlight\")\n ? mountSearchHiglight(el, { index$, location$ })\n : EMPTY\n ),\n\n /* Header title */\n ...getComponentElements(\"header-title\")\n .map(el => mountHeaderTitle(el, { viewport$, header$ })),\n\n /* Sidebar */\n ...getComponentElements(\"sidebar\")\n .map(el => el.getAttribute(\"data-md-type\") === \"navigation\"\n ? at(screen$, () => mountSidebar(el, { viewport$, header$, main$ }))\n : at(tablet$, () => mountSidebar(el, { viewport$, header$, main$ }))\n ),\n\n /* Navigation tabs */\n ...getComponentElements(\"tabs\")\n .map(el => mountTabs(el, { viewport$, header$ })),\n\n /* Table of contents */\n ...getComponentElements(\"toc\")\n .map(el => mountTableOfContents(el, {\n viewport$, header$, main$, target$\n })),\n\n /* Back-to-top button */\n ...getComponentElements(\"top\")\n .map(el => mountBackToTop(el, { viewport$, header$, main$, target$ }))\n))\n\n/* Set up component observables */\nconst component$ = document$\n .pipe(\n switchMap(() => content$),\n mergeWith(control$),\n shareReplay(1)\n )\n\n/* Subscribe to all components */\ncomponent$.subscribe()\n\n/* ----------------------------------------------------------------------------\n * Exports\n * ------------------------------------------------------------------------- */\n\nwindow.document$ = document$ /* Document observable */\nwindow.location$ = location$ /* Location subject */\nwindow.target$ = target$ /* Location target observable */\nwindow.keyboard$ = keyboard$ /* Keyboard observable */\nwindow.viewport$ = viewport$ /* Viewport observable */\nwindow.tablet$ = tablet$ /* Media tablet observable */\nwindow.screen$ = screen$ /* Media screen observable */\nwindow.print$ = print$ /* Media print observable */\nwindow.alert$ = alert$ /* Alert subject */\nwindow.progress$ = progress$ /* Progress indicator subject */\nwindow.component$ = component$ /* Component observable */\n", "/*! *****************************************************************************\r\nCopyright (c) Microsoft Corporation.\r\n\r\nPermission to use, copy, modify, and/or distribute this software for any\r\npurpose with or without fee is hereby granted.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\r\nREGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\r\nAND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\r\nINDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\r\nLOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\r\nOTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\r\nPERFORMANCE OF THIS SOFTWARE.\r\n***************************************************************************** */\r\n/* global Reflect, Promise */\r\n\r\nvar extendStatics = function(d, b) {\r\n extendStatics = Object.setPrototypeOf ||\r\n ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||\r\n function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };\r\n return extendStatics(d, b);\r\n};\r\n\r\nexport function __extends(d, b) {\r\n if (typeof b !== \"function\" && b !== null)\r\n throw new TypeError(\"Class extends value \" + String(b) + \" is not a constructor or null\");\r\n extendStatics(d, b);\r\n function __() { this.constructor = d; }\r\n d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());\r\n}\r\n\r\nexport var __assign = function() {\r\n __assign = Object.assign || function __assign(t) {\r\n for (var s, i = 1, n = arguments.length; i < n; i++) {\r\n s = arguments[i];\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];\r\n }\r\n return t;\r\n }\r\n return __assign.apply(this, arguments);\r\n}\r\n\r\nexport function __rest(s, e) {\r\n var t = {};\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)\r\n t[p] = s[p];\r\n if (s != null && typeof Object.getOwnPropertySymbols === \"function\")\r\n for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {\r\n if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))\r\n t[p[i]] = s[p[i]];\r\n }\r\n return t;\r\n}\r\n\r\nexport function __decorate(decorators, target, key, desc) {\r\n var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;\r\n if (typeof Reflect === \"object\" && typeof Reflect.decorate === \"function\") r = Reflect.decorate(decorators, target, key, desc);\r\n else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;\r\n return c > 3 && r && Object.defineProperty(target, key, r), r;\r\n}\r\n\r\nexport function __param(paramIndex, decorator) {\r\n return function (target, key) { decorator(target, key, paramIndex); }\r\n}\r\n\r\nexport function __metadata(metadataKey, metadataValue) {\r\n if (typeof Reflect === \"object\" && typeof Reflect.metadata === \"function\") return Reflect.metadata(metadataKey, metadataValue);\r\n}\r\n\r\nexport function __awaiter(thisArg, _arguments, P, generator) {\r\n function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }\r\n return new (P || (P = Promise))(function (resolve, reject) {\r\n function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }\r\n function rejected(value) { try { step(generator[\"throw\"](value)); } catch (e) { reject(e); } }\r\n function step(result) { result.done ? 
resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }\r\n step((generator = generator.apply(thisArg, _arguments || [])).next());\r\n });\r\n}\r\n\r\nexport function __generator(thisArg, body) {\r\n var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;\r\n return g = { next: verb(0), \"throw\": verb(1), \"return\": verb(2) }, typeof Symbol === \"function\" && (g[Symbol.iterator] = function() { return this; }), g;\r\n function verb(n) { return function (v) { return step([n, v]); }; }\r\n function step(op) {\r\n if (f) throw new TypeError(\"Generator is already executing.\");\r\n while (_) try {\r\n if (f = 1, y && (t = op[0] & 2 ? y[\"return\"] : op[0] ? y[\"throw\"] || ((t = y[\"return\"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;\r\n if (y = 0, t) op = [op[0] & 2, t.value];\r\n switch (op[0]) {\r\n case 0: case 1: t = op; break;\r\n case 4: _.label++; return { value: op[1], done: false };\r\n case 5: _.label++; y = op[1]; op = [0]; continue;\r\n case 7: op = _.ops.pop(); _.trys.pop(); continue;\r\n default:\r\n if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }\r\n if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }\r\n if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }\r\n if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }\r\n if (t[2]) _.ops.pop();\r\n _.trys.pop(); continue;\r\n }\r\n op = body.call(thisArg, _);\r\n } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }\r\n if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };\r\n }\r\n}\r\n\r\nexport var __createBinding = Object.create ? (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });\r\n}) : (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n o[k2] = m[k];\r\n});\r\n\r\nexport function __exportStar(m, o) {\r\n for (var p in m) if (p !== \"default\" && !Object.prototype.hasOwnProperty.call(o, p)) __createBinding(o, m, p);\r\n}\r\n\r\nexport function __values(o) {\r\n var s = typeof Symbol === \"function\" && Symbol.iterator, m = s && o[s], i = 0;\r\n if (m) return m.call(o);\r\n if (o && typeof o.length === \"number\") return {\r\n next: function () {\r\n if (o && i >= o.length) o = void 0;\r\n return { value: o && o[i++], done: !o };\r\n }\r\n };\r\n throw new TypeError(s ? 
\"Object is not iterable.\" : \"Symbol.iterator is not defined.\");\r\n}\r\n\r\nexport function __read(o, n) {\r\n var m = typeof Symbol === \"function\" && o[Symbol.iterator];\r\n if (!m) return o;\r\n var i = m.call(o), r, ar = [], e;\r\n try {\r\n while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);\r\n }\r\n catch (error) { e = { error: error }; }\r\n finally {\r\n try {\r\n if (r && !r.done && (m = i[\"return\"])) m.call(i);\r\n }\r\n finally { if (e) throw e.error; }\r\n }\r\n return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spread() {\r\n for (var ar = [], i = 0; i < arguments.length; i++)\r\n ar = ar.concat(__read(arguments[i]));\r\n return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spreadArrays() {\r\n for (var s = 0, i = 0, il = arguments.length; i < il; i++) s += arguments[i].length;\r\n for (var r = Array(s), k = 0, i = 0; i < il; i++)\r\n for (var a = arguments[i], j = 0, jl = a.length; j < jl; j++, k++)\r\n r[k] = a[j];\r\n return r;\r\n}\r\n\r\nexport function __spreadArray(to, from, pack) {\r\n if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {\r\n if (ar || !(i in from)) {\r\n if (!ar) ar = Array.prototype.slice.call(from, 0, i);\r\n ar[i] = from[i];\r\n }\r\n }\r\n return to.concat(ar || Array.prototype.slice.call(from));\r\n}\r\n\r\nexport function __await(v) {\r\n return this instanceof __await ? (this.v = v, this) : new __await(v);\r\n}\r\n\r\nexport function __asyncGenerator(thisArg, _arguments, generator) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var g = generator.apply(thisArg, _arguments || []), i, q = [];\r\n return i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i;\r\n function verb(n) { if (g[n]) i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; }\r\n function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } }\r\n function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); }\r\n function fulfill(value) { resume(\"next\", value); }\r\n function reject(value) { resume(\"throw\", value); }\r\n function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); }\r\n}\r\n\r\nexport function __asyncDelegator(o) {\r\n var i, p;\r\n return i = {}, verb(\"next\"), verb(\"throw\", function (e) { throw e; }), verb(\"return\"), i[Symbol.iterator] = function () { return this; }, i;\r\n function verb(n, f) { i[n] = o[n] ? function (v) { return (p = !p) ? { value: __await(o[n](v)), done: n === \"return\" } : f ? f(v) : v; } : f; }\r\n}\r\n\r\nexport function __asyncValues(o) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var m = o[Symbol.asyncIterator], i;\r\n return m ? m.call(o) : (o = typeof __values === \"function\" ? 
__values(o) : o[Symbol.iterator](), i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i);\r\n function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }\r\n function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }\r\n}\r\n\r\nexport function __makeTemplateObject(cooked, raw) {\r\n if (Object.defineProperty) { Object.defineProperty(cooked, \"raw\", { value: raw }); } else { cooked.raw = raw; }\r\n return cooked;\r\n};\r\n\r\nvar __setModuleDefault = Object.create ? (function(o, v) {\r\n Object.defineProperty(o, \"default\", { enumerable: true, value: v });\r\n}) : function(o, v) {\r\n o[\"default\"] = v;\r\n};\r\n\r\nexport function __importStar(mod) {\r\n if (mod && mod.__esModule) return mod;\r\n var result = {};\r\n if (mod != null) for (var k in mod) if (k !== \"default\" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);\r\n __setModuleDefault(result, mod);\r\n return result;\r\n}\r\n\r\nexport function __importDefault(mod) {\r\n return (mod && mod.__esModule) ? mod : { default: mod };\r\n}\r\n\r\nexport function __classPrivateFieldGet(receiver, state, kind, f) {\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a getter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot read private member from an object whose class did not declare it\");\r\n return kind === \"m\" ? f : kind === \"a\" ? f.call(receiver) : f ? f.value : state.get(receiver);\r\n}\r\n\r\nexport function __classPrivateFieldSet(receiver, state, value, kind, f) {\r\n if (kind === \"m\") throw new TypeError(\"Private method is not writable\");\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a setter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot write private member to an object whose class did not declare it\");\r\n return (kind === \"a\" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;\r\n}\r\n", "/**\n * Returns true if the object is a function.\n * @param value The value to check\n */\nexport function isFunction(value: any): value is (...args: any[]) => any {\n return typeof value === 'function';\n}\n", "/**\n * Used to create Error subclasses until the community moves away from ES5.\n *\n * This is because compiling from TypeScript down to ES5 has issues with subclassing Errors\n * as well as other built-in types: https://github.com/Microsoft/TypeScript/issues/12123\n *\n * @param createImpl A factory function to create the actual constructor implementation. 
The returned\n * function should be a named function that calls `_super` internally.\n */\nexport function createErrorClass(createImpl: (_super: any) => any): T {\n const _super = (instance: any) => {\n Error.call(instance);\n instance.stack = new Error().stack;\n };\n\n const ctorFunc = createImpl(_super);\n ctorFunc.prototype = Object.create(Error.prototype);\n ctorFunc.prototype.constructor = ctorFunc;\n return ctorFunc;\n}\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface UnsubscriptionError extends Error {\n readonly errors: any[];\n}\n\nexport interface UnsubscriptionErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (errors: any[]): UnsubscriptionError;\n}\n\n/**\n * An error thrown when one or more errors have occurred during the\n * `unsubscribe` of a {@link Subscription}.\n */\nexport const UnsubscriptionError: UnsubscriptionErrorCtor = createErrorClass(\n (_super) =>\n function UnsubscriptionErrorImpl(this: any, errors: (Error | string)[]) {\n _super(this);\n this.message = errors\n ? `${errors.length} errors occurred during unsubscription:\n${errors.map((err, i) => `${i + 1}) ${err.toString()}`).join('\\n ')}`\n : '';\n this.name = 'UnsubscriptionError';\n this.errors = errors;\n }\n);\n", "/**\n * Removes an item from an array, mutating it.\n * @param arr The array to remove the item from\n * @param item The item to remove\n */\nexport function arrRemove(arr: T[] | undefined | null, item: T) {\n if (arr) {\n const index = arr.indexOf(item);\n 0 <= index && arr.splice(index, 1);\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { UnsubscriptionError } from './util/UnsubscriptionError';\nimport { SubscriptionLike, TeardownLogic, Unsubscribable } from './types';\nimport { arrRemove } from './util/arrRemove';\n\n/**\n * Represents a disposable resource, such as the execution of an Observable. A\n * Subscription has one important method, `unsubscribe`, that takes no argument\n * and just disposes the resource held by the subscription.\n *\n * Additionally, subscriptions may be grouped together through the `add()`\n * method, which will attach a child Subscription to the current Subscription.\n * When a Subscription is unsubscribed, all its children (and its grandchildren)\n * will be unsubscribed as well.\n *\n * @class Subscription\n */\nexport class Subscription implements SubscriptionLike {\n /** @nocollapse */\n public static EMPTY = (() => {\n const empty = new Subscription();\n empty.closed = true;\n return empty;\n })();\n\n /**\n * A flag to indicate whether this Subscription has already been unsubscribed.\n */\n public closed = false;\n\n private _parentage: Subscription[] | Subscription | null = null;\n\n /**\n * The list of registered finalizers to execute upon unsubscription. Adding and removing from this\n * list occurs in the {@link #add} and {@link #remove} methods.\n */\n private _finalizers: Exclude[] | null = null;\n\n /**\n * @param initialTeardown A function executed first as part of the finalization\n * process that is kicked off when {@link #unsubscribe} is called.\n */\n constructor(private initialTeardown?: () => void) {}\n\n /**\n * Disposes the resources held by the subscription. 
May, for instance, cancel\n * an ongoing Observable execution or cancel any other type of work that\n * started when the Subscription was created.\n * @return {void}\n */\n unsubscribe(): void {\n let errors: any[] | undefined;\n\n if (!this.closed) {\n this.closed = true;\n\n // Remove this from it's parents.\n const { _parentage } = this;\n if (_parentage) {\n this._parentage = null;\n if (Array.isArray(_parentage)) {\n for (const parent of _parentage) {\n parent.remove(this);\n }\n } else {\n _parentage.remove(this);\n }\n }\n\n const { initialTeardown: initialFinalizer } = this;\n if (isFunction(initialFinalizer)) {\n try {\n initialFinalizer();\n } catch (e) {\n errors = e instanceof UnsubscriptionError ? e.errors : [e];\n }\n }\n\n const { _finalizers } = this;\n if (_finalizers) {\n this._finalizers = null;\n for (const finalizer of _finalizers) {\n try {\n execFinalizer(finalizer);\n } catch (err) {\n errors = errors ?? [];\n if (err instanceof UnsubscriptionError) {\n errors = [...errors, ...err.errors];\n } else {\n errors.push(err);\n }\n }\n }\n }\n\n if (errors) {\n throw new UnsubscriptionError(errors);\n }\n }\n }\n\n /**\n * Adds a finalizer to this subscription, so that finalization will be unsubscribed/called\n * when this subscription is unsubscribed. If this subscription is already {@link #closed},\n * because it has already been unsubscribed, then whatever finalizer is passed to it\n * will automatically be executed (unless the finalizer itself is also a closed subscription).\n *\n * Closed Subscriptions cannot be added as finalizers to any subscription. Adding a closed\n * subscription to a any subscription will result in no operation. (A noop).\n *\n * Adding a subscription to itself, or adding `null` or `undefined` will not perform any\n * operation at all. (A noop).\n *\n * `Subscription` instances that are added to this instance will automatically remove themselves\n * if they are unsubscribed. Functions and {@link Unsubscribable} objects that you wish to remove\n * will need to be removed manually with {@link #remove}\n *\n * @param teardown The finalization logic to add to this subscription.\n */\n add(teardown: TeardownLogic): void {\n // Only add the finalizer if it's not undefined\n // and don't add a subscription to itself.\n if (teardown && teardown !== this) {\n if (this.closed) {\n // If this subscription is already closed,\n // execute whatever finalizer is handed to it automatically.\n execFinalizer(teardown);\n } else {\n if (teardown instanceof Subscription) {\n // We don't add closed subscriptions, and we don't add the same subscription\n // twice. Subscription unsubscribe is idempotent.\n if (teardown.closed || teardown._hasParent(this)) {\n return;\n }\n teardown._addParent(this);\n }\n (this._finalizers = this._finalizers ?? 
[]).push(teardown);\n }\n }\n }\n\n /**\n * Checks to see if a this subscription already has a particular parent.\n * This will signal that this subscription has already been added to the parent in question.\n * @param parent the parent to check for\n */\n private _hasParent(parent: Subscription) {\n const { _parentage } = this;\n return _parentage === parent || (Array.isArray(_parentage) && _parentage.includes(parent));\n }\n\n /**\n * Adds a parent to this subscription so it can be removed from the parent if it\n * unsubscribes on it's own.\n *\n * NOTE: THIS ASSUMES THAT {@link _hasParent} HAS ALREADY BEEN CHECKED.\n * @param parent The parent subscription to add\n */\n private _addParent(parent: Subscription) {\n const { _parentage } = this;\n this._parentage = Array.isArray(_parentage) ? (_parentage.push(parent), _parentage) : _parentage ? [_parentage, parent] : parent;\n }\n\n /**\n * Called on a child when it is removed via {@link #remove}.\n * @param parent The parent to remove\n */\n private _removeParent(parent: Subscription) {\n const { _parentage } = this;\n if (_parentage === parent) {\n this._parentage = null;\n } else if (Array.isArray(_parentage)) {\n arrRemove(_parentage, parent);\n }\n }\n\n /**\n * Removes a finalizer from this subscription that was previously added with the {@link #add} method.\n *\n * Note that `Subscription` instances, when unsubscribed, will automatically remove themselves\n * from every other `Subscription` they have been added to. This means that using the `remove` method\n * is not a common thing and should be used thoughtfully.\n *\n * If you add the same finalizer instance of a function or an unsubscribable object to a `Subscription` instance\n * more than once, you will need to call `remove` the same number of times to remove all instances.\n *\n * All finalizer instances are removed to free up memory upon unsubscription.\n *\n * @param teardown The finalizer to remove from this subscription\n */\n remove(teardown: Exclude): void {\n const { _finalizers } = this;\n _finalizers && arrRemove(_finalizers, teardown);\n\n if (teardown instanceof Subscription) {\n teardown._removeParent(this);\n }\n }\n}\n\nexport const EMPTY_SUBSCRIPTION = Subscription.EMPTY;\n\nexport function isSubscription(value: any): value is Subscription {\n return (\n value instanceof Subscription ||\n (value && 'closed' in value && isFunction(value.remove) && isFunction(value.add) && isFunction(value.unsubscribe))\n );\n}\n\nfunction execFinalizer(finalizer: Unsubscribable | (() => void)) {\n if (isFunction(finalizer)) {\n finalizer();\n } else {\n finalizer.unsubscribe();\n }\n}\n", "import { Subscriber } from './Subscriber';\nimport { ObservableNotification } from './types';\n\n/**\n * The {@link GlobalConfig} object for RxJS. It is used to configure things\n * like how to react on unhandled errors.\n */\nexport const config: GlobalConfig = {\n onUnhandledError: null,\n onStoppedNotification: null,\n Promise: undefined,\n useDeprecatedSynchronousErrorHandling: false,\n useDeprecatedNextContext: false,\n};\n\n/**\n * The global configuration object for RxJS, used to configure things\n * like how to react on unhandled errors. Accessible via {@link config}\n * object.\n */\nexport interface GlobalConfig {\n /**\n * A registration point for unhandled errors from RxJS. These are errors that\n * cannot were not handled by consuming code in the usual subscription path. 
For\n * example, if you have this configured, and you subscribe to an observable without\n * providing an error handler, errors from that subscription will end up here. This\n * will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onUnhandledError: ((err: any) => void) | null;\n\n /**\n * A registration point for notifications that cannot be sent to subscribers because they\n * have completed, errored or have been explicitly unsubscribed. By default, next, complete\n * and error notifications sent to stopped subscribers are noops. However, sometimes callers\n * might want a different behavior. For example, with sources that attempt to report errors\n * to stopped subscribers, a caller can configure RxJS to throw an unhandled error instead.\n * This will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onStoppedNotification: ((notification: ObservableNotification, subscriber: Subscriber) => void) | null;\n\n /**\n * The promise constructor used by default for {@link Observable#toPromise toPromise} and {@link Observable#forEach forEach}\n * methods.\n *\n * @deprecated As of version 8, RxJS will no longer support this sort of injection of a\n * Promise constructor. If you need a Promise implementation other than native promises,\n * please polyfill/patch Promise as you see appropriate. Will be removed in v8.\n */\n Promise?: PromiseConstructorLike;\n\n /**\n * If true, turns on synchronous error rethrowing, which is a deprecated behavior\n * in v6 and higher. This behavior enables bad patterns like wrapping a subscribe\n * call in a try/catch block. It also enables producer interference, a nasty bug\n * where a multicast can be broken for all observers by a downstream consumer with\n * an unhandled error. DO NOT USE THIS FLAG UNLESS IT'S NEEDED TO BUY TIME\n * FOR MIGRATION REASONS.\n *\n * @deprecated As of version 8, RxJS will no longer support synchronous throwing\n * of unhandled errors. All errors will be thrown on a separate call stack to prevent bad\n * behaviors described above. Will be removed in v8.\n */\n useDeprecatedSynchronousErrorHandling: boolean;\n\n /**\n * If true, enables an as-of-yet undocumented feature from v5: The ability to access\n * `unsubscribe()` via `this` context in `next` functions created in observers passed\n * to `subscribe`.\n *\n * This is being removed because the performance was severely problematic, and it could also cause\n * issues when types other than POJOs are passed to subscribe as subscribers, as they will likely have\n * their `this` context overwritten.\n *\n * @deprecated As of version 8, RxJS will no longer support altering the\n * context of next functions provided as part of an observer to Subscribe. Instead,\n * you will have access to a subscription or a signal or token that will allow you to do things like\n * unsubscribe and test closed status. 
Will be removed in v8.\n */\n useDeprecatedNextContext: boolean;\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetTimeoutFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearTimeoutFunction = (handle: TimerHandle) => void;\n\ninterface TimeoutProvider {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n delegate:\n | {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n }\n | undefined;\n}\n\nexport const timeoutProvider: TimeoutProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setTimeout(handler: () => void, timeout?: number, ...args) {\n const { delegate } = timeoutProvider;\n if (delegate?.setTimeout) {\n return delegate.setTimeout(handler, timeout, ...args);\n }\n return setTimeout(handler, timeout, ...args);\n },\n clearTimeout(handle) {\n const { delegate } = timeoutProvider;\n return (delegate?.clearTimeout || clearTimeout)(handle as any);\n },\n delegate: undefined,\n};\n", "import { config } from '../config';\nimport { timeoutProvider } from '../scheduler/timeoutProvider';\n\n/**\n * Handles an error on another job either with the user-configured {@link onUnhandledError},\n * or by throwing it on that new job so it can be picked up by `window.onerror`, `process.on('error')`, etc.\n *\n * This should be called whenever there is an error that is out-of-band with the subscription\n * or when an error hits a terminal boundary of the subscription and no error handler was provided.\n *\n * @param err the error to report\n */\nexport function reportUnhandledError(err: any) {\n timeoutProvider.setTimeout(() => {\n const { onUnhandledError } = config;\n if (onUnhandledError) {\n // Execute the user-configured error handler.\n onUnhandledError(err);\n } else {\n // Throw so it is picked up by the runtime's uncaught error mechanism.\n throw err;\n }\n });\n}\n", "/* tslint:disable:no-empty */\nexport function noop() { }\n", "import { CompleteNotification, NextNotification, ErrorNotification } from './types';\n\n/**\n * A completion object optimized for memory use and created to be the\n * same \"shape\" as other notifications in v8.\n * @internal\n */\nexport const COMPLETE_NOTIFICATION = (() => createNotification('C', undefined, undefined) as CompleteNotification)();\n\n/**\n * Internal use only. Creates an optimized error notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function errorNotification(error: any): ErrorNotification {\n return createNotification('E', undefined, error) as any;\n}\n\n/**\n * Internal use only. Creates an optimized next notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function nextNotification(value: T) {\n return createNotification('N', value, undefined) as NextNotification;\n}\n\n/**\n * Ensures that all notifications created internally have the same \"shape\" in v8.\n *\n * TODO: This is only exported to support a crazy legacy test in `groupBy`.\n * @internal\n */\nexport function createNotification(kind: 'N' | 'E' | 'C', value: any, error: any) {\n return {\n kind,\n value,\n error,\n };\n}\n", "import { config } from '../config';\n\nlet context: { errorThrown: boolean; error: any } | null = null;\n\n/**\n * Handles dealing with errors for super-gross mode. 
Creates a context, in which\n * any synchronously thrown errors will be passed to {@link captureError}. Which\n * will record the error such that it will be rethrown after the call back is complete.\n * TODO: Remove in v8\n * @param cb An immediately executed function.\n */\nexport function errorContext(cb: () => void) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n const isRoot = !context;\n if (isRoot) {\n context = { errorThrown: false, error: null };\n }\n cb();\n if (isRoot) {\n const { errorThrown, error } = context!;\n context = null;\n if (errorThrown) {\n throw error;\n }\n }\n } else {\n // This is the general non-deprecated path for everyone that\n // isn't crazy enough to use super-gross mode (useDeprecatedSynchronousErrorHandling)\n cb();\n }\n}\n\n/**\n * Captures errors only in super-gross mode.\n * @param err the error to capture\n */\nexport function captureError(err: any) {\n if (config.useDeprecatedSynchronousErrorHandling && context) {\n context.errorThrown = true;\n context.error = err;\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { Observer, ObservableNotification } from './types';\nimport { isSubscription, Subscription } from './Subscription';\nimport { config } from './config';\nimport { reportUnhandledError } from './util/reportUnhandledError';\nimport { noop } from './util/noop';\nimport { nextNotification, errorNotification, COMPLETE_NOTIFICATION } from './NotificationFactories';\nimport { timeoutProvider } from './scheduler/timeoutProvider';\nimport { captureError } from './util/errorContext';\n\n/**\n * Implements the {@link Observer} interface and extends the\n * {@link Subscription} class. While the {@link Observer} is the public API for\n * consuming the values of an {@link Observable}, all Observers get converted to\n * a Subscriber, in order to provide Subscription-like capabilities such as\n * `unsubscribe`. Subscriber is a common type in RxJS, and crucial for\n * implementing operators, but it is rarely used as a public API.\n *\n * @class Subscriber\n */\nexport class Subscriber extends Subscription implements Observer {\n /**\n * A static factory for a Subscriber, given a (potentially partial) definition\n * of an Observer.\n * @param next The `next` callback of an Observer.\n * @param error The `error` callback of an\n * Observer.\n * @param complete The `complete` callback of an\n * Observer.\n * @return A Subscriber wrapping the (partially defined)\n * Observer represented by the given arguments.\n * @nocollapse\n * @deprecated Do not use. Will be removed in v8. There is no replacement for this\n * method, and there is no reason to be creating instances of `Subscriber` directly.\n * If you have a specific use case, please file an issue.\n */\n static create(next?: (x?: T) => void, error?: (e?: any) => void, complete?: () => void): Subscriber {\n return new SafeSubscriber(next, error, complete);\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected isStopped: boolean = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected destination: Subscriber | Observer; // this `any` is the escape hatch to erase extra type param (e.g. R)\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * There is no reason to directly create an instance of Subscriber. 
This type is exported for typings reasons.\n */\n constructor(destination?: Subscriber | Observer) {\n super();\n if (destination) {\n this.destination = destination;\n // Automatically chain subscriptions together here.\n // if destination is a Subscription, then it is a Subscriber.\n if (isSubscription(destination)) {\n destination.add(this);\n }\n } else {\n this.destination = EMPTY_OBSERVER;\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `next` from\n * the Observable, with a value. The Observable may call this method 0 or more\n * times.\n * @param {T} [value] The `next` value.\n * @return {void}\n */\n next(value?: T): void {\n if (this.isStopped) {\n handleStoppedNotification(nextNotification(value), this);\n } else {\n this._next(value!);\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `error` from\n * the Observable, with an attached `Error`. Notifies the Observer that\n * the Observable has experienced an error condition.\n * @param {any} [err] The `error` exception.\n * @return {void}\n */\n error(err?: any): void {\n if (this.isStopped) {\n handleStoppedNotification(errorNotification(err), this);\n } else {\n this.isStopped = true;\n this._error(err);\n }\n }\n\n /**\n * The {@link Observer} callback to receive a valueless notification of type\n * `complete` from the Observable. Notifies the Observer that the Observable\n * has finished sending push-based notifications.\n * @return {void}\n */\n complete(): void {\n if (this.isStopped) {\n handleStoppedNotification(COMPLETE_NOTIFICATION, this);\n } else {\n this.isStopped = true;\n this._complete();\n }\n }\n\n unsubscribe(): void {\n if (!this.closed) {\n this.isStopped = true;\n super.unsubscribe();\n this.destination = null!;\n }\n }\n\n protected _next(value: T): void {\n this.destination.next(value);\n }\n\n protected _error(err: any): void {\n try {\n this.destination.error(err);\n } finally {\n this.unsubscribe();\n }\n }\n\n protected _complete(): void {\n try {\n this.destination.complete();\n } finally {\n this.unsubscribe();\n }\n }\n}\n\n/**\n * This bind is captured here because we want to be able to have\n * compatibility with monoid libraries that tend to use a method named\n * `bind`. 
In particular, a library called Monio requires this.\n */\nconst _bind = Function.prototype.bind;\n\nfunction bind any>(fn: Fn, thisArg: any): Fn {\n return _bind.call(fn, thisArg);\n}\n\n/**\n * Internal optimization only, DO NOT EXPOSE.\n * @internal\n */\nclass ConsumerObserver implements Observer {\n constructor(private partialObserver: Partial>) {}\n\n next(value: T): void {\n const { partialObserver } = this;\n if (partialObserver.next) {\n try {\n partialObserver.next(value);\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n\n error(err: any): void {\n const { partialObserver } = this;\n if (partialObserver.error) {\n try {\n partialObserver.error(err);\n } catch (error) {\n handleUnhandledError(error);\n }\n } else {\n handleUnhandledError(err);\n }\n }\n\n complete(): void {\n const { partialObserver } = this;\n if (partialObserver.complete) {\n try {\n partialObserver.complete();\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n}\n\nexport class SafeSubscriber extends Subscriber {\n constructor(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((e?: any) => void) | null,\n complete?: (() => void) | null\n ) {\n super();\n\n let partialObserver: Partial>;\n if (isFunction(observerOrNext) || !observerOrNext) {\n // The first argument is a function, not an observer. The next\n // two arguments *could* be observers, or they could be empty.\n partialObserver = {\n next: (observerOrNext ?? undefined) as (((value: T) => void) | undefined),\n error: error ?? undefined,\n complete: complete ?? undefined,\n };\n } else {\n // The first argument is a partial observer.\n let context: any;\n if (this && config.useDeprecatedNextContext) {\n // This is a deprecated path that made `this.unsubscribe()` available in\n // next handler functions passed to subscribe. This only exists behind a flag\n // now, as it is *very* slow.\n context = Object.create(observerOrNext);\n context.unsubscribe = () => this.unsubscribe();\n partialObserver = {\n next: observerOrNext.next && bind(observerOrNext.next, context),\n error: observerOrNext.error && bind(observerOrNext.error, context),\n complete: observerOrNext.complete && bind(observerOrNext.complete, context),\n };\n } else {\n // The \"normal\" path. 
Just use the partial observer directly.\n partialObserver = observerOrNext;\n }\n }\n\n // Wrap the partial observer to ensure it's a full observer, and\n // make sure proper error handling is accounted for.\n this.destination = new ConsumerObserver(partialObserver);\n }\n}\n\nfunction handleUnhandledError(error: any) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n captureError(error);\n } else {\n // Ideal path, we report this as an unhandled error,\n // which is thrown on a new call stack.\n reportUnhandledError(error);\n }\n}\n\n/**\n * An error handler used when no error handler was supplied\n * to the SafeSubscriber -- meaning no error handler was supplied\n * do the `subscribe` call on our observable.\n * @param err The error to handle\n */\nfunction defaultErrorHandler(err: any) {\n throw err;\n}\n\n/**\n * A handler for notifications that cannot be sent to a stopped subscriber.\n * @param notification The notification being sent\n * @param subscriber The stopped subscriber\n */\nfunction handleStoppedNotification(notification: ObservableNotification, subscriber: Subscriber) {\n const { onStoppedNotification } = config;\n onStoppedNotification && timeoutProvider.setTimeout(() => onStoppedNotification(notification, subscriber));\n}\n\n/**\n * The observer used as a stub for subscriptions where the user did not\n * pass any arguments to `subscribe`. Comes with the default error handling\n * behavior.\n */\nexport const EMPTY_OBSERVER: Readonly> & { closed: true } = {\n closed: true,\n next: noop,\n error: defaultErrorHandler,\n complete: noop,\n};\n", "/**\n * Symbol.observable or a string \"@@observable\". Used for interop\n *\n * @deprecated We will no longer be exporting this symbol in upcoming versions of RxJS.\n * Instead polyfill and use Symbol.observable directly *or* use https://www.npmjs.com/package/symbol-observable\n */\nexport const observable: string | symbol = (() => (typeof Symbol === 'function' && Symbol.observable) || '@@observable')();\n", "/**\n * This function takes one parameter and just returns it. Simply put,\n * this is like `(x: T): T => x`.\n *\n * ## Examples\n *\n * This is useful in some cases when using things like `mergeMap`\n *\n * ```ts\n * import { interval, take, map, range, mergeMap, identity } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(5));\n *\n * const result$ = source$.pipe(\n * map(i => range(i)),\n * mergeMap(identity) // same as mergeMap(x => x)\n * );\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * Or when you want to selectively apply an operator\n *\n * ```ts\n * import { interval, take, identity } from 'rxjs';\n *\n * const shouldLimit = () => Math.random() < 0.5;\n *\n * const source$ = interval(1000);\n *\n * const result$ = source$.pipe(shouldLimit() ? 
take(5) : identity);\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * @param x Any value that is returned by this function\n * @returns The value passed as the first parameter to this function\n */\nexport function identity(x: T): T {\n return x;\n}\n", "import { identity } from './identity';\nimport { UnaryFunction } from '../types';\n\nexport function pipe(): typeof identity;\nexport function pipe(fn1: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction, fn3: UnaryFunction): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction,\n ...fns: UnaryFunction[]\n): UnaryFunction;\n\n/**\n * pipe() can be called on one or more functions, each of which can take one argument (\"UnaryFunction\")\n * and uses it to return a value.\n * It returns a function that takes one argument, passes it to the first UnaryFunction, and then\n * passes the result to the next one, passes that result to the next one, and so on. \n */\nexport function pipe(...fns: Array>): UnaryFunction {\n return pipeFromArray(fns);\n}\n\n/** @internal */\nexport function pipeFromArray(fns: Array>): UnaryFunction {\n if (fns.length === 0) {\n return identity as UnaryFunction;\n }\n\n if (fns.length === 1) {\n return fns[0];\n }\n\n return function piped(input: T): R {\n return fns.reduce((prev: any, fn: UnaryFunction) => fn(prev), input as any);\n };\n}\n", "import { Operator } from './Operator';\nimport { SafeSubscriber, Subscriber } from './Subscriber';\nimport { isSubscription, Subscription } from './Subscription';\nimport { TeardownLogic, OperatorFunction, Subscribable, Observer } from './types';\nimport { observable as Symbol_observable } from './symbol/observable';\nimport { pipeFromArray } from './util/pipe';\nimport { config } from './config';\nimport { isFunction } from './util/isFunction';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A representation of any set of values over any amount of time. This is the most basic building block\n * of RxJS.\n *\n * @class Observable\n */\nexport class Observable implements Subscribable {\n /**\n * @deprecated Internal implementation detail, do not use directly. 
Will be made internal in v8.\n */\n source: Observable | undefined;\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n operator: Operator | undefined;\n\n /**\n * @constructor\n * @param {Function} subscribe the function that is called when the Observable is\n * initially subscribed to. This function is given a Subscriber, to which new values\n * can be `next`ed, or an `error` method can be called to raise an error, or\n * `complete` can be called to notify of a successful completion.\n */\n constructor(subscribe?: (this: Observable, subscriber: Subscriber) => TeardownLogic) {\n if (subscribe) {\n this._subscribe = subscribe;\n }\n }\n\n // HACK: Since TypeScript inherits static properties too, we have to\n // fight against TypeScript here so Subject can have a different static create signature\n /**\n * Creates a new Observable by calling the Observable constructor\n * @owner Observable\n * @method create\n * @param {Function} subscribe? the subscriber function to be passed to the Observable constructor\n * @return {Observable} a new observable\n * @nocollapse\n * @deprecated Use `new Observable()` instead. Will be removed in v8.\n */\n static create: (...args: any[]) => any = (subscribe?: (subscriber: Subscriber) => TeardownLogic) => {\n return new Observable(subscribe);\n };\n\n /**\n * Creates a new Observable, with this Observable instance as the source, and the passed\n * operator defined as the new observable's operator.\n * @method lift\n * @param operator the operator defining the operation to take on the observable\n * @return a new observable with the Operator applied\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * If you have implemented an operator using `lift`, it is recommended that you create an\n * operator by simply returning `new Observable()` directly. See \"Creating new operators from\n * scratch\" section here: https://rxjs.dev/guide/operators\n */\n lift(operator?: Operator): Observable {\n const observable = new Observable();\n observable.source = this;\n observable.operator = operator;\n return observable;\n }\n\n subscribe(observerOrNext?: Partial> | ((value: T) => void)): Subscription;\n /** @deprecated Instead of passing separate callback arguments, use an observer argument. Signatures taking separate callback arguments will be removed in v8. Details: https://rxjs.dev/deprecations/subscribe-arguments */\n subscribe(next?: ((value: T) => void) | null, error?: ((error: any) => void) | null, complete?: (() => void) | null): Subscription;\n /**\n * Invokes an execution of an Observable and registers Observer handlers for notifications it will emit.\n *\n * Use it when you have all these Observables, but still nothing is happening.\n *\n * `subscribe` is not a regular operator, but a method that calls Observable's internal `subscribe` function. It\n * might be for example a function that you passed to Observable's constructor, but most of the time it is\n * a library implementation, which defines what will be emitted by an Observable, and when it be will emitted. This means\n * that calling `subscribe` is actually the moment when Observable starts its work, not when it is created, as it is often\n * the thought.\n *\n * Apart from starting the execution of an Observable, this method allows you to listen for values\n * that an Observable emits, as well as for when it completes or errors. 
You can achieve this in two\n * of the following ways.\n *\n * The first way is creating an object that implements {@link Observer} interface. It should have methods\n * defined by that interface, but note that it should be just a regular JavaScript object, which you can create\n * yourself in any way you want (ES6 class, classic function constructor, object literal etc.). In particular, do\n * not attempt to use any RxJS implementation details to create Observers - you don't need them. Remember also\n * that your object does not have to implement all methods. If you find yourself creating a method that doesn't\n * do anything, you can simply omit it. Note however, if the `error` method is not provided and an error happens,\n * it will be thrown asynchronously. Errors thrown asynchronously cannot be caught using `try`/`catch`. Instead,\n * use the {@link onUnhandledError} configuration option or use a runtime handler (like `window.onerror` or\n * `process.on('error)`) to be notified of unhandled errors. Because of this, it's recommended that you provide\n * an `error` method to avoid missing thrown errors.\n *\n * The second way is to give up on Observer object altogether and simply provide callback functions in place of its methods.\n * This means you can provide three functions as arguments to `subscribe`, where the first function is equivalent\n * of a `next` method, the second of an `error` method and the third of a `complete` method. Just as in case of an Observer,\n * if you do not need to listen for something, you can omit a function by passing `undefined` or `null`,\n * since `subscribe` recognizes these functions by where they were placed in function call. When it comes\n * to the `error` function, as with an Observer, if not provided, errors emitted by an Observable will be thrown asynchronously.\n *\n * You can, however, subscribe with no parameters at all. This may be the case where you're not interested in terminal events\n * and you also handled emissions internally by using operators (e.g. using `tap`).\n *\n * Whichever style of calling `subscribe` you use, in both cases it returns a Subscription object.\n * This object allows you to call `unsubscribe` on it, which in turn will stop the work that an Observable does and will clean\n * up all resources that an Observable used. Note that cancelling a subscription will not call `complete` callback\n * provided to `subscribe` function, which is reserved for a regular completion signal that comes from an Observable.\n *\n * Remember that callbacks provided to `subscribe` are not guaranteed to be called asynchronously.\n * It is an Observable itself that decides when these functions will be called. For example {@link of}\n * by default emits all its values synchronously. 
Always check documentation for how given Observable\n * will behave when subscribed and if its default behavior can be modified with a `scheduler`.\n *\n * #### Examples\n *\n * Subscribe with an {@link guide/observer Observer}\n *\n * ```ts\n * import { of } from 'rxjs';\n *\n * const sumObserver = {\n * sum: 0,\n * next(value) {\n * console.log('Adding: ' + value);\n * this.sum = this.sum + value;\n * },\n * error() {\n * // We actually could just remove this method,\n * // since we do not really care about errors right now.\n * },\n * complete() {\n * console.log('Sum equals: ' + this.sum);\n * }\n * };\n *\n * of(1, 2, 3) // Synchronously emits 1, 2, 3 and then completes.\n * .subscribe(sumObserver);\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Subscribe with functions ({@link deprecations/subscribe-arguments deprecated})\n *\n * ```ts\n * import { of } from 'rxjs'\n *\n * let sum = 0;\n *\n * of(1, 2, 3).subscribe(\n * value => {\n * console.log('Adding: ' + value);\n * sum = sum + value;\n * },\n * undefined,\n * () => console.log('Sum equals: ' + sum)\n * );\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Cancel a subscription\n *\n * ```ts\n * import { interval } from 'rxjs';\n *\n * const subscription = interval(1000).subscribe({\n * next(num) {\n * console.log(num)\n * },\n * complete() {\n * // Will not be called, even when cancelling subscription.\n * console.log('completed!');\n * }\n * });\n *\n * setTimeout(() => {\n * subscription.unsubscribe();\n * console.log('unsubscribed!');\n * }, 2500);\n *\n * // Logs:\n * // 0 after 1s\n * // 1 after 2s\n * // 'unsubscribed!' after 2.5s\n * ```\n *\n * @param {Observer|Function} observerOrNext (optional) Either an observer with methods to be called,\n * or the first of three possible handlers, which is the handler for each value emitted from the subscribed\n * Observable.\n * @param {Function} error (optional) A handler for a terminal event resulting from an error. If no error handler is provided,\n * the error will be thrown asynchronously as unhandled.\n * @param {Function} complete (optional) A handler for a terminal event resulting from successful completion.\n * @return {Subscription} a subscription reference to the registered handlers\n * @method subscribe\n */\n subscribe(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((error: any) => void) | null,\n complete?: (() => void) | null\n ): Subscription {\n const subscriber = isSubscriber(observerOrNext) ? observerOrNext : new SafeSubscriber(observerOrNext, error, complete);\n\n errorContext(() => {\n const { operator, source } = this;\n subscriber.add(\n operator\n ? // We're dealing with a subscription in the\n // operator chain to one of our lifted operators.\n operator.call(subscriber, source)\n : source\n ? // If `source` has a value, but `operator` does not, something that\n // had intimate knowledge of our API, like our `Subject`, must have\n // set it. 
We're going to just call `_subscribe` directly.\n this._subscribe(subscriber)\n : // In all other cases, we're likely wrapping a user-provided initializer\n // function, so we need to catch errors and handle them appropriately.\n this._trySubscribe(subscriber)\n );\n });\n\n return subscriber;\n }\n\n /** @internal */\n protected _trySubscribe(sink: Subscriber): TeardownLogic {\n try {\n return this._subscribe(sink);\n } catch (err) {\n // We don't need to return anything in this case,\n // because it's just going to try to `add()` to a subscription\n // above.\n sink.error(err);\n }\n }\n\n /**\n * Used as a NON-CANCELLABLE means of subscribing to an observable, for use with\n * APIs that expect promises, like `async/await`. You cannot unsubscribe from this.\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * #### Example\n *\n * ```ts\n * import { interval, take } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(4));\n *\n * async function getTotal() {\n * let total = 0;\n *\n * await source$.forEach(value => {\n * total += value;\n * console.log('observable -> ' + value);\n * });\n *\n * return total;\n * }\n *\n * getTotal().then(\n * total => console.log('Total: ' + total)\n * );\n *\n * // Expected:\n * // 'observable -> 0'\n * // 'observable -> 1'\n * // 'observable -> 2'\n * // 'observable -> 3'\n * // 'Total: 6'\n * ```\n *\n * @param next a handler for each value emitted by the observable\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n */\n forEach(next: (value: T) => void): Promise;\n\n /**\n * @param next a handler for each value emitted by the observable\n * @param promiseCtor a constructor function used to instantiate the Promise\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n * @deprecated Passing a Promise constructor will no longer be available\n * in upcoming versions of RxJS. This is because it adds weight to the library, for very\n * little benefit. If you need this functionality, it is recommended that you either\n * polyfill Promise, or you create an adapter to convert the returned native promise\n * to whatever promise implementation you wanted. 
Will be removed in v8.\n */\n forEach(next: (value: T) => void, promiseCtor: PromiseConstructorLike): Promise;\n\n forEach(next: (value: T) => void, promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n const subscriber = new SafeSubscriber({\n next: (value) => {\n try {\n next(value);\n } catch (err) {\n reject(err);\n subscriber.unsubscribe();\n }\n },\n error: reject,\n complete: resolve,\n });\n this.subscribe(subscriber);\n }) as Promise;\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): TeardownLogic {\n return this.source?.subscribe(subscriber);\n }\n\n /**\n * An interop point defined by the es7-observable spec https://github.com/zenparsing/es-observable\n * @method Symbol.observable\n * @return {Observable} this instance of the observable\n */\n [Symbol_observable]() {\n return this;\n }\n\n /* tslint:disable:max-line-length */\n pipe(): Observable;\n pipe(op1: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction, op3: OperatorFunction): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction,\n ...operations: OperatorFunction[]\n ): Observable;\n /* tslint:enable:max-line-length */\n\n /**\n * Used to stitch together functional operators into a chain.\n * @method pipe\n * @return {Observable} the Observable result of all of the operators having\n * been called in the order they were passed in.\n *\n * ## Example\n *\n * ```ts\n * import { interval, filter, map, scan } from 'rxjs';\n *\n * interval(1000)\n * .pipe(\n * filter(x => x % 2 === 0),\n * map(x => x + x),\n * scan((acc, x) => acc + x)\n * )\n * .subscribe(x => console.log(x));\n * ```\n */\n pipe(...operations: OperatorFunction[]): Observable {\n return pipeFromArray(operations)(this);\n }\n\n /* tslint:disable:max-line-length */\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. 
Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: typeof Promise): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: PromiseConstructorLike): Promise;\n /* tslint:enable:max-line-length */\n\n /**\n * Subscribe to this Observable and get a Promise resolving on\n * `complete` with the last emission (if any).\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * @method toPromise\n * @param [promiseCtor] a constructor function used to instantiate\n * the Promise\n * @return A Promise that resolves with the last value emit, or\n * rejects on an error. If there were no emissions, Promise\n * resolves with undefined.\n * @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise\n */\n toPromise(promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n let value: T | undefined;\n this.subscribe(\n (x: T) => (value = x),\n (err: any) => reject(err),\n () => resolve(value)\n );\n }) as Promise;\n }\n}\n\n/**\n * Decides between a passed promise constructor from consuming code,\n * A default configured promise constructor, and the native promise\n * constructor and returns it. If nothing can be found, it will throw\n * an error.\n * @param promiseCtor The optional promise constructor to passed by consuming code\n */\nfunction getPromiseCtor(promiseCtor: PromiseConstructorLike | undefined) {\n return promiseCtor ?? config.Promise ?? Promise;\n}\n\nfunction isObserver(value: any): value is Observer {\n return value && isFunction(value.next) && isFunction(value.error) && isFunction(value.complete);\n}\n\nfunction isSubscriber(value: any): value is Subscriber {\n return (value && value instanceof Subscriber) || (isObserver(value) && isSubscription(value));\n}\n", "import { Observable } from '../Observable';\nimport { Subscriber } from '../Subscriber';\nimport { OperatorFunction } from '../types';\nimport { isFunction } from './isFunction';\n\n/**\n * Used to determine if an object is an Observable with a lift function.\n */\nexport function hasLift(source: any): source is { lift: InstanceType['lift'] } {\n return isFunction(source?.lift);\n}\n\n/**\n * Creates an `OperatorFunction`. 
[Embedded source-map `sourcesContent` payload from the site's bundled JavaScript assets: vendored RxJS module sources (operate/lift helpers, OperatorSubscriber, animationFrameProvider, ObjectUnsubscribedError, Subject/AnonymousSubject, dateTimestampProvider, ReplaySubject, the Scheduler/Action hierarchy with AsyncAction, AsyncScheduler, asyncScheduler, AnimationFrameAction, AnimationFrameScheduler, animationFrameScheduler, intervalProvider, the EMPTY observable, and type-guard utilities such as isScheduler, popResultSelector/popScheduler/popNumber, isArrayLike, isPromise, isInteropObservable, isAsyncIterable, createInvalidObservableTypeError, the Symbol.iterator shim, isIterable, and isReadableStreamLike). Generated build artifact; the payload is truncated here.]