From 4b8cc8bf38ff895b442d61693fa084097ef4bc7a Mon Sep 17 00:00:00 2001
From: crccheck
Date: Fri, 5 Aug 2016 14:44:59 -0500
Subject: [PATCH] Documentation edits

---
 README.rst                    |  5 ++---
 aiokafka/fetcher.py           |  2 +-
 aiokafka/group_coordinator.py |  2 +-
 aiokafka/producer.py          |  4 ++--
 docs/api.rst                  | 14 +++++++-------
 docs/index.rst                | 18 +++++++++---------
 setup.py                      |  4 ++--
 7 files changed, 24 insertions(+), 25 deletions(-)

diff --git a/README.rst b/README.rst
index 37490514..a37cac91 100644
--- a/README.rst
+++ b/README.rst
@@ -7,7 +7,7 @@ aiokafka
     :target: https://coveralls.io/r/aio-libs/aiokafka?branch=master
     :alt: |Coverage|
 
-asyncio client for kafka
+asyncio client for Kafka
 
 
 AIOKafkaProducer
@@ -48,7 +48,7 @@ AIOKafkaConsumer
 ****************
 
 AIOKafkaConsumer is a high-level, asynchronous message consumer.
-It interacts with the assigned kafka Group Coordinator node to allow multiple consumers to load balance consumption of topics (requires kafka >= 0.9.0.0).
+It interacts with the assigned Kafka Group Coordinator node to allow multiple consumers to load balance consumption of topics (requires kafka >= 0.9.0.0).
 
 Example of AIOKafkaConsumer usage:
 
@@ -97,4 +97,3 @@ Running tests::
 To run tests with a specific version of Kafka (default one is 0.9.0.1) use KAFKA_VERSION variable::
 
     make cov KAFKA_VERSION=0.8.2.1
-
diff --git a/aiokafka/fetcher.py b/aiokafka/fetcher.py
index c0cc0344..59dfac6c 100644
--- a/aiokafka/fetcher.py
+++ b/aiokafka/fetcher.py
@@ -206,7 +206,7 @@ def _fetch_requests_routine(self):
         we must perform a FetchRequest to as many partitions as we can in a
         node.
 
-        Original java Kafka client processes data differently, as it only
+        Original Java Kafka client processes data differently, as it only
         prefetches data if all messages were given to application (i.e. if
         `self._records` are empty). We don't use this method, cause we allow
         to process partitions separately (by passing `partitions` list to
diff --git a/aiokafka/group_coordinator.py b/aiokafka/group_coordinator.py
index 6ad4d494..e7dc9af6 100644
--- a/aiokafka/group_coordinator.py
+++ b/aiokafka/group_coordinator.py
@@ -542,7 +542,7 @@ def coordinator_unknown(self):
     @asyncio.coroutine
     def ensure_coordinator_known(self):
         """Block until the coordinator for this group is known
-        (and we have an active connection -- java client uses unsent queue).
+        (and we have an active connection -- Java client uses unsent queue).
         """
         while (yield from self.coordinator_unknown()):
             node_id = self._client.get_random_node()
diff --git a/aiokafka/producer.py b/aiokafka/producer.py
index 66682113..44a9a2cd 100644
--- a/aiokafka/producer.py
+++ b/aiokafka/producer.py
@@ -99,7 +99,7 @@ class AIOKafkaProducer(object):
             each message is assigned to. Called (after key serialization):
             partitioner(key_bytes, all_partitions, available_partitions).
             The default partitioner implementation hashes each non-None key
-            using the same murmur2 algorithm as the java client so that
+            using the same murmur2 algorithm as the Java client so that
             messages with the same key are assigned to the same partition.
             When a key is None, the message is delivered to a random partition
             (filtered to partitions with available leaders only, if possible).
@@ -124,7 +124,7 @@ class AIOKafkaProducer(object):
             probing various APIs. Default: auto
 
     Note:
-        Many configuration parameters are taken from Java Client:
+        Many configuration parameters are taken from the Java client:
         https://kafka.apache.org/documentation.html#producerconfigs
     """
     _PRODUCER_CLIENT_ID_SEQUENCE = 0
diff --git a/docs/api.rst b/docs/api.rst
index 8445d87d..2cfa4b46 100644
--- a/docs/api.rst
+++ b/docs/api.rst
@@ -15,19 +15,19 @@ AIOKafkaConsumer class
 .. autoclass:: aiokafka.AIOKafkaConsumer
     :members:
 
-Errors handling
----------------
+Error handling
+--------------
 
 Both consumer and producer can raise exceptions that inherit from the
 `kafka.common.KafkaError` class
-and declared in `kafka.common` module.
+declared in the `kafka.common` module.
 
-Example of exceptions handling:
+Exception handling example:
 
 .. code:: python
 
     from kafka.common import KafkaError, KafkaTimeoutError
-    # ... 
+    # ...
     try:
         send_future = yield from producer.send('foobar', b'test data')
         response = yield from send_future  # wait until message is produced
@@ -46,8 +46,8 @@ differently. Possible consumer errors include:
    Always raised
  * ``OffsetOutOfRangeError`` - if you don't specify `auto_offset_reset` policy
    and started cosumption from not valid offset. Always raised
- * ``RecordTooLargeError`` - broker has a *MessageSet* larger than 
+ * ``RecordTooLargeError`` - broker has a *MessageSet* larger than
    `max_partition_fetch_bytes`. **async for** - log error, **get*** will
    raise it.
  * ``InvalidMessageError`` - CRC check on MessageSet failed due to connection
-   failure or bug. **async for** - log error. **get*** will raise it.
\ No newline at end of file
+   failure or bug. **async for** - log error. **get*** will raise it.
diff --git a/docs/index.rst b/docs/index.rst
index 43986cd7..499ffdb7 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -5,9 +5,9 @@ Welcome to aiokafka's documentation!
 .. _kafka-python: https://github.com/dpkp/kafka-python
 .. _asyncio: http://docs.python.org/3.4/library/asyncio.html
 
-**aiokafka** is a client for the Apache Kafka distributed stream processing system using the asyncio_.
-It is based on kafka-python_ library and reuses it's internals for protocol parsing, errors, etc.
-Client is designed to function much like the official java client, with a sprinkling of pythonic interfaces.
+**aiokafka** is a client for the Apache Kafka distributed stream processing system using asyncio_.
+It is based on the kafka-python_ library and reuses its internals for protocol parsing, errors, etc.
+The client is designed to function much like the official Java client, with a sprinkling of Pythonic interfaces.
 
 **aiokafka** is used with 0.9 Kafka brokers and supports fully coordinated consumer
 groups -- i.e., dynamic partition assignment to multiple consumers in the same group.
@@ -21,11 +21,11 @@ AIOKafkaConsumer
 ++++++++++++++++
 
 :class:`~aiokafka.AIOKafkaConsumer` is a high-level message consumer, intended to
-operate as similarly as possible to the official 0.9 java client. Full support
-for coordinated consumer groups requires use of kafka brokers that support the
+operate as similarly as possible to the official 0.9 Java client. Full support
+for coordinated consumer groups requires use of Kafka brokers that support the
 0.9 Group APIs.
 
-See consumer example:
+Here's a consumer example:
 
 .. code:: python
 
@@ -59,9 +59,9 @@ AIOKafkaProducer
 ++++++++++++++++
 
 :class:`~aiokafka.AIOKafkaProducer` is a high-level, asynchronous message producer.
-The class is intended to operate as similarly as possible to the official java client. 
+The class is intended to operate as similarly as possible to the official Java client.
 
-See producer example:
+Here's a producer example:
 
 .. code:: python
 
@@ -97,7 +97,7 @@ Installation
 
    pip3 install aiokafka
 
-.. note:: *aiokafka* requires *python-kafka* library and heavily depands on it.
+.. note:: *aiokafka* requires the *kafka-python* library.
 
 
 Optional LZ4 install
diff --git a/setup.py b/setup.py
index d9380cdf..fd10da34 100644
--- a/setup.py
+++ b/setup.py
@@ -13,7 +13,7 @@
 elif PY_VER >= (3, 3):
     install_requires.append('asyncio')
 else:
-    raise RuntimeError("aiokafka doesn't suppport Python earllier than 3.3")
+    raise RuntimeError("aiokafka doesn't support Python earlier than 3.3")
 
 
 def read(f):
@@ -62,4 +62,4 @@ def read_version():
       packages=['aiokafka'],
       install_requires=install_requires,
       extras_require=extras_require,
-      include_package_data = True)
+      include_package_data=True)
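
To complement the consumer error behaviour documented in the docs/api.rst hunk above
(``OffsetOutOfRangeError`` is always raised, ``RecordTooLargeError`` is logged by
**async for** but raised by the **get*** methods), here is a minimal consumer-side
sketch of the same handling pattern. It is illustrative only and not part of the patch;
the topic name ``my_topic`` and broker address ``localhost:9092`` are placeholders.

.. code:: python

    import asyncio

    from kafka.common import OffsetOutOfRangeError, RecordTooLargeError
    from aiokafka import AIOKafkaConsumer

    loop = asyncio.get_event_loop()


    @asyncio.coroutine
    def consume():
        consumer = AIOKafkaConsumer(
            'my_topic', loop=loop, bootstrap_servers='localhost:9092')
        yield from consumer.start()
        try:
            # getone() raises consumer errors directly, unlike `async for`,
            # which only logs the recoverable ones.
            msg = yield from consumer.getone()
            print(msg.topic, msg.partition, msg.offset, msg.value)
        except OffsetOutOfRangeError:
            # stored offset is invalid and no auto_offset_reset policy was set
            pass
        except RecordTooLargeError:
            # a MessageSet exceeded max_partition_fetch_bytes
            pass
        finally:
            yield from consumer.stop()


    loop.run_until_complete(consume())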