diff --git a/adapter-base/src/main/java/org/eclipse/hono/adapter/AbstractProtocolAdapterBase.java b/adapter-base/src/main/java/org/eclipse/hono/adapter/AbstractProtocolAdapterBase.java index 0113829627..e84fa09685 100644 --- a/adapter-base/src/main/java/org/eclipse/hono/adapter/AbstractProtocolAdapterBase.java +++ b/adapter-base/src/main/java/org/eclipse/hono/adapter/AbstractProtocolAdapterBase.java @@ -782,20 +782,20 @@ public final Map getDownstreamMessageProperties(final TelemetryE @Override public void registerReadinessChecks(final HealthCheckHandler handler) { - if (commandConsumerFactory instanceof ServiceClient) { - ((ServiceClient) commandConsumerFactory).registerReadinessChecks(handler); + if (commandConsumerFactory instanceof ServiceClient client) { + client.registerReadinessChecks(handler); } - if (tenantClient instanceof ServiceClient) { - ((ServiceClient) tenantClient).registerReadinessChecks(handler); + if (tenantClient instanceof ServiceClient client) { + client.registerReadinessChecks(handler); } - if (registrationClient instanceof ServiceClient) { - ((ServiceClient) registrationClient).registerReadinessChecks(handler); + if (registrationClient instanceof ServiceClient client) { + client.registerReadinessChecks(handler); } - if (credentialsClient instanceof ServiceClient) { - ((ServiceClient) credentialsClient).registerReadinessChecks(handler); + if (credentialsClient instanceof ServiceClient client) { + client.registerReadinessChecks(handler); } - if (commandRouterClient instanceof ServiceClient) { - ((ServiceClient) commandRouterClient).registerReadinessChecks(handler); + if (commandRouterClient instanceof ServiceClient client) { + client.registerReadinessChecks(handler); } messagingClientProviders.registerReadinessChecks(handler); } @@ -810,20 +810,20 @@ public void registerReadinessChecks(final HealthCheckHandler handler) { public void registerLivenessChecks(final HealthCheckHandler handler) { registerEventLoopBlockedCheck(handler); - if (commandConsumerFactory instanceof ServiceClient) { - ((ServiceClient) commandConsumerFactory).registerLivenessChecks(handler); + if (commandConsumerFactory instanceof ServiceClient client) { + client.registerLivenessChecks(handler); } - if (tenantClient instanceof ServiceClient) { - ((ServiceClient) tenantClient).registerLivenessChecks(handler); + if (tenantClient instanceof ServiceClient client) { + client.registerLivenessChecks(handler); } - if (registrationClient instanceof ServiceClient) { - ((ServiceClient) registrationClient).registerLivenessChecks(handler); + if (registrationClient instanceof ServiceClient client) { + client.registerLivenessChecks(handler); } - if (credentialsClient instanceof ServiceClient) { - ((ServiceClient) credentialsClient).registerLivenessChecks(handler); + if (credentialsClient instanceof ServiceClient client) { + client.registerLivenessChecks(handler); } - if (commandRouterClient instanceof ServiceClient) { - ((ServiceClient) commandRouterClient).registerLivenessChecks(handler); + if (commandRouterClient instanceof ServiceClient client) { + client.registerLivenessChecks(handler); } messagingClientProviders.registerLivenessChecks(handler); } diff --git a/adapter-base/src/main/java/org/eclipse/hono/adapter/quarkus/AbstractProtocolAdapterApplication.java b/adapter-base/src/main/java/org/eclipse/hono/adapter/quarkus/AbstractProtocolAdapterApplication.java index 5a0f4f2be6..cac51c136e 100644 --- a/adapter-base/src/main/java/org/eclipse/hono/adapter/quarkus/AbstractProtocolAdapterApplication.java +++ 
b/adapter-base/src/main/java/org/eclipse/hono/adapter/quarkus/AbstractProtocolAdapterApplication.java @@ -78,11 +78,13 @@ import org.eclipse.hono.client.telemetry.kafka.KafkaBasedEventSender; import org.eclipse.hono.client.telemetry.kafka.KafkaBasedTelemetrySender; import org.eclipse.hono.client.util.MessagingClientProvider; +import org.eclipse.hono.client.util.ServiceClient; import org.eclipse.hono.notification.NotificationConstants; import org.eclipse.hono.notification.NotificationEventBusSupport; import org.eclipse.hono.notification.NotificationReceiver; import org.eclipse.hono.service.cache.Caches; import org.eclipse.hono.service.quarkus.AbstractServiceApplication; +import org.eclipse.hono.service.util.ServiceClientAdapter; import org.eclipse.hono.util.CredentialsObject; import org.eclipse.hono.util.CredentialsResult; import org.eclipse.hono.util.MessagingType; @@ -322,10 +324,10 @@ protected void setCollaborators(final AbstractProtocolAdapterBase adapter) { final DeviceRegistrationClient registrationClient = registrationClient(); - final MessagingClientProvider telemetrySenderProvider = new MessagingClientProvider<>(); - final MessagingClientProvider eventSenderProvider = new MessagingClientProvider<>(); - final MessagingClientProvider commandResponseSenderProvider = new MessagingClientProvider<>(); - final KafkaClientMetricsSupport kafkaClientMetricsSupport = kafkaClientMetricsSupport(kafkaMetricsOptions); + final var telemetrySenderProvider = new MessagingClientProvider(); + final var eventSenderProvider = new MessagingClientProvider(); + final var commandResponseSenderProvider = new MessagingClientProvider(); + final var kafkaClientMetricsSupport = kafkaClientMetricsSupport(kafkaMetricsOptions); final var tenantClient = tenantClient(); if (kafkaEventConfig.isConfigured()) { @@ -334,10 +336,18 @@ protected void setCollaborators(final AbstractProtocolAdapterBase adapter) { final KafkaProducerFactory factory = CachingKafkaProducerFactory.sharedFactory(vertx); factory.setMetricsSupport(kafkaClientMetricsSupport); - telemetrySenderProvider.setClient(new KafkaBasedTelemetrySender(vertx, factory, kafkaTelemetryConfig, - protocolAdapterProperties.isDefaultsEnabled(), tracer)); - eventSenderProvider.setClient(new KafkaBasedEventSender(vertx, factory, kafkaEventConfig, - protocolAdapterProperties.isDefaultsEnabled(), tracer)); + telemetrySenderProvider.setClient(new KafkaBasedTelemetrySender( + vertx, + factory, + kafkaTelemetryConfig, + protocolAdapterProperties.isDefaultsEnabled(), + tracer)); + eventSenderProvider.setClient(new KafkaBasedEventSender( + vertx, + factory, + kafkaEventConfig, + protocolAdapterProperties.isDefaultsEnabled(), + tracer)); commandResponseSenderProvider.setClient(new KafkaBasedCommandResponseSender( vertx, factory, @@ -354,8 +364,10 @@ protected void setCollaborators(final AbstractProtocolAdapterBase adapter) { protocolAdapterProperties.isJmsVendorPropsEnabled())); } - final MessagingClientProviders messagingClientProviders = new MessagingClientProviders(telemetrySenderProvider, - eventSenderProvider, commandResponseSenderProvider); + final MessagingClientProviders messagingClientProviders = new MessagingClientProviders( + telemetrySenderProvider, + eventSenderProvider, + commandResponseSenderProvider); if (commandRouterConfig.isHostConfigured()) { final CommandRouterClient commandRouterClient = commandRouterClient(); @@ -609,6 +621,9 @@ public NotificationReceiver notificationReceiver() { notificationConfig.setServerRole("Notification"); notificationReceiver = 
new ProtonBasedNotificationReceiver(HonoConnection.newConnection(vertx, notificationConfig, tracer)); } + if (notificationReceiver instanceof ServiceClient serviceClient) { + healthCheckServer.registerHealthCheckResources(ServiceClientAdapter.forClient(serviceClient)); + } final var notificationSender = NotificationEventBusSupport.getNotificationSender(vertx); NotificationConstants.DEVICE_REGISTRY_NOTIFICATION_TYPES.forEach(notificationType -> notificationReceiver.registerConsumer(notificationType, notificationSender::handle)); diff --git a/clients/application-kafka/src/main/java/org/eclipse/hono/application/client/kafka/impl/KafkaApplicationClientImpl.java b/clients/application-kafka/src/main/java/org/eclipse/hono/application/client/kafka/impl/KafkaApplicationClientImpl.java index b0a3413297..f217091b45 100644 --- a/clients/application-kafka/src/main/java/org/eclipse/hono/application/client/kafka/impl/KafkaApplicationClientImpl.java +++ b/clients/application-kafka/src/main/java/org/eclipse/hono/application/client/kafka/impl/KafkaApplicationClientImpl.java @@ -38,6 +38,7 @@ import io.vertx.core.CompositeFuture; import io.vertx.core.Future; import io.vertx.core.Handler; +import io.vertx.core.Promise; import io.vertx.core.Vertx; import io.vertx.core.buffer.Buffer; import io.vertx.kafka.client.consumer.KafkaConsumerRecord; @@ -106,7 +107,8 @@ public KafkaApplicationClientImpl( public Future stop() { // stop created consumers final List closeKafkaClientsTracker = consumersToCloseOnStop.stream() - .map(MessageConsumer::close).collect(Collectors.toList()); + .map(MessageConsumer::close) + .collect(Collectors.toList()); // add command sender related clients closeKafkaClientsTracker.add(super.stop()); return CompositeFuture.join(closeKafkaClientsTracker) @@ -165,12 +167,15 @@ private Future createKafkaBasedDownstreamMessageConsumer( final Handler> recordHandler = record -> { messageHandler.handle(new KafkaDownstreamMessage(record)); }; + final Promise readyTracker = Promise.promise(); final HonoKafkaConsumer consumer = new HonoKafkaConsumer<>(vertx, Set.of(topic), recordHandler, consumerConfig.getConsumerConfig(type.toString())); consumer.setPollTimeout(Duration.ofMillis(consumerConfig.getPollTimeout())); + consumer.addOnKafkaConsumerReadyHandler(readyTracker); Optional.ofNullable(kafkaConsumerSupplier) .ifPresent(consumer::setKafkaConsumerSupplier); return consumer.start() + .compose(ok -> readyTracker.future()) .map(v -> (MessageConsumer) new MessageConsumer() { @Override public Future close() { diff --git a/clients/application-kafka/src/main/java/org/eclipse/hono/application/client/kafka/impl/KafkaBasedCommandSender.java b/clients/application-kafka/src/main/java/org/eclipse/hono/application/client/kafka/impl/KafkaBasedCommandSender.java index 72c760f73f..12b2bb7fef 100644 --- a/clients/application-kafka/src/main/java/org/eclipse/hono/application/client/kafka/impl/KafkaBasedCommandSender.java +++ b/clients/application-kafka/src/main/java/org/eclipse/hono/application/client/kafka/impl/KafkaBasedCommandSender.java @@ -112,17 +112,18 @@ public KafkaBasedCommandSender( @SuppressWarnings("rawtypes") @Override public Future stop() { - // assemble futures for closing the command response consumers - final List stopKafkaClientsTracker = commandResponseConsumers.values().stream() - .map(HonoKafkaConsumer::stop) - .collect(Collectors.toList()); - commandResponseConsumers.clear(); - // add future for closing command producer - stopKafkaClientsTracker.add(super.stop()); - - return 
CompositeFuture.join(stopKafkaClientsTracker) + return lifecycleStatus.runStopAttempt(() -> { + // assemble futures for closing the command response consumers + final List stopConsumersTracker = commandResponseConsumers.values().stream() + .map(HonoKafkaConsumer::stop) + .collect(Collectors.toList()); + commandResponseConsumers.clear(); + return CompositeFuture.join( + stopProducer(), + CompositeFuture.join(stopConsumersTracker)) .mapEmpty(); + }); } /** @@ -424,11 +425,14 @@ private Future subscribeForCommandResponse(final String tenantId, final Sp getCommandResponseHandler(tenantId) .handle(new KafkaDownstreamMessage(record)); }; + final Promise readyTracker = Promise.promise(); final HonoKafkaConsumer consumer = new HonoKafkaConsumer<>(vertx, Set.of(topic), recordHandler, consumerConfig); consumer.setPollTimeout(Duration.ofMillis(this.consumerConfig.getPollTimeout())); Optional.ofNullable(kafkaConsumerSupplier) .ifPresent(consumer::setKafkaConsumerSupplier); + consumer.addOnKafkaConsumerReadyHandler(readyTracker); return consumer.start() + .compose(ok -> readyTracker.future()) .recover(error -> { LOGGER.debug("error creating command response consumer for tenant [{}]", tenantId, error); TracingHelper.logError(span, "error creating command response consumer", error); diff --git a/clients/application-kafka/src/test/java/org/eclipse/hono/application/client/kafka/impl/KafkaApplicationClientImplTest.java b/clients/application-kafka/src/test/java/org/eclipse/hono/application/client/kafka/impl/KafkaApplicationClientImplTest.java index 170ae528d4..c883698751 100644 --- a/clients/application-kafka/src/test/java/org/eclipse/hono/application/client/kafka/impl/KafkaApplicationClientImplTest.java +++ b/clients/application-kafka/src/test/java/org/eclipse/hono/application/client/kafka/impl/KafkaApplicationClientImplTest.java @@ -43,6 +43,7 @@ import io.vertx.core.Future; import io.vertx.core.Handler; +import io.vertx.core.Promise; import io.vertx.core.Vertx; import io.vertx.core.buffer.Buffer; import io.vertx.junit5.Timeout; @@ -57,6 +58,7 @@ public class KafkaApplicationClientImplTest { private static final String PARAMETERIZED_TEST_NAME_PATTERN = "{displayName} [{index}]; parameters: {argumentsWithNames}"; + private Promise clientReadyTracker; private KafkaApplicationClientImpl client; private KafkaMockConsumer mockConsumer; private String tenantId; @@ -94,8 +96,10 @@ void setUp(final Vertx vertx) { producerConfig.setCommonClientConfig(commonConfig); producerConfig.setProducerConfig(Map.of("client.id", "application-test-sender")); + clientReadyTracker = Promise.promise(); client = new KafkaApplicationClientImpl(vertx, consumerConfig, producerFactory, producerConfig); client.setKafkaConsumerFactory(() -> mockConsumer); + client.addOnKafkaProducerReadyHandler(clientReadyTracker); } /** @@ -119,11 +123,13 @@ void shutDown(final VertxTestContext context) { public void testCreateConsumer(final Type msgType, final VertxTestContext ctx) { // Verify that the consumer for the given tenant and the message type is successfully created - createConsumer(tenantId, msgType, m -> {}, t -> {}) - .onComplete(ctx.succeeding(consumer -> ctx.verify(() -> { - assertThat(consumer).isNotNull(); - ctx.completeNow(); - }))); + client.start() + .compose(ok -> clientReadyTracker.future()) + .compose(ok -> createConsumer(tenantId, msgType, m -> {}, t -> {})) + .onComplete(ctx.succeeding(consumer -> ctx.verify(() -> { + assertThat(consumer).isNotNull(); + ctx.completeNow(); + }))); } /** @@ -138,14 +144,15 @@ public void 
testCreateConsumer(final Type msgType, final VertxTestContext ctx) { public void testStopClosesConsumer(final Type msgType, final VertxTestContext ctx) { // Verify that the consumer for the given tenant and the message type is successfully created - createConsumer(tenantId, msgType, m -> {}, t -> {}) - // stop the application client - .compose(c -> client.stop()) - .onComplete(ctx.succeeding(v -> ctx.verify(() -> { - // verify that the Kafka mock consumer is closed - assertThat(mockConsumer.closed()).isTrue(); - ctx.completeNow(); - }))); + client.start() + .compose(ok -> clientReadyTracker.future()) + .compose(ok -> createConsumer(tenantId, msgType, m -> {}, t -> {})) + // stop the application client + .compose(c -> client.stop()) + .onComplete(ctx.succeeding(v -> { + ctx.verify(() -> assertThat(mockConsumer.closed()).isTrue()); + ctx.completeNow(); + })); } /** @@ -157,19 +164,24 @@ public void testStopClosesConsumer(final Type msgType, final VertxTestContext ct @ParameterizedTest(name = PARAMETERIZED_TEST_NAME_PATTERN) @MethodSource("messageTypes") public void testCloseConsumer(final Type msgType, final VertxTestContext ctx) { + // Given a consumer for the given tenant and the message type - createConsumer(tenantId, msgType, m -> {}, t -> {}) - // When the message consumer is closed - .compose(MessageConsumer::close) - .onComplete(ctx.succeeding(consumer -> ctx.verify(() -> { - // verify that the Kafka mock consumer is also closed - assertThat(mockConsumer.closed()).isTrue(); - ctx.completeNow(); - }))); + client.start() + .compose(ok -> clientReadyTracker.future()) + .compose(ok -> createConsumer(tenantId, msgType, m -> {}, t -> {})) + // When the message consumer is closed + .compose(MessageConsumer::close) + .onComplete(ctx.succeeding(ok -> { + ctx.verify(() -> assertThat(mockConsumer.closed()).isTrue()); + ctx.completeNow(); + })); } - private Future createConsumer(final String tenantId, final Type type, - final Handler> msgHandler, final Handler closeHandler) { + private Future createConsumer( + final String tenantId, + final Type type, + final Handler> msgHandler, + final Handler closeHandler) { final String topic = new HonoTopic(type, tenantId).toString(); final TopicPartition topicPartition = new TopicPartition(topic, 0); diff --git a/clients/application-kafka/src/test/java/org/eclipse/hono/application/client/kafka/impl/KafkaBasedCommandSenderTest.java b/clients/application-kafka/src/test/java/org/eclipse/hono/application/client/kafka/impl/KafkaBasedCommandSenderTest.java index 9c7404126a..7e42adfa01 100644 --- a/clients/application-kafka/src/test/java/org/eclipse/hono/application/client/kafka/impl/KafkaBasedCommandSenderTest.java +++ b/clients/application-kafka/src/test/java/org/eclipse/hono/application/client/kafka/impl/KafkaBasedCommandSenderTest.java @@ -82,6 +82,7 @@ public class KafkaBasedCommandSenderTest { private static final Logger LOG = LoggerFactory.getLogger(KafkaBasedCommandSenderTest.class); private KafkaBasedCommandSender commandSender; + private Promise commandSenderReadyTracker; private MessagingKafkaConsumerConfigProperties consumerConfig; private MessagingKafkaProducerConfigProperties producerConfig; private MockProducer mockProducer; @@ -112,12 +113,14 @@ void setUp(final Vertx vertx) { mockProducer = KafkaClientUnitTestHelper.newMockProducer(true); final var producerFactory = CachingKafkaProducerFactory .testFactory(vertx, (n, c) -> KafkaClientUnitTestHelper.newKafkaProducer(mockProducer)); + commandSenderReadyTracker = Promise.promise(); commandSender = 
new KafkaBasedCommandSender( vertx, consumerConfig, producerFactory, producerConfig, tracer); + commandSender.addOnKafkaProducerReadyHandler(commandSenderReadyTracker); tenantId = UUID.randomUUID().toString(); deviceId = UUID.randomUUID().toString(); } @@ -142,15 +145,17 @@ public void testSendAsyncCommandSucceeds(final VertxTestContext ctx) { final String correlationId = UUID.randomUUID().toString(); final String subject = "setVolume"; - commandSender.sendAsyncCommand( - tenantId, - deviceId, - subject, - correlationId, - new JsonObject().put("value", 20).toBuffer(), - "application/json", - Map.of("foo", "bar"), - NoopSpan.INSTANCE.context()) + commandSender.start() + .compose(ok -> commandSenderReadyTracker.future()) + .compose(ok -> commandSender.sendAsyncCommand( + tenantId, + deviceId, + subject, + correlationId, + new JsonObject().put("value", 20).toBuffer(), + "application/json", + Map.of("foo", "bar"), + NoopSpan.INSTANCE.context())) .onComplete(ctx.succeeding(ok -> { ctx.verify(() -> { final ProducerRecord commandRecord = mockProducer.history().get(0); @@ -191,13 +196,15 @@ public void testSendAsyncCommandSucceeds(final VertxTestContext ctx) { public void testSendOneWayCommandSucceeds(final VertxTestContext ctx) { final String subject = "setVolume"; - commandSender.sendOneWayCommand( - tenantId, - deviceId, - subject, - new JsonObject().put("value", 20).toBuffer(), - "application/json", - NoopSpan.INSTANCE.context()) + commandSender.start() + .compose(ok -> commandSenderReadyTracker.future()) + .compose(ok -> commandSender.sendOneWayCommand( + tenantId, + deviceId, + subject, + new JsonObject().put("value", 20).toBuffer(), + "application/json", + NoopSpan.INSTANCE.context())) .onComplete(ctx.succeeding(ok -> { ctx.verify(() -> { final ProducerRecord commandRecord = mockProducer.history().get(0); @@ -231,15 +238,17 @@ public void testSendCommandAndReceiveResponseTimesOut(final VertxTestContext ctx final Context context = vertx.getOrCreateContext(); commandSender.setKafkaConsumerSupplier(() -> mockConsumer); context.runOnContext(v -> { - commandSender.sendCommand( - tenantId, - deviceId, - "testCommand", - Buffer.buffer("data"), - "text/plain", - (Map) null, // no failure header props - Duration.ofMillis(5), - null) + commandSender.start() + .compose(ok -> commandSenderReadyTracker.future()) + .compose(ok -> commandSender.sendCommand( + tenantId, + deviceId, + "testCommand", + Buffer.buffer("data"), + "text/plain", + (Map) null, // no failure header props + Duration.ofMillis(5), + null)) .onComplete(ctx.failing(error -> { ctx.verify(() -> { // VERIFY that the error is caused due to time out. 
@@ -311,14 +320,16 @@ public synchronized java.util.concurrent.Future send(final Produ }); } }; - final var producerFactory = CachingKafkaProducerFactory - .testFactory(vertx, (n, c) -> KafkaClientUnitTestHelper.newKafkaProducer(mockProducer)); + final var producerFactory = CachingKafkaProducerFactory.testFactory( + vertx, + (n, c) -> KafkaClientUnitTestHelper.newKafkaProducer(mockProducer)); commandSender = new KafkaBasedCommandSender( vertx, consumerConfig, producerFactory, producerConfig, NoopTracerFactory.create()); + commandSender.addOnKafkaProducerReadyHandler(commandSenderReadyTracker); final String command = "setVolume"; final ConsumerRecord commandResponseRecord = commandResponseRecord(tenantId, @@ -341,29 +352,29 @@ public synchronized java.util.concurrent.Future send(final Produ context.runOnContext(v -> { // Send a command to the device - commandSender.sendCommand(tenantId, deviceId, command, Buffer.buffer("test"), "text/plain") - .onComplete(ar -> { - ctx.verify(() -> { - if (expectSuccess) { - assertThat(ar.succeeded()).isTrue(); // assert that send operation succeeded - - // Verify the command response that has been received - final DownstreamMessage response = ar.result(); - assertThat(response.getDeviceId()).isEqualTo(deviceId); - assertThat(response.getStatus()).isEqualTo(responseStatus); - assertThat(response.getPayload().toString()).isEqualTo(responsePayload); - } else { - assertThat(ar.succeeded()).isFalse(); // assert that send operation failed - assertThat(ar.cause()).isInstanceOf(ServiceInvocationException.class); - assertThat(((ServiceInvocationException) ar.cause()).getErrorCode()) - .isEqualTo(expectedStatusCode); - assertThat(ar.cause().getMessage()).isEqualTo(responsePayload); - } - }); - ctx.completeNow(); - mockConsumer.close(); - commandSender.stop(); + commandSender.start() + .compose(ok -> commandSenderReadyTracker.future()) + .compose(ok -> commandSender.sendCommand(tenantId, deviceId, command, Buffer.buffer("test"), "text/plain")) + .onComplete(ar -> { + ctx.verify(() -> { + if (expectSuccess) { + assertThat(ar.succeeded()).isTrue(); // assert that send operation succeeded + + // Verify the command response that has been received + final DownstreamMessage response = ar.result(); + assertThat(response.getDeviceId()).isEqualTo(deviceId); + assertThat(response.getStatus()).isEqualTo(responseStatus); + assertThat(response.getPayload().toString()).isEqualTo(responsePayload); + } else { + assertThat(ar.succeeded()).isFalse(); // assert that send operation failed + assertThat(ar.cause()).isInstanceOf(ServiceInvocationException.class); + assertThat(((ServiceInvocationException) ar.cause()).getErrorCode()) + .isEqualTo(expectedStatusCode); + assertThat(ar.cause().getMessage()).isEqualTo(responsePayload); + } }); + ctx.completeNow(); + }); }); } diff --git a/clients/application-kafka/src/test/resources/logback-test.xml b/clients/application-kafka/src/test/resources/logback-test.xml index a002448233..4e1aae01c2 100644 --- a/clients/application-kafka/src/test/resources/logback-test.xml +++ b/clients/application-kafka/src/test/resources/logback-test.xml @@ -28,4 +28,7 @@ + + + diff --git a/clients/command-kafka/src/main/java/org/eclipse/hono/client/command/kafka/KafkaBasedInternalCommandConsumer.java b/clients/command-kafka/src/main/java/org/eclipse/hono/client/command/kafka/KafkaBasedInternalCommandConsumer.java index 614f733ae1..93d60836ce 100644 --- a/clients/command-kafka/src/main/java/org/eclipse/hono/client/command/kafka/KafkaBasedInternalCommandConsumer.java 
+++ b/clients/command-kafka/src/main/java/org/eclipse/hono/client/command/kafka/KafkaBasedInternalCommandConsumer.java @@ -46,15 +46,18 @@ import org.eclipse.hono.client.registry.TenantClient; import org.eclipse.hono.client.registry.TenantDisabledOrNotRegisteredException; import org.eclipse.hono.tracing.TracingHelper; +import org.eclipse.hono.util.LifecycleStatus; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import io.opentracing.Span; import io.opentracing.SpanContext; import io.opentracing.Tracer; +import io.vertx.core.AsyncResult; import io.vertx.core.CompositeFuture; import io.vertx.core.Context; import io.vertx.core.Future; +import io.vertx.core.Handler; import io.vertx.core.Promise; import io.vertx.core.Vertx; import io.vertx.core.buffer.Buffer; @@ -84,12 +87,12 @@ public class KafkaBasedInternalCommandConsumer implements InternalCommandConsume private final Tracer tracer; private final CommandResponseSender commandResponseSender; private final TenantClient tenantClient; - private final AtomicBoolean isTopicCreated = new AtomicBoolean(false); private final AtomicBoolean retryCreateTopic = new AtomicBoolean(true); /** * Key is the tenant id, value is a Map with partition index as key and offset as value. */ private final Map> lastHandledPartitionOffsetsPerTenant = new HashMap<>(); + private final LifecycleStatus lifecycleStatus = new LifecycleStatus(); private KafkaConsumer consumer; private Admin adminClient; @@ -136,6 +139,7 @@ public KafkaBasedInternalCommandConsumer( final KafkaClientFactory kafkaClientFactory = new KafkaClientFactory(vertx); kafkaAdminClientCreator = () -> kafkaClientFactory.createClientWithRetries( () -> Admin.create(new HashMap<>(adminClientConfig)), + lifecycleStatus::isStarting, bootstrapServersConfig, KafkaClientFactory.UNLIMITED_RETRIES_DURATION); @@ -148,6 +152,7 @@ public KafkaBasedInternalCommandConsumer( this.clientId = consumerConfig.get(ConsumerConfig.CLIENT_ID_CONFIG); consumerCreator = () -> kafkaClientFactory.createClientWithRetries( () -> KafkaConsumer.create(vertx, consumerConfig), + lifecycleStatus::isStarting, consumerConfig.get(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG), KafkaClientFactory.UNLIMITED_RETRIES_DURATION); } @@ -204,8 +209,39 @@ public final KafkaBasedInternalCommandConsumer setMetricsSupport(final KafkaClie return this; } + /** + * Adds a handler to be invoked with a succeeded future once the Kafka consumer is ready to be used. + * + * @param handler The handler to invoke. The handler will never be invoked with a failed future. + */ + public final void addOnKafkaConsumerReadyHandler(final Handler> handler) { + if (handler != null) { + lifecycleStatus.addOnStartedHandler(handler); + } + } + + /** + * {@inheritDoc} + *
<p>
+ * This method triggers the creation of the internal command topic and a corresponding Kafka consumer in the + * background. A new attempt to create the topic and the consumer is made periodically until creation succeeds + * or the {@link #stop()} method has been invoked. + *
<p>
+ * Client code may {@linkplain #addOnKafkaConsumerReadyHandler(Handler) register a dedicated handler} + * to be notified once the consumer is up and running. + * + * @return A succeeded future. Note that the successful completion of the returned future does not + * mean that the consumer will be ready to receive messages from the broker. + */ @Override public Future start() { + + if (lifecycleStatus.isStarting()) { + return Future.succeededFuture(); + } else if (!lifecycleStatus.setStarting()) { + return Future.failedFuture(new IllegalStateException("consumer is already started/stopping")); + } + if (context == null) { context = Vertx.currentContext(); if (context == null) { @@ -213,33 +249,32 @@ public Future start() { } } // trigger creation of admin client, adapter specific topic and consumer - return kafkaAdminClientCreator.get() - .onFailure(thr -> LOG.error("admin client creation failed", thr)) - .compose(client -> { - adminClient = client; - return createTopic(); - }) - .recover(e -> retryCreateTopic()) - .compose(v -> { - isTopicCreated.set(true); - // create consumer - return consumerCreator.get() - .onFailure(thr -> LOG.error("consumer creation failed", thr)); - }) - .compose(client -> { - consumer = client; - Optional.ofNullable(metricsSupport).ifPresent(ms -> ms.registerKafkaConsumer(consumer.unwrap())); - return subscribeToTopic(); - }); + kafkaAdminClientCreator.get() + .onFailure(thr -> LOG.error("admin client creation failed", thr)) + .compose(client -> { + adminClient = client; + return createTopic(); + }) + .recover(e -> retryCreateTopic()) + .compose(v -> consumerCreator.get() + .onFailure(thr -> LOG.error("consumer creation failed", thr))) + .compose(client -> { + consumer = client; + Optional.ofNullable(metricsSupport).ifPresent(ms -> ms.registerKafkaConsumer(consumer.unwrap())); + return subscribeToTopic(); + }) + .onSuccess(ok -> lifecycleStatus.setStarted()); + + return Future.succeededFuture(); } @Override public void registerReadinessChecks(final HealthCheckHandler readinessHandler) { LOG.trace("registering readiness check using kafka based internal command consumer [adapter instance id: {}]", adapterInstanceId); - readinessHandler.register(String.format("internal-command-consumer[%s]-readiness", adapterInstanceId), + readinessHandler.register("internal-command-consumer[%s]-readiness".formatted(adapterInstanceId), status -> { - if (isTopicCreated.get()) { + if (lifecycleStatus.isStarted()) { status.tryComplete(Status.OK()); } else { LOG.debug("readiness check failed [internal command topic is not created]"); @@ -321,20 +356,30 @@ private String getTopicName() { return new HonoTopic(HonoTopic.Type.COMMAND_INTERNAL, adapterInstanceId).toString(); } + /** + * {@inheritDoc} + *
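The reworked start() above returns immediately; a caller that needs a fully initialized consumer combines start() with a ready handler. A minimal sketch of that composition, assuming an already configured consumer instance and a logger named LOG (it is the same pattern the updated tests further down use):

    final Promise<Void> readyTracker = Promise.promise();
    consumer.addOnKafkaConsumerReadyHandler(readyTracker);
    consumer.start()
            // start() succeeds right away; readiness is signaled via the tracker
            .compose(ok -> readyTracker.future())
            .onSuccess(ok -> LOG.info("internal command consumer is ready"));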
<p>
+ * Closes the Kafka admin client and consumer. + * + * @return A future indicating the outcome of the operation. + * The future will be succeeded once this component is stopped. + */ @Override public Future stop() { - retryCreateTopic.set(false); - vertx.cancelTimer(retryCreateTopicTimerId); - if (consumer == null) { - return Future.failedFuture("not started"); - } + return lifecycleStatus.runStopAttempt(() -> { + retryCreateTopic.set(false); + vertx.cancelTimer(retryCreateTopicTimerId); - return CompositeFuture.all(closeAdminClient(), closeConsumer()) + return CompositeFuture.all(closeAdminClient(), closeConsumer()) .mapEmpty(); + }); } private Future closeAdminClient() { + if (adminClient == null) { + return Future.succeededFuture(); + } final Promise adminClientClosePromise = Promise.promise(); LOG.debug("stop: close admin client"); context.executeBlocking(future -> { @@ -346,14 +391,14 @@ private Future closeAdminClient() { } private Future closeConsumer() { - final Promise consumerClosePromise = Promise.promise(); + if (consumer == null) { + return Future.succeededFuture(); + } LOG.debug("stop: close consumer"); - consumer.close(consumerClosePromise); - consumerClosePromise.future().onComplete(ar -> { + return consumer.close().onComplete(ar -> { LOG.debug("consumer closed"); Optional.ofNullable(metricsSupport).ifPresent(ms -> ms.unregisterKafkaConsumer(consumer.unwrap())); }); - return consumerClosePromise.future(); } void handleCommandMessage(final KafkaConsumerRecord record) { diff --git a/clients/command-kafka/src/test/java/org/eclipse/hono/client/command/kafka/KafkaBasedCommandResponseSenderTest.java b/clients/command-kafka/src/test/java/org/eclipse/hono/client/command/kafka/KafkaBasedCommandResponseSenderTest.java index a9c4aea5ba..e1d22a5cb3 100644 --- a/clients/command-kafka/src/test/java/org/eclipse/hono/client/command/kafka/KafkaBasedCommandResponseSenderTest.java +++ b/clients/command-kafka/src/test/java/org/eclipse/hono/client/command/kafka/KafkaBasedCommandResponseSenderTest.java @@ -49,6 +49,7 @@ import io.opentracing.Span; import io.opentracing.Tracer; import io.opentracing.noop.NoopSpan; +import io.vertx.core.Promise; import io.vertx.core.Vertx; import io.vertx.core.buffer.Buffer; import io.vertx.core.eventbus.EventBus; @@ -99,75 +100,79 @@ void testIfValidCommandResponseKafkaRecordIsSent(final VertxTestContext ctx, fin status); commandResponse.setAdditionalProperties(additionalProperties); + final TenantObject tenant = TenantObject.from(tenantId); + tenant.setResourceLimits(new ResourceLimits().setMaxTtlCommandResponse(10L)); final Span span = TracingMockSupport.mockSpan(); final Tracer tracer = TracingMockSupport.mockTracer(span); final var mockProducer = KafkaClientUnitTestHelper.newMockProducer(true); final var factory = CachingKafkaProducerFactory .testFactory(vertx, (n, c) -> KafkaClientUnitTestHelper.newKafkaProducer(mockProducer)); + final Promise readyTracker = Promise.promise(); final var sender = new KafkaBasedCommandResponseSender(vertx, factory, kafkaProducerConfig, tracer); - final TenantObject tenant = TenantObject.from(tenantId); - tenant.setResourceLimits(new ResourceLimits().setMaxTtlCommandResponse(10L)); + sender.addOnKafkaProducerReadyHandler(readyTracker); // WHEN sending a command response - sender.sendCommandResponse( + sender.start() + .compose(ok -> readyTracker.future()) + .compose(ok -> sender.sendCommandResponse( tenant, new RegistrationAssertion(deviceId), - commandResponse, NoopSpan.INSTANCE.context()) - .onComplete(ctx.succeeding(t -> { 
- ctx.verify(() -> { - // THEN the producer record is created from the given values... - final ProducerRecord record = mockProducer.history().get(0); - - assertThat(record.key()).isEqualTo(deviceId); - assertThat(record.topic()) - .isEqualTo(new HonoTopic(HonoTopic.Type.COMMAND_RESPONSE, tenantId).toString()); - assertThat(record.value().toString()).isEqualTo(payload); - - //Verify if the record contains the necessary headers. - final Headers headers = record.headers(); - KafkaClientUnitTestHelper.assertUniqueHeaderWithExpectedValue( - headers, - MessageHelper.APP_PROPERTY_TENANT_ID, - tenantId); - KafkaClientUnitTestHelper.assertUniqueHeaderWithExpectedValue( - headers, - MessageHelper.APP_PROPERTY_DEVICE_ID, - deviceId); - KafkaClientUnitTestHelper.assertUniqueHeaderWithExpectedValue( - headers, - MessageHelper.SYS_PROPERTY_CORRELATION_ID, - correlationId); - KafkaClientUnitTestHelper.assertUniqueHeaderWithExpectedValue( - headers, - MessageHelper.APP_PROPERTY_STATUS, - status); - KafkaClientUnitTestHelper.assertUniqueHeaderWithExpectedValue( - headers, - MessageHelper.SYS_PROPERTY_CONTENT_TYPE, - contentType); - - final var creationTimeHeader = headers.headers(MessageHelper.SYS_PROPERTY_CREATION_TIME); - assertThat(creationTimeHeader).hasSize(1); - final Long creationTimeMillis = Json.decodeValue( - Buffer.buffer(creationTimeHeader.iterator().next().value()), - Long.class); - assertThat(creationTimeMillis).isGreaterThan(0L); - KafkaClientUnitTestHelper.assertUniqueHeaderWithExpectedValue( - headers, - MessageHelper.SYS_HEADER_PROPERTY_TTL, - 10000L); - KafkaClientUnitTestHelper.assertUniqueHeaderWithExpectedValue( - headers, - additionalHeader1Name, - additionalHeader1Value); - KafkaClientUnitTestHelper.assertUniqueHeaderWithExpectedValue( - headers, - additionalHeader2Name, - additionalHeader2Value); - verify(span).finish(); - }); - ctx.completeNow(); - })); + commandResponse, NoopSpan.INSTANCE.context())) + .onComplete(ctx.succeeding(t -> { + ctx.verify(() -> { + // THEN the producer record is created from the given values... + final ProducerRecord record = mockProducer.history().get(0); + + assertThat(record.key()).isEqualTo(deviceId); + assertThat(record.topic()) + .isEqualTo(new HonoTopic(HonoTopic.Type.COMMAND_RESPONSE, tenantId).toString()); + assertThat(record.value().toString()).isEqualTo(payload); + + //Verify if the record contains the necessary headers. 
+ final Headers headers = record.headers(); + KafkaClientUnitTestHelper.assertUniqueHeaderWithExpectedValue( + headers, + MessageHelper.APP_PROPERTY_TENANT_ID, + tenantId); + KafkaClientUnitTestHelper.assertUniqueHeaderWithExpectedValue( + headers, + MessageHelper.APP_PROPERTY_DEVICE_ID, + deviceId); + KafkaClientUnitTestHelper.assertUniqueHeaderWithExpectedValue( + headers, + MessageHelper.SYS_PROPERTY_CORRELATION_ID, + correlationId); + KafkaClientUnitTestHelper.assertUniqueHeaderWithExpectedValue( + headers, + MessageHelper.APP_PROPERTY_STATUS, + status); + KafkaClientUnitTestHelper.assertUniqueHeaderWithExpectedValue( + headers, + MessageHelper.SYS_PROPERTY_CONTENT_TYPE, + contentType); + + final var creationTimeHeader = headers.headers(MessageHelper.SYS_PROPERTY_CREATION_TIME); + assertThat(creationTimeHeader).hasSize(1); + final Long creationTimeMillis = Json.decodeValue( + Buffer.buffer(creationTimeHeader.iterator().next().value()), + Long.class); + assertThat(creationTimeMillis).isGreaterThan(0L); + KafkaClientUnitTestHelper.assertUniqueHeaderWithExpectedValue( + headers, + MessageHelper.SYS_HEADER_PROPERTY_TTL, + 10000L); + KafkaClientUnitTestHelper.assertUniqueHeaderWithExpectedValue( + headers, + additionalHeader1Name, + additionalHeader1Value); + KafkaClientUnitTestHelper.assertUniqueHeaderWithExpectedValue( + headers, + additionalHeader2Name, + additionalHeader2Value); + verify(span).finish(); + }); + ctx.completeNow(); + })); } /** diff --git a/clients/kafka-common/pom.xml b/clients/kafka-common/pom.xml index 33cb516811..12878fcaca 100644 --- a/clients/kafka-common/pom.xml +++ b/clients/kafka-common/pom.xml @@ -49,6 +49,10 @@ io.micrometer micrometer-core + + io.smallrye + smallrye-health + io.quarkus quarkus-core diff --git a/clients/kafka-common/src/main/java/org/eclipse/hono/client/kafka/KafkaClientFactory.java b/clients/kafka-common/src/main/java/org/eclipse/hono/client/kafka/KafkaClientFactory.java index 6b329110a5..a61c878b3e 100644 --- a/clients/kafka-common/src/main/java/org/eclipse/hono/client/kafka/KafkaClientFactory.java +++ b/clients/kafka-common/src/main/java/org/eclipse/hono/client/kafka/KafkaClientFactory.java @@ -35,7 +35,6 @@ import io.vertx.core.Promise; import io.vertx.core.Vertx; import io.vertx.kafka.admin.KafkaAdminClient; -import io.vertx.kafka.client.consumer.KafkaConsumer; /** * A factory to create Kafka clients. @@ -50,7 +49,7 @@ public class KafkaClientFactory { /** * The number of milliseconds to wait before retrying to create a client. */ - public static final int CLIENT_CREATION_RETRY_DELAY_MILLIS = 100; + public static final int CLIENT_CREATION_RETRY_DELAY_MILLIS = 1000; private static final Logger LOG = LoggerFactory.getLogger(KafkaClientFactory.class); @@ -87,10 +86,8 @@ void setClock(final Clock clock) { /** * Creates a new Kafka client. *
<p>
- * If creation fails because the {@value CommonClientConfigs#BOOTSTRAP_SERVERS_CONFIG} config property contains a - * (non-empty) list of URLs that are not (yet) resolvable, further creation attempts are done with a delay of - * {@value #CLIENT_CREATION_RETRY_DELAY_MILLIS}ms in between. These retries are done until the given - * retriesTimeout has elapsed. + * Simply invokes {@link #createClientWithRetries(Supplier, Supplier, String, Duration)} with a keep retrying + * condition that always returns {@code true}. * * @param The type of client. * @param clientSupplier The action that will create the client. @@ -105,45 +102,42 @@ public Future createClientWithRetries( final Supplier clientSupplier, final String bootstrapServersConfig, final Duration retriesTimeout) { - Objects.requireNonNull(clientSupplier); - - final Promise resultPromise = Promise.promise(); - createClientWithRetries( - clientSupplier, - () -> true, - getRetriesTimeLimit(retriesTimeout), - () -> containsValidServerEntries(bootstrapServersConfig), - resultPromise); - return resultPromise.future(); + return createClientWithRetries(clientSupplier, () -> true, bootstrapServersConfig, retriesTimeout); } /** - * Creates a new KafkaConsumer. + * Creates a new Kafka client. *
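The keep-retrying condition mentioned above is what lets a component abandon pending creation retries once it leaves its starting phase. A sketch of how a caller such as the internal command consumer wires this up, where adminClientConfig and bootstrapServers stand for the relevant configuration values and lifecycleStatus is the component's LifecycleStatus field:

    final KafkaClientFactory kafkaClientFactory = new KafkaClientFactory(vertx);
    kafkaClientFactory.createClientWithRetries(
            () -> Admin.create(new HashMap<>(adminClientConfig)),
            lifecycleStatus::isStarting, // give up once the component is no longer starting
            bootstrapServers,
            KafkaClientFactory.UNLIMITED_RETRIES_DURATION)
        .onSuccess(adminClient -> LOG.debug("admin client created"));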
<p>
* If creation fails because the {@value CommonClientConfigs#BOOTSTRAP_SERVERS_CONFIG} config property contains a * (non-empty) list of URLs that are not (yet) resolvable, further creation attempts are done with a delay of * {@value #CLIENT_CREATION_RETRY_DELAY_MILLIS}ms in between. These retries are done until the given * retriesTimeout has elapsed. * - * @param consumerConfig The consumer configuration properties. + * @param The type of client. + * @param clientSupplier The action that will create the client. + * @param keepTrying A guard condition that controls whether another attempt to create the client should be + * started. Client code can set this to {@code false} in order to prevent any further attempts. + * @param bootstrapServersConfig The {@value CommonClientConfigs#BOOTSTRAP_SERVERS_CONFIG} config property value. + * A {@code null} value will lead to a failed result future. * @param retriesTimeout The maximum time for which retries are done. Using a negative duration or {@code null} * here is interpreted as an unlimited timeout value. - * @param The class type for the key deserialization. - * @param The class type for the value deserialization. * @return A future indicating the outcome of the creation attempt. - * @throws NullPointerException if any of the parameters except retriesTimeout is {@code null}. + * @throws NullPointerException if clientSupplier or keepTrying are {@code null}. */ - public Future> createKafkaConsumerWithRetries( - final Map consumerConfig, + public Future createClientWithRetries( + final Supplier clientSupplier, + final Supplier keepTrying, + final String bootstrapServersConfig, final Duration retriesTimeout) { - Objects.requireNonNull(consumerConfig); + Objects.requireNonNull(clientSupplier); + Objects.requireNonNull(keepTrying); - final Promise> resultPromise = Promise.promise(); + final Promise resultPromise = Promise.promise(); createClientWithRetries( - () -> KafkaConsumer.create(vertx, consumerConfig), - () -> true, + clientSupplier, + keepTrying, getRetriesTimeLimit(retriesTimeout), - () -> containsValidServerEntries(consumerConfig.get(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG)), + () -> containsValidServerEntries(bootstrapServersConfig), resultPromise); return resultPromise.future(); } diff --git a/clients/kafka-common/src/main/java/org/eclipse/hono/client/kafka/consumer/AsyncHandlingAutoCommitKafkaConsumer.java b/clients/kafka-common/src/main/java/org/eclipse/hono/client/kafka/consumer/AsyncHandlingAutoCommitKafkaConsumer.java index 1b03ee4e64..5710787e4e 100644 --- a/clients/kafka-common/src/main/java/org/eclipse/hono/client/kafka/consumer/AsyncHandlingAutoCommitKafkaConsumer.java +++ b/clients/kafka-common/src/main/java/org/eclipse/hono/client/kafka/consumer/AsyncHandlingAutoCommitKafkaConsumer.java @@ -327,9 +327,10 @@ protected void onRecordHandlerSkippedForExpiredRecord(final KafkaConsumerRecord< @Override public Future start() { - return super.start().onComplete(v -> { + addOnKafkaConsumerReadyHandler(ready -> { periodicCommitTimerId = vertx.setPeriodic(commitIntervalMillis, tid -> doPeriodicCommit()); }); + return super.start(); } @Override diff --git a/clients/kafka-common/src/main/java/org/eclipse/hono/client/kafka/consumer/HonoKafkaConsumer.java b/clients/kafka-common/src/main/java/org/eclipse/hono/client/kafka/consumer/HonoKafkaConsumer.java index 59cbc77db2..7e508de527 100644 --- a/clients/kafka-common/src/main/java/org/eclipse/hono/client/kafka/consumer/HonoKafkaConsumer.java +++ 
b/clients/kafka-common/src/main/java/org/eclipse/hono/client/kafka/consumer/HonoKafkaConsumer.java @@ -33,6 +33,7 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; @@ -42,19 +43,25 @@ import org.eclipse.hono.client.kafka.KafkaClientFactory; import org.eclipse.hono.client.kafka.KafkaRecordHelper; import org.eclipse.hono.client.kafka.metrics.KafkaClientMetricsSupport; +import org.eclipse.hono.client.util.ServiceClient; import org.eclipse.hono.util.Futures; import org.eclipse.hono.util.Lifecycle; +import org.eclipse.hono.util.LifecycleStatus; import org.eclipse.hono.util.Pair; +import org.eclipse.microprofile.health.HealthCheckResponse; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import io.quarkus.runtime.annotations.RegisterForReflection; +import io.vertx.core.AsyncResult; import io.vertx.core.CompositeFuture; import io.vertx.core.Context; import io.vertx.core.Future; import io.vertx.core.Handler; import io.vertx.core.Promise; import io.vertx.core.Vertx; +import io.vertx.ext.healthchecks.HealthCheckHandler; +import io.vertx.ext.healthchecks.Status; import io.vertx.kafka.client.common.TopicPartition; import io.vertx.kafka.client.common.impl.Helper; import io.vertx.kafka.client.consumer.KafkaConsumer; @@ -71,7 +78,7 @@ * @param The type of record payload this consumer supports. */ @RegisterForReflection(targets = io.vertx.kafka.client.consumer.impl.KafkaReadStreamImpl.class) -public class HonoKafkaConsumer implements Lifecycle { +public class HonoKafkaConsumer implements Lifecycle, ServiceClient { /** * The default timeout to use when polling the broker for messages. @@ -102,11 +109,14 @@ public class HonoKafkaConsumer implements Lifecycle { * The pattern of topic names that this consumer subscribes to. */ protected final Pattern topicPattern; + /** + * This component's current life cycle state. + */ + protected final LifecycleStatus lifecycleStatus = new LifecycleStatus(); private final AtomicReference, Promise>> subscriptionUpdatedAndPartitionsAssignedPromiseRef = new AtomicReference<>(); private final AtomicBoolean pollingPaused = new AtomicBoolean(); private final AtomicBoolean recordFetchingPaused = new AtomicBoolean(); - private final AtomicBoolean stopCalled = new AtomicBoolean(); private Handler> recordHandler; private KafkaConsumer kafkaConsumer; @@ -225,7 +235,7 @@ protected HonoKafkaConsumer( */ public final void setRecordHandler(final Handler> handler) { Objects.requireNonNull(handler); - if (isStarted()) { + if (!lifecycleStatus.isStopped()) { throw new IllegalStateException("Record handler can only be set if consumer has not been started yet"); } this.recordHandler = handler; @@ -239,8 +249,8 @@ public final void setRecordHandler(final Handler> * @throws IllegalStateException if this consumer is already started. 
*/ protected final void addTopic(final String topicName) { - Objects.requireNonNull(topics); - if (isStarted()) { + Objects.requireNonNull(topicName); + if (!lifecycleStatus.isStopped()) { throw new IllegalStateException("Topics can only be set if consumer has not been started yet"); } else if (topics == null) { throw new IllegalStateException("Cannot add topic on consumer which has been created with a topic pattern"); @@ -248,6 +258,17 @@ protected final void addTopic(final String topicName) { this.topics.add(topicName); } + /** + * Adds a handler to be invoked with a succeeded future once the Kafka consumer is ready to be used. + * + * @param handler The handler to invoke. The handler will never be invoked with a failed future. + */ + public final void addOnKafkaConsumerReadyHandler(final Handler> handler) { + if (handler != null) { + lifecycleStatus.addOnStartedHandler(handler); + } + } + /** * Sets a handler to be invoked on the vert.x event loop thread when partitions have been assigned * as part of a rebalance. @@ -492,74 +513,132 @@ protected final String getClientId() { } /** - * Checks if this consumer has been started already. + * {@inheritDoc} + *
<p>
+ * Does nothing. + */ + @Override + public void registerLivenessChecks(final HealthCheckHandler livenessHandler) { + } + + /** + * {@inheritDoc} + *
<p>
+ * Registers a check for the Kafka consumer being ready to be used. + */ + @Override + public void registerReadinessChecks(final HealthCheckHandler readinessHandler) { + readinessHandler.register( + "notification-kafka-consumer-creation-%s".formatted(UUID.randomUUID()), + status -> status.tryComplete(new Status().setOk(lifecycleStatus.isStarted()))); + } + + /** + * Checks if this consumer is ready to be used. * - * @return {@code true} if this consumer's start method has been invoked already. + * @return A response indicating if this consumer is ready to be used (UP) or not (DOWN). */ - protected final boolean isStarted() { - return context != null; + public final HealthCheckResponse checkReadiness() { + return HealthCheckResponse.builder() + .name("kafka-consumer-status") + .status(lifecycleStatus.isStarted()) + .build(); + } + + private Future> initConsumer(final KafkaConsumer consumer) { + + final Promise> initResult = Promise.promise(); + + Optional.ofNullable(metricsSupport).ifPresent(ms -> ms.registerKafkaConsumer(consumer.unwrap())); + consumer.handler(record -> { + if (!initResult.future().isComplete()) { + LOG.debug(""" + postponing record handling until consumer has been initialized \ + [topic: {}, partition: {}, offset: {}] + """, + record.topic(), record.partition(), record.offset()); + } + initResult.future().onSuccess(ok -> { + if (respectTtl && KafkaRecordHelper.isTtlElapsed(record.headers())) { + onRecordHandlerSkippedForExpiredRecord(record); + } else { + try { + recordHandler.handle(record); + } catch (final Exception e) { + LOG.warn("error handling record [topic: {}, partition: {}, offset: {}, headers: {}]", + record.topic(), record.partition(), record.offset(), record.headers(), e); + } + } + }); + }); + consumer.batchHandler(this::onBatchOfRecordsReceived); + consumer.exceptionHandler(error -> LOG.error("consumer error occurred [client-id: {}]", getClientId(), error)); + installRebalanceListeners(); + // let polls finish quickly until initConsumer() is completed + consumer.asStream().pollTimeout(Duration.ofMillis(10)); + // subscribe and wait for re-balance to make sure that when initConsumer() completes, + // the consumer is actually ready to receive records already + subscribeAndWaitForRebalance() + .onSuccess(ok -> { + consumer.asStream().pollTimeout(pollTimeout); + logSubscribedTopicsWhenConsumerIsReady(); + initResult.complete(consumer); + }) + .onFailure(initResult::fail); + return initResult.future(); } + /** + * {@inheritDoc} + *
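With HonoKafkaConsumer now implementing ServiceClient, its readiness can be surfaced through the standard vert.x health check mechanism. A sketch of the wiring, assuming a vertx-web Router named router:

    final HealthCheckHandler readinessHandler = HealthCheckHandler.create(vertx);
    consumer.registerReadinessChecks(readinessHandler); // reports UP once lifecycleStatus.isStarted()
    router.get("/readiness").handler(readinessHandler);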
<p>
+ * This method triggers the creation of a Kafka consumer in the background. A new attempt to create the + * consumer is made periodically until creation succeeds or the {@link #stop()} method has been invoked. + *
<p>
+ * Client code may {@linkplain #addOnKafkaConsumerReadyHandler(Handler) register a dedicated handler} + * to be notified once the consumer is up and running. + * + * @return A future indicating the outcome of the operation. + * The future will be failed with an {@link IllegalStateException} if the record handler is not set + * or if this component is already started or is in the process of being stopped. + * Note that the successful completion of the returned future does not mean that the consumer will be + * ready to receive messages from the broker. + */ @Override public Future start() { + if (recordHandler == null) { throw new IllegalStateException("Record handler must be set"); } + if (lifecycleStatus.isStarting()) { + LOG.debug("already starting consumer"); + return Future.succeededFuture(); + } else if (!lifecycleStatus.setStarting()) { + return Future.failedFuture(new IllegalStateException("consumer is already started/stopping")); + } + context = vertx.getOrCreateContext(); - final Promise startPromise = Promise.promise(); + final Supplier> consumerSupplier = () -> Optional.ofNullable(kafkaConsumerSupplier) + .map(s -> KafkaConsumer.create(vertx, s.get())) + .orElseGet(() -> KafkaConsumer.create(vertx, consumerConfig)); + runOnContext(v -> { - // create KafkaConsumer here so that it is created in the Vert.x context of the start() method (KafkaConsumer uses vertx.getOrCreateContext()) - Optional.ofNullable(kafkaConsumerSupplier) - .map(supplier -> Future.succeededFuture(KafkaConsumer.create(vertx, supplier.get()))) - .orElseGet(() -> { - final KafkaClientFactory kafkaClientFactory = new KafkaClientFactory(vertx); - return kafkaClientFactory.createKafkaConsumerWithRetries(consumerConfig, - consumerCreationRetriesTimeout); - }) - .onFailure(thr -> { - LOG.error("error creating consumer [client-id: {}]", getClientId(), thr); - startPromise.fail(thr); - }) - .onSuccess(consumer -> { - kafkaConsumer = consumer; - Optional.ofNullable(metricsSupport).ifPresent(ms -> ms.registerKafkaConsumer(kafkaConsumer.unwrap())); - kafkaConsumer.handler(record -> { - if (!startPromise.future().isComplete()) { - LOG.debug("postponing record handling until start() is completed [topic: {}, partition: {}, offset: {}]", - record.topic(), record.partition(), record.offset()); - } - startPromise.future().onSuccess(v2 -> { - if (respectTtl && KafkaRecordHelper.isTtlElapsed(record.headers())) { - onRecordHandlerSkippedForExpiredRecord(record); - } else { - try { - recordHandler.handle(record); - } catch (final Exception e) { - LOG.warn("error handling record [topic: {}, partition: {}, offset: {}, headers: {}]", - record.topic(), record.partition(), record.offset(), record.headers(), e); - } - } - }); - }); - kafkaConsumer.batchHandler(this::onBatchOfRecordsReceived); - kafkaConsumer.exceptionHandler(error -> LOG.error("consumer error occurred [client-id: {}]", - getClientId(), error)); - installRebalanceListeners(); - // subscribe and wait for rebalance to make sure that when start() completes, - // the consumer is actually ready to receive records already - kafkaConsumer.asStream().pollTimeout(Duration.ofMillis(10)); // let polls finish quickly until start() is completed - subscribeAndWaitForRebalance() - .onSuccess(v2 -> { - kafkaConsumer.asStream().pollTimeout(pollTimeout); - logSubscribedTopicsOnStartComplete(); - }) - .onComplete(startPromise); - }); + // create KafkaConsumer here so that it is created in the Vert.x context of the start() method + // (KafkaConsumer uses vertx.getOrCreateContext()) + final 
KafkaClientFactory kafkaClientFactory = new KafkaClientFactory(vertx); + kafkaClientFactory.createClientWithRetries( + consumerSupplier, + lifecycleStatus::isStarting, + consumerConfig.get(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG), + consumerCreationRetriesTimeout) + .onFailure(t -> LOG.error("error creating consumer [client-id: {}]", getClientId(), t)) + .onSuccess(consumer -> kafkaConsumer = consumer) + .compose(this::initConsumer) + .onSuccess(c -> lifecycleStatus.setStarted()); }); - return startPromise.future(); + return Future.succeededFuture(); } - private void logSubscribedTopicsOnStartComplete() { + private void logSubscribedTopicsWhenConsumerIsReady() { if (topicPattern != null) { if (subscribedTopicPatternTopics.size() <= 5) { LOG.debug("consumer started, subscribed to topic pattern [{}], matching topics: {}", topicPattern, @@ -753,7 +832,7 @@ private void updateSubscribedTopicPatternTopicsAndRemoveMetrics() { } private Future subscribeAndWaitForRebalance() { - if (stopCalled.get()) { + if (lifecycleStatus.isStopping() || lifecycleStatus.isStopped()) { return Future.failedFuture(new ServerErrorException(HttpURLConnection.HTTP_UNAVAILABLE, "already stopped")); } final Promise partitionAssignmentDone = Promise.promise(); @@ -868,21 +947,32 @@ private void onPartitionsLost(final Set partitionsSet) { } } + /** + * {@inheritDoc} + *
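All stop() implementations in this change funnel through LifecycleStatus.runStopAttempt, which replaces the hand-rolled stopCalled guards and makes repeated stop() invocations safe. A condensed sketch of the pattern as used below (kafkaConsumer being the component's consumer field, possibly still null if start never got that far):

    @Override
    public Future<Void> stop() {
        // the close action runs for the first stop attempt only
        return lifecycleStatus.runStopAttempt(() -> Optional.ofNullable(kafkaConsumer)
                .map(KafkaConsumer::close)
                .orElseGet(Future::succeededFuture));
    }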
<p>
+ * Closes the Kafka consumer. + * + * @return A future indicating the outcome of the operation. + * The future will be succeeded once this component is stopped. + */ @Override public Future stop() { - if (pollPauseTimeoutTimerId != null) { - vertx.cancelTimer(pollPauseTimeoutTimerId); - pollPauseTimeoutTimerId = null; - } - if (kafkaConsumer == null) { - return Future.failedFuture("not started"); - } else if (!stopCalled.compareAndSet(false, true)) { - LOG.trace("stop already called"); - return Future.succeededFuture(); - } - return kafkaConsumer.close() - .onComplete(ar -> Optional.ofNullable(metricsSupport) - .ifPresent(ms -> ms.unregisterKafkaConsumer(kafkaConsumer.unwrap()))); + + return lifecycleStatus.runStopAttempt(() -> { + if (pollPauseTimeoutTimerId != null) { + vertx.cancelTimer(pollPauseTimeoutTimerId); + pollPauseTimeoutTimerId = null; + } + + return Optional.ofNullable(kafkaConsumer) + .map(consumer -> consumer.close() + .onComplete(ar -> { + Optional.ofNullable(metricsSupport) + .ifPresent(ms -> ms.unregisterKafkaConsumer(kafkaConsumer.unwrap())); + })) + .orElseGet(Future::succeededFuture) + .onFailure(t -> LOG.info("error stopping Kafka consumer", t)); + }); } /** @@ -915,9 +1005,9 @@ protected void runOnKafkaWorkerThread(final Handler handler) { if (kafkaConsumerWorker == null) { throw new IllegalStateException(MSG_CONSUMER_NOT_INITIALIZED_STARTED); } - if (!stopCalled.get()) { + if (lifecycleStatus.isStarted()) { kafkaConsumerWorker.submit(() -> { - if (!stopCalled.get()) { + if (lifecycleStatus.isStarted()) { try { handler.handle(null); } catch (final Exception ex) { @@ -996,10 +1086,8 @@ public final Future ensureTopicIsAmongSubscribedTopicPatternTopics(final S } else if (!topicPattern.matcher(topic).find()) { throw new IllegalArgumentException("topic doesn't match pattern"); } - if (kafkaConsumer == null) { + if (!lifecycleStatus.isStarted()) { return Future.failedFuture(new ServerErrorException(HttpURLConnection.HTTP_INTERNAL_ERROR, "not started")); - } else if (stopCalled.get()) { - return Future.failedFuture(new ServerErrorException(HttpURLConnection.HTTP_UNAVAILABLE, "already stopped")); } // check whether topic exists and its existence has been applied to the wildcard subscription yet; // use previously updated topics list (less costly than invoking kafkaConsumer.subscription() here) diff --git a/clients/kafka-common/src/main/java/org/eclipse/hono/client/kafka/producer/AbstractKafkaBasedMessageSender.java b/clients/kafka-common/src/main/java/org/eclipse/hono/client/kafka/producer/AbstractKafkaBasedMessageSender.java index 001cab8822..3b07505436 100644 --- a/clients/kafka-common/src/main/java/org/eclipse/hono/client/kafka/producer/AbstractKafkaBasedMessageSender.java +++ b/clients/kafka-common/src/main/java/org/eclipse/hono/client/kafka/producer/AbstractKafkaBasedMessageSender.java @@ -29,6 +29,7 @@ import org.eclipse.hono.client.util.ServiceClient; import org.eclipse.hono.tracing.TracingHelper; import org.eclipse.hono.util.Lifecycle; +import org.eclipse.hono.util.LifecycleStatus; import org.eclipse.hono.util.MessageHelper; import org.eclipse.hono.util.MessagingClient; import org.eclipse.hono.util.MessagingType; @@ -41,7 +42,9 @@ import io.opentracing.SpanContext; import io.opentracing.Tracer; import io.opentracing.tag.Tags; +import io.vertx.core.AsyncResult; import io.vertx.core.Future; +import io.vertx.core.Handler; import io.vertx.core.json.EncodeException; import io.vertx.core.json.Json; import io.vertx.ext.healthchecks.HealthCheckHandler; @@ -53,6 +56,8 @@ 
/** * A client for publishing messages to a Kafka cluster. + *

+ * Wraps a vert.x Kafka producer that is created during startup. * * @param The type of payload supported by this sender. */ @@ -67,14 +72,15 @@ public abstract class AbstractKafkaBasedMessageSender implements MessagingCli * An OpenTracing tracer to be shared with subclasses. */ protected final Tracer tracer; + /** + * This component's current life cycle state. + */ + protected final LifecycleStatus lifecycleStatus = new LifecycleStatus(); private final KafkaProducerConfigProperties config; private final KafkaProducerFactory producerFactory; private final String producerName; - private boolean stopped = false; - private boolean producerCreated = false; - /** * Creates a new Kafka-based message sender. * @@ -105,48 +111,78 @@ public final MessagingType getMessagingType() { return MessagingType.kafka; } + /** + * Adds a handler to be invoked with a succeeded future once the Kafka producer is ready to be used. + * + * @param handler The handler to invoke. The handler will never be invoked with a failed future. + */ + public final void addOnKafkaProducerReadyHandler(final Handler> handler) { + if (handler != null) { + lifecycleStatus.addOnStartedHandler(handler); + } + } + /** * {@inheritDoc} *

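The ready handler added above is what lets callers distinguish "start() has returned" from "the producer is actually usable". A sketch of the intended wiring, with a Promise acting as the handler (sender and LOG are placeholders; the tests further down use exactly this pattern):

import io.vertx.core.Promise;

final Promise<Void> readyTracker = Promise.promise();
// Promise implements Handler<AsyncResult<Void>> and can therefore serve as the handler
sender.addOnKafkaProducerReadyHandler(readyTracker);
sender.start()
        .compose(ok -> readyTracker.future())
        .onSuccess(ok -> LOG.info("Kafka producer is ready"));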
- * Starts the producer. + * Registers a procedure for checking if this client's initial Kafka client creation succeeded. */ @Override - public Future start() { - stopped = false; - - return Future.succeededFuture() - .map(v -> getOrCreateProducer()) // enclosed in map() to catch exceptions - .onSuccess(v -> producerCreated = true) - .recover(thr -> { - if (KafkaClientFactory.isRetriableClientCreationError(thr, config.getBootstrapServers())) { - // retry client creation in the background - getOrCreateProducerWithRetries() - .onSuccess(v -> producerCreated = true); - return Future.succeededFuture(); - } - return Future.failedFuture(thr); - }) - .mapEmpty(); + public void registerReadinessChecks(final HealthCheckHandler readinessHandler) { + // verify that client creation succeeded + readinessHandler.register( + "%s-kafka-producer-creation-%s".formatted(producerName, UUID.randomUUID()), + status -> status.tryComplete(new Status().setOk(lifecycleStatus.isStarted()))); + } + + @Override + public void registerLivenessChecks(final HealthCheckHandler livenessHandler) { + // no liveness checks to be added } /** * {@inheritDoc} *

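For context, the readiness check registered above plugs into the standard Vert.x health-check machinery. A sketch of how an application might expose it over HTTP (vertx, sender and a vertx-web router are assumed to be in scope):

import io.vertx.ext.healthchecks.HealthCheckHandler;

final HealthCheckHandler readinessChecks = HealthCheckHandler.create(vertx);
sender.registerReadinessChecks(readinessChecks);
// the check reports UP only once the life cycle state is STARTED,
// i.e. once the Kafka producer has been created successfully
router.get("/readiness").handler(readinessChecks);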
- * Closes the producer. + * This method triggers the creation of a Kafka producer in the background. A new attempt to create the + * producer is made once a second until creation succeeds or the {@link #stop()} method has been invoked. + *

+ * Client code may {@linkplain #addOnKafkaProducerReadyHandler(Handler) register a dedicated handler} + * to be notified once the producer has been created successfully. + * + * @return A succeeded future. Note that the successful completion of the returned future does not + * mean that the producer will be ready to send messages to the broker. */ @Override - public Future stop() { - stopped = true; - return producerFactory.closeProducer(producerName); + public Future start() { + + if (lifecycleStatus.isStarting()) { + return Future.succeededFuture(); + } else if (!lifecycleStatus.setStarting()) { + return Future.failedFuture(new IllegalStateException("sender is already started/stopping")); + } + + producerFactory.getOrCreateProducerWithRetries( + producerName, + config, + lifecycleStatus::isStarting, + KafkaClientFactory.UNLIMITED_RETRIES_DURATION) + .onSuccess(producer -> lifecycleStatus.setStarted()); + + return Future.succeededFuture(); } /** - * Checks if this sender has been stopped. + * {@inheritDoc} + *

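Spelled out, the new start() above encodes the following idempotency rules (a sketch only; sender and LOG are placeholders, the behavior follows directly from the code in this hunk):

sender.start();    // first call: state moves to STARTING, producer creation begins in the background
sender.start();    // called again while starting: just another succeeded future, no second attempt
sender.stop()
        .compose(ok -> sender.start())
        // fails with IllegalStateException ("sender is already started/stopping"):
        // once stopped, the sender is not meant to be restarted
        .onFailure(t -> LOG.debug("restart is not supported", t));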
+ * Closes the producer. * - * @return {@code true} if this sender's {@link #stop()} method has been called already. + * @return A future indicating the outcome of the operation. + * The future will be succeeded once this component is stopped. */ - protected final boolean isStopped() { - return stopped; + @Override + public Future stop() { + + return lifecycleStatus.runStopAttempt(this::stopProducer); } /** @@ -220,8 +256,8 @@ protected final Future sendAndWaitForOutcome( Objects.requireNonNull(headers); Objects.requireNonNull(currentSpan); - if (isStopped()) { - return Future.failedFuture(new ServerErrorException(HttpURLConnection.HTTP_UNAVAILABLE, "sender already stopped")); + if (!lifecycleStatus.isStarted()) { + return Future.failedFuture(new ServerErrorException(HttpURLConnection.HTTP_UNAVAILABLE, "sender not started")); } final KafkaProducerRecord record = KafkaProducerRecord.create(topic, deviceId, payload); @@ -249,27 +285,13 @@ protected final KafkaProducer getOrCreateProducer() { return producerFactory.getOrCreateProducer(producerName, config); } - private Future> getOrCreateProducerWithRetries() { - return producerFactory.getOrCreateProducerWithRetries(producerName, config, - KafkaClientFactory.UNLIMITED_RETRIES_DURATION); - } - /** - * {@inheritDoc} - *

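One consequence of the guard in sendAndWaitForOutcome above: a subclass that attempts a send before the ready handler has fired now fails fast instead of racing a half-created producer. A sketch (the topic, tenant and device values are arbitrary):

import java.util.Map;

import io.opentracing.noop.NoopSpan;
import io.vertx.core.Future;
import io.vertx.core.buffer.Buffer;

// inside a hypothetical subclass of AbstractKafkaBasedMessageSender:
Future<Void> sendEarly() {
    return sendAndWaitForOutcome("hono.telemetry.DEFAULT_TENANT", "DEFAULT_TENANT", "4711",
            Buffer.buffer("temp: 5"), Map.of(), NoopSpan.INSTANCE)
        .onFailure(t -> {
            // before the ready handler fires: ServerErrorException,
            // status 503, message "sender not started"
        });
}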
- * Registers a procedure for checking if this client's initial Kafka client creation succeeded. + * Closes the producer used for sending records. + * + * @return The outcome of the attempt to close the producer. */ - @Override - public void registerReadinessChecks(final HealthCheckHandler readinessHandler) { - // verify that client creation succeeded - readinessHandler.register( - String.format("%s-kafka-client-creation-%s", producerName, UUID.randomUUID()), - status -> status.tryComplete(producerCreated ? Status.OK() : Status.KO())); - } - - @Override - public void registerLivenessChecks(final HealthCheckHandler livenessHandler) { - // no liveness checks to be added + protected final Future stopProducer() { + return producerFactory.closeProducer(producerName); } /** diff --git a/clients/kafka-common/src/main/java/org/eclipse/hono/client/kafka/producer/CachingKafkaProducerFactory.java b/clients/kafka-common/src/main/java/org/eclipse/hono/client/kafka/producer/CachingKafkaProducerFactory.java index dab222f71f..80e4e8cfe5 100644 --- a/clients/kafka-common/src/main/java/org/eclipse/hono/client/kafka/producer/CachingKafkaProducerFactory.java +++ b/clients/kafka-common/src/main/java/org/eclipse/hono/client/kafka/producer/CachingKafkaProducerFactory.java @@ -15,10 +15,12 @@ import java.time.Duration; import java.util.Map; +import java.util.Objects; import java.util.Optional; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiFunction; +import java.util.function.Supplier; import org.apache.kafka.clients.producer.ProducerConfig; import org.apache.kafka.common.errors.AuthorizationException; @@ -183,24 +185,34 @@ public KafkaProducer getOrCreateProducer(final String producerName, public Future> getOrCreateProducerWithRetries( final String producerName, final KafkaProducerConfigProperties config, + final Supplier keepTrying, final Duration retriesTimeout) { + Objects.requireNonNull(producerName); + Objects.requireNonNull(config); + Objects.requireNonNull(keepTrying); + final String bootstrapServersConfig = config.getBootstrapServers(); return kafkaClientFactory.createClientWithRetries( () -> getOrCreateProducer(producerName, config), + keepTrying, bootstrapServersConfig, retriesTimeout); } - private Handler getExceptionHandler(final String producerName, final KafkaProducer producer, final String clientId) { + private Handler getExceptionHandler( + final String producerName, + final KafkaProducer producer, + final String clientId) { + return t -> { // this is executed asynchronously after the send operation has finished if (isFatalError(t)) { LOG.error("fatal producer error occurred, closing producer [clientId: {}]", clientId, t); activeProducers.remove(producerName); producer.close() - .onComplete(ar -> Optional.ofNullable(metricsSupport) - .ifPresent(ms -> ms.unregisterKafkaProducer(producer.unwrap()))); + .onComplete(ar -> Optional.ofNullable(metricsSupport) + .ifPresent(ms -> ms.unregisterKafkaProducer(producer.unwrap()))); } else { LOG.error("producer error occurred [clientId: {}]", clientId, t); } diff --git a/clients/kafka-common/src/main/java/org/eclipse/hono/client/kafka/producer/KafkaProducerFactory.java b/clients/kafka-common/src/main/java/org/eclipse/hono/client/kafka/producer/KafkaProducerFactory.java index 9811df1bbf..65d8f252f0 100644 --- a/clients/kafka-common/src/main/java/org/eclipse/hono/client/kafka/producer/KafkaProducerFactory.java +++ 
b/clients/kafka-common/src/main/java/org/eclipse/hono/client/kafka/producer/KafkaProducerFactory.java @@ -15,6 +15,7 @@ import java.time.Duration; import java.util.Optional; +import java.util.function.Supplier; import org.apache.kafka.clients.CommonClientConfigs; import org.eclipse.hono.client.kafka.metrics.KafkaClientMetricsSupport; @@ -82,12 +83,18 @@ public interface KafkaProducerFactory { * * @param producerName The name to identify the producer. * @param config The Kafka configuration with which the producer is to be created. + * @param keepTrying A guard condition that controls whether another attempt to create the producer should be + * started. Client code can set this to {@code false} in order to prevent any further attempts. * @param retriesTimeout The maximum time for which retries are done. Using a negative duration or {@code null} * here is interpreted as an unlimited timeout value. * @return A future with an existing or new producer. + * @throws NullPointerException if any of the parameters except retries timeout are {@code null}. */ - Future> getOrCreateProducerWithRetries(String producerName, - KafkaProducerConfigProperties config, Duration retriesTimeout); + Future> getOrCreateProducerWithRetries( + String producerName, + KafkaProducerConfigProperties config, + Supplier keepTrying, + Duration retriesTimeout); /** * Closes the producer with the given producer name if it exists. diff --git a/clients/kafka-common/src/test/java/org/eclipse/hono/client/kafka/KafkaClientFactoryTest.java b/clients/kafka-common/src/test/java/org/eclipse/hono/client/kafka/KafkaClientFactoryTest.java index 163e49c55a..f045453cdf 100644 --- a/clients/kafka-common/src/test/java/org/eclipse/hono/client/kafka/KafkaClientFactoryTest.java +++ b/clients/kafka-common/src/test/java/org/eclipse/hono/client/kafka/KafkaClientFactoryTest.java @@ -21,6 +21,7 @@ import java.time.Duration; import java.util.HashMap; import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Supplier; @@ -62,11 +63,11 @@ public void testClientCreationIsDoneWithRetries() { return client; }; kafkaClientFactory.createClientWithRetries(clientSupplier, bootstrapServers, Duration.ofSeconds(20)) - .onComplete(ar -> { - assertThat(ar.succeeded()).isTrue(); - assertThat(ar.result()).isEqualTo(client); - assertThat(creationAttempts.get()).isEqualTo(expectedCreationAttempts); - }); + .onComplete(ar -> { + assertThat(ar.succeeded()).isTrue(); + assertThat(ar.result()).isEqualTo(client); + assertThat(creationAttempts.get()).isEqualTo(expectedCreationAttempts); + }); } /** @@ -79,7 +80,7 @@ public void testClientCreationFailsForInvalidServersEntry() { // contains entry with missing port final String invalidBootstrapServers = "some.hostname.that.is.invalid, some.hostname.that.is.invalid:9094"; - final Map clientConfig = Map.of(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, invalidBootstrapServers); + final var clientConfig = Map.of(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, invalidBootstrapServers); final AtomicInteger creationAttempts = new AtomicInteger(); final Supplier clientSupplier = () -> { @@ -128,6 +129,40 @@ public void testClientCreationFailsAfterTimeoutReached() { }); } + /** + * Verifies that trying to create a client fails after client code has canceled further attempts. 
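The new keepTrying parameter ties the retry loop to the caller's life cycle. The intended call site mirrors the sender's start() shown earlier; a sketch (producerFactory, config and lifecycleStatus are assumed to be in scope):

import org.eclipse.hono.client.kafka.KafkaClientFactory;

producerFactory.getOrCreateProducerWithRetries(
        "event-sender",                  // arbitrary producer name
        config,
        lifecycleStatus::isStarting,     // retrying ends as soon as stop() is invoked
        KafkaClientFactory.UNLIMITED_RETRIES_DURATION)
    .onSuccess(producer -> lifecycleStatus.setStarted());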
+ */ + @Test + public void testClientCreationFailsAfterCanceling() { + VertxMockSupport.runTimersImmediately(vertx); + final KafkaClientFactory kafkaClientFactory = new KafkaClientFactory(vertx); + + final String bootstrapServers = "some.hostname.that.is.invalid:9094"; + final var clientConfig = Map.of(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); + + final var keepTrying = new AtomicBoolean(true); + final var creationAttempts = new AtomicInteger(); + final int expectedCreationAttempts = 3; + final Supplier clientSupplier = () -> { + if (creationAttempts.incrementAndGet() == expectedCreationAttempts) { + // let following retries be skipped by canceling further attempts + keepTrying.set(false); + } + Admin.create(new HashMap<>(clientConfig)); + throw new AssertionError("admin client creation should have thrown exception"); + }; + kafkaClientFactory.createClientWithRetries( + clientSupplier, + keepTrying::get, + bootstrapServers, + KafkaClientFactory.UNLIMITED_RETRIES_DURATION) + .onComplete(ar -> { + assertThat(ar.succeeded()).isFalse(); + // only the attempts made before canceling further retries should have occurred here + assertThat(creationAttempts.get()).isEqualTo(expectedCreationAttempts); + }); + } + /** * Verifies that trying to create a client with an unresolvable URL fails if the retry timeout is set to zero. */ diff --git a/clients/kafka-common/src/test/java/org/eclipse/hono/client/kafka/consumer/AsyncHandlingAutoCommitKafkaConsumerTest.java b/clients/kafka-common/src/test/java/org/eclipse/hono/client/kafka/consumer/AsyncHandlingAutoCommitKafkaConsumerTest.java index 3a687e43a2..4b31cc1e12 100644 --- a/clients/kafka-common/src/test/java/org/eclipse/hono/client/kafka/consumer/AsyncHandlingAutoCommitKafkaConsumerTest.java +++ b/clients/kafka-common/src/test/java/org/eclipse/hono/client/kafka/consumer/AsyncHandlingAutoCommitKafkaConsumerTest.java @@ -140,6 +140,7 @@ public void testConsumerCreationWithTopicListSucceeds(final VertxTestContext ctx final Function, Future> handler = record -> Future.succeededFuture(); final Map consumerConfig = consumerConfigProperties.getConsumerConfig("test"); consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString()); + final Promise readyTracker = Promise.promise(); mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L)); mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L)); @@ -147,7 +148,10 @@ public void testConsumerCreationWithTopicListSucceeds(final VertxTestContext ctx mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(TOPIC_PARTITION)); consumer = new AsyncHandlingAutoCommitKafkaConsumer<>(vertx, Set.of(TOPIC), handler, consumerConfig); consumer.setKafkaConsumerSupplier(() -> mockConsumer); - consumer.start().onComplete(ctx.succeedingThenComplete()); + consumer.addOnKafkaConsumerReadyHandler(readyTracker); + consumer.start() + .compose(ok -> readyTracker.future()) + .onComplete(ctx.succeedingThenComplete()); } /** @@ -160,6 +164,7 @@ public void testConsumerCreationWithTopicPatternSucceeds(final VertxTestContext final Function, Future> handler = record -> Future.succeededFuture(); final Map consumerConfig = consumerConfigProperties.getConsumerConfig("test"); consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString()); + final Promise readyTracker = Promise.promise(); mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L)); mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L)); @@ -167,7 +172,10 @@ public void
testConsumerCreationWithTopicPatternSucceeds(final VertxTestContext mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(TOPIC_PARTITION)); consumer = new AsyncHandlingAutoCommitKafkaConsumer<>(vertx, TOPIC_PATTERN, handler, consumerConfig); consumer.setKafkaConsumerSupplier(() -> mockConsumer); - consumer.start().onComplete(ctx.succeedingThenComplete()); + consumer.addOnKafkaConsumerReadyHandler(readyTracker); + consumer.start() + .compose(ok -> readyTracker.future()) + .onComplete(ctx.succeedingThenComplete()); } /** @@ -220,6 +228,7 @@ public void testConsumerRespectsMaxRecordsInProcessingLimit(final VertxTestConte final Map consumerConfig = consumerConfigProperties.getConsumerConfig("test"); consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString()); consumerConfig.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, Integer.toString(maxPollRecords)); + final Promise readyTracker = Promise.promise(); mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L)); mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L)); @@ -227,24 +236,27 @@ public void testConsumerRespectsMaxRecordsInProcessingLimit(final VertxTestConte mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(TOPIC_PARTITION)); consumer = new AsyncHandlingAutoCommitKafkaConsumer<>(vertx, Set.of(TOPIC), recordHandler, consumerConfig); consumer.setKafkaConsumerSupplier(() -> mockConsumer); - consumer.start().onComplete(ctx.succeeding(v2 -> { - // schedule the poll tasks - schedulePollTasksWithConsumerPausedCheck(offsetCounter, numRecordsPerBatch, testBatchesToAdd); - final long timerId = vertx.setTimer(8000, tid -> { - LOG.info("received records:\n{}", - receivedRecords.stream().map(Object::toString).collect(Collectors.joining(",\n"))); - allRecordsReceivedPromise.tryFail(String.format("only received %d out of %d expected messages after 8s", - uncompletedRecordHandlingPromises.size(), numRecords)); - }); - allRecordsReceivedPromise.future().onComplete(ctx.succeeding(v -> { - vertx.cancelTimer(timerId); - ctx.verify(() -> { - assertWithMessage("observed max no. of records in processing") - .that(observedMaxRecordsInProcessing.get()).isEqualTo(maxRecordsInProcessing); + consumer.addOnKafkaConsumerReadyHandler(readyTracker); + consumer.start() + .compose(ok -> readyTracker.future()) + .onComplete(ctx.succeeding(v2 -> { + // schedule the poll tasks + schedulePollTasksWithConsumerPausedCheck(offsetCounter, numRecordsPerBatch, testBatchesToAdd); + final long timerId = vertx.setTimer(8000, tid -> { + LOG.info("received records:\n{}", + receivedRecords.stream().map(Object::toString).collect(Collectors.joining(",\n"))); + allRecordsReceivedPromise.tryFail(String.format("only received %d out of %d expected messages after 8s", + uncompletedRecordHandlingPromises.size(), numRecords)); }); - ctx.completeNow(); + allRecordsReceivedPromise.future().onComplete(ctx.succeeding(v -> { + vertx.cancelTimer(timerId); + ctx.verify(() -> { + assertWithMessage("observed max no. 
of records in processing") + .that(observedMaxRecordsInProcessing.get()).isEqualTo(maxRecordsInProcessing); + }); + ctx.completeNow(); + })); })); - })); } /** @@ -305,6 +317,7 @@ public void testConsumerCommitsOffsetsOnRebalance(final VertxTestContext ctx) th consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString()); consumerConfig.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "300000"); // periodic commit shall not play a role here consumerConfig.put(AsyncHandlingAutoCommitKafkaConsumer.CONFIG_HONO_OFFSETS_COMMIT_RECORD_COMPLETION_TIMEOUT_MILLIS, "0"); + final Promise readyTracker = Promise.promise(); mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L)); mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L)); @@ -312,13 +325,16 @@ public void testConsumerCommitsOffsetsOnRebalance(final VertxTestContext ctx) th mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(TOPIC_PARTITION)); consumer = new AsyncHandlingAutoCommitKafkaConsumer<>(vertx, Set.of(TOPIC), handler, consumerConfig); consumer.setKafkaConsumerSupplier(() -> mockConsumer); - consumer.start().onComplete(ctx.succeeding(v2 -> { - mockConsumer.schedulePollTask(() -> { - IntStream.range(0, numTestRecords).forEach(offset -> { - mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, offset, "key_" + offset, Buffer.buffer())); + consumer.addOnKafkaConsumerReadyHandler(readyTracker); + consumer.start() + .compose(ok -> readyTracker.future()) + .onComplete(ctx.succeeding(v2 -> { + mockConsumer.schedulePollTask(() -> { + IntStream.range(0, numTestRecords).forEach(offset -> { + mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, offset, "key_" + offset, Buffer.buffer())); + }); }); - }); - })); + })); assertWithMessage("records received in 5s") .that(receivedRecordsCtx.awaitCompletion(5, TimeUnit.SECONDS)) .isTrue(); @@ -412,6 +428,7 @@ public void testConsumerCommitsOffsetsOnRebalanceAfterWaitingForRecordCompletion consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString()); consumerConfig.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "300000"); // periodic commit shall not play a role here consumerConfig.put(AsyncHandlingAutoCommitKafkaConsumer.CONFIG_HONO_OFFSETS_COMMIT_RECORD_COMPLETION_TIMEOUT_MILLIS, "21000"); + final Promise readyTracker = Promise.promise(); mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L)); mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L)); @@ -429,16 +446,19 @@ protected void onPartitionsRevokedBlocking( } }; consumer.setKafkaConsumerSupplier(() -> mockConsumer); + consumer.addOnKafkaConsumerReadyHandler(readyTracker); final Context consumerVertxContext = vertx.getOrCreateContext(); consumerVertxContext.runOnContext(v -> { - consumer.start().onComplete(ctx.succeeding(v2 -> { - mockConsumer.schedulePollTask(() -> { - IntStream.range(0, numTestRecords).forEach(offset -> { - mockConsumer.addRecord( - new ConsumerRecord<>(TOPIC, PARTITION, offset, "key_" + offset, Buffer.buffer())); + consumer.start() + .compose(ok -> readyTracker.future()) + .onComplete(ctx.succeeding(v2 -> { + mockConsumer.schedulePollTask(() -> { + IntStream.range(0, numTestRecords).forEach(offset -> { + mockConsumer.addRecord( + new ConsumerRecord<>(TOPIC, PARTITION, offset, "key_" + offset, Buffer.buffer())); + }); }); - }); - })); + })); }); assertWithMessage("records received in 5s") .that(receivedRecordsCtx.awaitCompletion(5, TimeUnit.SECONDS)) @@ -504,6 +524,7 @@ public void testConsumerCommitsOffsetsOnStop(final 
VertxTestContext ctx) throws consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString()); consumerConfig.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "300000"); // periodic commit shall not play a role here consumerConfig.put(AsyncHandlingAutoCommitKafkaConsumer.CONFIG_HONO_OFFSETS_COMMIT_RECORD_COMPLETION_TIMEOUT_MILLIS, "0"); + final Promise readyTracker = Promise.promise(); mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L)); mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L)); @@ -511,13 +532,16 @@ public void testConsumerCommitsOffsetsOnStop(final VertxTestContext ctx) throws mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(TOPIC_PARTITION)); consumer = new AsyncHandlingAutoCommitKafkaConsumer<>(vertx, Set.of(TOPIC), handler, consumerConfig); consumer.setKafkaConsumerSupplier(() -> mockConsumer); - consumer.start().onComplete(ctx.succeeding(v2 -> { - mockConsumer.schedulePollTask(() -> { - IntStream.range(0, numTestRecords).forEach(offset -> { - mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, offset, "key_" + offset, Buffer.buffer())); + consumer.addOnKafkaConsumerReadyHandler(readyTracker); + consumer.start() + .compose(ok -> readyTracker.future()) + .onComplete(ctx.succeeding(v2 -> { + mockConsumer.schedulePollTask(() -> { + IntStream.range(0, numTestRecords).forEach(offset -> { + mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, offset, "key_" + offset, Buffer.buffer())); + }); }); - }); - })); + })); assertWithMessage("records received in 5s") .that(receivedRecordsCtx.awaitCompletion(5, TimeUnit.SECONDS)) .isTrue(); @@ -563,6 +587,7 @@ public void testConsumerCommitsOffsetsPeriodically(final VertxTestContext ctx) { // 1000ms commit interval - keep the value not too low, // otherwise the frequent commit task on the event loop thread will prevent the test main thread from getting things done consumerConfig.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000"); + final Promise readyTracker = Promise.promise(); mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L)); mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L)); @@ -571,11 +596,14 @@ public void testConsumerCommitsOffsetsPeriodically(final VertxTestContext ctx) { consumer = new AsyncHandlingAutoCommitKafkaConsumer<>(vertx, Set.of(TOPIC), handler, consumerConfig); consumer.setKafkaConsumerSupplier(() -> mockConsumer); - consumer.start().onComplete(ctx.succeeding(v2 -> { - mockConsumer.schedulePollTask(() -> { - mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, 0, "key_0", Buffer.buffer())); - }); - })); + consumer.addOnKafkaConsumerReadyHandler(readyTracker); + consumer.start() + .compose(ok -> readyTracker.future()) + .onComplete(ctx.succeeding(v2 -> { + mockConsumer.schedulePollTask(() -> { + mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, 0, "key_0", Buffer.buffer())); + }); + })); testRecordsReceived.future().onComplete(v -> { // we have no hook to integrate into for the commit check // therefore do the check multiple times with some delay in between @@ -621,6 +649,7 @@ public void testConsumerCommitsInitialOffset(final VertxTestContext ctx) throws // 1000ms commit interval - keep the value not too low, // otherwise the frequent commit task on the event loop thread will prevent the test main thread from getting things done consumerConfig.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000"); + final Promise readyTracker = Promise.promise(); 
mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L)); mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L)); @@ -632,9 +661,12 @@ public void testConsumerCommitsInitialOffset(final VertxTestContext ctx) throws consumer = new AsyncHandlingAutoCommitKafkaConsumer<>(vertx, Set.of(TOPIC), handler, consumerConfig); consumer.setKafkaConsumerSupplier(() -> mockConsumer); consumer.setOnRebalanceDoneHandler(s -> consumerStartedCheckpoint.flag()); + consumer.addOnKafkaConsumerReadyHandler(readyTracker); vertx.getOrCreateContext().runOnContext(v -> { - consumer.start().onSuccess(v2 -> consumerStartedCheckpoint.flag()); - }); + consumer.start() + .compose(ok -> readyTracker.future()) + .onSuccess(v2 -> consumerStartedCheckpoint.flag()); + }); assertWithMessage("consumer started in 5s") .that(consumerStartedCtx.awaitCompletion(5, TimeUnit.SECONDS)) .isTrue(); @@ -723,6 +755,7 @@ public void testScenarioWithPartitionRevokedWhileHandlingIncomplete(final VertxT consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString()); consumerConfig.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "300000"); // periodic commit shall not play a role here consumerConfig.put(AsyncHandlingAutoCommitKafkaConsumer.CONFIG_HONO_OFFSETS_COMMIT_RECORD_COMPLETION_TIMEOUT_MILLIS, "0"); + final Promise readyTracker = Promise.promise(); mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L)); mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L)); @@ -730,14 +763,17 @@ public void testScenarioWithPartitionRevokedWhileHandlingIncomplete(final VertxT mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(TOPIC_PARTITION)); consumer = new AsyncHandlingAutoCommitKafkaConsumer<>(vertx, Set.of(TOPIC), handler, consumerConfig); consumer.setKafkaConsumerSupplier(() -> mockConsumer); - consumer.start().onComplete(ctx.succeeding(v2 -> { - mockConsumer.schedulePollTask(() -> { - IntStream.range(0, numTestRecords).forEach(offset -> { - mockConsumer.addRecord( - new ConsumerRecord<>(TOPIC, PARTITION, offset, "key_" + offset, Buffer.buffer())); + consumer.addOnKafkaConsumerReadyHandler(readyTracker); + consumer.start() + .compose(ok -> readyTracker.future()) + .onComplete(ctx.succeeding(v2 -> { + mockConsumer.schedulePollTask(() -> { + IntStream.range(0, numTestRecords).forEach(offset -> { + mockConsumer.addRecord( + new ConsumerRecord<>(TOPIC, PARTITION, offset, "key_" + offset, Buffer.buffer())); + }); }); - }); - })); + })); assertWithMessage("records received in 5s") .that(receivedRecordsCtx.awaitCompletion(5, TimeUnit.SECONDS)) .isTrue(); @@ -791,6 +827,7 @@ public void testConsumerCommitsOffsetsOfSkippedExpiredRecords(final VertxTestCon final Map consumerConfig = consumerConfigProperties.getConsumerConfig("test"); consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString()); consumerConfig.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "300000"); // periodic commit shall not play a role here + final Promise readyTracker = Promise.promise(); mockConsumer.updateBeginningOffsets(Map.of(TOPIC_PARTITION, 0L)); mockConsumer.updateEndOffsets(Map.of(TOPIC_PARTITION, 0L)); @@ -804,17 +841,20 @@ protected void onRecordHandlerSkippedForExpiredRecord(final KafkaConsumerRecord< } }; consumer.setKafkaConsumerSupplier(() -> mockConsumer); + consumer.addOnKafkaConsumerReadyHandler(readyTracker); final Context consumerVertxContext = vertx.getOrCreateContext(); consumerVertxContext.runOnContext(v -> { - consumer.start().onComplete(ctx.succeeding(v2 -> { - 
mockConsumer.schedulePollTask(() -> { - // add record with elapsed ttl - mockConsumer.addRecord(createRecordWithElapsedTtl()); - IntStream.range(1, numNonExpiredTestRecords + 1).forEach(offset -> { - mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, offset, "key_" + offset, Buffer.buffer())); + consumer.start() + .compose(ok -> readyTracker.future()) + .onComplete(ctx.succeeding(v2 -> { + mockConsumer.schedulePollTask(() -> { + // add record with elapsed ttl + mockConsumer.addRecord(createRecordWithElapsedTtl()); + IntStream.range(1, numNonExpiredTestRecords + 1).forEach(offset -> { + mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, offset, "key_" + offset, Buffer.buffer())); + }); }); - }); - })); + })); }); assertWithMessage("records received in 5s") .that(receivedRecordsCtx.awaitCompletion(5, TimeUnit.SECONDS)) diff --git a/clients/kafka-common/src/test/java/org/eclipse/hono/client/kafka/consumer/HonoKafkaConsumerTest.java b/clients/kafka-common/src/test/java/org/eclipse/hono/client/kafka/consumer/HonoKafkaConsumerTest.java index 7360459ca7..5e64c54ac0 100644 --- a/clients/kafka-common/src/test/java/org/eclipse/hono/client/kafka/consumer/HonoKafkaConsumerTest.java +++ b/clients/kafka-common/src/test/java/org/eclipse/hono/client/kafka/consumer/HonoKafkaConsumerTest.java @@ -13,6 +13,10 @@ package org.eclipse.hono.client.kafka.consumer; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import static com.google.common.truth.Truth.assertThat; @@ -22,17 +26,23 @@ import java.util.Set; import java.util.UUID; import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; import java.util.regex.Pattern; import java.util.stream.IntStream; +import org.apache.kafka.clients.CommonClientConfigs; +import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.OffsetResetStrategy; +import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.config.ConfigException; import org.apache.kafka.common.header.Header; import org.apache.kafka.common.header.internals.RecordHeader; import org.apache.kafka.common.header.internals.RecordHeaders; import org.apache.kafka.common.record.TimestampType; +import org.eclipse.hono.client.kafka.KafkaClientFactory; import org.eclipse.hono.kafka.test.KafkaMockConsumer; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -44,6 +54,7 @@ import org.slf4j.LoggerFactory; import io.vertx.core.Handler; +import io.vertx.core.Promise; import io.vertx.core.Vertx; import io.vertx.core.buffer.Buffer; import io.vertx.core.json.Json; @@ -89,7 +100,7 @@ public void setUp(final Vertx vertx, final TestInfo testInfo) { mockConsumer = new KafkaMockConsumer<>(OffsetResetStrategy.LATEST); consumerConfigProperties = new MessagingKafkaConsumerConfigProperties(); - consumerConfigProperties.setConsumerConfig(Map.of(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "servers")); + consumerConfigProperties.setConsumerConfig(Map.of(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "server:9092")); } /** @@ -108,12 +119,49 @@ public void stopConsumer() { */ @Test public void testConsumerCreationFailsForMissingGroupId() { - final Handler> handler = record -> {}; - final Map 
consumerConfig = consumerConfigProperties.getConsumerConfig("test"); + final var consumerConfig = consumerConfigProperties.getConsumerConfig("test"); consumerConfig.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true"); assertThrows(IllegalArgumentException.class, - () -> new HonoKafkaConsumer<>(vertx, Set.of("test"), handler, consumerConfig)); + () -> new HonoKafkaConsumer<>(vertx, Set.of("test"), r -> {}, consumerConfig)); + } + + @Test + void testStartSucceedsEvenIfConsumerIsNotReadyYet(final VertxTestContext ctx) { + final var consumerConfig = consumerConfigProperties.getConsumerConfig("test"); + consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString()); + final Promise readyTracker = Promise.promise(); + + mockConsumer.updateBeginningOffsets(Map.of(topicPartition, 0L)); + mockConsumer.updateEndOffsets(Map.of(topicPartition, 0L)); + mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(topicPartition)); + + final var error = new KafkaException(new ConfigException(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG)); + @SuppressWarnings("unchecked") + final Supplier> consumerSupplier = mock(Supplier.class); + when(consumerSupplier.get()) + // first attempt to create Kafka client fails because broker names cannot be resolved + .thenThrow(error) + // second attempt succeeds + .thenReturn(mockConsumer); + + consumer = new HonoKafkaConsumer<>(vertx, Set.of(TOPIC), r -> {}, consumerConfig); + consumer.setKafkaConsumerSupplier(consumerSupplier); + consumer.setConsumerCreationRetriesTimeout(KafkaClientFactory.UNLIMITED_RETRIES_DURATION); + consumer.addOnKafkaConsumerReadyHandler(readyTracker); + consumer.start() + .onFailure(ctx::failNow) + .compose(ok -> { + ctx.verify(() -> assertThat(readyTracker.future().isComplete()).isFalse()); + return readyTracker.future(); + }) + .onComplete(ctx.succeeding(ok -> { + ctx.verify(() -> { + assertThat(consumer.getKafkaConsumer()).isNotNull(); + verify(consumerSupplier, times(2)).get(); + }); + ctx.completeNow(); + })); } @Test @@ -170,16 +218,25 @@ void testSetRecordHandlerFailsIfConsumerIsStarted() { */ @Test public void testConsumerCreationWithTopicListSucceeds(final VertxTestContext ctx) { - final Handler> handler = record -> {}; - final Map consumerConfig = consumerConfigProperties.getConsumerConfig("test"); + final var consumerConfig = consumerConfigProperties.getConsumerConfig("test"); consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString()); + final Promise readyTracker = Promise.promise(); mockConsumer.updateBeginningOffsets(Map.of(topicPartition, 0L)); mockConsumer.updateEndOffsets(Map.of(topicPartition, 0L)); mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(topicPartition)); - consumer = new HonoKafkaConsumer<>(vertx, Set.of(TOPIC), handler, consumerConfig); + + consumer = new HonoKafkaConsumer<>(vertx, Set.of(TOPIC), r -> {}, consumerConfig); consumer.setKafkaConsumerSupplier(() -> mockConsumer); - consumer.start().onComplete(ctx.succeedingThenComplete()); + consumer.addOnKafkaConsumerReadyHandler(readyTracker); + consumer.start() + .compose(ok -> readyTracker.future()) + .onComplete(ctx.succeeding(ok -> { + ctx.verify(() -> { + assertThat(consumer.getKafkaConsumer()).isNotNull(); + }); + ctx.completeNow(); + })); } /** @@ -189,27 +246,32 @@ public void testConsumerCreationWithTopicListSucceeds(final VertxTestContext ctx */ @Test public void testConsumerCreationWithTopicPatternSucceeds(final VertxTestContext ctx) { - final Handler> handler = record -> {}; - final Map 
consumerConfig = consumerConfigProperties.getConsumerConfig("test"); + final var consumerConfig = consumerConfigProperties.getConsumerConfig("test"); consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString()); + final Promise readyTracker = Promise.promise(); mockConsumer.updateBeginningOffsets(Map.of(topicPartition, 0L, topic2Partition, 0L)); mockConsumer.updateEndOffsets(Map.of(topicPartition, 0L, topic2Partition, 0L)); mockConsumer.updatePartitions(topicPartition, KafkaMockConsumer.DEFAULT_NODE); mockConsumer.updatePartitions(topic2Partition, KafkaMockConsumer.DEFAULT_NODE); mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(topicPartition, topic2Partition)); - consumer = new HonoKafkaConsumer<>(vertx, TOPIC_PATTERN, handler, consumerConfig); + + consumer = new HonoKafkaConsumer<>(vertx, TOPIC_PATTERN, r -> {}, consumerConfig); consumer.setKafkaConsumerSupplier(() -> mockConsumer); - consumer.start().onComplete(ctx.succeeding(ok -> { - ctx.verify(() -> { - assertThat(consumer.getSubscribedTopicPatternTopics()).isEqualTo(Set.of(TOPIC, TOPIC2)); - assertThat(consumer.isAmongKnownSubscribedTopics(TOPIC)).isTrue(); - assertThat(consumer.isAmongKnownSubscribedTopics(TOPIC2)).isTrue(); - assertThat(consumer.ensureTopicIsAmongSubscribedTopicPatternTopics(TOPIC).succeeded()).isTrue(); - assertThat(consumer.ensureTopicIsAmongSubscribedTopicPatternTopics(TOPIC2).succeeded()).isTrue(); - }); - ctx.completeNow(); - })); + consumer.addOnKafkaConsumerReadyHandler(readyTracker); + consumer.start() + .compose(ok -> readyTracker.future()) + .onComplete(ctx.succeeding(ok -> { + ctx.verify(() -> { + assertThat(consumer.getKafkaConsumer()).isNotNull(); + assertThat(consumer.getSubscribedTopicPatternTopics()).containsExactly(TOPIC, TOPIC2); + assertThat(consumer.isAmongKnownSubscribedTopics(TOPIC)).isTrue(); + assertThat(consumer.isAmongKnownSubscribedTopics(TOPIC2)).isTrue(); + assertThat(consumer.ensureTopicIsAmongSubscribedTopicPatternTopics(TOPIC).succeeded()).isTrue(); + assertThat(consumer.ensureTopicIsAmongSubscribedTopicPatternTopics(TOPIC2).succeeded()).isTrue(); + }); + ctx.completeNow(); + })); } /** @@ -220,34 +282,39 @@ public void testConsumerCreationWithTopicPatternSucceeds(final VertxTestContext */ @Test public void testEnsureTopicIsAmongSubscribedTopicsSucceedsForAddedTopic(final VertxTestContext ctx) { - final Handler> handler = record -> {}; - final Map consumerConfig = consumerConfigProperties.getConsumerConfig("test"); + final var consumerConfig = consumerConfigProperties.getConsumerConfig("test"); consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString()); + final Promise readyTracker = Promise.promise(); mockConsumer.updateBeginningOffsets(Map.of(topicPartition, 0L, topic2Partition, 0L)); mockConsumer.updateEndOffsets(Map.of(topicPartition, 0L, topic2Partition, 0L)); mockConsumer.updatePartitions(topicPartition, KafkaMockConsumer.DEFAULT_NODE); mockConsumer.updatePartitions(topic2Partition, KafkaMockConsumer.DEFAULT_NODE); mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(topicPartition, topic2Partition)); - consumer = new HonoKafkaConsumer<>(vertx, TOPIC_PATTERN, handler, consumerConfig); + + consumer = new HonoKafkaConsumer<>(vertx, TOPIC_PATTERN, r -> {}, consumerConfig); consumer.setKafkaConsumerSupplier(() -> mockConsumer); - consumer.start().onComplete(ctx.succeeding(ok -> { - ctx.verify(() -> { - assertThat(consumer.getSubscribedTopicPatternTopics()).isEqualTo(Set.of(TOPIC, TOPIC2)); - }); - // now 
update partitions with the one for topic3 - mockConsumer.updatePartitions(topic3Partition, KafkaMockConsumer.DEFAULT_NODE); - mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(topicPartition, topic2Partition, topic3Partition)); - mockConsumer.updateBeginningOffsets(Map.of(topic3Partition, 0L)); - mockConsumer.updateEndOffsets(Map.of(topic3Partition, 0L)); - - consumer.ensureTopicIsAmongSubscribedTopicPatternTopics(TOPIC3).onComplete(ctx.succeeding(v3 -> { + consumer.addOnKafkaConsumerReadyHandler(readyTracker); + consumer.start() + .compose(ok -> readyTracker.future()) + .compose(ok -> { ctx.verify(() -> { - assertThat(consumer.getSubscribedTopicPatternTopics()).isEqualTo(Set.of(TOPIC, TOPIC2, TOPIC3)); + assertThat(consumer.getKafkaConsumer()).isNotNull(); + assertThat(consumer.getSubscribedTopicPatternTopics()).containsExactly(TOPIC, TOPIC2); }); - ctx.completeNow(); + // now update partitions with the one for topic3 + mockConsumer.updatePartitions(topic3Partition, KafkaMockConsumer.DEFAULT_NODE); + mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(topicPartition, topic2Partition, topic3Partition)); + mockConsumer.updateBeginningOffsets(Map.of(topic3Partition, 0L)); + mockConsumer.updateEndOffsets(Map.of(topic3Partition, 0L)); + return consumer.ensureTopicIsAmongSubscribedTopicPatternTopics(TOPIC3); + }) + .onComplete(ctx.succeeding(ok -> { + ctx.verify(() -> { + assertThat(consumer.getSubscribedTopicPatternTopics()).containsExactly(TOPIC, TOPIC2, TOPIC3); + }); + ctx.completeNow(); })); - })); } /** @@ -262,21 +329,31 @@ public void testConsumerInvokesHandlerOnReceivedRecords(final VertxTestContext c final Handler> handler = record -> { receivedRecordsCheckpoint.flag(); }; - final Map consumerConfig = consumerConfigProperties.getConsumerConfig("test"); + final var consumerConfig = consumerConfigProperties.getConsumerConfig("test"); consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString()); + final Promise readyTracker = Promise.promise(); mockConsumer.updateBeginningOffsets(Map.of(topicPartition, 0L)); mockConsumer.updateEndOffsets(Map.of(topicPartition, 0L)); mockConsumer.setRebalancePartitionAssignmentAfterSubscribe(List.of(topicPartition)); consumer = new HonoKafkaConsumer<>(vertx, Set.of(TOPIC), handler, consumerConfig); consumer.setKafkaConsumerSupplier(() -> mockConsumer); - consumer.start().onComplete(ctx.succeeding(ok -> { - mockConsumer.schedulePollTask(() -> { - IntStream.range(0, numTestRecords).forEach(offset -> { - mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, offset, "key_" + offset, Buffer.buffer())); + consumer.addOnKafkaConsumerReadyHandler(readyTracker); + consumer.start() + .compose(ok -> readyTracker.future()) + .onComplete(ctx.succeeding(ok -> { + ctx.verify(() -> assertThat(consumer.getKafkaConsumer()).isNotNull()); + mockConsumer.schedulePollTask(() -> { + IntStream.range(0, numTestRecords).forEach(offset -> { + mockConsumer.addRecord(new ConsumerRecord<>( + TOPIC, + PARTITION, + offset, + "key_" + offset, + Buffer.buffer("payload " + offset))); + }); }); - }); - })); + })); } /** @@ -292,8 +369,9 @@ public void testConsumerSkipsHandlerInvocationOnReceivingExpiredRecords(final Ve receivedRecordsCheckpoint.flag(); }; final Checkpoint expiredRecordCheckpoint = ctx.checkpoint(1); - final Map consumerConfig = consumerConfigProperties.getConsumerConfig("test"); + final var consumerConfig = consumerConfigProperties.getConsumerConfig("test"); consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, 
UUID.randomUUID().toString()); + final Promise readyTracker = Promise.promise(); mockConsumer.updateBeginningOffsets(Map.of(topicPartition, 0L)); mockConsumer.updateEndOffsets(Map.of(topicPartition, 0L)); @@ -305,15 +383,24 @@ protected void onRecordHandlerSkippedForExpiredRecord(final KafkaConsumerRecord< } }; consumer.setKafkaConsumerSupplier(() -> mockConsumer); - consumer.start().onComplete(ctx.succeeding(ok -> { - mockConsumer.schedulePollTask(() -> { - // add record with elapsed ttl - mockConsumer.addRecord(createRecordWithElapsedTtl()); - IntStream.range(1, numNonExpiredTestRecords + 1).forEach(offset -> { - mockConsumer.addRecord(new ConsumerRecord<>(TOPIC, PARTITION, offset, "key_" + offset, Buffer.buffer())); + consumer.addOnKafkaConsumerReadyHandler(readyTracker); + consumer.start() + .compose(ok -> readyTracker.future()) + .onComplete(ctx.succeeding(ok -> { + ctx.verify(() -> assertThat(consumer.getKafkaConsumer()).isNotNull()); + mockConsumer.schedulePollTask(() -> { + // add record with elapsed ttl + mockConsumer.addRecord(createRecordWithElapsedTtl()); + IntStream.range(1, numNonExpiredTestRecords + 1).forEach(offset -> { + mockConsumer.addRecord(new ConsumerRecord<>( + TOPIC, + PARTITION, + offset, + "key_" + offset, + Buffer.buffer("payload " + offset))); + }); }); - }); - })); + })); } private ConsumerRecord createRecordWithElapsedTtl() { diff --git a/clients/kafka-common/src/test/java/org/eclipse/hono/client/kafka/producer/AbstractKafkaBasedMessageSenderTest.java b/clients/kafka-common/src/test/java/org/eclipse/hono/client/kafka/producer/AbstractKafkaBasedMessageSenderTest.java index 3f70eddc10..2d2fb96489 100644 --- a/clients/kafka-common/src/test/java/org/eclipse/hono/client/kafka/producer/AbstractKafkaBasedMessageSenderTest.java +++ b/clients/kafka-common/src/test/java/org/eclipse/hono/client/kafka/producer/AbstractKafkaBasedMessageSenderTest.java @@ -14,12 +14,19 @@ package org.eclipse.hono.client.kafka.producer; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import static com.google.common.truth.Truth.assertThat; +import java.net.HttpURLConnection; +import java.time.Duration; import java.util.HashMap; import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; import org.apache.kafka.clients.producer.MockProducer; import org.apache.kafka.common.errors.AuthorizationException; @@ -35,10 +42,13 @@ import io.opentracing.Tracer; import io.opentracing.noop.NoopSpan; import io.opentracing.noop.NoopTracerFactory; +import io.vertx.core.Promise; import io.vertx.core.Vertx; import io.vertx.core.buffer.Buffer; +import io.vertx.junit5.Timeout; import io.vertx.junit5.VertxExtension; import io.vertx.junit5.VertxTestContext; +import io.vertx.kafka.client.producer.KafkaProducer; /** @@ -46,6 +56,7 @@ * */ @ExtendWith(VertxExtension.class) +@Timeout(value = 5, timeUnit = TimeUnit.SECONDS) public class AbstractKafkaBasedMessageSenderTest { private static final String PRODUCER_NAME = "test-producer"; @@ -65,12 +76,9 @@ public void setUp() { private CachingKafkaProducerFactory newProducerFactory( final MockProducer mockProducer) { - return CachingKafkaProducerFactory - .testFactory(vertxMock, (n, c) -> KafkaClientUnitTestHelper.newKafkaProducer(mockProducer)); - } - - private AbstractKafkaBasedMessageSender newSender(final MockProducer mockProducer) { - return newSender(newProducerFactory(mockProducer)); + return 
CachingKafkaProducerFactory.testFactory( + vertxMock, + (n, c) -> KafkaClientUnitTestHelper.newKafkaProducer(mockProducer)); } private AbstractKafkaBasedMessageSender newSender(final KafkaProducerFactory factory) { @@ -80,100 +88,103 @@ private AbstractKafkaBasedMessageSender newSender(final KafkaProducerFac /** * Verifies that on start up a producer is created which is closed on shut down. + * + * @param ctx The vert.x test context. */ @Test - public void testLifecycle() { + public void testLifecycle(final VertxTestContext ctx) { + final var mockProducer = KafkaClientUnitTestHelper.newMockProducer(true); final var factory = newProducerFactory(mockProducer); final var sender = newSender(factory); + final Promise readyTracker = Promise.promise(); + sender.addOnKafkaProducerReadyHandler(readyTracker); assertThat(factory.getProducer(PRODUCER_NAME).isEmpty()).isTrue(); - sender.start(); - assertThat(factory.getProducer(PRODUCER_NAME).isPresent()).isTrue(); - sender.stop(); - assertThat(factory.getProducer(PRODUCER_NAME).isEmpty()).isTrue(); + sender.start() + .compose(ok -> readyTracker.future()) + .compose(ok -> { + ctx.verify(() -> assertThat(factory.getProducer(PRODUCER_NAME).isPresent()) + .isTrue()); + return sender.stop(); + }) + .onComplete(ctx.succeeding(ok -> { + ctx.verify(() -> assertThat(factory.getProducer(PRODUCER_NAME).isPresent()).isFalse()); + ctx.completeNow(); + })); } - /** - * Verifies that the send method returns the underlying error wrapped in a {@link ServerErrorException}. - * - * @param ctx The vert.x test context. - */ + @SuppressWarnings("unchecked") @Test - public void testSendFailsWithTheExpectedError(final VertxTestContext ctx) { + void testStartSucceedsEvenIfProducerIsNotReadyYet(final VertxTestContext ctx) { + + final Promise> producer = Promise.promise(); + final KafkaProducerFactory factory = mock(KafkaProducerFactory.class); + when(factory.getOrCreateProducerWithRetries( + anyString(), + any(KafkaProducerConfigProperties.class), + any(Supplier.class), + any(Duration.class))) + .thenReturn(producer.future()); + final var sender = newSender(factory); + final Promise readyTracker = Promise.promise(); + sender.addOnKafkaProducerReadyHandler(readyTracker); + + sender.start() + .compose(ok -> { + ctx.verify(() -> assertThat(readyTracker.future().isComplete()).isFalse()); + // WHEN the creation of the producer finally succeeds + producer.complete(mock(KafkaProducer.class)); + return readyTracker.future(); + }) + .onComplete(ctx.succeedingThenComplete()); - // GIVEN a sender sending a message - final RuntimeException expectedError = new RuntimeException("boom"); + } + + private void testSendFails( + final RuntimeException sendError, + final int expectedErrorCode, + final boolean expectProducerToBeClosed) { + + final VertxTestContext ctx = new VertxTestContext(); + // GIVEN a sender final var mockProducer = KafkaClientUnitTestHelper.newMockProducer(false); - final var sender = newSender(mockProducer); - - sender.sendAndWaitForOutcome("topic", "tenant", "device", null, Map.of(), NoopSpan.INSTANCE) - .onComplete(ctx.failing(t -> { - ctx.verify(() -> { - // THEN it fails with the expected error - assertThat(t).isInstanceOf(ServerErrorException.class); - assertThat(ServiceInvocationException.extractStatusCode(t)).isEqualTo(503); - assertThat(t.getCause()).isEqualTo(expectedError); - }); - ctx.completeNow(); - })); - - // WHEN the send operation fails - mockProducer.errorNext(expectedError); + final var factory = newProducerFactory(mockProducer); + final var sender = 
newSender(factory); + + // WHEN sending a message fails + final var result = sender.sendAndWaitForOutcome("topic", "tenant", "device", null, Map.of(), NoopSpan.INSTANCE); + mockProducer.errorNext(sendError); + + result.onComplete(ctx.failing(t -> { + ctx.verify(() -> { + // THEN client sees the expected error + assertThat(t).isInstanceOf(ServerErrorException.class); + assertThat(ServiceInvocationException.extractStatusCode(t)).isEqualTo(expectedErrorCode); + assertThat(t.getCause()).isEqualTo(sendError); + assertThat(factory.getProducer(PRODUCER_NAME).isEmpty()).isEqualTo(expectProducerToBeClosed); + assertThat(mockProducer.closed()).isEqualTo(expectProducerToBeClosed); + }); + ctx.completeNow(); + })); } /** - * Verifies that the producer is closed when sending of a message fails with a fatal error. - * - * @param ctx The vert.x test context. + * Verifies that the send method returns the underlying error wrapped in a {@link ServerErrorException}. */ @Test - public void testProducerIsClosedOnFatalError(final VertxTestContext ctx) { + public void testSendFailsWithTheExpectedError() { - final AuthorizationException expectedError = new AuthorizationException("go away"); - - // GIVEN a sender sending a message - final var mockProducer = KafkaClientUnitTestHelper.newMockProducer(false); - final var factory = newProducerFactory(mockProducer); - newSender(factory).sendAndWaitForOutcome("topic", "tenant", "device", null, Map.of(), NoopSpan.INSTANCE) - .onComplete(ctx.failing(t -> { - ctx.verify(() -> { - // THEN the producer is removed and closed - assertThat(factory.getProducer(PRODUCER_NAME).isEmpty()).isTrue(); - assertThat(mockProducer.closed()).isTrue(); - }); - ctx.completeNow(); - })); - - // WHEN the send operation fails - mockProducer.errorNext(expectedError); + testSendFails(new RuntimeException("boom"), HttpURLConnection.HTTP_UNAVAILABLE, false); } /** - * Verifies that the producer is not closed when sending of a message fails with a non-fatal error. - * - * @param ctx The vert.x test context. + * Verifies that the producer is closed when sending of a message fails with a fatal error. 
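The consolidated testSendFails helper above turns the fatal/non-fatal distinction into a single assertion. Covering a further error class would look as follows; whether an exception counts as fatal is decided by CachingKafkaProducerFactory.isFatalError, and ProducerFencedException is assumed here to be among the fatal ones:

import java.net.HttpURLConnection;

import org.apache.kafka.common.errors.ProducerFencedException;

testSendFails(
        new ProducerFencedException("another producer with the same transactional id took over"),
        HttpURLConnection.HTTP_UNAVAILABLE,
        true);    // fatal: the cached producer is expected to be closed and removed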
*/ @Test - public void testProducerIsNotClosedOnNonFatalError(final VertxTestContext ctx) { - - final RuntimeException expectedError = new RuntimeException("foo"); + public void testProducerIsClosedOnFatalError() { - // GIVEN a sender sending a message - final var mockProducer = KafkaClientUnitTestHelper.newMockProducer(false); - final var factory = newProducerFactory(mockProducer); - newSender(factory).sendAndWaitForOutcome("topic", "tenant", "device", null, Map.of(), NoopSpan.INSTANCE) - .onComplete(ctx.failing(t -> { - ctx.verify(() -> { - // THEN the producer is present and still open - assertThat(factory.getProducer(PRODUCER_NAME).isPresent()).isTrue(); - assertThat(mockProducer.closed()).isFalse(); - }); - ctx.completeNow(); - })); - - // WHEN the send operation fails - mockProducer.errorNext(expectedError); + testSendFails(new AuthorizationException("go away"), HttpURLConnection.HTTP_UNAVAILABLE, true); } /** @@ -187,8 +198,12 @@ public void testThatCreationTimeIsAddedWhenNotSet(final VertxTestContext ctx) { // WHEN sending a message with no properties final var mockProducer = KafkaClientUnitTestHelper.newMockProducer(true); final var factory = newProducerFactory(mockProducer); - - newSender(factory).sendAndWaitForOutcome("topic", "tenant", "device", null, Map.of(), NoopSpan.INSTANCE) + final Promise readyTracker = Promise.promise(); + final var sender = newSender(factory); + sender.addOnKafkaProducerReadyHandler(readyTracker); + sender.start() + .compose(ok -> readyTracker.future()) + .compose(ok -> sender.sendAndWaitForOutcome("topic", "tenant", "device", null, Map.of(), NoopSpan.INSTANCE)) .onComplete(ctx.succeeding(t -> { ctx.verify(() -> { final Headers headers = mockProducer.history().get(0).headers(); @@ -216,8 +231,12 @@ public void testThatCreationTimeIsNotChanged(final VertxTestContext ctx) { // WHEN sending the message final var mockProducer = KafkaClientUnitTestHelper.newMockProducer(true); final var factory = newProducerFactory(mockProducer); - - newSender(factory).sendAndWaitForOutcome("topic", "tenant", "device", null, properties, NoopSpan.INSTANCE) + final Promise readyTracker = Promise.promise(); + final var sender = newSender(factory); + sender.addOnKafkaProducerReadyHandler(readyTracker); + sender.start() + .compose(ok -> readyTracker.future()) + .compose(ok -> sender.sendAndWaitForOutcome("topic", "tenant", "device", null, properties, NoopSpan.INSTANCE)) .onComplete(ctx.succeeding(t -> { ctx.verify(() -> { // THEN the creation time is preserved diff --git a/clients/notification-kafka/src/main/java/org/eclipse/hono/client/notification/kafka/KafkaBasedNotificationReceiver.java b/clients/notification-kafka/src/main/java/org/eclipse/hono/client/notification/kafka/KafkaBasedNotificationReceiver.java index 40d2bb8c41..79bdb4bea4 100644 --- a/clients/notification-kafka/src/main/java/org/eclipse/hono/client/notification/kafka/KafkaBasedNotificationReceiver.java +++ b/clients/notification-kafka/src/main/java/org/eclipse/hono/client/notification/kafka/KafkaBasedNotificationReceiver.java @@ -72,8 +72,8 @@ public void registerConsumer( final NotificationType notificationType, final Handler consumer) { - if (isStarted()) { - throw new IllegalStateException("consumers cannot be added when receiver is already started."); + if (!lifecycleStatus.isStopped()) { + throw new IllegalStateException("consumers cannot be added when receiver is already started"); } addTopic(NotificationTopicHelper.getTopicName(notificationType)); diff --git 
a/clients/notification-kafka/src/main/java/org/eclipse/hono/client/notification/kafka/KafkaBasedNotificationSender.java b/clients/notification-kafka/src/main/java/org/eclipse/hono/client/notification/kafka/KafkaBasedNotificationSender.java index b063d76aaf..d7e7705646 100644 --- a/clients/notification-kafka/src/main/java/org/eclipse/hono/client/notification/kafka/KafkaBasedNotificationSender.java +++ b/clients/notification-kafka/src/main/java/org/eclipse/hono/client/notification/kafka/KafkaBasedNotificationSender.java @@ -56,9 +56,9 @@ public Future publish(final AbstractNotification notification) { Objects.requireNonNull(notification); - if (isStopped()) { + if (!lifecycleStatus.isStarted()) { return Future.failedFuture( - new ServerErrorException(HttpURLConnection.HTTP_UNAVAILABLE, "sender already stopped")); + new ServerErrorException(HttpURLConnection.HTTP_UNAVAILABLE, "sender not started")); } return createProducerRecord(notification) diff --git a/clients/notification-kafka/src/test/java/org/eclipse/hono/client/notification/kafka/KafkaBasedNotificationReceiverTest.java b/clients/notification-kafka/src/test/java/org/eclipse/hono/client/notification/kafka/KafkaBasedNotificationReceiverTest.java index b44ef94736..441e08c136 100644 --- a/clients/notification-kafka/src/test/java/org/eclipse/hono/client/notification/kafka/KafkaBasedNotificationReceiverTest.java +++ b/clients/notification-kafka/src/test/java/org/eclipse/hono/client/notification/kafka/KafkaBasedNotificationReceiverTest.java @@ -36,6 +36,7 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; +import io.vertx.core.Promise; import io.vertx.core.Vertx; import io.vertx.core.json.JsonObject; import io.vertx.junit5.Checkpoint; @@ -80,19 +81,22 @@ void setUp(final Vertx vertx) { @Test public void testCreateConsumer(final VertxTestContext ctx) { + final Promise readyTracker = Promise.promise(); final var receiver = createReceiver(); - receiver.registerConsumer(TenantChangeNotification.TYPE, notification -> {}); - + receiver.addOnKafkaConsumerReadyHandler(readyTracker); receiver.start() - .onComplete(ctx.succeeding(v -> ctx.verify(() -> { + .compose(ok -> readyTracker.future()) + .onComplete(ctx.succeeding(v -> { + ctx.verify(() -> { final Set subscription = mockConsumer.subscription(); assertThat(subscription).isNotNull(); assertThat(subscription) .contains(NotificationTopicHelper.getTopicName(TenantChangeNotification.TYPE)); assertThat(mockConsumer.closed()).isFalse(); - ctx.completeNow(); - }))); + }); + ctx.completeNow(); + })); } /** @@ -103,16 +107,17 @@ public void testCreateConsumer(final VertxTestContext ctx) { @Test public void testStopClosesConsumer(final VertxTestContext ctx) { + final Promise readyTracker = Promise.promise(); final var receiver = createReceiver(); - receiver.registerConsumer(TenantChangeNotification.TYPE, notification -> {}); - + receiver.addOnKafkaConsumerReadyHandler(readyTracker); receiver.start() - .compose(v -> receiver.stop()) - .onComplete(ctx.succeeding(v -> ctx.verify(() -> { - assertThat(mockConsumer.closed()).isTrue(); - ctx.completeNow(); - }))); + .compose(ok -> readyTracker.future()) + .compose(v -> receiver.stop()) + .onComplete(ctx.succeeding(v -> ctx.verify(() -> { + assertThat(mockConsumer.closed()).isTrue(); + ctx.completeNow(); + }))); } /** @@ -192,6 +197,7 @@ private KafkaBasedNotificationReceiver createReceiver() { private ConsumerRecord createKafkaRecord( final AbstractNotification notification, final long offset) { + final var json = 
JsonObject.mapFrom(notification); final String topicName = NotificationTopicHelper.getTopicName(notification.getType()); return new ConsumerRecord<>(topicName, 0, offset, null, json); diff --git a/clients/notification-kafka/src/test/java/org/eclipse/hono/client/notification/kafka/KafkaBasedNotificationSenderTest.java b/clients/notification-kafka/src/test/java/org/eclipse/hono/client/notification/kafka/KafkaBasedNotificationSenderTest.java index 2e6ddb89ea..626602ee94 100644 --- a/clients/notification-kafka/src/test/java/org/eclipse/hono/client/notification/kafka/KafkaBasedNotificationSenderTest.java +++ b/clients/notification-kafka/src/test/java/org/eclipse/hono/client/notification/kafka/KafkaBasedNotificationSenderTest.java @@ -22,11 +22,9 @@ import java.util.Map; import org.apache.kafka.clients.producer.MockProducer; -import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.common.serialization.StringSerializer; import org.eclipse.hono.client.ServerErrorException; import org.eclipse.hono.client.kafka.producer.CachingKafkaProducerFactory; -import org.eclipse.hono.client.kafka.producer.KafkaProducerFactory; import org.eclipse.hono.kafka.test.KafkaClientUnitTestHelper; import org.eclipse.hono.notification.AbstractNotification; import org.eclipse.hono.notification.NotificationConstants; @@ -39,6 +37,7 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; +import io.vertx.core.Promise; import io.vertx.core.Vertx; import io.vertx.core.json.JsonObject; import io.vertx.junit5.VertxExtension; @@ -71,96 +70,96 @@ public void setUp() { /** * Verifies that {@link KafkaBasedNotificationSender#start()} creates a producer and * * {@link KafkaBasedNotificationSender#stop()} closes it. + * + * @param ctx The vert.x test context. */ @Test - public void testLifecycle() { - final MockProducer mockProducer = newMockProducer(); - final CachingKafkaProducerFactory factory = newProducerFactory(mockProducer); - final KafkaBasedNotificationSender sender = new KafkaBasedNotificationSender(factory, config); + public void testLifecycle(final VertxTestContext ctx) { + final var mockProducer = newMockProducer(); + final var factory = newProducerFactory(mockProducer); + final var sender = new KafkaBasedNotificationSender(factory, config); + final Promise readyTracker = Promise.promise(); + sender.addOnKafkaProducerReadyHandler(readyTracker); assertThat(factory.getProducer(KafkaBasedNotificationSender.PRODUCER_NAME).isPresent()).isFalse(); - sender.start(); - assertThat(factory.getProducer(KafkaBasedNotificationSender.PRODUCER_NAME).isPresent()).isTrue(); - sender.stop(); - assertThat(factory.getProducer(KafkaBasedNotificationSender.PRODUCER_NAME).isPresent()).isFalse(); + sender.start() + .compose(ok -> readyTracker.future()) + .compose(ok -> { + ctx.verify(() -> assertThat(factory.getProducer(KafkaBasedNotificationSender.PRODUCER_NAME).isPresent()) + .isTrue()); + return sender.stop(); + }) + .onComplete(ctx.succeeding(ok -> { + ctx.verify(() -> assertThat(factory.getProducer(KafkaBasedNotificationSender.PRODUCER_NAME).isPresent()).isFalse()); + ctx.completeNow(); + })); } /** * Verifies that the expected Kafka record is created when publishing a {@link TenantChangeNotification}. - * - * @param ctx The vert.x test context. 
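The testLifecycle rewrite above establishes the start/ready/stop sequencing that the rest of this change applies everywhere. A minimal sketch of the pattern in isolation, assuming only the API surface visible in this diff (the wrapper method startAndStop is illustrative):

    // Start a Kafka based sender, wait until its producer is actually ready, then stop it.
    // start() merely initiates startup; readiness is signalled separately through the
    // handler registered via addOnKafkaProducerReadyHandler(...).
    static Future<Void> startAndStop(final KafkaBasedNotificationSender sender) {
        final Promise<Void> readyTracker = Promise.promise();
        sender.addOnKafkaProducerReadyHandler(readyTracker);
        return sender.start()
                .compose(ok -> readyTracker.future())
                // at this point the producer exists in the factory's cache
                .compose(ok -> sender.stop());
    }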
*/ @Test - public void testProducerRecordForTenantNotification(final VertxTestContext ctx) { + public void testProducerRecordForTenantNotification() { - final TenantChangeNotification notification = new TenantChangeNotification(CHANGE, TENANT_ID, CREATION_TIME, - ENABLED); - testProducerRecordForNotification(ctx, notification, TENANT_ID); + final var notification = new TenantChangeNotification(CHANGE, TENANT_ID, CREATION_TIME, ENABLED); + testProducerRecordForNotification(notification, TENANT_ID); } /** * Verifies that the expected Kafka record is created when publishing a {@link DeviceChangeNotification}. - * - * @param ctx The vert.x test context. */ @Test - public void testProducerRecordForDeviceNotification(final VertxTestContext ctx) { + public void testProducerRecordForDeviceNotification() { - final DeviceChangeNotification notification = new DeviceChangeNotification(CHANGE, TENANT_ID, DEVICE_ID, - CREATION_TIME, ENABLED); - testProducerRecordForNotification(ctx, notification, DEVICE_ID); + final var notification = new DeviceChangeNotification(CHANGE, TENANT_ID, DEVICE_ID, CREATION_TIME, ENABLED); + testProducerRecordForNotification(notification, DEVICE_ID); } /** * Verifies that the expected Kafka record is created when publishing a {@link CredentialsChangeNotification}. - * - * @param ctx The vert.x test context. */ @Test - public void testProducerRecordForCredentialsNotification(final VertxTestContext ctx) { + public void testProducerRecordForCredentialsNotification() { - final CredentialsChangeNotification notification = new CredentialsChangeNotification(TENANT_ID, DEVICE_ID, - CREATION_TIME); - testProducerRecordForNotification(ctx, notification, DEVICE_ID); + final var notification = new CredentialsChangeNotification(TENANT_ID, DEVICE_ID, CREATION_TIME); + testProducerRecordForNotification(notification, DEVICE_ID); } /** * Verifies that the expected Kafka record is created when publishing a {@link AllDevicesOfTenantDeletedNotification}. - * - * @param ctx The vert.x test context. 
*/ @Test - public void testProducerRecordForAllDevicesOfTenantDeletedNotification(final VertxTestContext ctx) { + public void testProducerRecordForAllDevicesOfTenantDeletedNotification() { - final AllDevicesOfTenantDeletedNotification notification = new AllDevicesOfTenantDeletedNotification(TENANT_ID, - CREATION_TIME); - testProducerRecordForNotification(ctx, notification, TENANT_ID); + final var notification = new AllDevicesOfTenantDeletedNotification(TENANT_ID, CREATION_TIME); + testProducerRecordForNotification(notification, TENANT_ID); } private void testProducerRecordForNotification( - final VertxTestContext ctx, final AbstractNotification notificationToSend, final String expectedRecordKey) { + final VertxTestContext ctx = new VertxTestContext(); + // GIVEN a sender - final MockProducer mockProducer = newMockProducer(); - final KafkaBasedNotificationSender sender = newSender(mockProducer); + final var mockProducer = newMockProducer(); + final var sender = newSender(mockProducer); // WHEN publishing the notification sender.publish(notificationToSend) - .onComplete(ctx.succeeding(v -> { - // THEN the producer record is created from the given values - ctx.verify(() -> { - final ProducerRecord record = mockProducer.history().get(0); - - assertThat(record.topic()) - .isEqualTo(NotificationTopicHelper.getTopicName(notificationToSend.getType())); - assertThat(record.key()).isEqualTo(expectedRecordKey); - assertThat(record.value().getString(NotificationConstants.JSON_FIELD_TYPE)) - .isEqualTo(notificationToSend.getType().getTypeName()); - }); - ctx.completeNow(); - })); + .onComplete(ctx.succeeding(v -> { + // THEN the producer record is created from the given values + ctx.verify(() -> { + final var record = mockProducer.history().get(0); + + assertThat(record.topic()) + .isEqualTo(NotificationTopicHelper.getTopicName(notificationToSend.getType())); + assertThat(record.key()).isEqualTo(expectedRecordKey); + assertThat(record.value().getString(NotificationConstants.JSON_FIELD_TYPE)) + .isEqualTo(notificationToSend.getType().getTypeName()); + }); + ctx.completeNow(); + })); } /** @@ -169,38 +168,40 @@ private void testProducerRecordForNotification( * @param ctx The vert.x test context. 
*/ @Test - public void testSendFailsWithTheExpectedError(final VertxTestContext ctx) { + public void testPublishFailsWithTheExpectedError(final VertxTestContext ctx) { // GIVEN a sender sending a message - final RuntimeException expectedError = new RuntimeException("boom"); - final MockProducer mockProducer = new MockProducer<>(false, new StringSerializer(), - new JsonObjectSerializer()); - final KafkaBasedNotificationSender sender = newSender(mockProducer); - - sender.publish(new TenantChangeNotification(CHANGE, TENANT_ID, CREATION_TIME, ENABLED)) - .onComplete(ctx.failing(t -> { - ctx.verify(() -> { - // THEN it fails with the expected error - assertThat(t).isInstanceOf(ServerErrorException.class); - assertThat(((ServerErrorException) t).getErrorCode()).isEqualTo(503); - assertThat(t.getCause()).isEqualTo(expectedError); - }); - ctx.completeNow(); - })); + final var mockProducer = newMockProducer(false); + final Promise readyTracker = Promise.promise(); + final var sender = newSender(mockProducer); + sender.addOnKafkaProducerReadyHandler(readyTracker); + + final var result = sender.start() + .compose(ok -> readyTracker.future()) + .compose(ok -> sender.publish(new TenantChangeNotification(CHANGE, TENANT_ID, CREATION_TIME, ENABLED))); // WHEN the send operation fails + final RuntimeException expectedError = new RuntimeException("boom"); mockProducer.errorNext(expectedError); + result.onComplete(ctx.failing(t -> { + ctx.verify(() -> { + // THEN it fails with the expected error + assertThat(t).isInstanceOf(ServerErrorException.class); + assertThat(((ServerErrorException) t).getErrorCode()).isEqualTo(503); + assertThat(t.getCause()).isEqualTo(expectedError); + }); + ctx.completeNow(); + })); } /** - * Verifies that the constructor throws a nullpointer exception if a parameter is {@code null}. + * Verifies that the constructor throws an NPE if a parameter is {@code null}. 
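The failure test above hinges on Kafka's MockProducer running in non-auto-completing mode. The idiom on its own, using the serializers these tests already import (any of the publish/send operations in this diff can play the part of the operation under test):

    // With autoComplete=false, send callbacks stay pending until the test decides the
    // outcome, so the Future under test can be obtained first and failed deterministically.
    final MockProducer<String, JsonObject> mockProducer =
            new MockProducer<>(false, new StringSerializer(), new JsonObjectSerializer());

    // ... start the client, invoke the operation under test and keep its Future ...

    // completes the oldest pending send exceptionally
    mockProducer.errorNext(new RuntimeException("boom"));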
*/ @Test public void testThatConstructorThrowsOnMissingParameter() { - final MockProducer mockProducer = newMockProducer(); - - final KafkaProducerFactory factory = newProducerFactory(mockProducer); + final var mockProducer = newMockProducer(); + final var factory = newProducerFactory(mockProducer); assertThrows(NullPointerException.class, () -> new KafkaBasedNotificationSender(null, config)); assertThrows(NullPointerException.class, () -> new KafkaBasedNotificationSender(factory, null)); @@ -213,25 +214,30 @@ public void testThatConstructorThrowsOnMissingParameter() { */ @Test public void testThatSendThrowsOnMissingMandatoryParameter() { - final MockProducer mockProducer = newMockProducer(); - final KafkaBasedNotificationSender sender = newSender(mockProducer); + final var mockProducer = newMockProducer(); + final var sender = newSender(mockProducer); assertThrows(NullPointerException.class, () -> sender.publish(null)); } private MockProducer newMockProducer() { - return new MockProducer<>(true, new StringSerializer(), new JsonObjectSerializer()); + return newMockProducer(true); + } + + private MockProducer newMockProducer(final boolean autoComplete) { + return new MockProducer<>(autoComplete, new StringSerializer(), new JsonObjectSerializer()); } private CachingKafkaProducerFactory newProducerFactory( final MockProducer mockProducer) { - return CachingKafkaProducerFactory - .testFactory(vertxMock, (n, c) -> KafkaClientUnitTestHelper.newKafkaProducer(mockProducer)); + return CachingKafkaProducerFactory.testFactory( + vertxMock, + (n, c) -> KafkaClientUnitTestHelper.newKafkaProducer(mockProducer)); } private KafkaBasedNotificationSender newSender(final MockProducer mockProducer) { - final CachingKafkaProducerFactory factory = newProducerFactory(mockProducer); + final var factory = newProducerFactory(mockProducer); return new KafkaBasedNotificationSender(factory, config); } diff --git a/clients/telemetry-kafka/src/test/java/org/eclipse/hono/client/telemetry/kafka/KafkaBasedEventSenderTest.java b/clients/telemetry-kafka/src/test/java/org/eclipse/hono/client/telemetry/kafka/KafkaBasedEventSenderTest.java index f256aecde5..7287933437 100644 --- a/clients/telemetry-kafka/src/test/java/org/eclipse/hono/client/telemetry/kafka/KafkaBasedEventSenderTest.java +++ b/clients/telemetry-kafka/src/test/java/org/eclipse/hono/client/telemetry/kafka/KafkaBasedEventSenderTest.java @@ -38,6 +38,7 @@ import io.opentracing.Tracer; import io.opentracing.noop.NoopTracerFactory; +import io.vertx.core.Promise; import io.vertx.core.Vertx; import io.vertx.core.buffer.Buffer; import io.vertx.core.eventbus.EventBus; @@ -95,37 +96,41 @@ public void testSendEventCreatesCorrectRecord(final VertxTestContext ctx) { final var mockProducer = KafkaClientUnitTestHelper.newMockProducer(true); final var factory = newProducerFactory(mockProducer); + final Promise readyTracker = Promise.promise(); final var sender = new KafkaBasedEventSender(vertxMock, factory, kafkaProducerConfig, true, tracer); + sender.addOnKafkaProducerReadyHandler(readyTracker); // WHEN sending a message - sender.sendEvent(tenant, device, contentType, Buffer.buffer(payload), properties, null) - .onComplete(ctx.succeeding(t -> { - ctx.verify(() -> { - // THEN the producer record is created from the given values... 
- final var producerRecord = mockProducer.history().get(0); - - assertThat(producerRecord.key()).isEqualTo(device.getDeviceId()); - assertThat(producerRecord.topic()) - .isEqualTo(new HonoTopic(HonoTopic.Type.EVENT, tenant.getTenantId()).toString()); - assertThat(producerRecord.value().toString()).isEqualTo(payload); - - KafkaClientUnitTestHelper.assertUniqueHeaderWithExpectedValue(producerRecord.headers(), "foo", "bar"); - KafkaClientUnitTestHelper.assertUniqueHeaderWithExpectedValue( - producerRecord.headers(), - MessageHelper.SYS_HEADER_PROPERTY_TTL, - 5000L); - - // ...AND contains the standard headers - KafkaClientUnitTestHelper.assertStandardHeaders( - producerRecord, - device.getDeviceId(), - contentType, - QoS.AT_LEAST_ONCE.ordinal()); - - verify(span).finish(); - }); - ctx.completeNow(); - })); + sender.start() + .compose(ok -> readyTracker.future()) + .compose(ok -> sender.sendEvent(tenant, device, contentType, Buffer.buffer(payload), properties, null)) + .onComplete(ctx.succeeding(t -> { + ctx.verify(() -> { + // THEN the producer record is created from the given values... + final var producerRecord = mockProducer.history().get(0); + + assertThat(producerRecord.key()).isEqualTo(device.getDeviceId()); + assertThat(producerRecord.topic()) + .isEqualTo(new HonoTopic(HonoTopic.Type.EVENT, tenant.getTenantId()).toString()); + assertThat(producerRecord.value().toString()).isEqualTo(payload); + + KafkaClientUnitTestHelper.assertUniqueHeaderWithExpectedValue(producerRecord.headers(), "foo", "bar"); + KafkaClientUnitTestHelper.assertUniqueHeaderWithExpectedValue( + producerRecord.headers(), + MessageHelper.SYS_HEADER_PROPERTY_TTL, + 5000L); + + // ...AND contains the standard headers + KafkaClientUnitTestHelper.assertStandardHeaders( + producerRecord, + device.getDeviceId(), + contentType, + QoS.AT_LEAST_ONCE.ordinal()); + + verify(span).finish(); + }); + ctx.completeNow(); + })); } diff --git a/clients/telemetry-kafka/src/test/java/org/eclipse/hono/client/telemetry/kafka/KafkaBasedTelemetrySenderTest.java b/clients/telemetry-kafka/src/test/java/org/eclipse/hono/client/telemetry/kafka/KafkaBasedTelemetrySenderTest.java index 7da35ca8a8..c2b90b01de 100644 --- a/clients/telemetry-kafka/src/test/java/org/eclipse/hono/client/telemetry/kafka/KafkaBasedTelemetrySenderTest.java +++ b/clients/telemetry-kafka/src/test/java/org/eclipse/hono/client/telemetry/kafka/KafkaBasedTelemetrySenderTest.java @@ -42,6 +42,7 @@ import io.opentracing.Tracer; import io.opentracing.noop.NoopTracerFactory; +import io.vertx.core.Promise; import io.vertx.core.Vertx; import io.vertx.core.buffer.Buffer; import io.vertx.core.eventbus.EventBus; @@ -104,13 +105,17 @@ public void testSendTelemetryCreatesCorrectRecord( final var mockProducer = KafkaClientUnitTestHelper.newMockProducer(true); final var factory = CachingKafkaProducerFactory .testFactory(vertxMock, (n, c) -> KafkaClientUnitTestHelper.newKafkaProducer(mockProducer)); + final Promise readyTracker = Promise.promise(); final var sender = new KafkaBasedTelemetrySender(vertxMock, factory, kafkaProducerConfig, true, tracer); + sender.addOnKafkaProducerReadyHandler(readyTracker); tenant.setResourceLimits(new ResourceLimits() .setMaxTtlTelemetryQoS0(10L) .setMaxTtlTelemetryQoS1(60L)); // WHEN sending telemetry data - sender.sendTelemetry(tenant, device, qos, contentType, Buffer.buffer(payload), properties, null) + sender.start() + .compose(ok -> readyTracker.future()) + .compose(ok -> sender.sendTelemetry(tenant, device, qos, contentType, Buffer.buffer(payload), 
properties, null)) .onComplete(ctx.succeeding(t -> { ctx.verify(() -> { diff --git a/core/src/main/java/org/eclipse/hono/util/LifecycleStatus.java b/core/src/main/java/org/eclipse/hono/util/LifecycleStatus.java index d677ba9c7a..ac2b812bdf 100644 --- a/core/src/main/java/org/eclipse/hono/util/LifecycleStatus.java +++ b/core/src/main/java/org/eclipse/hono/util/LifecycleStatus.java @@ -14,7 +14,11 @@ package org.eclipse.hono.util; +import java.util.Objects; +import java.util.function.Supplier; + import io.vertx.core.AsyncResult; +import io.vertx.core.Future; import io.vertx.core.Handler; import io.vertx.core.Promise; @@ -58,6 +62,8 @@ private synchronized void setState(final Status newState) { /** * Adds a handler to be notified once the tracked component has started up. + *
<p>
+ * The handlers will be invoked with a succeeded result when the {@link #setStarted()} method is called. * * @param handler The handler. */ @@ -69,6 +75,8 @@ public void addOnStartedHandler(final Handler> handler) { /** * Adds a handler to be notified once the tracked component has shut down. + *
<p>
+ * The handlers will be invoked with the outcome passed in to the {@link #setStopped(AsyncResult)} method. * * @param handler The handler. */ @@ -139,6 +147,42 @@ public synchronized boolean setStopping() { } } + /** + * Executes an attempt to stop the tracked component. + *
<p>
+ * This method will execute the given stop action once if the component is in state + * {@link Status#STARTING} or {@link Status#STARTED}. The future returned by the stop action is then + * passed in to the {@link #setStopped(AsyncResult)} method to transition the status + * to {@link Status#STOPPED}. + *
<p>
+ * Note that if this method is invoked concurrently, then only the first invocation's stop + * action will be run and its outcome will determine the returned future's completion status. + * + * @param stopAction The logic implementing the stopping of the component. + * @return A future for conveying the outcome of stopping the component to client code. + * The future will be succeeded if the component is already in the {@link Status#STOPPED} state. Otherwise, + * the future will be completed with the result returned by the stop action. + * @throws NullPointerException if stop action is {@code null}. + */ + public synchronized Future runStopAttempt(final Supplier> stopAction) { + + Objects.requireNonNull(stopAction); + + if (isStopped()) { + return Future.succeededFuture(); + } + + final Promise result = Promise.promise(); + addOnStoppedHandler(result); + + if (setStopping()) { + stopAction.get() + .onComplete(this::setStopped); + } + + return result.future(); + } + /** * Checks if the component is in the process of shutting down. * @@ -150,15 +194,27 @@ public synchronized boolean isStopping() { /** * Marks the tracked component as being shut down. - * + *
<p>
+ * Simply invokes {@link #setStopped(AsyncResult)} with a succeeded future. * @return {@code false} if the component is already in state {@code STOPPED}. */ public synchronized boolean setStopped() { + return setStopped(Future.succeededFuture()); + } + + /** + * Marks the tracked component as being shut down. + * + * @param outcome The outcome of stopping the component. The handlers registered via + * {@link #addOnStoppedHandler(Handler)} will be invoked with the given result. + * @return {@code false} if the component is already in state {@code STOPPED}. + */ + public synchronized boolean setStopped(final AsyncResult outcome) { if (state == Status.STOPPED) { return false; } else { setState(Status.STOPPED); - shutdownTracker.complete(); + shutdownTracker.handle(outcome); return true; } } diff --git a/services/command-router-quarkus/src/main/java/org/eclipse/hono/commandrouter/impl/kafka/InternalKafkaTopicCleanupService.java b/services/command-router-quarkus/src/main/java/org/eclipse/hono/commandrouter/impl/kafka/InternalKafkaTopicCleanupService.java index 876a3013f1..e1663abc53 100644 --- a/services/command-router-quarkus/src/main/java/org/eclipse/hono/commandrouter/impl/kafka/InternalKafkaTopicCleanupService.java +++ b/services/command-router-quarkus/src/main/java/org/eclipse/hono/commandrouter/impl/kafka/InternalKafkaTopicCleanupService.java @@ -231,23 +231,10 @@ private void determineToBeDeletedTopics(final Set allTopics) { @Override public void stop(final Promise stopResult) { - if (lifecycleStatus.isStopped()) { - stopResult.tryComplete(); - return; - } - - lifecycleStatus.addOnStoppedHandler(stopResult); - - if (lifecycleStatus.isStopping()) { - return; - } - - lifecycleStatus.setStopping(); - vertx.cancelTimer(timerId); - Optional.ofNullable(adminClient) - .map(KafkaAdminClient::close) - .orElseGet(Future::succeededFuture) - .onFailure(thr -> LOG.warn("error closing admin client", thr)) - .onSuccess(ok -> lifecycleStatus.setStopped()); + lifecycleStatus.runStopAttempt(() -> Optional.ofNullable(adminClient) + .map(KafkaAdminClient::close) + .orElseGet(Future::succeededFuture) + .onFailure(thr -> LOG.warn("error closing admin client", thr))) + .onComplete(stopResult); } } diff --git a/services/command-router-quarkus/src/main/java/org/eclipse/hono/commandrouter/impl/kafka/KafkaBasedCommandConsumerFactoryImpl.java b/services/command-router-quarkus/src/main/java/org/eclipse/hono/commandrouter/impl/kafka/KafkaBasedCommandConsumerFactoryImpl.java index 550726f6ef..5bfd4da42a 100644 --- a/services/command-router-quarkus/src/main/java/org/eclipse/hono/commandrouter/impl/kafka/KafkaBasedCommandConsumerFactoryImpl.java +++ b/services/command-router-quarkus/src/main/java/org/eclipse/hono/commandrouter/impl/kafka/KafkaBasedCommandConsumerFactoryImpl.java @@ -15,6 +15,7 @@ import java.time.Duration; import java.util.Map; import java.util.Objects; +import java.util.UUID; import java.util.regex.Pattern; import org.apache.kafka.clients.consumer.ConsumerConfig; @@ -34,7 +35,9 @@ import org.eclipse.hono.commandrouter.CommandRouterMetrics; import org.eclipse.hono.commandrouter.CommandTargetMapper; import org.eclipse.hono.tracing.TracingHelper; +import org.eclipse.hono.util.LifecycleStatus; import org.eclipse.hono.util.MessagingType; +import org.eclipse.microprofile.health.HealthCheckResponse; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -42,12 +45,16 @@ import io.opentracing.SpanContext; import io.opentracing.Tracer; import io.opentracing.tag.Tags; +import io.vertx.core.AsyncResult; 
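For reference, runStopAttempt(...) is meant to back a component's stop hook directly, as the InternalKafkaTopicCleanupService change above demonstrates. The general shape, where closeResources() is a hypothetical stand-in for component-specific shutdown logic:

    // The stop action runs at most once; concurrent and repeated stop() calls are all
    // completed with the same outcome via the registered on-stopped handlers.
    @Override
    public void stop(final Promise<Void> stopResult) {
        lifecycleStatus.runStopAttempt(this::closeResources)
                .onComplete(stopResult);
    }

    private Future<Void> closeResources() {
        // hypothetical component-specific shutdown logic
        return Future.succeededFuture();
    }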
import io.vertx.core.CompositeFuture; import io.vertx.core.Context; import io.vertx.core.Future; +import io.vertx.core.Handler; +import io.vertx.core.Promise; import io.vertx.core.Vertx; import io.vertx.core.buffer.Buffer; import io.vertx.ext.healthchecks.HealthCheckHandler; +import io.vertx.ext.healthchecks.Status; import io.vertx.kafka.client.common.impl.Helper; /** @@ -76,7 +83,7 @@ public class KafkaBasedCommandConsumerFactoryImpl implements CommandConsumerFact private final KafkaBasedInternalCommandSender internalCommandSender; private final KafkaBasedCommandResponseSender kafkaBasedCommandResponseSender; private final KafkaClientMetricsSupport kafkaClientMetricsSupport; - + private final LifecycleStatus lifecycleStatus = new LifecycleStatus(); private String groupId = DEFAULT_GROUP_ID; private KafkaBasedMappingAndDelegatingCommandHandler commandHandler; private AsyncHandlingAutoCommitKafkaConsumer kafkaConsumer; @@ -96,7 +103,7 @@ public class KafkaBasedCommandConsumerFactoryImpl implements CommandConsumerFact * @param metrics The component to use for reporting metrics. * @param kafkaClientMetricsSupport The Kafka metrics support. * @param tracer The tracer instance. - * @throws NullPointerException if any of the parameters except internalKafkaTopicCleanupService is {@code null}. + * @throws NullPointerException if any of the parameters are {@code null}. */ public KafkaBasedCommandConsumerFactoryImpl( final Vertx vertx, @@ -132,6 +139,17 @@ public KafkaBasedCommandConsumerFactoryImpl( tracer); } + /** + * Adds a handler to be invoked with a succeeded future once this factory is ready to be used. + * + * @param handler The handler to invoke. The handler will never be invoked with a failed future. + */ + public final void addOnFactoryReadyHandler(final Handler> handler) { + if (handler != null) { + lifecycleStatus.addOnStartedHandler(handler); + } + } + /** * {@inheritDoc} */ @@ -148,6 +166,17 @@ public void registerLivenessChecks(final HealthCheckHandler livenessHandler) { public void registerReadinessChecks(final HealthCheckHandler readinessHandler) { internalCommandSender.registerReadinessChecks(readinessHandler); kafkaBasedCommandResponseSender.registerReadinessChecks(readinessHandler); + readinessHandler.register( + "command-consumer-factory-kafka-consumer-%s".formatted(UUID.randomUUID()), + status -> { + if (kafkaConsumer == null) { + // this factory has not been started yet + status.tryComplete(Status.KO()); + } else { + final var response = kafkaConsumer.checkReadiness(); + status.tryComplete(new Status().setOk(response.getStatus() == HealthCheckResponse.Status.UP)); + } + }); } /** @@ -177,25 +206,46 @@ public MessagingType getMessagingType() { /** * {@inheritDoc} * - * @return The combined outcome of starting the Kafka consumer, the command handler and the internal Kafka - * topic cleanup service. + * @return The combined outcome of starting the Kafka consumer and the command handler. 
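With addOnFactoryReadyHandler(...) in place, callers can defer use of the factory until its Kafka clients have actually connected; the integration tests further down follow exactly this shape. A sketch:

    final Promise<Void> factoryReady = Promise.promise();
    consumerFactory.addOnFactoryReadyHandler(factoryReady);
    consumerFactory.start()
            // started, but the underlying senders/consumer may not be connected yet
            .compose(ok -> factoryReady.future())
            // everything reported ready; safe to create command consumers
            .compose(ok -> consumerFactory.createCommandConsumer(tenantId, null));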
*/ @Override public Future start() { + if (lifecycleStatus.isStarting()) { + return Future.succeededFuture(); + } else if (!lifecycleStatus.setStarting()) { + return Future.failedFuture(new IllegalStateException("factory is already started/stopping")); + } final Context context = Vertx.currentContext(); if (context == null) { return Future.failedFuture(new IllegalStateException("factory must be started in a Vert.x context")); } - final KafkaCommandProcessingQueue commandQueue = new KafkaCommandProcessingQueue(context); - commandHandler = new KafkaBasedMappingAndDelegatingCommandHandler(vertx, tenantClient, commandQueue, - commandTargetMapper, internalCommandSender, kafkaBasedCommandResponseSender, metrics, tracer); + final var commandQueue = new KafkaCommandProcessingQueue(context); + final Promise internalCommandSenderTracker = Promise.promise(); + internalCommandSender.addOnKafkaProducerReadyHandler(internalCommandSenderTracker); + final Promise commandResponseSenderTracker = Promise.promise(); + kafkaBasedCommandResponseSender.addOnKafkaProducerReadyHandler(commandResponseSenderTracker); + commandHandler = new KafkaBasedMappingAndDelegatingCommandHandler( + vertx, + tenantClient, + commandQueue, + commandTargetMapper, + internalCommandSender, + kafkaBasedCommandResponseSender, + metrics, + tracer); final Map consumerConfig = kafkaConsumerConfig.getConsumerConfig("command"); consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); consumerConfig.putIfAbsent(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, CooperativeStickyAssignor.class.getName()); consumerConfig.putIfAbsent(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); - kafkaConsumer = new AsyncHandlingAutoCommitKafkaConsumer<>(vertx, COMMANDS_TOPIC_PATTERN, - commandHandler::mapAndDelegateIncomingCommandMessage, consumerConfig); + + final Promise consumerTracker = Promise.promise(); + kafkaConsumer = new AsyncHandlingAutoCommitKafkaConsumer<>( + vertx, + COMMANDS_TOPIC_PATTERN, + commandHandler::mapAndDelegateIncomingCommandMessage, + consumerConfig); + kafkaConsumer.addOnKafkaConsumerReadyHandler(consumerTracker); kafkaConsumer.setPollTimeout(Duration.ofMillis(kafkaConsumerConfig.getPollTimeout())); kafkaConsumer.setConsumerCreationRetriesTimeout(KafkaClientFactory.UNLIMITED_RETRIES_DURATION); kafkaConsumer.setMetricsSupport(kafkaClientMetricsSupport); @@ -204,20 +254,26 @@ public Future start() { kafkaConsumer.setOnPartitionsLostHandler( partitions -> commandQueue.setRevokedPartitions(Helper.to(partitions))); - return CompositeFuture.all(commandHandler.start(), kafkaConsumer.start()) - .mapEmpty(); + CompositeFuture.all( + internalCommandSenderTracker.future(), + commandResponseSenderTracker.future(), + consumerTracker.future()) + .onSuccess(ok -> lifecycleStatus.setStarted()); + + return CompositeFuture.all(commandHandler.start(), kafkaConsumer.start()).mapEmpty(); } /** * {@inheritDoc} * - * @return The combined outcome of stopping the Kafka consumer, the command handler and the internal Kafka - * topic cleanup service. + * @return A future indicating the combined outcome of stopping the Kafka consumer and the command handler. + * The future will be succeeded once this component is stopped. 
*/ @Override public Future stop() { - return CompositeFuture.join(kafkaConsumer.stop(), commandHandler.stop()) - .mapEmpty(); + + return lifecycleStatus.runStopAttempt(() -> CompositeFuture.join(kafkaConsumer.stop(), commandHandler.stop()) + .mapEmpty()); } @Override diff --git a/tests/src/test/java/org/eclipse/hono/tests/IntegrationTestSupport.java b/tests/src/test/java/org/eclipse/hono/tests/IntegrationTestSupport.java index 647c8593b4..e8fcdd212d 100644 --- a/tests/src/test/java/org/eclipse/hono/tests/IntegrationTestSupport.java +++ b/tests/src/test/java/org/eclipse/hono/tests/IntegrationTestSupport.java @@ -858,10 +858,16 @@ public Future init(final MessagingKafkaConsumerConfigProperties kafkaDowns initRegistryClient(); kafkaProducerFactory = CachingKafkaProducerFactory.sharedFactory(vertx); - applicationClient = new KafkaApplicationClientImpl(vertx, kafkaDownstreamProps, kafkaProducerFactory, + final Promise readyTracker = Promise.promise(); + final var client = new KafkaApplicationClientImpl( + vertx, + kafkaDownstreamProps, + kafkaProducerFactory, getKafkaProducerConfig()); - - return Future.succeededFuture(); + client.addOnKafkaProducerReadyHandler(readyTracker); + return client.start() + .compose(ok -> readyTracker.future()) + .onSuccess(ok -> applicationClient = client); } /** diff --git a/tests/src/test/java/org/eclipse/hono/tests/client/HonoKafkaConsumerIT.java b/tests/src/test/java/org/eclipse/hono/tests/client/HonoKafkaConsumerIT.java index b8b7d2e217..a09bb9458c 100644 --- a/tests/src/test/java/org/eclipse/hono/tests/client/HonoKafkaConsumerIT.java +++ b/tests/src/test/java/org/eclipse/hono/tests/client/HonoKafkaConsumerIT.java @@ -164,7 +164,8 @@ void closeConsumer(final VertxTestContext ctx) { @ParameterizedTest @MethodSource("partitionAssignmentStrategies") @Timeout(value = 10, timeUnit = TimeUnit.SECONDS) - public void testConsumerReadsLatestRecordsPublishedAfterStart(final String partitionAssignmentStrategy, + public void testConsumerReadsLatestRecordsPublishedAfterStart( + final String partitionAssignmentStrategy, final VertxTestContext ctx) throws InterruptedException { final int numTopics = 2; final int numPartitions = 5; @@ -200,10 +201,14 @@ public void testConsumerReadsLatestRecordsPublishedAfterStart(final String parti }; kafkaConsumer = new HonoKafkaConsumer<>(vertx, topics, recordHandler, consumerConfig); // start consumer - kafkaConsumer.start().onComplete(ctx.succeeding(v -> { - LOG.debug("consumer started, publish record to be received by the consumer"); - publish(publishTestTopic, publishedAfterStartRecordKey, Buffer.buffer("testPayload")); - })); + final Promise readyTracker = Promise.promise(); + kafkaConsumer.addOnKafkaConsumerReadyHandler(readyTracker); + kafkaConsumer.start() + .compose(ok -> readyTracker.future()) + .onComplete(ctx.succeeding(v -> { + LOG.debug("consumer started, publish record to be received by the consumer"); + publish(publishTestTopic, publishedAfterStartRecordKey, Buffer.buffer("testPayload")); + })); if (!ctx.awaitCompletion(9, TimeUnit.SECONDS)) { ctx.failNow(new IllegalStateException("timeout waiting for record to be received")); @@ -220,7 +225,9 @@ public void testConsumerReadsLatestRecordsPublishedAfterStart(final String parti */ @Test @Timeout(value = 10, timeUnit = TimeUnit.SECONDS) - public void testConsumerReadsLatestRecordsPublishedAfterOutOfRangeOffsetReset(final VertxTestContext ctx) throws InterruptedException { + public void testConsumerReadsLatestRecordsPublishedAfterOutOfRangeOffsetReset(final VertxTestContext ctx) 
+ throws InterruptedException { + final int numTopics = 1; final int numTestRecordsPerTopicPerRound = 20; final int numPartitions = 1; // has to be 1 here because we expect partition 0 to contain *all* the records published for a topic @@ -274,13 +281,19 @@ public void testConsumerReadsLatestRecordsPublishedAfterOutOfRangeOffsetReset(fi kafkaConsumer = new HonoKafkaConsumer<>(vertx, topics, recordHandler, consumerConfig); // first start of consumer, letting it commit offsets - kafkaConsumer.start().onComplete(ctx.succeeding(v -> { - LOG.trace("consumer started, publish first round of records to be received by the consumer (so that it has offsets to commit)"); - publishRecords(numTestRecordsPerTopicPerRound, "round1_", topics); - })); - - assertThat(firstConsumerInstanceStartedAndStopped.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), - TimeUnit.SECONDS)).isTrue(); + final Promise readyTracker = Promise.promise(); + kafkaConsumer.addOnKafkaConsumerReadyHandler(readyTracker); + kafkaConsumer.start() + .compose(ok -> readyTracker.future()) + .onComplete(ctx.succeeding(v -> { + LOG.trace("consumer started, publish first round of records to be received by the consumer (so that it has offsets to commit)"); + publishRecords(numTestRecordsPerTopicPerRound, "round1_", topics); + })); + + assertThat(firstConsumerInstanceStartedAndStopped.awaitCompletion( + IntegrationTestSupport.getTestSetupTimeout(), + TimeUnit.SECONDS)) + .isTrue(); if (firstConsumerInstanceStartedAndStopped.failed()) { ctx.failNow(firstConsumerInstanceStartedAndStopped.causeOfFailure()); return; @@ -306,11 +319,15 @@ public void testConsumerReadsLatestRecordsPublishedAfterOutOfRangeOffsetReset(fi publishRecords(numTestRecordsPerTopicPerRound, "round3_", topics) .onFailure(ctx::failNow) .onSuccess(v -> { + final Promise newReadyTracker = Promise.promise(); kafkaConsumer = new HonoKafkaConsumer<>(vertx, topics, recordHandler2, consumerConfig); - kafkaConsumer.start().onComplete(ctx.succeeding(v2 -> { - LOG.debug("consumer started, publish another record to be received by the consumer"); - publish(publishTestTopic, lastRecordKey, Buffer.buffer("testPayload")); - })); + kafkaConsumer.addOnKafkaConsumerReadyHandler(newReadyTracker); + kafkaConsumer.start() + .compose(ok -> newReadyTracker.future()) + .onComplete(ctx.succeeding(v2 -> { + LOG.debug("consumer started, publish another record to be received by the consumer"); + publish(publishTestTopic, lastRecordKey, Buffer.buffer("testPayload")); + })); }); if (!ctx.awaitCompletion(9, TimeUnit.SECONDS)) { @@ -420,30 +437,34 @@ public void testConsumerReadsLatestRecordsPublishedAfterTopicSubscriptionConfirm }; kafkaConsumer = new HonoKafkaConsumer<>(vertx, topicPattern, recordHandler, consumerConfig); // start consumer - kafkaConsumer.start().onComplete(ctx.succeeding(v -> { - ctx.verify(() -> { - assertThat(receivedRecords.size()).isEqualTo(0); - }); - final Promise nextRecordReceivedPromise = Promise.promise(); - nextRecordReceivedPromiseRef.set(nextRecordReceivedPromise); - - LOG.debug("consumer started, create new topic implicitly by invoking ensureTopicIsAmongSubscribedTopicPatternTopics()"); - final String newTopic = patternPrefix + "new"; - final String recordKey = "addedAfterStartKey"; - kafkaConsumer.ensureTopicIsAmongSubscribedTopicPatternTopics(newTopic) - .onComplete(ctx.succeeding(v2 -> { - LOG.debug("publish record to be received by the consumer"); - publish(newTopic, recordKey, Buffer.buffer("testPayload")); - })); - - 
nextRecordReceivedPromise.future().onComplete(ar -> { + final Promise readyTracker = Promise.promise(); + kafkaConsumer.addOnKafkaConsumerReadyHandler(readyTracker); + kafkaConsumer.start() + .compose(ok -> readyTracker.future()) + .onComplete(ctx.succeeding(v -> { ctx.verify(() -> { - assertThat(receivedRecords.size()).isEqualTo(1); - assertThat(receivedRecords.get(0).key()).isEqualTo(recordKey); + assertThat(receivedRecords.size()).isEqualTo(0); }); - ctx.completeNow(); - }); - })); + final Promise nextRecordReceivedPromise = Promise.promise(); + nextRecordReceivedPromiseRef.set(nextRecordReceivedPromise); + + LOG.debug("consumer started, create new topic implicitly by invoking ensureTopicIsAmongSubscribedTopicPatternTopics()"); + final String newTopic = patternPrefix + "new"; + final String recordKey = "addedAfterStartKey"; + kafkaConsumer.ensureTopicIsAmongSubscribedTopicPatternTopics(newTopic) + .onComplete(ctx.succeeding(v2 -> { + LOG.debug("publish record to be received by the consumer"); + publish(newTopic, recordKey, Buffer.buffer("testPayload")); + })); + + nextRecordReceivedPromise.future().onComplete(ar -> { + ctx.verify(() -> { + assertThat(receivedRecords.size()).isEqualTo(1); + assertThat(receivedRecords.get(0).key()).isEqualTo(recordKey); + }); + ctx.completeNow(); + }); + })); } /** @@ -480,28 +501,32 @@ public void testConsumerReadsAllRecordsForDynamicallyCreatedTopics( kafkaConsumer = new HonoKafkaConsumer<>(vertx, topicPattern, recordHandler, consumerConfig); // start consumer - kafkaConsumer.start().onComplete(ctx.succeeding(v -> { - ctx.verify(() -> { - assertThat(receivedRecords.size()).isEqualTo(0); - }); - LOG.debug("consumer started, create new topics implicitly by invoking ensureTopicIsAmongSubscribedTopicPatternTopics()"); - final String recordKey = "addedAfterStartKey"; - for (int i = 0; i < numTopicsAndRecords; i++) { - final String topic = patternPrefix + i; - kafkaConsumer.ensureTopicIsAmongSubscribedTopicPatternTopics(topic) - .onComplete(ctx.succeeding(v2 -> { - LOG.debug("publish record to be received by the consumer"); - publish(topic, recordKey, Buffer.buffer("testPayload")); - })); - } - allRecordsReceivedPromise.future().onComplete(ar -> { + final Promise readyTracker = Promise.promise(); + kafkaConsumer.addOnKafkaConsumerReadyHandler(readyTracker); + kafkaConsumer.start() + .compose(ok -> readyTracker.future()) + .onComplete(ctx.succeeding(v -> { ctx.verify(() -> { - assertThat(receivedRecords.size()).isEqualTo(numTopicsAndRecords); - receivedRecords.forEach(record -> assertThat(record.key()).isEqualTo(recordKey)); + assertThat(receivedRecords.size()).isEqualTo(0); }); - ctx.completeNow(); - }); - })); + LOG.debug("consumer started, create new topics implicitly by invoking ensureTopicIsAmongSubscribedTopicPatternTopics()"); + final String recordKey = "addedAfterStartKey"; + for (int i = 0; i < numTopicsAndRecords; i++) { + final String topic = patternPrefix + i; + kafkaConsumer.ensureTopicIsAmongSubscribedTopicPatternTopics(topic) + .onComplete(ctx.succeeding(v2 -> { + LOG.debug("publish record to be received by the consumer"); + publish(topic, recordKey, Buffer.buffer("testPayload")); + })); + } + allRecordsReceivedPromise.future().onComplete(ar -> { + ctx.verify(() -> { + assertThat(receivedRecords.size()).isEqualTo(numTopicsAndRecords); + receivedRecords.forEach(record -> assertThat(record.key()).isEqualTo(recordKey)); + }); + ctx.completeNow(); + }); + })); if (!ctx.awaitCompletion(9, TimeUnit.SECONDS)) { ctx.failNow(new 
IllegalStateException(String.format( "timeout waiting for expected number of records (%d) to be received; received records: %d", @@ -554,17 +579,21 @@ public void testConsumerReadsAllRecordsAfterStart(final VertxTestContext ctx) th }; kafkaConsumer = new HonoKafkaConsumer<>(vertx, topics, recordHandler, consumerConfig); // start consumer - kafkaConsumer.start().onComplete(ctx.succeeding(v -> { - ctx.verify(() -> { - assertThat(receivedRecords.size()).isEqualTo(0); - }); - allRecordsReceivedPromise.future().onComplete(ar -> { + final Promise readyTracker = Promise.promise(); + kafkaConsumer.addOnKafkaConsumerReadyHandler(readyTracker); + kafkaConsumer.start() + .compose(ok -> readyTracker.future()) + .onComplete(ctx.succeeding(v -> { ctx.verify(() -> { - assertThat(receivedRecords.size()).isEqualTo(totalExpectedMessages); + assertThat(receivedRecords.size()).isEqualTo(0); }); - ctx.completeNow(); - }); - })); + allRecordsReceivedPromise.future().onComplete(ar -> { + ctx.verify(() -> { + assertThat(receivedRecords.size()).isEqualTo(totalExpectedMessages); + }); + ctx.completeNow(); + }); + })); } /** @@ -597,25 +626,29 @@ public void testConsumerAutoCreatesTopicAndReadsLatestRecordsPublishedAfterStart topicsToDeleteAfterTests.add(topic); kafkaConsumer = new HonoKafkaConsumer<>(vertx, Set.of(topic), recordHandler, consumerConfig); // start consumer - kafkaConsumer.start().onComplete(ctx.succeeding(v -> { - ctx.verify(() -> { - assertThat(receivedRecords.size()).isEqualTo(0); - }); - final Promise nextRecordReceivedPromise = Promise.promise(); - nextRecordReceivedPromiseRef.set(nextRecordReceivedPromise); - - LOG.debug("consumer started, publish record to be received by the consumer"); - final String recordKey = "addedAfterStartKey"; - publish(topic, recordKey, Buffer.buffer("testPayload")); - - nextRecordReceivedPromise.future().onComplete(ar -> { + final Promise readyTracker = Promise.promise(); + kafkaConsumer.addOnKafkaConsumerReadyHandler(readyTracker); + kafkaConsumer.start() + .compose(ok -> readyTracker.future()) + .onComplete(ctx.succeeding(v -> { ctx.verify(() -> { - assertThat(receivedRecords.size()).isEqualTo(1); - assertThat(receivedRecords.get(0).key()).isEqualTo(recordKey); + assertThat(receivedRecords.size()).isEqualTo(0); }); - ctx.completeNow(); - }); - })); + final Promise nextRecordReceivedPromise = Promise.promise(); + nextRecordReceivedPromiseRef.set(nextRecordReceivedPromise); + + LOG.debug("consumer started, publish record to be received by the consumer"); + final String recordKey = "addedAfterStartKey"; + publish(topic, recordKey, Buffer.buffer("testPayload")); + + nextRecordReceivedPromise.future().onComplete(ar -> { + ctx.verify(() -> { + assertThat(receivedRecords.size()).isEqualTo(1); + assertThat(receivedRecords.get(0).key()).isEqualTo(recordKey); + }); + ctx.completeNow(); + }); + })); } private static Future createTopics(final Collection topicNames, final int numPartitions) { diff --git a/tests/src/test/java/org/eclipse/hono/tests/commandrouter/KafkaBasedCommandConsumerFactoryImplIT.java b/tests/src/test/java/org/eclipse/hono/tests/commandrouter/KafkaBasedCommandConsumerFactoryImplIT.java index 1368b0ebfd..bd91c64175 100644 --- a/tests/src/test/java/org/eclipse/hono/tests/commandrouter/KafkaBasedCommandConsumerFactoryImplIT.java +++ b/tests/src/test/java/org/eclipse/hono/tests/commandrouter/KafkaBasedCommandConsumerFactoryImplIT.java @@ -44,6 +44,7 @@ import org.eclipse.hono.client.kafka.producer.CachingKafkaProducerFactory; import 
org.eclipse.hono.client.kafka.producer.KafkaProducerFactory; import org.eclipse.hono.client.registry.TenantClient; +import org.eclipse.hono.commandrouter.CommandConsumerFactory; import org.eclipse.hono.commandrouter.CommandRouterMetrics; import org.eclipse.hono.commandrouter.CommandTargetMapper; import org.eclipse.hono.commandrouter.impl.kafka.KafkaBasedCommandConsumerFactoryImpl; @@ -122,43 +123,6 @@ public static void init() { kafkaProducer = KafkaProducer.create(vertx, producerConfig); } - /** - * Cleans up fixture. - * - * @param ctx The vert.x test context. - */ - @AfterAll - public static void shutDown(final VertxTestContext ctx) { - - final Promise topicsDeletedPromise = Promise.promise(); - // delete topics with a delay - overall test run might include command router tests, - // in which case the command router kafka consumer will try to commit offsets for these topics once, - // and the topic must still exist then, otherwise there will be errors and delays. So wait until the commits have happened. - vertx.setTimer(AsyncHandlingAutoCommitKafkaConsumer.DEFAULT_COMMIT_INTERVAL.toMillis(), tid -> { - final Set topicsToDelete = new HashSet<>(topicsToDeleteAfterTest); - topicsToDeleteAfterTest.clear(); - final Promise resultPromise = Promise.promise(); - adminClient.deleteTopics(new ArrayList<>(topicsToDelete), resultPromise); - resultPromise.future() - .onFailure(thr -> LOG.info("error deleting test topics", thr)) - .onSuccess(v -> LOG.debug("done deleting test topics (with {}s delay)", - AsyncHandlingAutoCommitKafkaConsumer.DEFAULT_COMMIT_INTERVAL.toSeconds())) - .onComplete(topicsDeletedPromise); - }); - - final Promise producerClosePromise = Promise.promise(); - kafkaProducer.close(producerClosePromise); - CompositeFuture.all(producerClosePromise.future(), topicsDeletedPromise.future()) - .onComplete(ar -> { - adminClient.close(); - adminClient = null; - kafkaProducer = null; - vertx.close(); - vertx = null; - }) - .onComplete(ctx.succeedingThenComplete()); - } - /** * Logs the current test's display name. * @@ -177,13 +141,45 @@ public void logTestName(final TestInfo testInfo) { @AfterEach void cleanupAfterTest(final VertxTestContext ctx) { @SuppressWarnings("rawtypes") - final List stopFutures = new ArrayList<>(); - componentsToStopAfterTest.forEach(component -> stopFutures.add(component.stop())); + final List stopFutures = componentsToStopAfterTest.stream() + .map(component -> component.stop() + .onSuccess(ok -> LOG.info("stopped component of type {}", component.getClass().getName())) + .onFailure(t -> LOG.info("failed to stop component of type {}", component.getClass().getName(), t))) + .collect(Collectors.toList()); componentsToStopAfterTest.clear(); CompositeFuture.all(stopFutures) .onComplete(ctx.succeedingThenComplete()); } + /** + * Cleans up fixture. + * + * @param ctx The vert.x test context. + */ + @AfterAll + public static void shutDown(final VertxTestContext ctx) { + + final Promise topicsDeletedPromise = Promise.promise(); + // delete topics with a delay - overall test run might include command router tests, + // in which case the command router kafka consumer will try to commit offsets for these topics once, + // and the topic must still exist then, otherwise there will be errors and delays. So wait until the + // commits have happened. 
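The wait described in the comment above is implemented with the vertx.setTimer call that follows; the same timer-to-Future idiom also appears later in the 500 ms settle delay. Extracted into a reusable helper (the name delay is illustrative), it reads:

    // Adapts a one-shot vert.x timer into a composable Future.
    static Future<Void> delay(final Vertx vertx, final long delayMillis) {
        final Promise<Void> delayPromise = Promise.promise();
        vertx.setTimer(delayMillis, tid -> delayPromise.complete());
        return delayPromise.future();
    }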
+ vertx.setTimer(AsyncHandlingAutoCommitKafkaConsumer.DEFAULT_COMMIT_INTERVAL.toMillis(), tid -> { + final Set topicsToDelete = new HashSet<>(topicsToDeleteAfterTest); + topicsToDeleteAfterTest.clear(); + adminClient.deleteTopics(new ArrayList<>(topicsToDelete)) + .onFailure(thr -> LOG.info("error deleting test topics", thr)) + .onSuccess(v -> LOG.debug("done deleting test topics (with {}s delay)", + AsyncHandlingAutoCommitKafkaConsumer.DEFAULT_COMMIT_INTERVAL.toSeconds())) + .onComplete(topicsDeletedPromise); + }); + + topicsDeletedPromise.future() + .compose(ok -> CompositeFuture.all(adminClient.close(), kafkaProducer.close())) + .compose(ok -> vertx.close()) + .onComplete(ctx.succeedingThenComplete()); + } + /** * Verifies that records, published on the tenant-specific Kafka command topic, get received by * the consumer created by the factory and get forwarded on the internal command topic in the @@ -226,12 +222,18 @@ public void testCommandsGetForwardedInIncomingOrder(final VertxTestContext ctx) final Context vertxContext = vertx.getOrCreateContext(); vertxContext.runOnContext(v0 -> { - final HonoKafkaConsumer internalConsumer = getInternalCommandConsumer(recordHandler); - final KafkaBasedCommandConsumerFactoryImpl consumerFactory = getKafkaBasedCommandConsumerFactory( + final Promise consumerTracker = Promise.promise(); + final var internalConsumer = getInternalCommandConsumer(recordHandler); + internalConsumer.addOnKafkaConsumerReadyHandler(consumerTracker); + final Promise factoryTracker = Promise.promise(); + final var consumerFactory = getKafkaBasedCommandConsumerFactory( targetAdapterInstanceGetterCompletionFutureSupplier, tenantId); - CompositeFuture.join(internalConsumer.start(), consumerFactory.start()) - .compose(f -> createCommandConsumer(tenantId, consumerFactory)) - .onComplete(setup.succeedingThenComplete()); + consumerFactory.addOnFactoryReadyHandler(factoryTracker); + CompositeFuture.all( + internalConsumer.start().compose(ok -> consumerTracker.future()), + consumerFactory.start().compose(ok -> factoryTracker.future())) + .compose(f -> createCommandConsumer(tenantId, consumerFactory)) + .onComplete(setup.succeedingThenComplete()); }); assertThat(setup.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue(); @@ -312,7 +314,7 @@ public void testCommandsGetForwardedIfOneConsumerInstanceGetsClosed(final VertxT } }; final Promise firstConsumerAllGetAdapterInstanceInvocationsDone = Promise.promise(); - final LinkedList> firstConsumerGetAdapterInstancePromisesQueue = new LinkedList<>(); + final Deque> firstConsumerGetAdapterInstancePromisesQueue = new LinkedList<>(); // don't let getting the target adapter instance finish immediately final Supplier> firstConsumerGetAdapterInstanceSupplier = () -> { final Promise resultPromise = Promise.promise(); @@ -330,13 +332,19 @@ public void testCommandsGetForwardedIfOneConsumerInstanceGetsClosed(final VertxT final AtomicReference consumerFactory1Ref = new AtomicReference<>(); final Context vertxContext = vertx.getOrCreateContext(); vertxContext.runOnContext(v0 -> { - final HonoKafkaConsumer internalConsumer = getInternalCommandConsumer(recordHandler); - final KafkaBasedCommandConsumerFactoryImpl consumerFactory1 = getKafkaBasedCommandConsumerFactory( + final Promise consumerTracker = Promise.promise(); + final var internalConsumer = getInternalCommandConsumer(recordHandler); + internalConsumer.addOnKafkaConsumerReadyHandler(consumerTracker); + final Promise factoryTracker = Promise.promise(); + final var 
consumerFactory1 = getKafkaBasedCommandConsumerFactory( firstConsumerGetAdapterInstanceSupplier, tenantId); + consumerFactory1.addOnFactoryReadyHandler(factoryTracker); consumerFactory1Ref.set(consumerFactory1); - CompositeFuture.join(internalConsumer.start(), consumerFactory1.start()) - .compose(f -> createCommandConsumer(tenantId, consumerFactory1)) - .onComplete(setup.succeedingThenComplete()); + CompositeFuture.all( + internalConsumer.start().compose(ok -> consumerTracker.future()), + consumerFactory1.start().compose(ok -> factoryTracker.future())) + .compose(f -> createCommandConsumer(tenantId, consumerFactory1)) + .onComplete(setup.succeedingThenComplete()); }); assertThat(setup.awaitCompletion(IntegrationTestSupport.getTestSetupTimeout(), TimeUnit.SECONDS)).isTrue(); @@ -355,34 +363,40 @@ public void testCommandsGetForwardedIfOneConsumerInstanceGetsClosed(final VertxT final AtomicInteger secondConsumerGetAdapterInstanceInvocations = new AtomicInteger(); // wait for first record on internal topic to have been received ... - CompositeFuture.join(firstConsumerAllGetAdapterInstanceInvocationsDone.future(), firstRecordReceivedPromise.future()) + CompositeFuture.join( + firstConsumerAllGetAdapterInstanceInvocationsDone.future(), + firstRecordReceivedPromise.future()) .compose(v -> { // ... and wait some more, making sure that the offset of the first record has been committed final Promise delayPromise = Promise.promise(); vertx.setTimer(500, tid -> delayPromise.complete()); return delayPromise.future(); }) - .onComplete(v -> { + .compose(v -> { LOG.info("stopping first consumer factory"); - consumerFactory1Ref.get().stop() - .onComplete(ctx.succeeding(ar -> { - LOG.info("factory stopped"); - // no delay on getting the target adapter instance added here - final KafkaBasedCommandConsumerFactoryImpl consumerFactory2 = getKafkaBasedCommandConsumerFactory(() -> { - secondConsumerGetAdapterInstanceInvocations.incrementAndGet(); - return Future.succeededFuture(); - }, tenantId); - consumerFactory2.start() - .onComplete(ctx.succeeding(ar2 -> { - LOG.info("creating command consumer in new consumer factory"); - createCommandConsumer(tenantId, consumerFactory2) - .onComplete(ctx.succeeding(ar3 -> { - LOG.debug("consumer created"); - firstConsumerGetAdapterInstancePromisesQueue.forEach(Promise::tryComplete); - })); - })); - })); - }); + return consumerFactory1Ref.get().stop(); + }) + .compose(stopped -> { + LOG.info("factory stopped"); + // no delay on getting the target adapter instance added here + final Promise readyTracker = Promise.promise(); + final var consumerFactory2 = getKafkaBasedCommandConsumerFactory(() -> { + secondConsumerGetAdapterInstanceInvocations.incrementAndGet(); + return Future.succeededFuture(); + }, tenantId); + consumerFactory2.addOnFactoryReadyHandler(readyTracker); + return consumerFactory2.start() + .compose(started -> readyTracker.future()) + .map(consumerFactory2); + }) + .compose(consumerFactory2 -> { + LOG.info("creating command consumer in new consumer factory"); + return createCommandConsumer(tenantId, consumerFactory2); + }) + .onComplete(ctx.succeeding(ok -> { + LOG.debug("consumer created"); + firstConsumerGetAdapterInstancePromisesQueue.forEach(Promise::tryComplete); + })); final long timerId = vertx.setTimer(8000, tid -> { LOG.info("received records:\n{}", @@ -397,39 +411,46 @@ public void testCommandsGetForwardedIfOneConsumerInstanceGetsClosed(final VertxT // all but the first command should have been processed by the second consumer 
assertThat(secondConsumerGetAdapterInstanceInvocations.get()).isEqualTo(numTestCommands - 1); }); + LOG.info("all records received"); ctx.completeNow(); })); } - private Future createCommandConsumer(final String tenantId, - final KafkaBasedCommandConsumerFactoryImpl consumerFactory) { + private Future createCommandConsumer( + final String tenantId, + final CommandConsumerFactory consumerFactory) { + topicsToDeleteAfterTest.add(new HonoTopic(HonoTopic.Type.COMMAND, tenantId).toString()); return consumerFactory.createCommandConsumer(tenantId, null); } - private HonoKafkaConsumer getInternalCommandConsumer(final Handler> recordHandler) { + private HonoKafkaConsumer getInternalCommandConsumer( + final Handler> recordHandler) { + final Map consumerConfig = IntegrationTestSupport.getKafkaConsumerConfig() .getConsumerConfig("internal_cmd_consumer_test"); consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString()); final String topic = new HonoTopic(HonoTopic.Type.COMMAND_INTERNAL, adapterInstanceId).toString(); - final HonoKafkaConsumer honoKafkaConsumer = new HonoKafkaConsumer(vertx, Set.of(topic), recordHandler, - consumerConfig); - componentsToStopAfterTest.add(honoKafkaConsumer); + final var consumer = new HonoKafkaConsumer(vertx, Set.of(topic), recordHandler, consumerConfig); + componentsToStopAfterTest.add(consumer); topicsToDeleteAfterTest.add(topic); - return honoKafkaConsumer; + return consumer; } private KafkaBasedCommandConsumerFactoryImpl getKafkaBasedCommandConsumerFactory( final Supplier> targetAdapterInstanceGetterCompletionFutureSupplier, final String tenantToHandleCommandsFor) { + final KafkaProducerFactory producerFactory = CachingKafkaProducerFactory.sharedFactory(vertx); final TenantClient tenantClient = getTenantClient(); final CommandTargetMapper commandTargetMapper = new CommandTargetMapper() { @Override - public Future getTargetGatewayAndAdapterInstance(final String tenantId, + public Future getTargetGatewayAndAdapterInstance( + final String tenantId, final String deviceId, final SpanContext context) { + final JsonObject jsonObject = new JsonObject(); jsonObject.put(DeviceConnectionConstants.FIELD_ADAPTER_INSTANCE_ID, adapterInstanceId); jsonObject.put(DeviceConnectionConstants.FIELD_PAYLOAD_DEVICE_ID, deviceId); @@ -445,7 +466,7 @@ public Future getTargetGatewayAndAdapterInstance(final String tenant final Span span = TracingMockSupport.mockSpan(); final Tracer tracer = TracingMockSupport.mockTracer(span); - final MessagingKafkaConsumerConfigProperties kafkaConsumerConfig = new MessagingKafkaConsumerConfigProperties(); + final var kafkaConsumerConfig = new MessagingKafkaConsumerConfigProperties(); kafkaConsumerConfig.setConsumerConfig(Map.of( ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, IntegrationTestSupport.DOWNSTREAM_BOOTSTRAP_SERVERS)); final CommandRouterMetrics metrics = mock(CommandRouterMetrics.class);
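A note on the CompositeFuture.join to CompositeFuture.all switches in the setup paths above: all(...) fails as soon as any of the given futures fails, while join(...) always waits for every future to complete and only then reports a failure. Failing fast suits test setup; join remains the right choice for teardown, where every stop attempt should run. Sketch (LOG is illustrative):

    // fails fast: reported as soon as either start() fails
    CompositeFuture.all(internalConsumer.start(), consumerFactory.start())
            .onFailure(t -> LOG.error("setup failed", t));

    // waits for both stop attempts to finish, regardless of individual outcomes
    CompositeFuture.join(kafkaConsumer.stop(), commandHandler.stop())
            .onComplete(ar -> LOG.info("all stop attempts have finished"));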