diff --git a/antora.yml b/antora.yml index 5798a3217a..2d617a50c6 100644 --- a/antora.yml +++ b/antora.yml @@ -1,13 +1,13 @@ name: ROOT title: Self-Managed -version: 25.3 +version: 26.1 start_page: home:index.adoc nav: - modules/ROOT/nav.adoc asciidoc: attributes: # Date of release in the format YYYY-MM-DD - page-release-date: 2025-11-19 + page-release-date: 2026-03-31 # Only used in the main branch (latest version) page-header-data: order: 2 @@ -17,16 +17,16 @@ asciidoc: # Fallback versions # We try to fetch the latest versions from GitHub at build time # -- - full-version: 25.3.11 - latest-redpanda-tag: 'v25.3.11' + full-version: 26.1.1 + latest-redpanda-tag: 'v26.1.1' latest-console-tag: 'v3.3.1' - latest-release-commit: '6aa5af28b020b66e5caa966094882b7260497a53' + latest-release-commit: '35a825c9c1880ebeedf4c18bb8c6cceaa63566c1' latest-operator-version: 'v2.3.8-24.3.6' operator-beta-tag: '' helm-beta-tag: '' latest-redpanda-helm-chart-version: '' - redpanda-beta-version: '25.3.1-rc4' - redpanda-beta-tag: 'v25.3.1-rc4' + redpanda-beta-version: '' + redpanda-beta-tag: '' console-beta-version: '' console-beta-tag: '' # -- diff --git a/docs-data/property-overrides.json b/docs-data/property-overrides.json index 913ff8476f..5e5b95f8ab 100644 --- a/docs-data/property-overrides.json +++ b/docs-data/property-overrides.json @@ -472,12 +472,29 @@ "description": "Configure the addressing style that controls how Redpanda formats bucket URLs for S3-compatible object storage.\n\nLeave this property unset (`null`) to use automatic configuration:\n\n* For AWS S3: Redpanda attempts `virtual_host` addressing first, then falls back to `path` style if needed\n* For MinIO: Redpanda automatically uses `path` style regardless of `MINIO_DOMAIN` configuration\n\nSet this property explicitly to override automatic configuration, ensure consistent behavior across deployments, or when using S3-compatible storage that requires a specific URL format.\n\nCAUTION: AWS requires virtual-hosted 
addressing for buckets created after September 30, 2020. If you use AWS S3 with buckets created after this date, use `virtual_host` addressing.\n\nNOTE: For MinIO deployments, Redpanda defaults to `path` style when this property is unset. To use `virtual_host` addressing with a configured `MINIO_DOMAIN`, set this property explicitly to `virtual_host`. For other S3-compatible storage backends, consult your provider's documentation to determine the required URL style.", "config_scope": "cluster" }, + "cloud_topics_compaction_interval_ms": { + "version": "v26.1.1-rc2" + }, + "cloud_topics_compaction_key_map_memory": { + "version": "v26.1.1-rc2" + }, + "cloud_topics_compaction_max_object_size": { + "version": "v26.1.1-rc2" + }, + "cloud_topics_disable_level_zero_gc_for_tests": { + "version": "v26.1.1-rc2" + }, + "cloud_topics_disable_metastore_flush_loop_for_tests": { + "version": "v26.1.1-rc2" + }, "cloud_topics_disable_reconciliation_loop": { - "exclude_from_docs": true, "config_scope": "cluster" }, "cloud_topics_enabled": { - "exclude_from_docs": true, + "description": "Enable Cloud Topics for the cluster. Cloud Topics are optimized for high-throughput, cost-sensitive workloads that can tolerate higher latencies compared to standard Kafka topics.", + "related_topics": [ + "xref:develop:manage-topics/cloud-topics.adoc[Cloud Topics]" + ], "config_scope": "cluster" }, "cloud_topics_epoch_service_epoch_increment_interval": { @@ -488,26 +505,68 @@ "description": "The duration, in milliseconds, for which a cluster-wide epoch is cached locally on each broker.\n\nCaching the epoch locally reduces the need for frequent coordination with the controller. This property controls how long each broker can use a cached epoch value before fetching the latest value.\n\nIncrease this value to reduce coordination overhead in clusters with stable workloads. 
Decrease it if you need brokers to react more quickly to epoch changes in Tiered Storage.", "version": "v25.3.3" }, + "cloud_topics_fetch_debounce_enabled": { + "version": "v26.1.1-rc2" + }, + "cloud_topics_l1_indexing_interval": { + "version": "v26.1.1-rc2" + }, + "cloud_topics_long_term_flush_interval": { + "version": "v26.1.1-rc2" + }, "cloud_topics_long_term_garbage_collection_interval": { - "exclude_from_docs": true, "config_scope": "cluster" }, + "cloud_topics_parallel_fetch_enabled": { + "version": "v26.1.1-rc2" + }, + "cloud_topics_preregistered_object_ttl": { + "version": "v26.1.1-rc2" + }, "cloud_topics_produce_batching_size_threshold": { - "exclude_from_docs": true, "config_scope": "cluster" }, "cloud_topics_produce_cardinality_threshold": { - "exclude_from_docs": true, "config_scope": "cluster" }, "cloud_topics_produce_upload_interval": { - "exclude_from_docs": true, + "description": "Time interval after which data is uploaded to object storage for Cloud Topics. When this time threshold is reached, Redpanda triggers an upload of buffered data to the object storage backend (S3, GCS, or MinIO), regardless of whether the size or cardinality thresholds have been met.\n\nThis property works together with <> and <> to control when uploads occur. An upload is triggered when any of these three thresholds is reached.", + "related_topics": [ + "xref:develop:manage-topics/cloud-topics.adoc[Cloud Topics]", + "xref:reference:properties/cluster-properties.adoc#cloud_topics_produce_batching_size_threshold[`cloud_topics_produce_batching_size_threshold`]", + "xref:reference:properties/cluster-properties.adoc#cloud_topics_produce_cardinality_threshold[`cloud_topics_produce_cardinality_threshold`]" + ], "config_scope": "cluster" }, "cloud_topics_reconciliation_interval": { - "exclude_from_docs": true, + "description": "Time interval at which Redpanda reconciles data between short-term local storage and long-term object storage for Cloud Topics. 
During this reconciliation process, Redpanda optimizes the storage layout of data in short-term storage to improve the cost and performance associated with accessing data. After the reconciliation process has moved data into long-term storage, the data in short-term storage is subject to removal by a garbage collection process.", + "related_topics": [ + "xref:develop:manage-topics/cloud-topics.adoc[Cloud Topics]", + "xref:reference:properties/cluster-properties.adoc#cloud_topics_long_term_garbage_collection_interval[`cloud_topics_long_term_garbage_collection_interval`]" + ], "config_scope": "cluster" }, + "cloud_topics_reconciliation_max_interval": { + "version": "v26.1.1-rc2" + }, + "cloud_topics_reconciliation_max_object_size": { + "version": "v26.1.1-rc2" + }, + "cloud_topics_reconciliation_min_interval": { + "version": "v26.1.1-rc2" + }, + "cloud_topics_reconciliation_parallelism": { + "version": "v26.1.1-rc2" + }, + "cloud_topics_reconciliation_slowdown_blend": { + "version": "v26.1.1-rc2" + }, + "cloud_topics_reconciliation_speedup_blend": { + "version": "v26.1.1-rc2" + }, + "cloud_topics_reconciliation_target_fill_ratio": { + "version": "v26.1.1-rc2" + }, "cloud_topics_short_term_gc_backoff_interval": { "description": "The interval, in milliseconds, between invocations of the L0 garbage collection work loop when no progress is being made or errors are occurring.\n\nL0 (level-zero) objects are short-term data objects in Tiered Storage that are periodically garbage collected. When GC encounters errors or cannot make progress (for example, if there are no objects eligible for deletion), this backoff interval prevents excessive retries.\n\nIncrease this value to reduce system load when GC cannot make progress. 
Decrease it if you need faster retry attempts after transient errors.", "version": "v25.3.3" @@ -520,6 +579,9 @@ "description": "The minimum age, in milliseconds, of an L0 (level-zero) object before it becomes eligible for garbage collection.\n\nThis grace period delays deletion of L0 objects even after they become eligible based on epoch. The delay provides a safety buffer that can support recovery in cases involving accidental deletion or other operational issues.\n\nIncrease this value to extend the retention window for L0 objects, providing more time for recovery from operational errors. Decrease it to free up object storage space more quickly, but with less protection against accidental deletion.", "version": "v25.3.3" }, + "cloud_topics_upload_part_size": { + "version": "v26.1.1-rc2" + }, "cluster_id": { "description": "Cluster identifier.", "config_scope": "cluster" @@ -693,6 +755,9 @@ ], "config_scope": "cluster" }, + "default_redpanda_storage_mode": { + "version": "v26.1.1-rc2" + }, "delete.retention.ms": { "description": "The retention time for tombstone records in a compacted topic. 
Redpanda removes tombstone records after the retention limit is exceeded.\n\nIf you have enabled Tiered Storage and set <> or <> for the topic, you cannot enable tombstone removal.\n\nIf both `delete.retention.ms` and the cluster property config_ref:tombstone_retention_ms,true,properties/cluster-properties[] are set, `delete.retention.ms` overrides the cluster level tombstone retention for an individual topic.\n\nThis property supports three states:\n\n* Positive value: Sets the milliseconds to retain tombstone records before removal.\n* 0: Tombstone records are immediately eligible for removal.\n* Negative value: Disables tombstone removal entirely for this topic.", "related_topics": [ @@ -701,6 +766,9 @@ ], "config_scope": "topic" }, + "delete_topic_enable": { + "version": "v26.1.1-rc2" + }, "developer_mode": { "description": "CAUTION: Enabling `developer_mode` isn't recommended for production use.\n\nEnable developer mode, which skips most of the checks performed at startup.", "config_scope": "broker", @@ -1012,6 +1080,9 @@ ], "config_scope": "cluster" }, + "internal_rpc_request_timeout_ms": { + "version": "v26.1.1-rc2" + }, "internal_topic_replication_factor": { "description": "Target replication factor for internal topics.\n\n*Unit*: number of replicas per topic.", "config_scope": "cluster" @@ -1387,6 +1458,9 @@ "config_scope": "broker", "category": "schema-registry" }, + "nested_group_behavior": { + "version": "v26.1.1-rc2" + }, "node_id": { "config_scope": "broker", "category": "redpanda", @@ -1431,6 +1505,9 @@ "description": "The URL pointing to the well-known discovery endpoint for the OIDC provider.", "config_scope": "cluster" }, + "oidc_group_claim_path": { + "version": "v26.1.1-rc2" + }, "oidc_principal_mapping": { "description": "Rule for mapping JWT payload claim to a Redpanda user principal.", "related_topics": [ @@ -1503,6 +1580,9 @@ "description": "Controls when and how Redpanda automatically rebalances partition replicas across brokers. 
For more information, see xref:manage:cluster-maintenance/cluster-balancing.adoc[partition balancing].\n\nValues:\n\n* `continuous`: Partition balancing happens automatically to maintain optimal performance and availability, based on continuous monitoring for node changes (same as `node_add`) and also high disk usage. This option requires an enterprise license, and it is customized by xref:reference:properties/cluster-properties.adoc#partition_autobalancing_node_availability_timeout_sec[`partition_autobalancing_node_availability_timeout_sec`] and xref:reference:properties/cluster-properties.adoc#partition_autobalancing_max_disk_usage_percent[`partition_autobalancing_max_disk_usage_percent`] properties.\n* `node_add`: Partition balancing happens when a node is added.\n* `off`: Partition balancing is disabled. This option is not recommended for production clusters.", "config_scope": "cluster" }, + "partition_autobalancing_node_autodecommission_timeout_sec": { + "version": "v26.1.1-rc2" + }, "partition_autobalancing_node_availability_timeout_sec": { "related_topics": [ "xref:manage:cluster-maintenance/continuous-data-balancing.adoc[Configure Continuous Data Balancing]" @@ -1685,6 +1765,14 @@ ], "config_scope": "topic" }, + "redpanda.storage.mode": { + "description": "The storage mode for a topic. Determines how topic data is stored and whether it is eligible for upload to object storage.\n\nAccepted values:\n\n* `local`: Topic data is stored only on the broker's local disk. Object storage upload is disabled for the topic, regardless of cluster-level Tiered Storage settings.\n* `tiered`: Topic data is stored on local disk and also uploaded to object storage. Enables xref:manage:tiered-storage.adoc[Tiered Storage] for the topic.\n* `cloud`: Topic data is stored in object storage using the Cloud Topics architecture. 
Local storage is used only as a write buffer.\n* `unset`: Uses the cluster-level config_ref:default_redpanda_storage_mode,true,properties/cluster-properties[] setting, or falls back to legacy `redpanda.remote.read` and `redpanda.remote.write` topic property behavior for backwards compatibility.\n\nThis property overrides the cluster-wide config_ref:default_redpanda_storage_mode,true,properties/cluster-properties[] setting for individual topics.", + "related_topics": [ + "xref:manage:tiered-storage.adoc[Tiered Storage]" + ], + "config_scope": "topic", + "category": "tiered-storage" + }, "redpanda.value.schema.id.validation": { "description": "Enable validation of the schema ID for values on a record. When enabled, Redpanda validates that the schema ID encoded in the record's value is registered in the Schema Registry according to the configured subject name strategy.", "related_topics": [ @@ -1718,8 +1806,8 @@ "xref:reference:rpk/rpk-topic/rpk-topic-describe.adoc[`rpk topic describe`]", "xref:reference:rpk/rpk-topic/rpk-topic-alter-config.adoc[`rpk topic alter-config`]", "xref:reference:properties/cluster-properties.adoc#default_topic_replication[`default_topic_replication`]", - "xref:develop:config-topics.adoc#choose-the-replication-factor[Choose the replication factor]", - "xref:develop:config-topics.adoc#change-the-replication-factor[Change the replication factor]", + "xref:develop:manage-topics/config-topics.adoc#choose-the-replication-factor[Choose the replication factor]", + "xref:develop:manage-topics/config-topics.adoc#change-the-replication-factor[Change the replication factor]", "xref:reference:properties/cluster-properties.adoc#default_topic_replication[default_topic_replication]" ], "config_scope": "topic" @@ -1901,6 +1989,9 @@ "related_topics": [], "config_scope": "cluster" }, + "schema_registry_enable_qualified_subjects": { + "version": "v26.1.1-rc2" + }, "schema_registry_replication_factor": { "description": "Replication factor for internal `_schemas` 
topic. If unset, defaults to the xref:../cluster-properties.adoc#default_topic_replication[`default_topic_replication`] cluster property.", "related_topics": [ @@ -2123,7 +2214,7 @@ "write.caching": { "description": "The write caching mode to apply to a topic.\n\nWhen `write.caching` is set, it overrides the cluster property xref:cluster-properties.adoc#write_caching_default[`write_caching_default`]. Write caching acknowledges a message as soon as it is received and acknowledged on a majority of brokers, without waiting for it to be written to disk. With `acks=all`, this provides lower latency while still ensuring that a majority of brokers acknowledge the write. Fsyncs follow <> and <>, whichever is reached first.", "related_topics": [ - "xref:develop:config-topics.adoc#configure-write-caching[Write caching]", + "xref:develop:manage-topics/config-topics.adoc#configure-write-caching[Write caching]", "xref:manage:tiered-storage.adoc[Tiered Storage]", "xref:reference:properties/cluster-properties.adoc#write_caching_default[`write_caching_default`]", "xref:cluster-properties.adoc#write_caching_default[`write_caching_default`]" @@ -2133,7 +2224,7 @@ "write_caching_default": { "related_topics": [ "xref:reference:properties/topic-properties.adoc#writecaching[`write.caching`]", - "xref:develop:config-topics.adoc#configure-write-caching[Write caching]" + "xref:develop:manage-topics/config-topics.adoc#configure-write-caching[Write caching]" ], "config_scope": "cluster", "description": "The default write caching mode to apply to user topics. Write caching acknowledges a message as soon as it is received and acknowledged on a majority of brokers, without waiting for it to be written to disk. With `acks=all`, this provides lower latency while still ensuring that a majority of brokers acknowledge the write. 
\n\nFsyncs follow <> and <>, whichever is reached first.\n\nThe `write_caching_default` cluster property can be overridden with the xref:reference:properties/topic-properties.adoc#writecaching[`write.caching`] topic property." diff --git a/docs-data/redpanda-property-changes-v25.3.1-to-v25.3.3.json b/docs-data/redpanda-property-changes-v25.3.1-to-v25.3.3.json deleted file mode 100644 index b7ff9a7422..0000000000 --- a/docs-data/redpanda-property-changes-v25.3.1-to-v25.3.3.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "comparison": { - "oldVersion": "v25.3.1", - "newVersion": "v25.3.3", - "timestamp": "2025-12-21T10:45:35.556Z" - }, - "summary": { - "newProperties": 6, - "changedDefaults": 0, - "changedDescriptions": 0, - "changedTypes": 0, - "deprecatedProperties": 0, - "removedProperties": 0, - "emptyDescriptions": 3 - }, - "details": { - "newProperties": [ - { - "name": "cloud_topics_epoch_service_epoch_increment_interval", - "type": "integer", - "default": 600000, - "description": "The interval at which the cluster epoch is incremented." - }, - { - "name": "cloud_topics_epoch_service_local_epoch_cache_duration", - "type": "integer", - "default": 60000, - "description": "The local cache duration of a cluster wide epoch." - }, - { - "name": "cloud_topics_short_term_gc_backoff_interval", - "type": "integer", - "default": 60000, - "description": "The interval between invocations of the L0 garbage collection work loop when no progress is being made or errors are occurring." - }, - { - "name": "cloud_topics_short_term_gc_interval", - "type": "integer", - "default": 10000, - "description": "The interval between invocations of the L0 garbage collection work loop when progress is being made." - }, - { - "name": "cloud_topics_short_term_gc_minimum_object_age", - "type": "integer", - "default": 43200000, - "description": "The minimum age of an L0 object before it becomes eligible for garbage collection." 
- }, - { - "name": "fetch_max_read_concurrency", - "type": "integer", - "default": 1, - "description": "The maximum number of concurrent partition reads per fetch request on each shard. Setting this higher than the default can lead to partition starvation and unneeded memory usage." - } - ], - "changedDefaults": [], - "changedDescriptions": [], - "changedTypes": [], - "deprecatedProperties": [], - "removedProperties": [], - "emptyDescriptions": [ - { - "name": "redpanda.cloud_topic.enabled", - "type": "string" - }, - { - "name": "redpanda.remote.allowgaps", - "type": "boolean" - }, - { - "name": "redpanda.virtual.cluster.id", - "type": "string" - } - ] - } -} \ No newline at end of file diff --git a/docs-data/redpanda-property-changes-v25.3.7-to-v26.1.1-rc2.json b/docs-data/redpanda-property-changes-v25.3.7-to-v26.1.1-rc2.json new file mode 100644 index 0000000000..bb725f086f --- /dev/null +++ b/docs-data/redpanda-property-changes-v25.3.7-to-v26.1.1-rc2.json @@ -0,0 +1,503 @@ +{ + "comparison": { + "oldVersion": "v25.3.7", + "newVersion": "v26.1.1-rc2", + "timestamp": "2026-03-11T00:29:45.784Z" + }, + "summary": { + "newProperties": 27, + "changedDefaults": 6, + "changedDescriptions": 2, + "changedTypes": 6, + "deprecatedProperties": 0, + "removedProperties": 47, + "emptyDescriptions": 2 + }, + "details": { + "newProperties": [ + { + "name": "cloud_topics_compaction_interval_ms", + "type": "integer", + "default": 30000, + "description": "How often to trigger background compaction for cloud topics." + }, + { + "name": "cloud_topics_compaction_key_map_memory", + "type": "integer", + "default": 134217728, + "description": "Maximum number of bytes that may be used on each shard by cloud topics compaction key-offset maps." + }, + { + "name": "cloud_topics_compaction_max_object_size", + "type": "integer", + "default": 134217728, + "description": "Maximum size in bytes for L1 objects produced by cloud topics compaction." 
+ }, + { + "name": "cloud_topics_disable_level_zero_gc_for_tests", + "type": "boolean", + "default": false, + "description": "Disables the level-zero garbage collector in cloud topics. This property exists to simplify testing and shouldn't be set in production." + }, + { + "name": "cloud_topics_disable_metastore_flush_loop_for_tests", + "type": "boolean", + "default": false, + "description": "Disables the metastore flush loop in cloud topics. The property exists to simplify testing of read replicas and shouldn't be set in production." + }, + { + "name": "cloud_topics_fetch_debounce_enabled", + "type": "boolean", + "default": true, + "description": "Enables fetch debouncing in cloud topics. This mechanism guarantees that the broker fetches every object only once, improving performance and lowering cost." + }, + { + "name": "cloud_topics_l1_indexing_interval", + "type": "integer", + "default": 4194304, + "description": "The byte interval at which index entries are created within long term storage objects for cloud topics. Index entries are stored in the object metadata and enable efficient seeking by offset or timestamp within a partition. Lower values produce more index entries (better seek granularity) at the cost of a larger footer." + }, + { + "name": "cloud_topics_long_term_flush_interval", + "type": "integer", + "default": 600000, + "description": "Time interval at which long term storage metadata is flushed to object storage." + }, + { + "name": "cloud_topics_parallel_fetch_enabled", + "type": "boolean", + "default": true, + "description": "Enable parallel fetching in cloud topics. This mechanism improves throughput by allowing the broker to download data needed by the fetch request using multiple shards." + }, + { + "name": "cloud_topics_preregistered_object_ttl", + "type": "integer", + "default": 3600000, + "description": "Time-to-live for pre-registered L1 objects before they are expired."
+ }, + { + "name": "cloud_topics_reconciliation_max_interval", + "type": "integer", + "default": 10000, + "description": "Maximum reconciliation interval for adaptive scheduling." + }, + { + "name": "cloud_topics_reconciliation_max_object_size", + "type": "integer", + "default": 83886080, + "description": "Maximum size in bytes for L1 objects produced by the reconciler. With the default target fill ratio of 0.8, this gives an effective target object size of 64 MiB." + }, + { + "name": "cloud_topics_reconciliation_min_interval", + "type": "integer", + "default": 250, + "description": "Minimum reconciliation interval for adaptive scheduling. The reconciler will not run more frequently than this." + }, + { + "name": "cloud_topics_reconciliation_parallelism", + "type": "integer", + "default": 8, + "description": "Maximum number, per shard, of concurrent objects built by reconciliation." + }, + { + "name": "cloud_topics_reconciliation_slowdown_blend", + "type": "number", + "default": 0.4, + "description": "Blend factor for slowing down reconciliation (0.0 to 1.0). Higher values mean reconciliation lowers its frequency faster when trying to find a frequency that produces well-sized objects. Generally this should be lower than the speedup blend, because reconciliation has fewer opportunities to adapt its frequency when it runs less frequently." + }, + { + "name": "cloud_topics_reconciliation_speedup_blend", + "type": "number", + "default": 0.9, + "description": "Blend factor for speeding up reconciliation (0.0 to 1.0). Higher values mean reconciliation increases its frequency faster when trying to find a frequency that produces well-sized objects." + }, + { + "name": "cloud_topics_reconciliation_target_fill_ratio", + "type": "number", + "default": 0.8, + "description": "Target fill ratio for L1 objects. The reconciler adapts its interval to produce objects at approximately this fill level (0.0 to 1.0)."
+ }, + { + "name": "cloud_topics_upload_part_size", + "type": "integer", + "default": 16777216, + "description": "The part size in bytes used for multipart uploads. The minimum of 5 MiB is the smallest non-terminal part size allowed by cloud object storage providers." + }, + { + "name": "default_redpanda_storage_mode", + "type": "string", + "default": "unset", + "description": "Default storage mode for newly-created topics. Determines how topic data is stored: `local` for broker-local storage only, `tiered` for both local and object storage, `cloud` for object-only storage using the Cloud Topics architecture, or `unset` to use legacy remote.read/write configs for backwards compatibility." + }, + { + "name": "delete_topic_enable", + "type": "boolean", + "default": true, + "description": "Enable or disable topic deletion via the Kafka DeleteTopics API. When set to false, all topic deletion requests are rejected with error code 73 (TOPIC_DELETION_DISABLED). This is a cluster-wide safety setting that cannot be overridden by superusers. Topics in kafka_nodelete_topics are always protected regardless of this setting." + }, + { + "name": "internal_rpc_request_timeout_ms", + "type": "integer", + "default": 10000, + "description": "Default timeout for RPC requests between Redpanda nodes." + }, + { + "name": "log_compaction_max_priority_wait_ms", + "type": "integer", + "default": 3600000, + "description": "Maximum time a priority partition (for example, __consumer_offsets) can wait for compaction before preempting regular compaction." + }, + { + "name": "nested_group_behavior", + "type": "string", + "default": "none", + "description": "Behavior for handling nested groups when extracting groups from authentication tokens. Two options are available - none and suffix. With none, the group is left alone (e.g. '/group/child/grandchild'). Suffix will extract the final component from the nested group (e.g. '/group' -> 'group' and '/group/child/grandchild' -> 'grandchild')." 
+ }, + { + "name": "oidc_group_claim_path", + "type": "string", + "default": "$.groups", + "description": "JSON path to extract groups from the JWT payload." + }, + { + "name": "partition_autobalancing_node_autodecommission_timeout_sec", + "type": "integer", + "default": null, + "description": "When a node is unavailable for at least this timeout duration, it triggers Redpanda to decommission the node. This property applies only when `partition_autobalancing_mode` is set to `continuous`." + }, + { + "name": "schema_registry_avro_use_named_references", + "type": "object", + "description": "No description" + }, + { + "name": "schema_registry_enable_qualified_subjects", + "type": "boolean", + "default": false, + "description": "Enable parsing of qualified subject syntax (:.context:subject). When false, subjects are treated literally, as subjects in the default context. When true, qualified syntax is parsed to extract context and subject." + } + ], + "changedDefaults": [ + { + "name": "cloud_topics_reconciliation_interval", + "oldDefault": 10000, + "newDefault": null + }, + { + "name": "leader_balancer_mode", + "newDefault": "calibrated" + }, + { + "name": "log_compaction_disable_tx_batch_removal", + "newDefault": null + }, + { + "name": "log_compaction_tx_batch_removal_enabled", + "oldDefault": false, + "newDefault": true + }, + { + "name": "redpanda.storage.mode", + "oldDefault": null, + "newDefault": "unset" + }, + { + "name": "tls_v1_2_cipher_suites", + "oldDefault": "ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:AES256-GCM-SHA384:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:AES128-SHA:AES128-CCM:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:AES256-SHA:AES256-CCM", + "newDefault": 
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256:TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256:TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256" + } + ], + "changedDescriptions": [ + { + "name": "leader_balancer_mode", + "oldDescription": "No description", + "newDescription": "Mode of the leader balancer optimization strategy. `calibrated` uses a heuristic that balances leaders based on replica counts per shard. `random` randomly moves leaders to reduce load on heavily-loaded shards. Legacy values `greedy_balanced_shards` and `random_hill_climbing` are treated as `calibrated`." + }, + { + "name": "partition_autobalancing_node_availability_timeout_sec", + "oldDescription": "When a node is unavailable for at least this timeout duration, it triggers Redpanda to move partitions off of the node. This property applies only when `partition_autobalancing_mode` is set to `continuous`. ", + "newDescription": "When a node is unavailable for at least this timeout duration, it triggers Redpanda to move partitions off of the node. This property applies only when `partition_autobalancing_mode` is set to `continuous`." 
+ } + ], + "changedTypes": [ + { + "name": "cloud_topics_reconciliation_interval", + "oldType": "integer", + "newType": "string" + }, + { + "name": "coproc_supervisor_server", + "oldType": "deprecated_property", + "newType": "object" + }, + { + "name": "dashboard_dir", + "oldType": "deprecated_property", + "newType": "object" + }, + { + "name": "enable_central_config", + "oldType": "deprecated_property", + "newType": "object" + }, + { + "name": "leader_balancer_mode", + "oldType": "deprecated_property", + "newType": "string" + }, + { + "name": "log_compaction_disable_tx_batch_removal", + "oldType": "deprecated_property", + "newType": "string" + } + ], + "deprecatedProperties": [], + "removedProperties": [ + { + "name": "alter_topic_cfg_timeout_ms", + "type": "integer", + "description": "The duration, in milliseconds, that Redpanda waits for the replication of entries in the controller log when executing a request to alter topic configurations. This timeout ensures that configuration changes are replicated across the cluster before the alteration request is considered complete." + }, + { + "name": "cloud_storage_disable_metadata_consistency_checks", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "cloud_storage_reconciliation_ms", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "coproc_max_batch_size", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "coproc_max_inflight_bytes", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "coproc_max_ingest_bytes", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "coproc_offset_flush_interval_ms", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "create_topic_timeout_ms", + "type": "integer", + "description": "Timeout, in milliseconds, to wait for new topic creation." 
+ }, + { + "name": "datalake_disk_space_monitor_interval", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "enable_admin_api", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "enable_coproc", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "find_coordinator_timeout_ms", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "full_raft_configuration_recovery_pattern", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "id_allocator_replication", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "kafka_admin_topic_api_rate", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "kafka_client_group_byte_rate_quota", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "kafka_client_group_fetch_byte_rate_quota", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "kafka_memory_batch_size_estimate_for_fetch", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "kafka_quota_balancer_min_shard_throughput_bps", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "kafka_quota_balancer_min_shard_throughput_ratio", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "kafka_quota_balancer_node_period", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "kafka_quota_balancer_window", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "kafka_throughput_throttling_v2", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "log_compaction_adjacent_merge_self_compaction_count", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": 
"log_message_timestamp_alert_after_ms", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "log_message_timestamp_alert_before_ms", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "max_version", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "metadata_status_wait_timeout_ms", + "type": "integer", + "description": "Maximum time to wait in metadata request for cluster health to be refreshed." + }, + { + "name": "min_version", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "node_management_operation_timeout_ms", + "type": "integer", + "description": "Timeout for executing node management operations." + }, + { + "name": "raft_max_concurrent_append_requests_per_follower", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "raft_recovery_default_read_size", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "recovery_append_timeout_ms", + "type": "integer", + "description": "Timeout for append entry requests issued while updating a stale follower." + }, + { + "name": "rm_sync_timeout_ms", + "type": "integer", + "description": "Resource manager's synchronization timeout. Specifies the maximum time for this node to wait for the internal state machine to catch up with all events written by previous leaders before rejecting a request." 
+ }, + { + "name": "rm_violation_recovery_policy", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "schema_registry_protobuf_renderer_v2", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "seed_server_meta_topic_partitions", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "seq_table_min_size", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "target_fetch_quota_byte_rate", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "target_quota_byte_rate", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "tm_sync_timeout_ms", + "type": "integer", + "description": "Transaction manager's synchronization timeout. Maximum time to wait for internal state machine to catch up before rejecting a request." + }, + { + "name": "tm_violation_recovery_policy", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "transaction_coordinator_replication", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "tx_registry_log_capacity", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "tx_registry_sync_timeout_ms", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "use_scheduling_groups", + "type": "deprecated_property", + "description": "No description" + }, + { + "name": "wait_for_leader_timeout_ms", + "type": "integer", + "description": "Timeout to wait for leadership in metadata cache." 
+ } + ], + "emptyDescriptions": [ + { + "name": "redpanda.remote.allowgaps", + "type": "boolean" + }, + { + "name": "redpanda.virtual.cluster.id", + "type": "string" + } + ] + } +} \ No newline at end of file diff --git a/modules/ROOT/nav.adoc b/modules/ROOT/nav.adoc index 3c8abcbce6..342239d1e0 100644 --- a/modules/ROOT/nav.adoc +++ b/modules/ROOT/nav.adoc @@ -36,7 +36,9 @@ ** xref:develop:kafka-clients.adoc[Kafka Compatibility] ** xref:develop:benchmark.adoc[] ** xref:develop:http-proxy.adoc[] -** xref:develop:config-topics.adoc[] +** xref:develop:manage-topics/index.adoc[] +*** xref:develop:manage-topics/config-topics.adoc[] +*** xref:develop:manage-topics/cloud-topics.adoc[] ** xref:console:ui/edit-topic-configuration.adoc[Edit Topic Configuration] ** xref:develop:produce-data/index.adoc[Produce Data] *** xref:develop:produce-data/configure-producers.adoc[] @@ -112,6 +114,7 @@ ***** xref:manage:kubernetes/storage/k-emptydir.adoc[emptyDir] **** xref:manage:kubernetes/storage/k-resize-persistentvolumes.adoc[Expand PersistentVolumes] **** xref:manage:kubernetes/storage/k-delete-persistentvolume.adoc[Delete PersistentVolumes] +*** xref:manage:kubernetes/k-cloud-topics.adoc[Cloud Topics] *** xref:manage:kubernetes/tiered-storage/index.adoc[Tiered Storage] **** xref:manage:kubernetes/tiered-storage/k-tiered-storage.adoc[Use Tiered Storage] **** xref:manage:kubernetes/tiered-storage/k-fast-commission-decommission.adoc[] @@ -134,6 +137,7 @@ ***** xref:manage:kubernetes/security/authentication/k-authentication.adoc[Enable Authentication] ***** xref:manage:kubernetes/security/authentication/k-user-controller.adoc[Manage Users and ACLs (Operator)] ***** xref:manage:kubernetes/security/authorization/k-role-controller.adoc[Manage Roles and ACLs (Operator)] +***** xref:manage:kubernetes/security/authentication/k-schema-registry-acls.adoc[Manage Schema Registry ACLs (Operator)] **** xref:manage:kubernetes/security/k-audit-logging.adoc[Audit Logging] *** 
xref:manage:kubernetes/k-rack-awareness.adoc[Rack Awareness] *** xref:manage:kubernetes/k-remote-read-replicas.adoc[Remote Read Replicas] @@ -163,6 +167,7 @@ *** xref:manage:audit-logging.adoc[Audit Logging] **** xref:manage:audit-logging/audit-log-samples.adoc[Sample Audit Log Messages] *** xref:manage:cluster-maintenance/disk-utilization.adoc[] +*** xref:manage:cluster-maintenance/about-throughput-quotas.adoc[] *** xref:manage:cluster-maintenance/manage-throughput.adoc[Manage Throughput] *** xref:manage:cluster-maintenance/compaction-settings.adoc[Compaction Settings] *** xref:manage:cluster-maintenance/configure-client-connections.adoc[] @@ -172,6 +177,7 @@ *** xref:manage:security/authentication.adoc[Authentication] *** xref:manage:security/authorization/index.adoc[Authorization] **** xref:manage:security/authorization/rbac.adoc[Role-Based Access Control (RBAC)] +**** xref:manage:security/authorization/gbac.adoc[Group-Based Access Control (GBAC)] **** xref:manage:security/authorization/acl.adoc[Access Control Lists (ACLs)] *** xref:manage:security/fips-compliance.adoc[FIPS Compliance] *** xref:manage:security/encryption.adoc[] diff --git a/modules/console/pages/ui/edit-topic-configuration.adoc b/modules/console/pages/ui/edit-topic-configuration.adoc index 75dcd24042..ab9207a367 100644 --- a/modules/console/pages/ui/edit-topic-configuration.adoc +++ b/modules/console/pages/ui/edit-topic-configuration.adoc @@ -18,4 +18,4 @@ == Suggested reading - xref:reference:properties/topic-properties.adoc[] -- xref:develop:config-topics.adoc[] \ No newline at end of file +- xref:develop:manage-topics/config-topics.adoc[] \ No newline at end of file diff --git a/modules/deploy/pages/console/kubernetes/deploy.adoc b/modules/deploy/pages/console/kubernetes/deploy.adoc index c63e6337c5..83cce815bb 100644 --- a/modules/deploy/pages/console/kubernetes/deploy.adoc +++ b/modules/deploy/pages/console/kubernetes/deploy.adoc @@ -667,6 +667,8 @@ spec: == Monitoring +Configure metrics 
exposure and Prometheus scraping for Redpanda Console. + Enable monitoring for Redpanda Console: [,yaml] @@ -678,6 +680,88 @@ config: port: 9090 ---- +=== Prometheus ServiceMonitor + +If you use the https://github.com/prometheus-operator/prometheus-operator[Prometheus Operator^], deploy a `ServiceMonitor` resource alongside Redpanda Console. Prometheus then discovers and scrapes Console metrics from the `/admin/metrics` endpoint. + +[tabs] +====== +Operator:: ++ +-- + +To enable the ServiceMonitor in the Console custom resource, set `monitoring.enabled` to `true`: + +[,yaml] +---- +apiVersion: cluster.redpanda.com/v1alpha2 +kind: Console +metadata: + name: redpanda-console + namespace: redpanda +spec: + monitoring: + enabled: true <1> + scrapeInterval: "30s" <2> + labels: <3> + release: kube-prometheus-stack + cluster: + clusterRef: + name: redpanda +---- + +<1> Set to `true` to create a `ServiceMonitor` resource. Default: `false`. +<2> How often Prometheus scrapes the metrics endpoint. Default: `1m`. +<3> Additional labels to apply to the `ServiceMonitor`. Match your Prometheus Operator's `serviceMonitorSelector` by applying the same labels here. + +Apply the Console CR: + +[,bash] +---- +kubectl apply -f console.yaml --namespace redpanda +---- + +-- +Helm:: ++ +-- + +To enable the ServiceMonitor in the Console Helm chart, add the following to your `console-values.yaml`: + +[,yaml] +---- +monitoring: + enabled: true <1> + scrapeInterval: "30s" <2> + labels: {} <3> +---- + +<1> Set to `true` to create a `ServiceMonitor` resource. Default: `false`. +<2> How often Prometheus scrapes the metrics endpoint. Default: `1m`. +<3> Additional labels to apply to the `ServiceMonitor`. Match your Prometheus Operator's `serviceMonitorSelector` by applying the same labels here. 
For example: ++ +[,yaml] +---- +monitoring: + enabled: true + labels: + release: kube-prometheus-stack +---- + +If you deploy Redpanda Console as a subchart of the Redpanda Helm chart, configure monitoring under the `console` key. All `monitoring` options are available under this key. + +[,yaml] +---- +console: + monitoring: + enabled: true +---- + +-- +====== + +When the Console server is configured with TLS (`config.server.tls.enabled: true`), the ServiceMonitor uses HTTPS and configures CA validation for scraping. + == Troubleshooting * **Connection refused**: Verify Redpanda broker addresses and network policies diff --git a/modules/deploy/pages/redpanda/manual/production/dev-deployment.adoc b/modules/deploy/pages/redpanda/manual/production/dev-deployment.adoc index 36e5db81e1..44aaf8cd91 100644 --- a/modules/deploy/pages/redpanda/manual/production/dev-deployment.adoc +++ b/modules/deploy/pages/redpanda/manual/production/dev-deployment.adoc @@ -8,7 +8,7 @@ You can deploy Redpanda using well-known configuration properties optimized for [NOTE] ==== -* Development mode enables write caching by default. This is a relaxed mode of xref:develop:produce-data/configure-producers.adoc#acksall[`acks=all`] that acknowledges a message as soon as it is received and acknowledged on a majority of brokers, without waiting for it to fsync to disk. Write caching provides lower latency while still ensuring that a majority of brokers acknowledge the write. For more information, or to disable this, see xref:develop:config-topics.adoc#configure-write-caching[write caching]. +* Development mode enables write caching by default. This is a relaxed mode of xref:develop:produce-data/configure-producers.adoc#acksall[`acks=all`] that acknowledges a message as soon as it is received and acknowledged on a majority of brokers, without waiting for it to fsync to disk. Write caching provides lower latency while still ensuring that a majority of brokers acknowledge the write. 
For more information, or to disable this, see xref:develop:manage-topics/config-topics.adoc#configure-write-caching[write caching]. * Development mode also bypasses `fsync`, acknowledging messages before they're stored to disk. This reduces the durability of messages, could cause potential data loss, and could give unrealistic performance characteristics for a production environment. ==== diff --git a/modules/deploy/partials/high-availability.adoc b/modules/deploy/partials/high-availability.adoc index 5801bf4f76..3e0b843de6 100644 --- a/modules/deploy/partials/high-availability.adoc +++ b/modules/deploy/partials/high-availability.adoc @@ -111,7 +111,7 @@ endif::[] A multi-region deployment is similar to a multi-AZ deployment, in that it needs at least three regions to counter the loss of a single region. Note that this deployment strategy increases latency due to the physical distance between regions. In addition to higher produce and end-to-end latency and increased costs, multi-region deployments require careful tuning. Redpanda recommends that you work closely with Redpanda’s Customer Success team when implementing a multi-region deployment. Also consider the following strategies to mitigate these challenges: -* Configure xref:develop:produce-data/leader-pinning.adoc#configure-leader-pinning[leader pinning] to ensure that topic partition leaders are geographically closer to clients. This can help lower network costs and latency by routing produce requests to brokers located in specific AZs. +* Configure xref:develop:produce-data/leader-pinning.adoc[Leader Pinning] to ensure that topic partition leaders are geographically closer to clients. This can help lower network costs and latency by routing produce requests to brokers located in specific AZs. * If your produce latency exceeds your requirements, you can configure producers to have `acks=1` instead of `acks=all`. 
This reduces latency by only waiting for the leader to acknowledge, rather than waiting for all brokers to respond. However, using this configuration can decrease message durability. If the partition leader goes offline, you may lose any messages that are acknowledged but not yet replicated. === Multi-cluster deployment diff --git a/modules/develop/pages/kafka-clients.adoc b/modules/develop/pages/kafka-clients.adoc index 3f7815ded2..ba55a37e30 100644 --- a/modules/develop/pages/kafka-clients.adoc +++ b/modules/develop/pages/kafka-clients.adoc @@ -60,7 +60,7 @@ endif::[] * HTTP Proxy (pandaproxy): Unlike other REST proxy implementations in the Kafka ecosystem, Redpanda HTTP Proxy does not support topic and ACLs CRUD through the HTTP Proxy. HTTP Proxy is designed for clients producing and consuming data that do not perform administrative functions. ifdef::env-cloud[] + -* The `delete.retention.ms` topic configuration in Kafka is not supported. Tombstone markers are not removed for topics with a `compact` xref:get-started:config-topics.adoc#change-the-cleanup-policy[cleanup policy]. Redpanda only deletes tombstone markers when topics with a cleanup policy of `compact,delete` have reached their xref:get-started:create-topic.adoc[retention limits]. +* The `delete.retention.ms` topic configuration in Kafka is not supported for Tiered Storage topics. Cloud Topics and local storage topics support Tombstone marker deletion using `delete.retention.ms`, but in Tiered Storage topics, Tombstone markers are only removed in accordance with normal topic retention, and only if the cleanup policy is `delete` or `compact, delete`. 
endif::[] ifndef::env-cloud[] + diff --git a/modules/develop/pages/manage-topics/cloud-topics.adoc b/modules/develop/pages/manage-topics/cloud-topics.adoc new file mode 100644 index 0000000000..ac7275f48d --- /dev/null +++ b/modules/develop/pages/manage-topics/cloud-topics.adoc @@ -0,0 +1,96 @@ += Manage Cloud Topics +:description: Cloud Topics are "diskless" Redpanda topics that enable you to store data directly to object storage to trade off latency for lower costs. +:page-topic-type: how-to +:personas: streaming_developer, platform_admin +:learning-objective-1: Describe the latency and cost trade-offs of Cloud Topics compared to standard Redpanda topics +:learning-objective-2: Create a Cloud Topic using rpk after enabling Cloud Topics on your cluster +:learning-objective-3: Identify Cloud Topics limitations and configurations that reduce cross-AZ networking costs +// tag::single-source[] + +ifndef::env-cloud[] +[NOTE] +==== +include::shared:partial$enterprise-license.adoc[] +==== +endif::[] + +Starting in v26.1, Redpanda provides glossterm:Cloud Topic[,Cloud Topics] to support multi-modal streaming workloads in the most cost-effective way possible: as a per-topic configuration running mixed latency workloads. While standard Redpanda +ifdef::env-cloud[] +xref:get-started:config-topics.adoc[topics] +endif::[] +ifndef::env-cloud[] +xref:develop:manage-topics/config-topics.adoc[topics] +endif::[] +that use local storage or Tiered Storage are ideal for latency-sensitive workloads (for example, for audit logs or analytics), Cloud Topics are optimized for latency-tolerant, high-throughput workloads where cross-AZ networking charges are a major consideration that can become the dominant cost driver at high throughput. These workloads can include observability streams, offline analytics, AI/ML model training data feeds, or development environments that have flexible latency requirements. 
+ +Instead of replicating every byte across expensive network links, Cloud Topics leverage durable, inexpensive cloud storage (S3, ADLS, GCS, MinIO) as the primary mechanism to both replicate data and serve it to consumers. This eliminates over 90% of the cost of replicating data over network links in multi-AZ clusters. The end-to-end latency experienced when using Cloud Topics can range from 500 ms to as high as a few seconds with different object stores. Lower latencies may be achievable in certain environments, but Cloud Topics is optimized for throughput rather than low latency or tightly constrained tail latency. This latency profile is often acceptable for many streaming workloads, and can unlock new streaming use cases that previously were not cost effective. + +With Cloud Topics, data from the client is not acknowledged until it is uploaded to object storage. This maintains durability in the face of infrastructure failures, but results in an increase in both produce latency and end-to-end latency, driven by both batching of produced data and the inherent latency of the underlying object store. You should generally expect end-to-end latencies of 1-2 seconds with public cloud stores. + +== Prerequisites + +ifdef::env-cloud[] +- xref:manage:rpk/rpk-install.adoc[Install rpk] v26.1 or later. +endif::[] +ifndef::env-cloud[] +- xref:get-started:rpk-install.adoc[] v26.1 or later. +- xref:manage:tiered-storage.adoc#set-up-tiered-storage[Enable cloud storage] on your Redpanda cluster. ++ +[NOTE] +==== +If you plan to use Cloud Topics for all new topics in a Redpanda cluster, be sure to set the following cluster-level property: +[,bash] +---- +default_redpanda_storage_mode=cloud +---- +This ensures that newly-created Redpanda topics are Cloud Topics by default. +For details, see xref:manage:tiered-storage.adoc#enable-tiered-storage-for-a-cluster[Enable Tiered Storage for a cluster]. 
+==== + +- xref:manage:tiered-storage.adoc#configure-object-storage[Configure object storage]. +- Ensure that you have an Enterprise license. ++ +To check your license status, run: ++ +[,bash] +---- +rpk cluster license info +---- + +endif::[] + +== Limitations + +- Shadow links do not currently support Cloud Topics. +- After a Cloud Topic is created, it cannot be converted back to a standard Redpanda topic. Conversely, existing topics created as standard topics cannot be converted to Cloud Topics. + +== Enable Cloud Topics + +To enable Cloud Topics for a cluster: + +[,bash] +---- +rpk cluster config set cloud_topics_enabled=true +---- + +NOTE: This configuration update requires a restart to take effect. + + +After enabling Cloud Topics, you can proceed to create new Cloud Topics: + +[,bash] +---- +rpk topic create -c redpanda.storage.mode=cloud +---- + +[,console] +---- +TOPIC STATUS +audit.analytics.may2025 OK +---- + +You can make a topic a Cloud Topic only at topic creation time. + +In addition to replication, cross-AZ ingress (producer) and egress (consumer) traffic can also contribute substantially to cloud networking costs. When running multi-AZ clusters in general, Redpanda strongly recommends using xref:develop:consume-data/follower-fetching.adoc[Follower Fetching], which allows consumers to avoid crossing network zones. When possible, you can use xref:develop:produce-data/leader-pinning.adoc[leader pinning], which positions a topic's partition leader close to the producers, providing a similar benefit for ingress traffic. These features can add additional savings to the replication cost savings of Cloud Topics. 
+ +// end::single-source[] diff --git a/modules/develop/pages/manage-topics/config-topics.adoc b/modules/develop/pages/manage-topics/config-topics.adoc new file mode 100644 index 0000000000..09d5c0b191 --- /dev/null +++ b/modules/develop/pages/manage-topics/config-topics.adoc @@ -0,0 +1,368 @@ += Manage Topics +:page-categories: Clients, Development +:description: Learn how to create topics, update topic configurations, and delete topics or records. +// tag::single-source[] + +include::develop:partial$topic-defaults.adoc[] + +== Create a topic + +Creating a topic can be as simple as specifying a name for your topic on the command line. For example, to create a topic named `xyz`, run: + +[,bash] +---- +rpk topic create xyz +---- + +ifndef::env-cloud[] +This command creates a topic named `xyz` with one partition and one replica, because these are the default values set in the cluster configuration file. Replicas are copies of partitions that are distributed across different brokers, so if one broker goes down, other brokers still have a copy of the data. + +endif::[] + +ifdef::env-cloud[] +This command creates a topic named `xyz` with one partition and three replicas, because these are the default values set in the cluster configuration file. Replicas are copies of partitions that are distributed across different brokers, so if one broker goes down, other brokers still have a copy of the data. + +Redpanda Cloud supports 40,000 topics per cluster. + +endif::[] + +=== Choose the number of partitions + +A partition acts as a log file where topic data is written. Dividing topics into partitions allows producers to write messages in parallel and consumers to read messages in parallel. The higher the number of partitions, the greater the throughput. + +TIP: As a general rule, select a number of partitions that corresponds to the maximum number of consumers in any consumer group that will consume the data.
+ +For example, suppose you plan to create a consumer group with 10 consumers. To create topic `xyz` with 10 partitions, run: + +[,bash] +---- +rpk topic create xyz -p 10 +---- + +ifndef::env-cloud[] +[[choose-the-replication-factor]] +=== Choose the replication factor + +The default replication factor in the cluster configuration is set to 1. By choosing a replication factor greater than 1, you ensure that each partition has a copy of its data on at least one other broker. One replica acts as the leader, and the other replicas are followers. + +To specify a replication factor of 3 for topic `xyz`, run: + +[,bash] +---- +rpk topic create xyz -r 3 +---- + +NOTE: The replication factor must be an odd number. Redpanda Data recommends a replication factor of 3 for most use cases. Administrators may set a minimum required replication factor for any new topic in the cluster through the cluster-level xref:reference:cluster-properties.adoc#minimum_topic_replications[`minimum_topic_replications`] property. + +TIP: If you enable xref:manage:tiered-storage.adoc[Tiered Storage] on a topic, you can then use xref:manage:topic-recovery.adoc[topic recovery] to restore data for a deleted topic. + +endif::[] + +ifndef::env-cloud[] +[[choose-a-storage-mode]] +=== Choose a storage mode + +Starting in Redpanda v26.1, you can set the `redpanda.storage.mode` topic property to control how a topic stores data: + +[cols="1,3"] +|=== +| Value | Behavior + +| `unset` (default) +| Legacy behavior. Tiered Storage is controlled by the `redpanda.remote.read` and `redpanda.remote.write` topic properties and their cluster-level defaults (`cloud_storage_enable_remote_read` and `cloud_storage_enable_remote_write`). + +| `local` +| Topic data is stored only on the broker's local disk. Object storage upload is disabled for the topic, regardless of `redpanda.remote.read` and `redpanda.remote.write` values. + +| `tiered` +| Data is stored on local disk and uploaded to object storage. 
Enables xref:manage:tiered-storage.adoc[Tiered Storage] for the topic regardless of `redpanda.remote.read` and `redpanda.remote.write` values. + +| `cloud` +| Data is stored durably in object storage using the glossterm:Cloud Topic[,Cloud Topics] architecture. Local storage is used only as a write buffer. See xref:develop:manage-topics/cloud-topics.adoc[]. +|=== + +To set the storage mode at topic creation time: + +[,bash] +---- +rpk topic create -c redpanda.storage.mode=tiered +---- + +When `redpanda.storage.mode` is set to `local`, `tiered`, or `cloud`, the `redpanda.remote.read` and `redpanda.remote.write` topic properties have no effect on the topic. + +To apply a default storage mode to all new topics in a cluster, set the `default_redpanda_storage_mode` cluster property: + +[,bash] +---- +rpk cluster config set default_redpanda_storage_mode=tiered +---- + +To set `local` as the default storage mode for all new topics in a cluster: + +[,bash] +---- +rpk cluster config set default_redpanda_storage_mode=local +---- +If `default_redpanda_storage_mode` is not configured (the default), new topics use `unset` mode and Tiered Storage behavior is inherited from the cluster-level `cloud_storage_enable_remote_write` and `cloud_storage_enable_remote_read` properties. + +endif::[] + +== Update topic configurations + +After you create a topic, you can update the topic property settings for all new data written to it. For example, you can add partitions or change the cleanup policy. + +=== Add partitions + +You can assign a certain number of partitions when you create a topic, and add partitions later. For example, suppose you add brokers to your cluster, and you want to take advantage of the additional processing power. To increase the number of partitions for existing topics, run: + +[,bash] +---- +rpk topic add-partitions [TOPICS...] --num [#] +---- + +Note that `--num <#>` is the number of partitions to _add_, not the total number of partitions. 
+ +include::develop:partial$balance-existing-topic-redistribution.adoc[] + +ifndef::env-cloud[] +[[change-the-replication-factor]] +=== Change the replication factor + +Suppose you create a topic with the default replication factor of 1 (which is specified in the cluster properties configuration file). Now you want to change the replication factor to 3, so you can have two backups of topic data in case a broker goes down. To set the replication factor to 3, run: + +[,bash] +---- +rpk topic alter-config [TOPICS...] --set replication.factor=3 +---- + +NOTE: The replication factor can't exceed the number of Redpanda brokers. If you try to set a replication factor greater than the number of brokers, the request is rejected. + +endif::[] + +ifndef::env-cloud[] +[[change-the-storage-mode]] +=== Change the storage mode + +You can change a topic's `redpanda.storage.mode` after creation, with the following restrictions: + +[cols="1,1,1,3"] +|=== +| From | To | Permitted | Notes + +| `local` +| `tiered` +| Yes +| Enables Tiered Storage for the topic. Object storage must be configured. + +| `tiered` +| `local` +| With caution +| Disables object storage uploads. Redpanda strongly recommends against repeatedly toggling this setting, as it can result in data gaps in Tiered Storage. + +| Any +| `cloud` +| No +| Cloud Topics can only be set at topic creation time. + +| `cloud` +| Any +| No +| A Cloud Topic cannot be converted to a local or Tiered Storage topic. +|=== + +For example, to transition an existing local topic to Tiered Storage: + +[,bash] +---- +rpk topic alter-config --set redpanda.storage.mode=tiered +---- + +endif::[] + + +=== Change the cleanup policy + +The cleanup policy determines how to clean up the partition log files when they reach a certain size: + +* `delete` deletes data based on age or log size. Topics retain all records until then. +* `compact` compacts the data by only keeping the latest values for each KEY. +* `compact,delete` combines both methods. 
+ +Unlike compacted topics, which keep only the most recent message for a given key, topics configured with a `delete` cleanup policy provide a running history of all changes for those topics. + +include::develop:partial$topic-properties-warning.adoc[] + +For example, to change a topic's policy to `compact`, run: + +[,bash] +---- +rpk topic alter-config [TOPICS...] --set cleanup.policy=compact +---- + +ifndef::env-cloud[] +For details on compaction in Redpanda, see xref:manage:cluster-maintenance/compaction-settings.adoc[Compaction settings]. + +endif::[] + +[[configure-write-caching]] +=== Configure write caching + +Write caching is a relaxed mode of xref:develop:produce-data/configure-producers.adoc#acksall[`acks=all`] that provides better performance at the expense of durability. It acknowledges a message as soon as it is received and acknowledged on a majority of brokers, without waiting for it to be written to disk. This provides lower latency while still ensuring that a majority of brokers acknowledge the write. + +Write caching applies to user topics. It does not apply to transactions or consumer offsets: data written in the context of a transaction and consumer offset commits is always written to disk and fsynced before being acknowledged to the client. + +ifndef::env-cloud[] +NOTE: For clusters in xref:reference:rpk/rpk-redpanda/rpk-redpanda-mode.adoc#development-mode[development mode], write caching is enabled by default. For clusters in production mode, it is disabled by default. + +endif::[] + +Only enable write caching on workloads that can tolerate some data loss in the case of multiple, simultaneous broker failures. Leaving write caching disabled safeguards your data against complete data center or availability zone failures.
+ +ifndef::env-cloud[] + +==== Configure at cluster level + +To enable write caching by default in all user topics, set the cluster-level property xref:reference:cluster-properties.adoc#write_caching_default[`write_caching_default`]: + +`rpk cluster config set write_caching_default=true` + +With `write_caching_default` set to true at the cluster level, Redpanda fsyncs to disk according to xref:reference:cluster-properties.adoc#raft_replica_max_pending_flush_bytes[`raft_replica_max_pending_flush_bytes`] and xref:reference:cluster-properties.adoc#raft_replica_max_flush_delay_ms[`raft_replica_max_flush_delay_ms`], whichever is reached first. + +endif::[] + +==== Configure at topic level + +To override the cluster-level setting at the topic level, set the topic-level property `write.caching`: + +`rpk topic alter-config my_topic --set write.caching=true` + +With `write.caching` enabled at the topic level, Redpanda fsyncs to disk according to `flush.ms` and `flush.bytes`, whichever is reached first. + +=== Remove a configuration setting + +You can remove a configuration that overrides the default setting, and the setting will use the default value again. For example, suppose you altered the cleanup policy to use `compact` instead of the default, `delete`. Now you want to return the policy setting to the default. To remove the configuration setting `cleanup.policy=compact`, run `rpk topic alter-config` with the `--delete` flag: + +[,bash] +---- +rpk topic alter-config [TOPICS...] --delete cleanup.policy +---- + +== List topic configuration settings + +To display all the configuration settings for a topic, run: + +[,bash] +---- +rpk topic describe -c +---- + +The `-c` flag limits the command output to just the topic configurations. This command is useful for checking the default configuration settings before you make any changes and for verifying changes after you make them. 
+ +The following command output displays after running `rpk topic describe test-topic`, where `test-topic` was created with default settings: + +ifndef::env-cloud[] +[,bash] +---- +rpk topic describe test_topic +SUMMARY +======= +NAME test_topic +PARTITIONS 1 +REPLICAS 1 + +CONFIGS +======= +KEY VALUE SOURCE +cleanup.policy delete DYNAMIC_TOPIC_CONFIG +compression.type producer DEFAULT_CONFIG +max.message.bytes 1048576 DEFAULT_CONFIG +message.timestamp.type CreateTime DEFAULT_CONFIG +redpanda.datapolicy function_name: script_name: DEFAULT_CONFIG +redpanda.remote.delete true DEFAULT_CONFIG +redpanda.remote.read false DEFAULT_CONFIG +redpanda.remote.write false DEFAULT_CONFIG +redpanda.storage.mode unset DEFAULT_CONFIG +retention.bytes -1 DEFAULT_CONFIG +retention.local.target.bytes -1 DEFAULT_CONFIG +retention.local.target.ms 86400000 DEFAULT_CONFIG +retention.ms 604800000 DEFAULT_CONFIG +segment.bytes 1073741824 DEFAULT_CONFIG +---- + +Suppose you add two partitions, and increase the number of replicas to 3. 
The new command output confirms the changes in the `SUMMARY` section: + +[.no-copy] +---- +SUMMARY +======= +NAME test_topic +PARTITIONS 3 +REPLICAS 3 +---- + +endif::[] + +ifdef::env-cloud[] +[,bash] +---- +rpk topic describe test_topic +SUMMARY +======= +NAME test_topic +PARTITIONS 1 +REPLICAS 3 + +CONFIGS +======= +KEY VALUE SOURCE +cleanup.policy delete DYNAMIC_TOPIC_CONFIG +compression.type producer DEFAULT_CONFIG +max.message.bytes 20971520 DEFAULT_CONFIG +message.timestamp.type CreateTime DEFAULT_CONFIG +redpanda.datapolicy function_name: script_name: DEFAULT_CONFIG +redpanda.remote.delete true DEFAULT_CONFIG +redpanda.remote.read false DEFAULT_CONFIG +redpanda.remote.write false DEFAULT_CONFIG +retention.bytes -1 DEFAULT_CONFIG +retention.local.target.bytes -1 DEFAULT_CONFIG +retention.local.target.ms 86400000 DEFAULT_CONFIG +retention.ms 604800000 DEFAULT_CONFIG +segment.bytes 1073741824 DEFAULT_CONFIG +---- + +endif::[] + +== Delete a topic + +To delete a topic, run: + +[,bash] +---- +rpk topic delete <topic> +---- + +When a topic is deleted, its underlying data is deleted, too. + +To delete multiple topics at a time, provide a space-separated list. For example, to delete two topics named `topic1` and `topic2`, run: + +[,bash] +---- +rpk topic delete topic1 topic2 +---- + +You can also use the `-r` flag to specify one or more regular expressions; then, any topic names that match the pattern you specify are deleted. For example, to delete topics with names that start with "`f`" and end with "`r`", run: + +[,bash] +---- +rpk topic delete -r '^f.*' '.*r$' +---- + +Note that the first regular expression must start with the `^` symbol, and the last expression must end with the `$` symbol. This requirement helps prevent accidental deletions. 
+ +include::develop:partial$delete-topic-records.adoc[] + +== Next steps + +xref:develop:produce-data/configure-producers.adoc[] + +end::single-source[] diff --git a/modules/develop/pages/manage-topics/index.adoc b/modules/develop/pages/manage-topics/index.adoc new file mode 100644 index 0000000000..d1eb45c4f9 --- /dev/null +++ b/modules/develop/pages/manage-topics/index.adoc @@ -0,0 +1,4 @@ += Topics +:page-categories: Clients, Development +:description: Learn how to manage topics in Redpanda, including creation, configuration, and advanced features. +:page-layout: index diff --git a/modules/develop/pages/produce-data/configure-producers.adoc b/modules/develop/pages/produce-data/configure-producers.adoc index 18de11b961..77a9bc4a3a 100644 --- a/modules/develop/pages/produce-data/configure-producers.adoc +++ b/modules/develop/pages/produce-data/configure-producers.adoc @@ -74,7 +74,7 @@ Kafka, a message is considered acknowledged without the requirement that it has been fsynced. Messages that have not been fsynced to disk may be lost in the event of a broker crash. So when using `acks=all`, the Redpanda default configuration is more resilient than Kafka's. You can also consider -using xref:develop:config-topics.adoc#configure-write-caching[write caching], which is a relaxed mode of `acks=all` that acknowledges a message as soon as it is received and acknowledged on a majority of brokers, without waiting for it to fsync to disk. This provides lower latency while still ensuring that a majority of brokers acknowledge the write. +using xref:develop:manage-topics/config-topics.adoc#configure-write-caching[write caching], which is a relaxed mode of `acks=all` that acknowledges a message as soon as it is received and acknowledged on a majority of brokers, without waiting for it to fsync to disk. This provides lower latency while still ensuring that a majority of brokers acknowledge the write. 
endif::[] diff --git a/modules/develop/pages/produce-data/leader-pinning.adoc b/modules/develop/pages/produce-data/leader-pinning.adoc index 2d88adab35..74cb3a0da8 100644 --- a/modules/develop/pages/produce-data/leader-pinning.adoc +++ b/modules/develop/pages/produce-data/leader-pinning.adoc @@ -1,10 +1,22 @@ -= Leader Pinning -:description: Learn about leader pinning and how to configure a preferred partition leader location based on cloud availability zones or regions. += Configure Leader Pinning +:description: Learn about Leader Pinning and how to configure a preferred partition leader location based on cloud availability zones or regions. +:page-topic-type: how-to +:personas: streaming_developer, platform_admin // tag::single-source[] +:learning-objective-1: Configure preferred partition leader placement using rack labels +:learning-objective-2: Configure ordered rack preference for priority-based leader failover +:learning-objective-3: Identify conditions where Leader Pinning cannot place leaders in preferred racks -Produce requests that write data to Redpanda topics go through the topic partition leader, which syncs messages across its follower replicas. For a Redpanda cluster deployed across multiple availability zones (AZs), leader pinning ensures that a topic's partition leaders are geographically closer to clients, which helps decrease networking costs and guarantees lower latency. +Produce requests that write data to Redpanda topics are routed through the topic partition leader, which syncs messages across its follower replicas. For a Redpanda cluster deployed across multiple availability zones (AZs), Leader Pinning ensures that a topic's partition leaders are geographically closer to clients, which helps decrease networking costs and guarantees lower latency. 
+ +If consumers are located in the same preferred region or AZ for Leader Pinning, and you have not set up xref:develop:consume-data/follower-fetching.adoc[follower fetching], Leader Pinning can also help reduce networking costs on consume requests. + +After reading this page, you will be able to: + +* [ ] {learning-objective-1} +* [ ] {learning-objective-2} +* [ ] {learning-objective-3} -If consumers are located in the same preferred region or AZ for leader pinning, and you have not set up xref:develop:consume-data/follower-fetching.adoc[follower fetching], leader pinning can also help reduce networking costs on consume requests. ifndef::env-cloud[] == Prerequisites @@ -14,64 +26,160 @@ ifndef::env-cloud[] include::shared:partial$enterprise-license.adoc[] ==== -Before you can enable leader pinning, you must xref:manage:rack-awareness.adoc#configure-rack-awareness[configure rack awareness] on the cluster. If the config_ref:enable_rack_awareness,true,properties/cluster-properties[] cluster configuration property is set to `false`, leader pinning is disabled across the cluster. +Before you can enable Leader Pinning, you must xref:manage:rack-awareness.adoc#configure-rack-awareness[configure rack awareness] on the cluster. If the config_ref:enable_rack_awareness,true,properties/cluster-properties[] cluster configuration property is set to `false`, Leader Pinning is disabled across the cluster. endif::[] ifndef::env-cloud[] -== Configure leader pinning - -You can use both a topic configuration property and a cluster configuration property to configure leader pinning. +== Set leader rack preferences -You can set the topic configuration property for individual topics only, or set the cluster-wide configuration property that will enable leader pinning by default for all topics. You can also use a combination in which a default setting applies across the cluster, and you toggle the setting on or off for specific topics. 
+You can configure Leader Pinning at the topic level, the cluster level, or both. Set the topic configuration property to configure individual topics, or set the cluster configuration property to apply a default for all topics. You can also combine both: apply a cluster-wide default, then override specific topics with the topic property. This configuration is based on the following scenario: you have Redpanda deployed in a multi-AZ or multi-region cluster, and you have configured each broker so that the config_ref:rack,true,properties/broker-properties[] configuration property contains racks corresponding to the AZs: -* Set the topic configuration property xref:reference:properties/topic-properties.adoc#redpandaleaderspreference[`redpanda.leaders.preference`]. The property accepts the following string values: +* Set the topic configuration property xref:reference:properties/topic-properties.adoc#redpanda-leaders-preference[`redpanda.leaders.preference`]. This property accepts the following string values: + -- -** `none`: Opt out the topic from leader pinning. +** `none`: Disable Leader Pinning for the topic. ** `racks:[,,...]`: Specify the preferred location (rack) of all topic partition leaders. The list can contain one or more racks, and you can list the racks in any order. Spaces in the list are ignored, for example: `racks:rack1,rack2` and `racks: rack1, rack2` are equivalent. You cannot specify empty racks, for example: `racks: rack1,,rack2`. If you specify multiple racks, Redpanda tries to distribute the partition leader locations equally across brokers in these racks. +** `ordered_racks:[,,...]`: Supported in Redpanda v26.1 or later. Specify the preferred racks in priority order. Redpanda places leaders in the first listed rack when available, failing over to each subsequent rack when higher-priority racks are unavailable. If all listed racks are unavailable, leaders fall back to any other available brokers. 
Brokers with no rack assignment are treated as lowest priority. + -To find the rack identifier, run `rpk cluster info`. +Use `ordered_racks` for multi-region deployments with a primary region for leaders and explicit failover to a disaster recovery site. + +The xref:reference:properties/topic-properties.adoc#redpanda-leaders-preference[`redpanda.leaders.preference`] property inherits the default value from the cluster property `default_leaders_preference`. + +To find the rack identifiers of all brokers, run: + +[,bash] +---- +rpk cluster info +---- + +.Expected output +[,bash,role="no-copy"] +---- +CLUSTER +======= +redpanda.be267958-279d-49cd-ae86-98fc7ed2de48 + +BROKERS +======= +ID HOST PORT RACK +0* 54.70.51.189 9092 us-west-2a +1 35.93.178.18 9092 us-west-2b +2 35.91.121.126 9092 us-west-2c +---- + +To set the topic property: + +[,bash] +---- +rpk topic alter-config <topic> --set redpanda.leaders.preference=ordered_racks:<rack1>,<rack2> +---- -- -+ -This property inherits the default value from the cluster property `default_leaders_preference`. -* Set the cluster configuration property config_ref:default_leaders_preference,true,properties/cluster-properties[], which specifies the default leader pinning configuration for all topics that don’t have `redpanda.leaders.preference` explicitly set. It accepts values in the same format as `redpanda.leaders.preference`. Default: `none` +* Set the cluster configuration property config_ref:default_leaders_preference,true,properties/cluster-properties[], which specifies the default Leader Pinning configuration for all topics that don’t have `redpanda.leaders.preference` explicitly set. It accepts values in the same format as `redpanda.leaders.preference`, where the default is `none`. + This property also affects internal topics, such as `__consumer_offsets` and transaction coordinators. 
All offset tracking and transaction coordination requests get placed within the preferred regions or AZs for all clients, so you see end-to-end latency and networking cost benefits. ++ +To set the cluster property: ++ +[,bash] +---- +rpk cluster config set default_leaders_preference ordered_racks:, +---- -If there is more than one broker in the preferred AZ (or AZs), leader pinning distributes partition leaders uniformly across brokers in the AZ. +If there is more than one broker in the preferred AZ (or AZs), Leader Pinning distributes partition leaders uniformly across brokers in the AZ. endif::[] ifdef::env-cloud[] -== Configure leader pinning +== Set leader rack preferences -Configure leader pinning if you have Redpanda deployed in a multi-AZ or multi-region cluster and your ingress is concentrated in a particular AZ or region. +Configure Leader Pinning if you have Redpanda deployed in a multi-AZ or multi-region cluster and your ingress is concentrated in a particular AZ or region. -Use the topic configuration property `redpanda.leaders.preference` to configure leader pinning for individual topics. The property accepts the following string values: +Use the topic configuration property `redpanda.leaders.preference` to configure Leader Pinning for individual topics. The property accepts the following string values: -** `none`: Opt out the topic from leader pinning. +** `none`: Disable Leader Pinning for the topic. ** `racks:[,,...]`: Specify the preferred location (rack) of all topic partition leaders. The list can contain one or more racks, and you can list the racks in any order. Spaces in the list are ignored, for example: `racks:rack1,rack2` and `racks: rack1, rack2` are equivalent. You cannot specify empty racks, for example: `racks: rack1,,rack2`. If you specify multiple racks, Redpanda tries to distribute the partition leader locations equally across brokers in these racks. -+ -To find the rack identifier, run `rpk cluster info`. 
+** `ordered_racks:[,,...]`: Supported in Redpanda v26.1 or later. Specify the preferred racks in priority order. Redpanda places leaders in the first listed rack when available, failing over to each subsequent rack when higher-priority racks are unavailable. If all listed racks are unavailable, leaders fall back to any other available brokers. Brokers with no rack assignment are treated as lowest priority. + +To find the rack identifiers of all brokers, run: + +[,bash] +---- +rpk cluster info +---- + +.Expected output +[,bash,role="no-copy"] +---- +CLUSTER +======= +redpanda.be267958-279d-49cd-ae86-98fc7ed2de48 + +BROKERS +======= +ID HOST PORT RACK +0* 54.70.51.189 9092 us-west-2a +1 35.93.178.18 9092 us-west-2b +2 35.91.121.126 9092 us-west-2c +---- + +To set the topic property: + +[,bash] +---- +rpk topic alter-config --set redpanda.leaders.preference=ordered_racks:, +---- + +If there is more than one broker in the preferred AZ (or AZs), Leader Pinning distributes partition leaders uniformly across brokers in the AZ. -If there is more than one broker in the preferred AZ (or AZs), leader pinning distributes partition leaders uniformly across brokers in the AZ. +endif::[] + +== Limitations + +Leader Pinning controls which replica is elected as leader, and does not move replicas to different brokers. If all of a topic's replicas are on brokers in non-preferred racks, no replica exists in the preferred racks to elect as leader, and Redpanda may elect a non-preferred leader indefinitely. + +For example, consider a cluster deployed across four racks (A, B, C, D) with Leader Pinning configured as `ordered_racks:A,B,C,D`. With a replication factor of 3, rack awareness can only place replicas in three of the four racks. If the highest-priority rack (A) does not receive a replica, no replica exists there to elect as leader, and Redpanda may elect a non-preferred leader indefinitely. 
+ +ifndef::env-cloud[] +To prevent this scenario: + +* Enable config_ref:enable_rack_awareness,true,properties/cluster-properties[`enable_rack_awareness`] to distribute replicas across racks automatically. +* Ensure the topic's replication factor at least equals the total number of racks in the cluster, so every rack, including the highest-priority rack, receives a replica. endif::[] +ifdef::env-cloud[] +To prevent this scenario, ensure the topic's replication factor at least equals the total number of racks in the cluster, so every rack, including the highest-priority rack, receives a replica. + +endif::[] + +== Leader Pinning failover across availability zones + +If there are three AZs: A, B, and C, and A becomes unavailable, the failover behavior with `racks` is as follows: + +* The topic with `A` as the preferred leader AZ will have its partition leaders uniformly distributed across B and C. +* The topic with `A,B` as the preferred leader AZs will have its partition leaders in B. +* The topic with `B` as the preferred leader AZ will have its partition leaders in B as well. + +=== Failover with ordered rack preference + +With `ordered_racks`, the failover order follows the configured priority list. Leaders move to the next available rack in the list when higher-priority racks become unavailable. -== Leader pinning failover across availability zones +For a topic configured with `ordered_racks:A,B,C`: -If there are three AZs: A, B, and C, and A becomes unavailable, the failover behavior is as follows: +* The topic with `A` as the first-priority rack will have its partition leaders in A. +* If A becomes unavailable, leaders move to B. +* If A and B become unavailable, leaders move to C. +* If A, B, and C all become unavailable, leaders fall back to any available brokers. -* A topic with "A" as the preferred leader AZ will have its partition leaders uniformly distributed across B and C. 
-* A topic with "A,B" as the preferred leader AZs will have its partition leaders in B. -* A topic with “B” as the preferred leader AZ will have its partition leaders in B as well. +If a higher-priority rack recovers and the topic's replication factor ensures that rack receives a replica, Redpanda automatically moves leaders back to the highest available preferred rack. == Suggested reading +// TODO: Add link to Cloud Topics +// * For latency-tolerant, high-throughput workloads where cross-AZ networking charges are a major cost driver, also consider xref:develop:manage-topics/cloud-topics.adoc[Cloud Topics] * xref:develop:consume-data/follower-fetching.adoc[] // end::single-source[] \ No newline at end of file diff --git a/modules/get-started/pages/quick-start.adoc b/modules/get-started/pages/quick-start.adoc index 5b0bdb92ea..5173dcd955 100644 --- a/modules/get-started/pages/quick-start.adoc +++ b/modules/get-started/pages/quick-start.adoc @@ -445,7 +445,7 @@ docker exec -it redpanda-0 rpk topic create chat-room \ -X pass=secretpassword ``` + -<1> Set a replication factor of 3 to replicate the topic across all 3 brokers. This replication factor provides high availability and data durability. For more details, see xref:develop:config-topics.adoc#choose-the-replication-factor[Choose the replication factor]. +<1> Set a replication factor of 3 to replicate the topic across all 3 brokers. This replication factor provides high availability and data durability. For more details, see xref:develop:manage-topics/config-topics.adoc#choose-the-replication-factor[Choose the replication factor]. <2> Enable remote reads for this topic to read offloaded records from object storage. <3> Enable remote writes for this topic to offload older records to object storage. For more details, see xref:manage:tiered-storage.adoc[]. 
+ diff --git a/modules/get-started/pages/release-notes/helm-charts.adoc b/modules/get-started/pages/release-notes/helm-charts.adoc index 9aa030bc00..dd7c48ee12 100644 --- a/modules/get-started/pages/release-notes/helm-charts.adoc +++ b/modules/get-started/pages/release-notes/helm-charts.adoc @@ -12,6 +12,18 @@ See also: * xref:upgrade:k-compatibility.adoc[] * xref:upgrade:k-rolling-upgrade.adoc[] -== Redpanda chart v25.3.x +== Console chart v26.1.x -link:https://github.com/redpanda-data/redpanda-operator/blob/release/v25.3.x/charts/redpanda/CHANGELOG.md[Changelog^]. +link:https://github.com/redpanda-data/redpanda-operator/blob/release/v26.1.x/charts/console/CHANGELOG.md[Changelog^]. + +=== Prometheus ServiceMonitor + +The Console Helm chart now supports deploying a Prometheus `ServiceMonitor` using `monitoring.enabled`, `monitoring.scrapeInterval`, and `monitoring.labels`. See xref:deploy:console/kubernetes/deploy.adoc#prometheus-servicemonitor[Prometheus ServiceMonitor]. + +== Redpanda chart v26.1.x + +link:https://github.com/redpanda-data/redpanda-operator/blob/release/v26.1.x/charts/redpanda/CHANGELOG.md[Changelog^]. + +=== Config-watcher sidecar resource configuration + +You can now configure explicit CPU and memory resource requests and limits for the config-watcher sidecar using `statefulset.sideCars.configWatcher.resources`. This is required in namespaces that enforce LimitRange or ResourceQuota policies. See xref:manage:kubernetes/k-manage-resources.adoc#config-watcher[Configure config-watcher sidecar resources]. 
\ No newline at end of file diff --git a/modules/get-started/pages/release-notes/operator.adoc b/modules/get-started/pages/release-notes/operator.adoc index 294cd53cfa..cc8c6b9902 100644 --- a/modules/get-started/pages/release-notes/operator.adoc +++ b/modules/get-started/pages/release-notes/operator.adoc @@ -10,16 +10,10 @@ See also: * xref:upgrade:k-rolling-upgrade.adoc[] -== Redpanda Operator v25.3.x +== Redpanda Operator v26.1.x -link:https://github.com/redpanda-data/redpanda-operator/blob/release/v25.3.x/operator/CHANGELOG.md[Changelog^] +link:https://github.com/redpanda-data/redpanda-operator/blob/release/v26.1.x/operator/CHANGELOG.md[Changelog^] -=== ShadowLink resource for disaster recovery +=== Prometheus ServiceMonitor for Console -Redpanda Operator v25.3.x introduces the ShadowLink custom resource for managing shadow links in Kubernetes. The ShadowLink resource allows you to declaratively configure and manage disaster recovery replication between Redpanda clusters. - -* **Declarative configuration**: Define shadow links as Kubernetes resources with full lifecycle management. -* **Status monitoring**: View shadow link health and replication status directly from Kubernetes. -* **Integrated failover**: Delete the ShadowLink resource to fail over all topics. - -See xref:manage:kubernetes/shadowing/k-shadow-linking.adoc[Shadow Linking in Kubernetes] for setup and xref:manage:kubernetes/shadowing/k-monitor-shadowing.adoc[monitoring] documentation. \ No newline at end of file +The Console custom resource supports a `monitoring` configuration that deploys a Prometheus `ServiceMonitor` to automatically discover and scrape Console metrics. See xref:deploy:console/kubernetes/deploy.adoc#prometheus-servicemonitor[Prometheus ServiceMonitor]. 
\ No newline at end of file diff --git a/modules/get-started/pages/release-notes/redpanda.adoc b/modules/get-started/pages/release-notes/redpanda.adoc index 86dfaf23f8..58b63099fb 100644 --- a/modules/get-started/pages/release-notes/redpanda.adoc +++ b/modules/get-started/pages/release-notes/redpanda.adoc @@ -7,173 +7,57 @@ This topic includes new content added in version {page-component-version}. For a * xref:redpanda-cloud:get-started:whats-new-cloud.adoc[] * xref:redpanda-cloud:get-started:cloud-overview.adoc#redpanda-cloud-vs-self-managed-feature-compatibility[Redpanda Cloud vs Self-Managed feature compatibility] -NOTE: Redpanda v25.3 introduces breaking schema changes for Iceberg topics. If you are using Iceberg topics and want to retain the data in the corresponding Iceberg tables, review xref:upgrade:iceberg-schema-changes-and-migration-guide.adoc[] before upgrading your cluster, and follow the required migration steps to avoid sending new records to a dead-letter queue table. -== Iceberg topics with GCP BigLake +== Cloud Topics -A new xref:manage:iceberg/iceberg-topics-gcp-biglake.adoc[REST catalog integration] with Google Cloud BigLake allows you to add Redpanda topics as Iceberg tables in your data lakehouse. +xref:develop:manage-topics/cloud-topics.adoc[Cloud Topics] are now available, making it possible to use durable cloud storage (S3, ADLS, GCS) as the primary backing store instead of local disk, eliminating over 90% of cross-AZ replication costs. This makes them ideal for latency-tolerant, high-throughput workloads such as observability streams, analytics pipelines, and AI/ML training data feeds, where cross-AZ networking charges are the dominant cost driver. -See xref:manage:iceberg/use-iceberg-catalogs.adoc[] for details on configuring Iceberg REST catalog integrations with Redpanda. 
+You can use Cloud Topics exclusively in Redpanda Streaming clusters, or in combination with traditional Tiered Storage and local storage topics on a shared cluster supporting low latency workloads. -== Shadowing +Cloud Topics require Tiered Storage and an Enterprise license. For setup instructions and limitations, see xref:develop:manage-topics/cloud-topics.adoc[]. -Redpanda v25.3 introduces xref:deploy:redpanda/manual/disaster-recovery/shadowing/index.adoc[], an enterprise-licensed disaster recovery solution that provides asynchronous, offset-preserving replication between distinct Redpanda clusters. Shadowing enables cross-region data protection by replicating topic data, configurations, consumer group offsets, ACLs, and Schema Registry data with byte-level fidelity. +== Group-based access control (GBAC) -The shadow cluster operates in read-only mode while continuously receiving updates from the source cluster. During a disaster, you can failover individual topics or an entire shadow link to make resources fully writable for production traffic. See xref:deploy:redpanda/manual/disaster-recovery/shadowing/failover-runbook.adoc[] for emergency procedures. +Redpanda {page-component-version} introduces xref:manage:security/authorization/gbac.adoc[group-based access control (GBAC)], which extends OIDC authentication to support group-based permissions. In addition to assigning roles or ACLs to individual users, you can assign them to OIDC groups. Users inherit permissions from all groups reported by their identity provider (IdP) in the OIDC token claims. -Shadowing includes comprehensive metrics for monitoring replication health. See xref:manage:disaster-recovery/shadowing/monitor.adoc[] and xref:reference:public-metrics-reference.adoc#shadow-link-metrics[Shadow Link metrics reference]. +GBAC supports two authorization patterns: -== Connected client monitoring +* Assign a group as a member of an RBAC role so that all users in the group inherit the role's ACLs. 
+* Create ACLs directly with a `Group:` principal. -You can view details about Kafka client connections using `rpk` or the Admin API ListKafkaConnections endpoint. This allows you to view detailed information about active client connections on a cluster, and identify and troubleshoot problematic clients. For more information, see the xref:manage:cluster-maintenance/manage-throughput.adoc#view-connected-client-details[connected client details] example in the Manage Throughput guide. +Group membership is managed entirely by your IdP. Redpanda reads group information from the OIDC token at authentication time and works across the Kafka API, Schema Registry, and HTTP Proxy. -== New Admin API style +== FIPS 140-3 validation and FIPS Docker image -Redpanda v25.3 introduces a new API style for the Admin API, powered by https://connectrpc.com/docs/introduction[ConnectRPC]. New Redpanda features and operations in v25.3 are available as ConnectRPC services, allowing you to use autogenerated Protobuf clients in addition to using HTTP clients such as `curl`. +Redpanda's cryptographic module has been upgraded from FIPS 140-2 to https://csrc.nist.gov/pubs/fips/140-3/final[FIPS 140-3^] validation. Additionally, Redpanda now provides a FIPS-specific Docker image (`docker.redpanda.com/redpandadata/redpanda:<version>-fips`) for `amd64` and `arm64` architectures, with the required OpenSSL FIPS module pre-configured. -Use the new ConnectRPC endpoints with the following v25.3 features: +NOTE: If you are upgrading with FIPS mode enabled, ensure all SASL/SCRAM user passwords are at least 14 characters before upgrading. FIPS 140-3 enforces stricter HMAC key size requirements. -* Shadowing -* Connected client monitoring +See xref:manage:security/fips-compliance.adoc[] for configuration details. -Existing Admin API endpoints from versions earlier than 25.3 remain supported, and you can continue to use them as usual. 
See xref:manage:use-admin-api.adoc[Manage Redpanda with the Admin API] to learn more about Admin API, and the link:/api/doc/admin/v2/[Admin API reference] to view the new endpoints. +== Iceberg: Expanded JSON Schema support -== Schema Registry import mode +Redpanda now supports additional JSON Schema patterns when translating to Iceberg tables: -Redpanda Schema Registry now supports an import mode that allows you to import existing schemas and retain their current IDs and version numbers. Import mode is useful when migrating from another schema registry. +* `$ref` support: Internal references using `$ref` (for example, `"$ref": "#/definitions/myType"`) are resolved from schema resources declared in the same document. External references are not yet supported. +* Map type from `additionalProperties`: `additionalProperties` objects that contain subschemas now translate to Iceberg `map`. +* `oneOf` nullable pattern: The `oneOf` keyword is now supported for the standard nullable pattern if exactly one branch is `{"type":"null"}` and the other is a non-null schema. -Starting with this release, import mode must be used when importing schemas. Read-write mode no longer allows specifying a schema ID and version when registering a schema. -See xref:manage:schema-reg/schema-reg-api.adoc#set-schema-registry-mode[Use the Schema Registry API]. +See xref:manage:iceberg/specify-iceberg-schema.adoc#how-iceberg-modes-translate-to-table-format[Specify Iceberg Schema] for JSON types mapping and updated requirements. -== Security report +== Ordered rack preference for Leader Pinning -You can now generate a security report for your Redpanda cluster using the link:/api/doc/admin/operation/operation-get_security_report[`/v1/security/report`] Admin API endpoint. The report provides detailed information about TLS configuration, authentication methods, authorization status, and security alerts across all Redpanda interfaces (Kafka, RPC, Admin, Schema Registry, HTTP Proxy). 
+xref:develop:produce-data/leader-pinning.adoc[Leader Pinning] now supports the `ordered_racks` configuration value, which lets you specify preferred racks in priority order. Unlike `racks`, which distributes leaders uniformly across all listed racks, `ordered_racks` places leaders in the highest-priority available rack and fails over to subsequent racks only when higher-priority racks become unavailable. -== Topic identifiers +== User-based throughput quotas -Redpanda v25.3 implements topic identifiers using 16 byte UUIDs as proposed in https://cwiki.apache.org/confluence/display/KAFKA/KIP-516%3A+Topic+Identifiers[KIP-516^]. +Redpanda now supports throughput quotas based on authenticated user principals. Unlike client-based quotas (which rely on self-declared `client-id` values), user-based quotas enforce limits using verified identities from SASL, mTLS, or OIDC authentication. -== Shadowing metrics - -Redpanda v25.3 introduces comprehensive xref:reference:public-metrics-reference.adoc#shadow-link-metrics[Shadowing metrics] for monitoring disaster recovery replication: - -* xref:reference:public-metrics-reference.adoc#redpanda_shadow_link_client_errors[`redpanda_shadow_link_client_errors`] - Track Kafka client errors during shadow link operations -* xref:reference:public-metrics-reference.adoc#redpanda_shadow_link_shadow_lag[`redpanda_shadow_link_shadow_lag`] - Monitor replication lag between source and shadow partitions -* xref:reference:public-metrics-reference.adoc#redpanda_shadow_link_shadow_topic_state[`redpanda_shadow_link_shadow_topic_state`] - Track shadow topic state distribution across links -* xref:reference:public-metrics-reference.adoc#redpanda_shadow_link_total_bytes_fetched[`redpanda_shadow_link_total_bytes_fetched`] - Monitor data transfer volume from source cluster -* xref:reference:public-metrics-reference.adoc#redpanda_shadow_link_total_bytes_written[`redpanda_shadow_link_total_bytes_written`] - Track data written to shadow cluster -* 
xref:reference:public-metrics-reference.adoc#redpanda_shadow_link_total_records_fetched[`redpanda_shadow_link_total_records_fetched`] - Monitor total records fetched from source cluster -* xref:reference:public-metrics-reference.adoc#redpanda_shadow_link_total_records_written[`redpanda_shadow_link_total_records_written`] - Track total messages written to shadow cluster - -For monitoring guidance and alert recommendations, see xref:manage:disaster-recovery/shadowing/monitor.adoc[]. - -== New commands - -Redpanda v25.3 introduces the following xref:reference:rpk/rpk-shadow/rpk-shadow.adoc[`rpk shadow`] commands for managing Redpanda shadow links: - -* xref:reference:rpk/rpk-shadow/rpk-shadow-config-generate.adoc[`rpk shadow config generate`] - Generate configuration files for shadow links -* xref:reference:rpk/rpk-shadow/rpk-shadow-create.adoc[`rpk shadow create`] - Create new shadow links -* xref:reference:rpk/rpk-shadow/rpk-shadow-update.adoc[`rpk shadow update`] - Update existing shadow link configurations -* xref:reference:rpk/rpk-shadow/rpk-shadow-list.adoc[`rpk shadow list`] - List all shadow links -* xref:reference:rpk/rpk-shadow/rpk-shadow-describe.adoc[`rpk shadow describe`] - View shadow link configuration details -* xref:reference:rpk/rpk-shadow/rpk-shadow-status.adoc[`rpk shadow status`] - Monitor shadow link replication status -* xref:reference:rpk/rpk-shadow/rpk-shadow-failover.adoc[`rpk shadow failover`] - Perform emergency failover operations -* xref:reference:rpk/rpk-shadow/rpk-shadow-delete.adoc[`rpk shadow delete`] - Delete shadow links - -In addition, the following commands have been added: - -* xref:reference:rpk/rpk-cluster/rpk-cluster-connections.adoc[`rpk cluster connections`] - Monitor cluster connections and client statistics. -* xref:reference:rpk/rpk-redpanda/rpk-redpanda-config-print.adoc[`rpk redpanda config print`] - Display node configuration. 
+You can set quotas for individual users, default users, or fine-grained user/client combinations. See xref:manage:cluster-maintenance/about-throughput-quotas.adoc[] for conceptual details, and xref:manage:cluster-maintenance/manage-throughput.adoc#set-user-based-quotas[Set user-based quotas] to get started. == New configuration properties -Redpanda 25.3 introduces the following configuration properties: - -**Shadowing:** - -* xref:reference:properties/cluster-properties.adoc#enable_shadow_linking[`enable_shadow_linking`]: Enable shadow links (Enterprise license required) - -**Timestamp validation:** - -* xref:reference:properties/cluster-properties.adoc#log_message_timestamp_after_max_ms[`log_message_timestamp_after_max_ms`]: Maximum timestamp difference for future records -* xref:reference:properties/cluster-properties.adoc#log_message_timestamp_before_max_ms[`log_message_timestamp_before_max_ms`]: Maximum timestamp difference for past records -* xref:reference:properties/topic-properties.adoc#messagetimestampaftermaxms[`message.timestamp.after.max.ms`]: Topic-level timestamp validation (future) -* xref:reference:properties/topic-properties.adoc#messagetimestampbeforemaxms[`message.timestamp.before.max.ms`]: Topic-level timestamp validation (past) - -**Audit logging:** - -* xref:reference:properties/cluster-properties.adoc#audit_use_rpc[`audit_use_rpc`]: Use internal RPCs for audit logging - -**Object storage:** - -* xref:reference:properties/object-storage-properties.adoc#cloud_storage_client_lease_timeout_ms[`cloud_storage_client_lease_timeout_ms`]: Object storage connection timeout -* xref:reference:properties/object-storage-properties.adoc#cloud_storage_gc_max_segments_per_run[`cloud_storage_gc_max_segments_per_run`]: Limits segment deletion rate during xref:manage:tiered-storage.adoc#object-storage-housekeeping[object storage housekeeping] - -**Iceberg:** - -* 
xref:reference:properties/cluster-properties.adoc#iceberg_default_catalog_namespace[`iceberg_default_catalog_namespace`]: Default Iceberg catalog namespace for tables -* xref:reference:properties/cluster-properties.adoc#iceberg_dlq_table_suffix[`iceberg_dlq_table_suffix`]: Iceberg DLQ table name suffix -* xref:reference:properties/cluster-properties.adoc#iceberg_rest_catalog_gcp_user_project[`iceberg_rest_catalog_gcp_user_project`]: GCP project for Iceberg REST catalog billing -* xref:reference:properties/cluster-properties.adoc#iceberg_topic_name_dot_replacement[`iceberg_topic_name_dot_replacement`]: Dot replacement in Iceberg table names - -**TLS:** - -* xref:reference:properties/cluster-properties.adoc#tls_v1_2_cipher_suites[`tls_v1_2_cipher_suites`]: TLS 1.2 cipher suites for client connections -* xref:reference:properties/cluster-properties.adoc#tls_v1_3_cipher_suites[`tls_v1_3_cipher_suites`]: TLS 1.3 cipher suites for client connections - -**Tiered Storage:** - -* xref:reference:properties/cluster-properties.adoc#cloud_topics_epoch_service_epoch_increment_interval[`cloud_topics_epoch_service_epoch_increment_interval`]: Cluster epoch increment interval -* xref:reference:properties/cluster-properties.adoc#cloud_topics_epoch_service_local_epoch_cache_duration[`cloud_topics_epoch_service_local_epoch_cache_duration`]: Local epoch cache duration -* xref:reference:properties/cluster-properties.adoc#cloud_topics_short_term_gc_backoff_interval[`cloud_topics_short_term_gc_backoff_interval`]: Short-term garbage collection backoff interval -* xref:reference:properties/cluster-properties.adoc#cloud_topics_short_term_gc_interval[`cloud_topics_short_term_gc_interval`]: Short-term garbage collection interval -* xref:reference:properties/cluster-properties.adoc#cloud_topics_short_term_gc_minimum_object_age[`cloud_topics_short_term_gc_minimum_object_age`]: Minimum object age for garbage collection - -**Other configuration:** - -* 
xref:reference:properties/cluster-properties.adoc#controller_backend_reconciliation_concurrency[`controller_backend_reconciliation_concurrency`]: Maximum concurrent controller reconciliation operations -* xref:reference:properties/cluster-properties.adoc#fetch_max_read_concurrency[`fetch_max_read_concurrency`]: Maximum concurrent partition reads per fetch request -* xref:reference:properties/cluster-properties.adoc#kafka_max_message_size_upper_limit_bytes[`kafka_max_message_size_upper_limit_bytes`]: Maximum allowed `max.message.size` topic property value -* xref:reference:properties/cluster-properties.adoc#kafka_produce_batch_validation[`kafka_produce_batch_validation`]: Validation level for produced batches -* xref:reference:properties/cluster-properties.adoc#log_compaction_max_priority_wait_ms[`log_compaction_max_priority_wait_ms`]: Maximum time a priority partition can wait for compaction before preempting regular compaction -* xref:reference:properties/cluster-properties.adoc#log_compaction_tx_batch_removal_enabled[`log_compaction_tx_batch_removal_enabled`]: Enable transactional batch removal during compaction -* xref:reference:properties/cluster-properties.adoc#sasl_mechanisms_overrides[`sasl_mechanisms_overrides`]: SASL authentication mechanisms per listener - -=== Changes to default values - -The following configuration properties have new default values in v25.3: - -* xref:reference:properties/cluster-properties.adoc#core_balancing_continuous[`core_balancing_continuous`]: Changed from `false` to `true` (Enterprise license required). -* xref:reference:properties/cluster-properties.adoc#partition_autobalancing_mode[`partition_autobalancing_mode`]: Changed from `node_add` to `continuous` (Enterprise license required). -* xref:reference:properties/cluster-properties.adoc#iceberg_throttle_backlog_size_ratio[`iceberg_throttle_backlog_size_ratio`]: Changed from `0.3` to `null`. 
- -[[behavior-changes]] -=== Behavior changes - -The following topic properties now support enhanced tristate behavior: - -* xref:reference:properties/topic-properties.adoc#segment-ms[`segment.ms`] -* xref:reference:properties/topic-properties.adoc#retention-bytes[`retention.bytes`] -* xref:reference:properties/topic-properties.adoc#retention-ms[`retention.ms`] -* xref:reference:properties/topic-properties.adoc#retention-local-target-bytes[`retention.local.target.bytes`] -* xref:reference:properties/topic-properties.adoc#retention-local-target-ms[`retention.local.target.ms`] -* xref:reference:properties/topic-properties.adoc#initial-retention-local-target-bytes[`initial.retention.local.target.bytes`] -* xref:reference:properties/topic-properties.adoc#initial-retention-local-target-ms[`initial.retention.local.target.ms`] -* xref:reference:properties/topic-properties.adoc#delete-retention-ms[`delete.retention.ms`] -* xref:reference:properties/topic-properties.adoc#min-cleanable-dirty-ratio[`min.cleanable.dirty.ratio`] - -Previously, these properties treated zero and negative values the same way. Now they support three distinct states: positive values set specific limits, zero provides immediate eligibility for cleanup/compaction, and negative values disable the feature entirely. Review your topic configurations if you currently use zero values for these properties. - -=== Deprecations - -The following configuration properties have been deprecated in v25.3 and will be removed in a future release: - -* `kafka_memory_batch_size_estimate_for_fetch`: No replacement. Remove from configuration. -* `log_compaction_disable_tx_batch_removal`: Use xref:reference:properties/cluster-properties.adoc#log_compaction_tx_batch_removal_enabled[`log_compaction_tx_batch_removal_enabled`] instead. Note the inverted logic: the new property enables the behavior when set to `true`. 
-* `log_message_timestamp_alert_after_ms`: Use xref:reference:properties/cluster-properties.adoc#log_message_timestamp_after_max_ms[`log_message_timestamp_after_max_ms`] instead. -* `log_message_timestamp_alert_before_ms`: Use xref:reference:properties/cluster-properties.adoc#log_message_timestamp_before_max_ms[`log_message_timestamp_before_max_ms`] instead. -* `raft_recovery_default_read_size`: No replacement. Remove from configuration. - -== Deprecated features +**Authentication:** -Redpanda has deprecated support for specific TLSv1.2 and TLSv1.3 cipher suites and now uses more secure defaults. See xref:upgrade:deprecated/index.adoc[Deprecated Features] for the complete list. +* xref:reference:properties/cluster-properties.adoc#nested_group_behavior[`nested_group_behavior`]: Control how Redpanda handles nested groups extracted from authentication tokens +* xref:reference:properties/cluster-properties.adoc#oidc_group_claim_path[`oidc_group_claim_path`]: JSON path to extract groups from the JWT payload diff --git a/modules/manage/examples/kubernetes/group-crds.feature b/modules/manage/examples/kubernetes/group-crds.feature new file mode 100644 index 0000000000..f5527eb90e --- /dev/null +++ b/modules/manage/examples/kubernetes/group-crds.feature @@ -0,0 +1,35 @@ +@cluster:sasl @variant:vectorized +Feature: Group CRDs + Background: Cluster available + Given cluster "sasl" is available + + @skip:gke @skip:aks @skip:eks + Scenario: Manage group ACLs + When I apply Kubernetes manifest: + """ +# tag::manage-group-acls[] + --- + apiVersion: cluster.redpanda.com/v1alpha2 + kind: Group + metadata: + name: engineering + spec: + cluster: + clusterRef: + name: sasl + authorization: + acls: + - type: allow + resource: + type: topic + name: team- + patternType: prefixed + operations: [Read, Describe] + - type: allow + resource: + type: subject + name: team- + patternType: prefixed + operations: [Read, Describe] +# end::manage-group-acls[] + """ diff --git 
a/modules/manage/examples/kubernetes/role-crds.feature b/modules/manage/examples/kubernetes/role-crds.feature index 0f9dfc9ccf..2707bf84ff 100644 --- a/modules/manage/examples/kubernetes/role-crds.feature +++ b/modules/manage/examples/kubernetes/role-crds.feature @@ -43,8 +43,6 @@ Feature: Role CRDs And I apply Kubernetes manifest: """ # tag::manage-roles-with-authorization[] - # In this example manifest, a role called "read-only-role" is created in a cluster called "sasl". - # The role includes authorization rules that allow reading from topics with names starting with "public-". --- apiVersion: cluster.redpanda.com/v1alpha2 kind: RedpandaRole @@ -64,6 +62,12 @@ Feature: Role CRDs name: public- patternType: prefixed operations: [Read, Describe] + - type: allow + resource: + type: subject + name: public- + patternType: prefixed + operations: [Read, Describe] # end::manage-roles-with-authorization[] """ And role "read-only-role" is successfully synced diff --git a/modules/manage/examples/kubernetes/schema-crds.feature b/modules/manage/examples/kubernetes/schema-crds.feature index 3706aec034..7d5c467d1c 100644 --- a/modules/manage/examples/kubernetes/schema-crds.feature +++ b/modules/manage/examples/kubernetes/schema-crds.feature @@ -104,3 +104,72 @@ Feature: Schema CRDs """ And schema "order-event" is successfully synced Then I should be able to check compatibility against "order-event" in cluster "basic" + + @skip:gke @skip:aks @skip:eks + Scenario: Manage fully compatible schema (Avro) + Given there is no schema "fully-compatible-schema" in cluster "basic" + When I apply Kubernetes manifest: + """ +# tag::full-compatibility-schema-manifest[] + # This manifest creates an Avro schema named "fully-compatible-schema" in the "basic" cluster. + # The schema uses Full compatibility, ensuring backward and forward compatibility across versions. 
+ --- + apiVersion: cluster.redpanda.com/v1alpha2 + kind: Schema + metadata: + name: fully-compatible-schema + namespace: redpanda + spec: + cluster: + clusterRef: + name: basic + schemaType: avro + compatibilityLevel: Full + text: | + { + "type": "record", + "name": "ExampleRecord", + "fields": [ + { "type": "string", "name": "field1" }, + { "type": "int", "name": "field2" } + ] + } +# end::full-compatibility-schema-manifest[] + """ + And schema "fully-compatible-schema" is successfully synced + Then I should be able to check compatibility against "fully-compatible-schema" in cluster "basic" + + @skip:gke @skip:aks @skip:eks + Scenario: Manage order schema with references (Avro) + Given there is no schema "order-schema" in cluster "basic" + When I apply Kubernetes manifest: + """ +# tag::schema-references-manifest[] + # This manifest creates an Avro schema named "order-schema" that references another schema. + # Schema references enable modular and reusable schema components for complex data structures. 
+ --- + apiVersion: cluster.redpanda.com/v1alpha2 + kind: Schema + metadata: + name: order-schema + namespace: redpanda + spec: + cluster: + clusterRef: + name: basic + references: + - name: product-schema + subject: product + version: 1 + text: | + { + "type": "record", + "name": "Order", + "fields": [ + { "name": "product", "type": "Product" } + ] + } +# end::schema-references-manifest[] + """ + And schema "order-schema" is successfully synced + Then I should be able to check compatibility against "order-schema" in cluster "basic" diff --git a/modules/manage/examples/kubernetes/topic-crds.feature b/modules/manage/examples/kubernetes/topic-crds.feature index a84c10112f..4843df62ec 100644 --- a/modules/manage/examples/kubernetes/topic-crds.feature +++ b/modules/manage/examples/kubernetes/topic-crds.feature @@ -25,3 +25,55 @@ Feature: Topic CRDs """ And topic "topic1" is successfully synced Then I should be able to produce and consume from "topic1" in cluster "basic" + + @skip:gke @skip:aks @skip:eks + Scenario: Manage topic with write caching + Given there is no topic "chat-room" in cluster "basic" + When I apply Kubernetes manifest: + """ +# tag::write-caching-topic-example[] + # This manifest creates a topic called "chat-room" with write caching enabled. + # Write caching provides better performance at the expense of durability. 
+ --- + apiVersion: cluster.redpanda.com/v1alpha2 + kind: Topic + metadata: + name: chat-room + spec: + cluster: + clusterRef: + name: basic + partitions: 3 + replicationFactor: 1 + additionalConfig: + write.caching: "true" +# end::write-caching-topic-example[] + """ + And topic "chat-room" is successfully synced + Then I should be able to produce and consume from "chat-room" in cluster "basic" + + @skip:gke @skip:aks @skip:eks + Scenario: Manage topic with cleanup policy + Given there is no topic "compacted-topic" in cluster "basic" + When I apply Kubernetes manifest: + """ +# tag::cleanup-policy-topic-example[] + # This manifest creates a topic with the cleanup policy set to "delete". + # The cleanup policy determines how partition log files are managed when they reach a certain size. + --- + apiVersion: cluster.redpanda.com/v1alpha2 + kind: Topic + metadata: + name: compacted-topic + spec: + cluster: + clusterRef: + name: basic + partitions: 3 + replicationFactor: 1 + additionalConfig: + cleanup.policy: "delete" +# end::cleanup-policy-topic-example[] + """ + And topic "compacted-topic" is successfully synced + Then I should be able to produce and consume from "compacted-topic" in cluster "basic" diff --git a/modules/manage/examples/kubernetes/user-crds.feature b/modules/manage/examples/kubernetes/user-crds.feature index ada3478265..b49ae0db83 100644 --- a/modules/manage/examples/kubernetes/user-crds.feature +++ b/modules/manage/examples/kubernetes/user-crds.feature @@ -59,9 +59,6 @@ Feature: User CRDs When I apply Kubernetes manifest: """ # tag::manage-authz-only-manifest[] - # In this example manifest, an ACL called "travis" is created in a cluster called "sasl". - # The ACL give an existing user called "travis" permissions to read from all topics whose names start with some-topic. - # This example assumes that you already have a user called "travis" in your cluster. 
--- apiVersion: cluster.redpanda.com/v1alpha2 kind: User @@ -86,8 +83,121 @@ Feature: User CRDs name: some-topic patternType: prefixed operations: [Read] + - type: allow + resource: + type: subject + name: some-topic + patternType: prefixed + operations: [Read] # end::manage-authz-only-manifest[] """ And user "travis" is successfully synced And I delete the CRD user "travis" Then "travis" should be able to authenticate to the "sasl" cluster with password "password" and mechanism "SCRAM-SHA-256" + + @skip:gke @skip:aks @skip:eks + Scenario: Grant a user read access to a subject + Given there is no user "consumer-app" in cluster "sasl" + When I apply Kubernetes manifest: + """ +# tag::grant-user-read-access[] + --- + apiVersion: cluster.redpanda.com/v1alpha2 + kind: User + metadata: + name: consumer-app + spec: + cluster: + clusterRef: + name: redpanda + authorization: + acls: + - type: allow + resource: + type: topic + name: orders + patternType: literal + operations: [Read] + - type: allow + resource: + type: subject + name: orders-value + patternType: literal + operations: [Read] +# end::grant-user-read-access[] + """ + And user "consumer-app" is successfully synced + And I delete the CRD user "consumer-app" + + @skip:gke @skip:aks @skip:eks + Scenario: Grant a producer write access using prefix patterns + Given there is no user "producer-app" in cluster "sasl" + When I apply Kubernetes manifest: + """ +# tag::grant-producer-write-access[] + --- + apiVersion: cluster.redpanda.com/v1alpha2 + kind: User + metadata: + name: producer-app + spec: + cluster: + clusterRef: + name: redpanda + authentication: + type: scram-sha-512 + password: + valueFrom: + secretKeyRef: + name: producer-app-secret + key: password + authorization: + acls: + - type: allow + resource: + type: topic + name: events- + patternType: prefixed + operations: [Write, Describe] + - type: allow + resource: + type: subject + name: events- + patternType: prefixed + operations: [Write, Describe] +# 
end::grant-producer-write-access[] + """ + And user "producer-app" is successfully synced + And I delete the CRD user "producer-app" + + @skip:gke @skip:aks @skip:eks + Scenario: Grant global Schema Registry access + Given there is no user "schema-admin" in cluster "sasl" + When I apply Kubernetes manifest: + """ +# tag::grant-global-sr-access[] + --- + apiVersion: cluster.redpanda.com/v1alpha2 + kind: User + metadata: + name: schema-admin + spec: + cluster: + clusterRef: + name: redpanda + authorization: + acls: + - type: allow + resource: + type: registry + operations: [Read, Write, Delete, Describe, DescribeConfigs, AlterConfigs] + - type: allow + resource: + type: subject + name: "" + patternType: prefixed + operations: [Read, Write, Delete, Describe, DescribeConfigs, AlterConfigs] +# end::grant-global-sr-access[] + """ + And user "schema-admin" is successfully synced + And I delete the CRD user "schema-admin" diff --git a/modules/manage/pages/audit-logging/audit-log-samples.adoc b/modules/manage/pages/audit-logging/audit-log-samples.adoc index 7ba297e6b8..6681c21047 100644 --- a/modules/manage/pages/audit-logging/audit-log-samples.adoc +++ b/modules/manage/pages/audit-logging/audit-log-samples.adoc @@ -3,6 +3,9 @@ :page-categories: Management, Security // tag::single-source[] +ifdef::env-cloud[:gbac-doc: security:authorization/gbac.adoc] +ifndef::env-cloud[:gbac-doc: manage:security/authorization/gbac.adoc] + ifndef::env-cloud[] [NOTE] ==== @@ -80,6 +83,59 @@ This scenario shows the message resulting from an admin using rpk with successfu ---- ==== +.Authentication successful (OIDC with group claims) +[%collapsible] +==== +This scenario shows a successful OIDC authentication event that includes the user's IdP group memberships in the `user.groups` field. Group memberships are extracted from the OIDC token and included in all authentication events for OIDC users. 
+[,json] +---- +{ + "category_uid": 3, + "class_uid": 3002, + "metadata": { + "product": { + "name": "Redpanda", + "uid": "0", + "vendor_name": "Redpanda Data, Inc.", + "version": "v26.1.1" + }, + "version": "1.0.0" + }, + "severity_id": 1, + "time": 1700533469078, + "type_uid": 300201, + "activity_id": 1, + "auth_protocol": "SASL-OAUTHBEARER", + "auth_protocol_id": 99, + "dst_endpoint": { + "ip": "127.0.0.1", + "port": 9092, + "svc_name": "kafka rpc protocol" + }, + "is_cleartext": false, + "is_mfa": false, + "service": { + "name": "kafka rpc protocol" + }, + "src_endpoint": { + "ip": "10.0.1.50", + "name": "kafka-client", + "port": 48210 + }, + "status_id": 1, + // IdP group memberships extracted from the OIDC token + "user": { + "name": "alice@example.com", + "type_id": 1, + "groups": [ + {"type": "idp_group", "name": "engineering"}, + {"type": "idp_group", "name": "analytics"} + ] + } +} +---- +==== + .Authentication failed [%collapsible] ==== @@ -237,6 +293,93 @@ This example illustrates an ACL update that also requires a superuser authentica ---- ==== +.Authorization matched on a group ACL +[%collapsible] +==== +This example shows an API Activity (6003) where the authorization decision matched an ALLOW ACL on a `Group:` principal. The `actor.user.groups` field includes the matched group with type `idp_group`, and the `authorization_metadata` shows the group ACL that granted access. See xref:{gbac-doc}[Group-Based Access Control]. 
+ +[,json] +---- +{ + "category_uid": 6, + "class_uid": 6003, + "metadata": { + "product": { + "name": "Redpanda", + "uid": "0", + "vendor_name": "Redpanda Data, Inc.", + "version": "v26.1.0" + }, + "version": "1.0.0" + }, + "severity_id": 1, + "time": 1774544504327, + "type_uid": 600303, + "activity_id": 3, + "actor": { + "authorizations": [ + { + "decision": "authorized", + "policy": { + "desc": "acl: {principal type {group} name {/sales} host {{any_host}} op all perm allow}, resource: type {topic} name {sales-topic} pattern {literal}", + "name": "aclAuthorization" + } + } + ], + // The matched group appears in the user's groups field + "user": { + "name": "alice", + "type_id": 1, + "groups": [ + { + "type": "idp_group", + "name": "/sales" + } + ] + } + }, + "api": { + "operation": "produce", + "service": { + "name": "kafka rpc protocol" + } + }, + "dst_endpoint": { + "ip": "127.0.1.1", + "port": 9092, + "svc_name": "kafka rpc protocol" + }, + "resources": [ + { + "name": "sales-topic", + "type": "topic" + } + ], + "src_endpoint": { + "ip": "127.0.0.1", + "name": "rdkafka", + "port": 42728 + }, + "status_id": 1, + "unmapped": { + "authorization_metadata": { + "acl_authorization": { + "host": "{{any_host}}", + "op": "all", + "permission_type": "allow", + "principal": "type {group} name {/sales}" + }, + "resource": { + "name": "sales-topic", + "pattern": "literal", + "type": "topic" + } + } + } +} +---- +==== + .Metadata request (with counts) [%collapsible] ==== diff --git a/modules/manage/pages/cluster-maintenance/about-throughput-quotas.adoc b/modules/manage/pages/cluster-maintenance/about-throughput-quotas.adoc new file mode 100644 index 0000000000..ebb13c63c7 --- /dev/null +++ b/modules/manage/pages/cluster-maintenance/about-throughput-quotas.adoc @@ -0,0 +1,281 @@ += About Client Throughput Quotas +:description: Understand how Redpanda's user-based and client ID-based throughput quotas work, including entity hierarchy, precedence rules, and quota tracking 
behavior. +:page-topic-type: concepts +:page-aliases: +:personas: platform_admin, developer +:learning-objective-1: Describe the difference between user-based and client ID-based quotas +:learning-objective-2: Determine which quota type to use for your use case +:learning-objective-3: Explain quota precedence rules and how Redpanda tracks quota usage + +// tag::single-source[] +ifdef::env-cloud[] +:authentication-doc: security:cloud-authentication.adoc +endif::[] +ifndef::env-cloud[] +:authentication-doc: manage:security/authentication.adoc +endif::[] + +Redpanda uses throughput quotas to limit the rate of produce and consume requests from clients. Understanding how quotas work helps you prevent individual clients from disproportionately consuming resources and causing performance degradation for other clients (also known as the "noisy-neighbor" problem), and ensure fair resource sharing across users and applications. + +After reading this page, you will be able to: + +* [ ] {learning-objective-1} +* [ ] {learning-objective-2} +* [ ] {learning-objective-3} + +To configure and manage throughput quotas, see xref:manage:cluster-maintenance/manage-throughput.adoc[]. + +== Throughput control overview + +Redpanda provides two ways to control throughput: + +* Broker-wide limits: Configured using cluster properties. For details, see xref:manage:cluster-maintenance/manage-throughput.adoc#broker-wide-throughput-limits[Broker-wide throughput limits]. +* Client throughput quotas: Configured using the Kafka API. Client quotas enable per-user and per-client rate limiting with fine-grained control through entity hierarchy and precedence rules. This page focuses on client quotas. 
+ +== Supported quota types + +Redpanda supports three Kafka API-based quota types: + +|=== +| Quota type | Description + +| `producer_byte_rate` +| Limit throughput of produce requests (bytes per second) + +| `consumer_byte_rate` +| Limit throughput of fetch requests (bytes per second) + +| `controller_mutation_rate` +| Limit rate of topic mutation requests (partitions created or deleted per second) +|=== + +All quota types can be applied to groups of client connections based on user principals, client IDs, or combinations of both. + +== Quota entities + +Redpanda uses two pieces of identifying information from each client connection to determine which quota applies: + +* Client ID: An ID that clients self-declare. Quotas can target an exact client ID (`client-id`) or a prefix (`client-id-prefix`). Multiple client connections that share a client ID or ID prefix are grouped into a single quota entity. +* User glossterm:principal[]: An authenticated identity verified through SASL, mTLS, or OIDC. Connections that share the same user are considered one entity. + +You can configure quotas that target either entity type, or combine both for fine-grained control. + +=== Client ID-based quotas + +Client ID-based quotas apply to clients identified by their `client-id` field, which is set by the client application. The client ID is typically a configurable property when you create a client with Kafka libraries. When using client ID-based quotas, multiple clients using the same client ID share the same quota tracking. + +Client ID-based quotas rely on clients honestly reporting their identity and correctly setting the `client-id` property. This makes client ID-based quotas unsuitable for guaranteeing isolation between tenants. + +Use client ID-based quotas when: + +* Authentication is not enabled. +* Grouping by application or service name is sufficient. +* You operate a single-tenant environment where all clients are trusted. 
+* You need simple rate limiting without user-level isolation. + +=== User-based quotas + +IMPORTANT: User-based quotas require xref:{authentication-doc}[authentication] to be enabled on your cluster. + +User-based quotas apply to authenticated user principals. Each user has a separate quota, providing a way to limit the impact of individual users on the cluster. + +User-based quotas rely on Redpanda's authentication system to verify user identity. The user principal is extracted from SASL credentials, mTLS certificates, or OIDC tokens and cannot be forged by clients. + +Use user-based quotas when: + +* You operate a multi-tenant environment, such as SaaS platforms or enterprises with departments. +* You require isolation between users or tenants, to avoid noisy-neighbor issues. +* You need per-user billing or metering. + +=== Combined user and client quotas + +You can combine user and client identities for fine-grained control over specific (user, client) combinations. + +Use combined quotas when: + +* You need fine-grained control, for example: user `alice` using a specific application. +* Different rate limits apply to different apps used by the same user. For example, `alice`+'s+ `payment-processor` gets 10 MB/s, but `alice`+'s+ `analytics-consumer` gets 50 MB/s. See <<_example_precedence_resolution,Example: Precedence resolution>> for examples. + +== Quota precedence and tracking + +When a request arrives, Redpanda resolves which quota to apply by matching the request's authenticated user principal and client ID against configured quotas. Redpanda applies the most specific match, using the precedence order in the following table (highest priority first). + +The precedence level that matches also determines how quota usage is tracked. Redpanda tracks quota usage using a tracker key that determines which connections share the same quota bucket. How connections are grouped into buckets depends on the type of entity the quota targets.
+ +To get independent quota tracking per user and client ID combination, configure quotas that include both dimensions, such as `/config/users/<user>/clients/<client-id>` or `/config/users/<user>/clients/<default>`. + +.Quota precedence, tracking, and isolation by configuration level +[cols="1,2,3,2,3", options="header"] +|=== +| Level | Match type | Config path | Tracker key | Isolation behavior + +| 1 +| Exact user + exact client +| `/config/users/<user>/clients/<client-id>` +| `(user, client-id)` +| Each unique (user, client-id) pair tracked independently + +| 2 +| Exact user + client prefix +| `/config/users/<user>/client-id-prefix/<prefix>` +| `(user, client-id-prefix)` +| Clients matching the prefix share tracking within that user + +| 3 +| Exact user + default client +| `/config/users/<user>/clients/<default>` +| `(user, client-id)` +| Each unique (user, client-id) pair tracked independently + +| 4 +| Exact user only +| `/config/users/<user>` +| `user` +| All clients for that user share a single tracking bucket + +| 5 +| Default user + exact client +| `/config/users/<default>/clients/<client-id>` +| `(user, client-id)` +| Each unique (user, client-id) pair tracked independently + +| 6 +| Default user + client prefix +| `/config/users/<default>/client-id-prefix/<prefix>` +| `(user, client-id-prefix)` +| Clients matching the prefix share tracking within each user + +| 7 +| Default user + default client +| `/config/users/<default>/clients/<default>` +| `(user, client-id)` +| Each unique (user, client-id) pair tracked independently + +| 8 +| Default user only +| `/config/users/<default>` +| `user` +| All clients for each user share a single tracking bucket (per user) + +| 9 +| Exact client only +| `/config/clients/<client-id>` +| `client-id` +| All users with that client ID share a single tracking bucket + +| 10 +| Client prefix only +| `/config/client-id-prefix/<prefix>` +| `client-id-prefix` +| All clients matching the prefix share a single bucket across all users + +| 11 +| Default client only +| `/config/clients/<default>` +| `client-id` +| Each unique client ID tracked independently + +| 12 +| No quota configured +| N/A +| N/A +|
No tracking / unlimited throughput +|=== + +IMPORTANT: The `<default>` entity matches any user or client that doesn't have a more specific quota configured. This is different from an empty/unauthenticated user (`user=""`), or undeclared client ID (`client-id=""`), which are treated as specific entities. + +=== Unauthenticated connections + +Unauthenticated connections have an empty user principal (`user=""`) and are not treated as `user=<default>`. + +Unauthenticated connections: + +* Fall back to client-only quotas. +* Have unlimited throughput only if no client-only quota matches. + +=== Example: Precedence resolution + +Given these configured quotas: + +[,bash] +---- +rpk cluster quotas alter --add consumer_byte_rate=5000000 --name user=alice --name client-id=app-1 +rpk cluster quotas alter --add consumer_byte_rate=10000000 --name user=alice +rpk cluster quotas alter --add consumer_byte_rate=20000000 --name client-id=app-1 +---- + +|=== +| User + Client ID | Precedence match + +| `user=alice`, `client-id=app-1` +| Level 1: Exact user + exact client + +| `user=alice`, `client-id=app-2` +| Level 4: Exact user only + +| `user=bob`, `client-id=app-1` +| Level 9: Exact client only + +| `user=bob`, `client-id=app-2` +| Level 12: No quota configured +|=== + +When no quota matches (level 12), the connection is not throttled. + +=== Example: User-only quota + +If you configure a 10 MB/s produce quota for user `alice`: + +[,bash] +---- +rpk cluster quotas alter --add producer_byte_rate=10000000 --name user=alice +---- + +Then `alice` connecting with client ID `app-1` and `alice` connecting with client ID `app-2` share the same 10 MB/s produce limit.
+ +To give each of `alice`+'s+ clients an independent 10 MB/s limit, configure: + +[,bash] +---- +rpk cluster quotas alter --add producer_byte_rate=10000000 --name user=alice --default client-id +---- + +=== Example: User default quota + +If you configure a default 10 MB/s produce quota for all users: + +[,bash] +---- +rpk cluster quotas alter --add producer_byte_rate=10000000 --default user +---- + +This quota applies to all users who don't have a more specific quota configured. Each user is tracked independently: `alice` gets her own 10 MB/s bucket, `bob` gets his own 10 MB/s bucket, and so on. + +Within each user, all client ID values share that user's bucket. `alice` connecting with client ID `app-1` and `alice` connecting with client ID `app-2` share the same 10 MB/s produce limit, while `bob`+'s+ connections have a separate 10 MB/s limit. + +[[throttling-enforcement]] +== Throughput throttling enforcement + +NOTE: As of v24.2, Redpanda enforces all throughput limits per broker, including client throughput. + +Redpanda enforces throughput limits by applying backpressure to clients. When a connection exceeds its throughput limit, Redpanda throttles the connection to bring the rate back within the allowed level: + +. Redpanda adds a `throttle_time_ms` field to responses, indicating how long the client should wait. +. If the client doesn't honor the throttle time, Redpanda inserts delays on the connection's next read operation. + +ifndef::env-cloud[] +The throttling delay may not exceed the limit set by the `max_kafka_throttle_delay_ms` tunable property. +endif::[] + +ifdef::env-cloud[] +In Redpanda Cloud, the throttling delay is set to 30 seconds. +endif::[] + +== Default behavior + +Quotas are opt-in restrictions and not enforced by default. When no quotas are configured, clients have unlimited throughput. 
+ +== Next steps + +* xref:manage:cluster-maintenance/manage-throughput.adoc[Configure throughput quotas] +* xref:{authentication-doc}[Enable authentication for user-based quotas] diff --git a/modules/manage/pages/cluster-maintenance/compaction-settings.adoc b/modules/manage/pages/cluster-maintenance/compaction-settings.adoc index a4c55a2f79..e0d528b982 100644 --- a/modules/manage/pages/cluster-maintenance/compaction-settings.adoc +++ b/modules/manage/pages/cluster-maintenance/compaction-settings.adoc @@ -12,7 +12,7 @@ image::shared:compaction-example.png[Example of topic compaction] This diagram illustrates a compacted topic. Imagine a remote sensor network that uses image recognition to track appearances of red pandas in a geographic area. The sensor network employs special devices that send records to a topic when they detect one. You might enable compaction to reduce topic storage while still maintaining a record in the topic of the last time each device saw a red panda, perhaps to see if they stop frequenting a given area. The left side of the diagram shows all records sent across the topic. The right side illustrates the results of compaction; older records for certain keys are deleted from the log. -NOTE: If your application requires consuming every record for a given key, consider using the `delete` xref:develop:config-topics#change-the-cleanup-policy.adoc[cleanup policy] instead. +NOTE: If your application requires consuming every record for a given key, consider using the `delete` xref:develop:manage-topics/config-topics.adoc#change-the-cleanup-policy[cleanup policy] instead. IMPORTANT: When using xref:manage:tiered-storage.adoc[Tiered Storage], compaction functions at the local storage level. As long as a segment remains in local storage, its records are eligible for compaction. Once a segment is uploaded to object storage and removed from local storage it is not retrieved for further compaction operations. 
A key may therefore appear in multiple segments between Tiered Storage and local storage. diff --git a/modules/manage/pages/cluster-maintenance/continuous-data-balancing.adoc b/modules/manage/pages/cluster-maintenance/continuous-data-balancing.adoc index 5813068b43..56fdf9182b 100644 --- a/modules/manage/pages/cluster-maintenance/continuous-data-balancing.adoc +++ b/modules/manage/pages/cluster-maintenance/continuous-data-balancing.adoc @@ -2,19 +2,30 @@ :description: Continuous Data Balancing simplifies operations with self-healing clusters that dynamically balance partitions. :page-aliases: cluster-administration:continuous-data-balancing.adoc :page-categories: Management +:page-topic-type: how-to +:personas: infrastructure_operator +:learning-objective-1: Enable Continuous Data Balancing on a Redpanda cluster +:learning-objective-2: Check data balancing status using rpk +:learning-objective-3: Cancel partition balancing moves for a specific node [NOTE] ==== include::shared:partial$enterprise-license.adoc[] ==== -Continuous Data Balancing continuously monitors your node and rack availability and disk usage. This enables self-healing clusters that dynamically balance partitions, ensuring smooth operations and optimal cluster performance. +Continuous Data Balancing continuously monitors your node and rack availability and disk usage, dynamically balancing partitions to maintain smooth operations and optimal cluster performance. -It also maintains the configured replication level, even after infrastructure failure. Node availability has the highest priority in data balancing. After a rack (with all nodes belonging to it) becomes unavailable, Redpanda moves partition replicas to the remaining nodes. This violates the rack awareness constraint. But after this rack (or a new one) becomes available, Redpanda repairs the rack awareness constraint by moving excess replicas from racks that have more than one replica to the newly-available rack. 
+Continuous Data Balancing also maintains the configured replication level, even after infrastructure failure. Node availability has the highest priority in data balancing. After a rack (with all nodes belonging to it) becomes unavailable, Redpanda moves partition replicas to the remaining nodes. This violates the rack awareness constraint. After the rack (or a replacement rack) becomes available, Redpanda repairs the constraint by moving excess replicas from racks that have more than one replica to the newly-available rack. + +After reading this page, you will be able to: + +* [ ] {learning-objective-1} +* [ ] {learning-objective-2} +* [ ] {learning-objective-3} == Set Continuous Data Balancing properties -To enable Continuous Data Balancing, set the `partition_autobalancing_mode` property to `continuous`. You can then customize properties for monitoring your node availability and disk usage. +To enable Continuous Data Balancing, set the `partition_autobalancing_mode` property to `continuous`. Customize the following properties to monitor node availability and disk usage. |=== | Property | Description @@ -22,29 +33,42 @@ To enable Continuous Data Balancing, set the `partition_autobalancing_mode` prop | `partition_autobalancing_node_availability_timeout_sec` | When a node is unreachable for the specified amount of time, Redpanda acts as if the node had been decommissioned: rebalancing begins, re-creating all of its replicas on other nodes in the cluster. + + -*Note:* The node remains part of the cluster, and it can rejoin when it comes back online. A node that was actually decommissioned is removed from the cluster. + +The node remains part of the cluster and can rejoin when it comes back online. A node that was actually decommissioned is removed from the cluster. + + Default is 900 seconds (15 minutes). 
+[[partition_autobalancing_node_autodecommission_timeout_sec]] +| `partition_autobalancing_node_autodecommission_timeout_sec` +| When a node is unavailable for this timeout duration, Redpanda automatically and permanently decommissions the node. This property only applies when `partition_autobalancing_mode` is set to `continuous`. Unlike `partition_autobalancing_node_availability_timeout_sec`, which moves partitions while keeping the node in the cluster, this property removes the node from the cluster entirely. A decommissioned node cannot rejoin the cluster. + + + +Only one node is decommissioned at a time. If a decommission is already in progress, automatic decommission does not trigger until it completes. If the decommission stalls (for example, because the node holds the only replica of a partition), manual intervention is required. See xref:manage:cluster-maintenance/nodewise-partition-recovery.adoc[]. + + + +By default, this property is null and automatic decommission is disabled. + | `partition_autobalancing_max_disk_usage_percent` | When a node fills up to this disk usage percentage, Redpanda starts moving replicas off the node to other nodes with disk utilization below the percentage. + + Default is 80%. |=== -For information about other modes with `partition_autobalancing_mode`, see xref:./cluster-balancing.adoc[Cluster Balancing]. +For the other `partition_autobalancing_mode` options, see xref:manage:cluster-maintenance/cluster-balancing.adoc[Cluster balancing]. + +== Use data balancing commands -== Use Data Balancing commands +Use the following `rpk` commands to monitor and control data balancing. === Check data balancing status To see the status, run: -`rpk cluster partitions balancer-status` +[,bash] +---- +rpk cluster partitions balancer-status +---- This shows the time since the last data balancing, the number of replica movements in progress, the nodes that are unavailable, and the nodes that are over the disk space threshold (default = 80%). 
-It also returns a data balancing status: `off`, `ready`, `starting`, `in-progress`, or `stalled`. If the command reports a `stalled` status, check the following: +It also returns a data balancing status: `off`, `ready`, `starting`, `in-progress`, or `stalled`. If the command reports a `stalled` status, verify: * Are there enough healthy nodes? For example, in a three node cluster, no movements are possible for partitions with three replicas. * Does the cluster have sufficient space? Partitions are not moved if all nodes in the cluster are utilizing more than their disk space threshold. @@ -55,10 +79,16 @@ It also returns a data balancing status: `off`, `ready`, `starting`, `in-progres To cancel the current partition balancing moves, run: -`rpk cluster partitions movement-cancel` +[,bash] +---- +rpk cluster partitions movement-cancel +---- -To cancel the partition moves in a specific node, add `--node`. For example: +To cancel partition moves on a specific node, use the `--node` flag. For example: -`rpk cluster partitions movement-cancel --node 1` +[,bash] +---- +rpk cluster partitions movement-cancel --node 1 +---- -NOTE: If continuous balancing hasn't been turned off, and if the system is still unbalanced, then it schedules another partition balancing. To stop all balancing, first set `partition_autobalancing_mode` to `off`. Then cancel current data balancing moves. +NOTE: If continuous balancing is still enabled and the cluster remains unbalanced, Redpanda schedules another partition balancing round. To stop all balancing, first set `partition_autobalancing_mode` to `off`, then cancel the current data balancing moves. 
diff --git a/modules/manage/pages/cluster-maintenance/decommission-brokers.adoc b/modules/manage/pages/cluster-maintenance/decommission-brokers.adoc index b3a155a2a6..e3c1d7927f 100644 --- a/modules/manage/pages/cluster-maintenance/decommission-brokers.adoc +++ b/modules/manage/pages/cluster-maintenance/decommission-brokers.adoc @@ -10,6 +10,16 @@ When you decommission a broker, its partition replicas are reallocated across th CAUTION: When a broker is decommissioned, it cannot rejoin the cluster. If a broker with the same ID tries to rejoin the cluster, it is rejected. +== Decommissioning methods + +There are two ways to decommission brokers in Redpanda: + +* Manual decommissioning (described in this guide): Use `rpk` commands to explicitly decommission a broker when you need full control over the timing and selection of brokers to remove. + +* Automatic decommissioning: When xref:manage:cluster-maintenance/continuous-data-balancing.adoc[Continuous Data Balancing] is enabled, you can configure the xref:manage:cluster-maintenance/continuous-data-balancing.adoc#partition_autobalancing_node_autodecommission_timeout_sec[partition_autobalancing_node_autodecommission_timeout_sec] property to automatically decommission brokers that remain unavailable for a specified duration. + +Both methods permanently remove the broker from the cluster. Decommissioned brokers cannot rejoin. + == What happens when a broker is decommissioned? When a broker is decommissioned, the controller leader creates a reallocation plan for all partition replicas that are allocated to that broker. By default, this reallocation is done in batches of 50 to avoid overwhelming the remaining brokers with Raft recovery. See xref:reference:tunable-properties.adoc#partition_autobalancing_concurrent_moves[`partition_autobalancing_concurrent_moves`]. 
diff --git a/modules/manage/pages/cluster-maintenance/disk-utilization.adoc b/modules/manage/pages/cluster-maintenance/disk-utilization.adoc index 8d66849891..b1484a49a7 100644 --- a/modules/manage/pages/cluster-maintenance/disk-utilization.adoc +++ b/modules/manage/pages/cluster-maintenance/disk-utilization.adoc @@ -85,7 +85,7 @@ Redpanda runs a log cleanup process in the background to apply these policy sett See also: * xref:manage:tiered-storage.adoc#manage-local-capacity-for-tiered-storage-topics[Manage local capacity for Tiered Storage topics] -* xref:develop:config-topics.adoc#delete-records-from-a-topic[Delete records from a topic] +* xref:develop:manage-topics/config-topics.adoc#delete-records-from-a-topic[Delete records from a topic] [[set-time-based-retention]] === Set time-based retention diff --git a/modules/manage/pages/cluster-maintenance/manage-throughput.adoc b/modules/manage/pages/cluster-maintenance/manage-throughput.adoc index dab8e18374..ce1e6da001 100644 --- a/modules/manage/pages/cluster-maintenance/manage-throughput.adoc +++ b/modules/manage/pages/cluster-maintenance/manage-throughput.adoc @@ -1,32 +1,47 @@ = Manage Throughput -:description: Learn how to manage the throughput of Kafka traffic. +:description: Configure broker-wide and client-specific throughput quotas to prevent resource exhaustion and noisy-neighbor issues. 
:page-categories: Management, Networking +:page-topic-type: how-to +:personas: platform_admin, developer +:learning-objective-1: Set user-based throughput quotas +:learning-objective-2: Set client ID-based quotas +:learning-objective-3: Monitor quota usage and throttling behavior // tag::single-source[] ifdef::env-cloud[] :monitor-doc: manage:monitor-cloud.adoc#throughput :connected-clients-api-doc-ref: link:/api/doc/cloud-dataplane/operation/operation-monitoringservice_listkafkaconnections +:authentication-doc: security:cloud-authentication.adoc endif::[] ifndef::env-cloud[] :monitor-doc: manage:monitoring.adoc#throughput :connected-clients-api-doc-ref: link:/api/doc/admin/v2/operation/operation-redpanda-core-admin-v2-clusterservice-listkafkaconnections +:authentication-doc: manage:security/authentication.adoc endif::[] -Redpanda supports throughput throttling on both ingress and egress independently, and allows configuration at the broker and client levels. This helps prevent clients from causing unbounded network and disk usage on brokers. You can configure limits at two levels: +Redpanda throttles throughput on ingress and egress independently, and you can configure limits at the broker and client levels. This prevents clients from causing unbounded network and disk usage on brokers. -* *Broker limits*: These apply to all clients connected to the broker and restrict total traffic on the broker. See <>. +You can configure limits at two levels: + +* Broker limits: These apply to all clients connected to the broker and restrict total traffic on the broker. See <>. ifndef::env-cloud[] -* *Client limits*: These apply to a set of clients defined by their `client_id` and help prevent a set of clients from starving other clients using the same broker. You can manage client quotas with xref:reference:rpk/rpk-cluster/rpk-cluster-quotas.adoc[`rpk cluster quotas`], with {ui}, or with the Kafka API. When no quotas apply, the client has unlimited throughput. 
+* Client limits: These apply to authenticated users or clients defined by their client ID. You can manage client quotas with xref:reference:rpk/rpk-cluster/rpk-cluster-quotas.adoc[`rpk cluster quotas`], with {ui}, or with the Kafka API. When no quotas apply, the client has unlimited throughput. endif::[] ifdef::env-cloud[] -* *Client limits*: These apply to a set of clients defined by their `client_id` and help prevent a set of clients from starving other clients using the same broker. You can manage client quotas with xref:reference:rpk/rpk-cluster/rpk-cluster-quotas.adoc[`rpk cluster quotas`], with the {ui} UI, with the link:https://docs.redpanda.com/api/doc/cloud-dataplane/operation/operation-quotaservice_listquotas[Redpanda Cloud Data Plane API], or with the Kafka API. When no quotas apply, the client has unlimited throughput. +* Client limits: These apply to authenticated users or clients defined by their client ID. You can manage client quotas with xref:reference:rpk/rpk-cluster/rpk-cluster-quotas.adoc[`rpk cluster quotas`], with the {ui} UI, with the link:https://docs.redpanda.com/api/doc/cloud-dataplane/operation/operation-quotaservice_listquotas[Redpanda Cloud Data Plane API], or with the Kafka API. When no quotas apply, the client has unlimited throughput. NOTE: Throughput throttling is supported for BYOC and Dedicated clusters only. endif::[] +After reading this page, you will be able to: + +* [ ] {learning-objective-1} +* [ ] {learning-objective-2} +* [ ] {learning-objective-3} + == View connected client details -You may find it helpful to check the xref:{monitor-doc}[current produce and consume throughput] of a client before you configure throughput quotas. +Before configuring throughput quotas, check the xref:{monitor-doc}[current produce and consume throughput] of a client. 
ifndef::env-cloud[] Use the xref:reference:rpk/rpk-cluster/rpk-cluster-connections-list.adoc[`rpk cluster connections list`] command or the {connected-clients-api-doc-ref}[ListKafkaConnections] Admin API endpoint to view detailed information about active Kafka client connections. @@ -258,6 +273,8 @@ UID STATE USER CLIENT-ID b41584f3-2662-4185-a4b8-0d8510f5c780 OPEN UNAUTHENTICATED perf-producer-client 127.0.0.1:55002 0 0 8s 7.743592270s 0B 0B 1 b20601a3-624c-4a8c-ab88-717643f01d56 OPEN UNAUTHENTICATED perf-producer-client 127.0.0.1:55012 0 0 9s 0s 78.9MB 0B 292 ---- + +The `USER` field in the connection list shows the authenticated principal. Unauthenticated connections show `UNAUTHENTICATED`, which corresponds to an empty user principal (`user=""`) in quota configurations, not `user=`. -- ifndef::env-cloud[] @@ -417,24 +434,19 @@ curl \ } ---- ==== +The user principal field in the connection list shows the authenticated principal. Unauthenticated connections show `AUTHENTICATION_STATE_UNAUTHENTICATED`, which corresponds to an empty user principal (`user=""`) in quota configurations, not `user=`. -- endif::[] ====== +To view connections for a specific authenticated user: -== Throughput throttling enforcement - -NOTE: As of v24.2, Redpanda enforces all throughput limits per broker, including client throughput. - -Throughput limits are enforced by applying backpressure to clients. When a connection is in breach of the throughput limit, the throttler advises the client about the delay (throttle time) that would bring the rate back to the allowed level. Redpanda starts by adding a `throttle_time_ms` field to responses. If that isn't honored, delays are inserted on the connection's next read operation. +[,bash] +---- +rpk cluster connections list --user alice +---- -ifdef::env-cloud[] -In Redpanda Cloud, the throttling delay is set to 30 seconds. 
-endif::[] - -ifndef::env-cloud[] -The throttling delay may not exceed the limit set by xref:reference:tunable-properties.adoc#max_kafka_throttle_delay_ms[`max_kafka_throttle_delay_ms`]. -endif::[] +This shows all connections from user `alice`, useful for monitoring clients that are subject to user-based quotas. == Broker-wide throughput limits @@ -469,47 +481,96 @@ The properties for broker-wide throughput quota balancing are configured at the ==== By default, both `kafka_throughput_limit_node_in_bps` and `kafka_throughput_limit_node_out_bps` are disabled, and no throughput limits are applied. You must manually set them to enable throughput throttling. ==== + +To set broker-wide throughput limits, use xref:reference:rpk/rpk-cluster/rpk-cluster-config-set.adoc[`rpk cluster config set`] to configure the cluster properties: + +[,bash] +---- +# Set ingress limit to 100 MB/s per broker +rpk cluster config set kafka_throughput_limit_node_in_bps 100000000 + +# Set egress limit to 200 MB/s per broker +rpk cluster config set kafka_throughput_limit_node_out_bps 200000000 +---- endif::[] == Client throughput limits -Redpanda provides configurable throughput quotas that apply to an individual client or a group of clients. You can apply a quota for an individual client based on an exact match with its `client_id`, or a group of clients based on IDs that start with a given prefix. +Redpanda provides configurable throughput quotas for individual clients or authenticated users. Quotas are managed through the Kafka-compatible AlterClientQuotas and DescribeClientQuotas APIs, accessible with `rpk`, Redpanda Console, or Kafka client libraries. -As of v24.2, client throughput quotas are compatible with the https://cwiki.apache.org/confluence/display/KAFKA/KIP-546%3A+Add+Client+Quota+APIs+to+the+Admin+Client[AlterClientQuotas and DescribeClientQuotas^] Kafka APIs, and are separate from quotas configured through cluster configuration in earlier Redpanda versions. 
The client throughput quotas no longer apply on a per-shard basis, and now limit the rates across a Redpanda broker's node. The quotas are neither shared nor balanced between brokers. +Redpanda supports two types of client throughput quotas: -Redpanda supports the following Kafka API-based quota types on clients: +* Client ID-based quotas: Limit throughput based on the self-declared `client-id` field. +* User-based quotas: Limit throughput based on authenticated user glossterm:principal[]. Requires xref:{authentication-doc}[authentication]. -|=== -| Quota type | Description +You can also combine both types for fine-grained control (for example, limiting a specific user when using a specific client application). -| `producer_byte_rate` -| Limit throughput of produce requests +For conceptual information about quota types, entity hierarchy, precedence rules, and how Redpanda tracks and enforces quotas through throttling, see xref:manage:cluster-maintenance/about-throughput-quotas.adoc[]. -| `consumer_byte_rate` -| Limit throughput of fetch requests +=== Set user-based quotas -| `controller_mutation_rate` -| Limit rate of topic mutation requests, including create, add, and delete partition, in number of partitions per second +IMPORTANT: User-based quotas require authentication to be enabled. To set up authentication, see xref:{authentication-doc}[]. -|=== +==== Quota for a specific user + +To limit throughput for a specific authenticated user across all clients: + +[,bash] +---- +rpk cluster quotas alter --add producer_byte_rate=2000000 --name user=alice +---- -You can also apply a default quota for all other client requests that don't have a specific quota based on an exact match or `client_id` prefix. +This limits user `alice` to 2 MB/s for produce requests regardless of the client ID used. -It is possible to create conflicting quotas if you configure the same quotas through both the Kafka API and a cluster configuration. 
Redpanda resolves these conflicts by following an order of preference in finding a matching quota for a request: +To view quotas for a user: -. Quota configured through the Kafka API for an exact match on `client_id` -. Quota configured through the Kafka API for a prefix match on `client_id` -ifndef::env-cloud[] -. Quota configured through cluster configuration properties (`kafka_client_group_byte_rate_quota`, `kafka_client_group_fetch_byte_rate_quota`-deprecated in v24.2) for a prefix match on `client_id` -endif::[] -. Default quota configured through the Kafka API on `client_id` -ifndef::env-cloud[] -. Default quota configured through cluster configuration properties (`target_quota_byte_rate`, `target_fetch_quota_byte_rate`, `kafka_admin_topic_api_rate`-deprecated in v24.2) on `client_id` +[,bash] +---- +rpk cluster quotas describe --name user=alice +---- -Redpanda recommends <> over from cluster configuration-managed quotas to Kafka-compatible quotas. You can re-create the configuration-based quotas with `rpk`, and then remove the cluster configurations. -endif::[] +Expected output: + +[,bash,role=no-copy] +---- +user=alice + producer_byte_rate=2000000 +---- + +==== Default quota for all users + +To set a fallback quota for any user without a more specific quota: + +[,bash] +---- +rpk cluster quotas alter --add consumer_byte_rate=5000000 --default user +---- + +This applies a 5 MB/s fetch quota to all authenticated users who don't have a more specific quota configured. + +=== Remove a user quota + +To remove a quota for a specific user: + +[,bash] +---- +rpk cluster quotas alter --delete consumer_byte_rate --name user=alice +---- + +To remove all quotas for a user: + +[,bash] +---- +rpk cluster quotas delete --name user=alice +---- + +=== Set client ID-based quotas -=== Individual client throughput limit +Client ID-based quotas apply to all users using a specific client ID. These quotas do not require authentication. 
Because the client ID is self-declared, client ID-based quotas are not suitable for guaranteeing isolation between tenants. + +For multi-tenant environments, Redpanda recommends user-based quotas for per-tenant isolation. + +==== Individual client ID throughput limit ifdef::env-cloud[] NOTE: The following sections show how to manage throughput with `rpk`. You can also manage throughput with the link:https://docs.redpanda.com/api/doc/cloud-dataplane/operation/operation-quotaservice_listquotas[Redpanda Cloud Data Plane API]. @@ -531,7 +592,7 @@ client-id=consumer-1 ---- -To set a throughput quota for a single client, use the xref:reference:rpk/rpk-cluster/rpk-cluster-quotas-alter.adoc[`rpk cluster quotas alter`] command. +To set a throughput quota for a single client, use the xref:reference:rpk/rpk-cluster/rpk-cluster-quotas-alter.adoc[`rpk cluster quotas alter`] command. [,bash] ---- @@ -544,7 +605,7 @@ ENTITY STATUS client-id=consumer-1 OK ---- -=== Group of clients throughput limit +==== Group of clients throughput limit Alternatively, you can view or configure throughput quotas for a group of clients based on a match on client ID prefix. The following example sets the `consumer_byte_rate` quota to client IDs prefixed with `consumer-`: @@ -553,12 +614,11 @@ Alternatively, you can view or configure throughput quotas for a group of client rpk cluster quotas alter --add consumer_byte_rate=200000 --name client-id-prefix=consumer- ---- -NOTE: A client group specified with `client-id-prefix` is not the equivalent of a Kafka consumer group. It is used only to match requests based on the `client_id` prefix. The `client_id` field is typically a configurable property when you create a client with Kafka libraries. - +NOTE: A `client-id-prefix` quota group is not related to Kafka consumer groups. The client ID is an application-defined identifier sent with every request. 
Client libraries typically default to their own name (such as `kgo`, `rdkafka`, `sarama`, or `perf-producer-client`), but applications can set it using the https://kafka.apache.org/documentation/#consumerconfigs_client.id[`client.id`^] configuration property. This makes prefix-based quotas useful for grouping related applications (for example, `inventory-service-` to match `inventory-service-1`, `inventory-service-2`, etc.). -=== Default client throughput limit +==== Default client throughput limit -You can apply default throughput limits to clients. Redpanda applies the default limits if no quotas are configured for a specific `client_id` or prefix. +You can apply default throughput limits to clients. Redpanda applies the default limits if no quotas are configured for a specific client ID or prefix. To specify a produce quota of 1 GB/s through the Kafka API (applies across all produce requests to a single broker), run: @@ -567,94 +627,70 @@ To specify a produce quota of 1 GB/s through the Kafka API (applies across all p rpk cluster quotas alter --default client-id --add producer_byte_rate=1000000000 ---- -=== Bulk manage client throughput limits +=== Set combined user and client quotas -To more easily manage multiple quotas, you can use the `cluster quotas describe` and xref:reference:rpk/rpk-cluster/rpk-cluster-quotas-import.adoc[`cluster quotas import`] commands to do a bulk export and update. +You can set quotas for specific (user, client ID) combinations for fine-grained control. 
-For example, to export all client quotas in JSON format: +==== User with specific client + +To limit a specific user when using a specific client: [,bash] ---- -rpk cluster quotas describe --format json +rpk cluster quotas alter --add consumer_byte_rate=1000000 --name user=alice --name client-id=consumer-1 ---- -`rpk cluster quotas import` accepts the output string from `rpk cluster quotas describe --format `: +User `alice` using `client-id=consumer-1` is limited to a 1 MB/s fetch rate. The same user with a different client ID would use a different quota (or fall back to less specific matches). + +To view combined quotas: [,bash] ---- -rpk cluster quotas import --from '{"quotas":[{"entity":[{"name":"foo","type":"client-id"}],"values":[{"key":"consumer_byte_rate","values":"12123123"}]},{"entity":[{"name":"foo-","type":"client-id-prefix"}],"values":[{"key":"producer_byte_rate","values":"12123123"},{"key":"consumer_byte_rate","values":"4444444"}]}]}' +rpk cluster quotas describe --name user=alice --name client-id=consumer-1 ---- -You can also save the JSON or YAML output to a file and pass the file path in the `--from` flag. +==== User with client prefix -[[migrate]] -=== Migrate cluster configuration quotas to Kafka API-based quotas +To set a shared quota for a user across multiple clients matching a prefix: -. Use xref:reference:rpk/rpk-cluster/rpk-cluster-config-get.adoc[`rpk cluster config get`] to view current client quotas managed with cluster configuration. 
The following example shows how to retrieve the `kafka_client_group_byte_rate_quota` for two groups of producers: -+ [,bash] ---- -rpk cluster config get kafka_client_group_byte_rate_quota - ----- -+ -[,bash,role=no-copy] ----- -"kafka_client_group_byte_rate_quota": [ - { - "group_name": "group_1", - "clients_prefix": "producer_group_alone_producer", - "quota": 10240 - }, - { "group_name": "group_2", - "clients_prefix": "producer_group_multiple", - "quota": 20480 - } -] +rpk cluster quotas alter --add producer_byte_rate=3000000 --name user=bob --name client-id-prefix=app- ---- -ifndef::env-cloud[] -. Each client quota cluster property (xref:upgrade:deprecated/index.adoc[deprecated in v24.2]) corresponds to a quota type in Kafka. Check the corresponding `rpk` arguments to use when setting the new quota values: -+ -|=== -| Cluster configuration property | `rpk cluster quotas` arguments -| `target_quota_byte_rate` -| `--default client-id --add producer_byte_rate=` +All clients used by user `bob` with a client ID starting with `app-` share a combined 3 MB/s produce quota. -| `target_fetch_quota_byte_rate` -| `--default client-id --add consumer_byte_rate=` +==== Default user with specific client -| `kafka_admin_topic_api_rate` -| `--default client-id --add controller_mutation_rate=` +To set a quota for a specific client across all users: -| `kafka_client_group_byte_rate_quota` -| `--name client-id-prefix= --add producer_byte_rate=` +[,bash] +---- +rpk cluster quotas alter --add producer_byte_rate=500000 --default user --name client-id=payment-processor +---- -| `kafka_client_group_fetch_byte_rate_quota` -| `--name client-id-prefix= --add consumer_byte_rate=` +Any user using `client-id=payment-processor` is limited to a 500 KB/s produce rate, unless they have a more specific quota configured. 
-|=== -+ -The client throughput quotas set through the Kafka API apply per broker, so you must convert the cluster configuration values that were applied on a per-shard (logical CPU core) basis. For example, if you set `target_fetch_quota_byte_rate` to 100 MBps/shard, and you run Redpanda on 16-core brokers, you can set the new consumer_byte_rate quota to 100 * 16 = 1600 MBps. -endif::[] +=== Bulk manage client throughput limits + +To more easily manage multiple quotas, you can use the `cluster quotas describe` and xref:reference:rpk/rpk-cluster/rpk-cluster-quotas-import.adoc[`cluster quotas import`] commands to do a bulk export and update. + +For example, to export all client quotas in JSON format: -. Use `rpk cluster quotas alter` to set the corresponding client throughput quotas based on the Kafka API: -+ [,bash] ---- -rpk cluster quotas alter --name client-id-prefix=producer_group_alone_producer --add producer_byte_rate= -rpk cluster quotas alter --name client-id-prefix=producer_group_multiple --add producer_byte_rate= +rpk cluster quotas describe --format json ---- -+ -Replace the placeholder values with the new quota values, accounting for the conversion to per-broker limits. For example, 10240 * broker core count = new quota. -. 
Use xref:reference:rpk/rpk-cluster/rpk-cluster-config-set.adoc[`rpk cluster config set`] to remove the configuration-based quotas: -+ +`rpk cluster quotas import` accepts the output string from `rpk cluster quotas describe --format <format>`: + [,bash] ---- -rpk cluster config set kafka_client_group_byte_rate_quota= +rpk cluster quotas import --from '{"quotas":[{"entity":[{"name":"analytics-consumer","type":"client-id"}],"values":[{"key":"consumer_byte_rate","values":"10000000"}]},{"entity":[{"name":"analytics-","type":"client-id-prefix"}],"values":[{"key":"producer_byte_rate","values":"10000000"},{"key":"consumer_byte_rate","values":"5000000"}]}]}' ---- +You can also save the JSON or YAML output to a file and pass the file path in the `--from` flag. + === View throughput limits in {ui} You can also use {ui} to view enforced limits. In the side menu, go to **Quotas**. @@ -674,6 +710,11 @@ ifndef::env-cloud[] ** `/metrics` - xref:reference:internal-metrics-reference.adoc#vectorized_kafka_quotas_client_quota_throttle_time[`vectorized_kafka_quotas_client_quota_throttle_time`] endif::[] +To identify which clients are actively connected and generating traffic, see xref:manage:cluster-maintenance/configure-client-connections.adoc[Configure Client Connections]. + +Quota metrics use the `redpanda_quota_rule` label to identify which quota was applied to a request. The label distinguishes between different entity types (user, client, or combinations). See the label values in xref:reference:public-metrics-reference.adoc#redpanda_kafka_quotas_client_quota_throughput[`redpanda_kafka_quotas_client_quota_throughput`].
+ +ifndef::env-cloud[] The `kafka_quotas` logger provides details at the trace level on client quota throttling: [,bash] @@ -684,9 +725,12 @@ TRACE 2024-06-14 15:37:44,835 [shard 2:main] kafka_quotas - quota_manager.cc:36 TRACE 2024-06-14 15:37:59,195 [shard 2:main] kafka_quotas - quota_manager.cc:361 - request: ctx:{quota_type: produce_quota, client_id: {rpk}}, key:k_client_id{rpk}, value:{limit: {1111}, rule: kafka_client_default}, bytes: 1316, delay:184518451ns, capped_delay:184518451ns TRACE 2024-06-14 15:37:59,195 [shard 2:main] kafka_quotas - connection_context.cc:605 - [127.0.0.1:58636] throttle request:{snc:0, client:184}, enforce:{snc:-14359, client:-14359}, key:0, request_size:1316 ---- +endif::[] == See also -- xref:manage:cluster-maintenance/configure-client-connections.adoc[Configure Client Connections] +- xref:manage:cluster-maintenance/about-throughput-quotas.adoc[] +- xref:manage:cluster-maintenance/configure-client-connections.adoc[] +- xref:{authentication-doc}[] // end::single-source[] diff --git a/modules/manage/pages/cluster-maintenance/topic-property-configuration.adoc b/modules/manage/pages/cluster-maintenance/topic-property-configuration.adoc index 8a182e138a..7f1d6410f1 100644 --- a/modules/manage/pages/cluster-maintenance/topic-property-configuration.adoc +++ b/modules/manage/pages/cluster-maintenance/topic-property-configuration.adoc @@ -220,6 +220,6 @@ For complete details about all available topic properties, see xref:reference:pr * xref:reference:properties/topic-properties.adoc[Topic Configuration Properties] - Complete reference of all available topic properties * xref:manage:cluster-maintenance/cluster-property-configuration.adoc[Configure Cluster Properties] - Configure cluster-wide defaults -* xref:develop:config-topics.adoc[Manage Topics] - Create and manage topics +* xref:develop:manage-topics/config-topics.adoc[Manage Topics] - Create and manage topics * xref:manage:kubernetes/k-manage-topics.adoc[Manage Topics in Kubernetes] - 
Topic management in Kubernetes deployments * xref:console:ui/edit-topic-configuration.adoc[Edit Topic Configuration in Redpanda Console] - Graphical topic configuration \ No newline at end of file diff --git a/modules/manage/pages/iceberg/about-iceberg-topics.adoc b/modules/manage/pages/iceberg/about-iceberg-topics.adoc index cc18710e04..4f74450f63 100644 --- a/modules/manage/pages/iceberg/about-iceberg-topics.adoc +++ b/modules/manage/pages/iceberg/about-iceberg-topics.adoc @@ -77,7 +77,7 @@ ifdef::env-cloud[] To create an Iceberg table for a Redpanda topic, you must set the cluster configuration property config_ref:iceberg_enabled,true,properties/cluster-properties[`iceberg_enabled`] to `true`, and also configure the topic property `redpanda.iceberg.mode`. You can choose to provide a schema if you need the Iceberg table to be structured with defined columns. endif::[] -. Set the `iceberg_enabled` configuration option on your cluster to `true`. +. Set the `iceberg_enabled` configuration option on your cluster to `true`. ifdef::env-cloud[] + [tabs] @@ -88,7 +88,7 @@ rpk:: [,bash] ---- rpk cloud login -rpk profile create --from-cloud +rpk profile create --from-cloud rpk cluster config set iceberg_enabled true ---- -- @@ -122,9 +122,13 @@ The link:/api/doc/cloud-controlplane/operation/operation-clusterservice_updatecl endif::[] ifndef::env-cloud[] + +When multiple clusters write to the same catalog, each cluster must use a distinct namespace to avoid table name collisions. This is especially critical for REST catalog providers that offer a single global catalog per account (such as AWS Glue), where there is no other isolation mechanism. By default, Redpanda creates Iceberg tables in a namespace called `redpanda`. To use a unique namespace for your cluster's REST catalog integration, set config_ref:iceberg_default_catalog_namespace,true,properties/cluster-properties[`iceberg_default_catalog_namespace`] at the same time. 
This property cannot be changed after you enable Iceberg topics on the cluster. ++ [,bash] ---- -rpk cluster config set iceberg_enabled true +rpk cluster config set iceberg_enabled true +# Optional: set a custom namespace (default is "redpanda") +# rpk cluster config set iceberg_default_catalog_namespace '["<namespace>"]' ---- + [,bash,role=no-copy] diff --git a/modules/manage/pages/iceberg/iceberg-topics-aws-glue.adoc b/modules/manage/pages/iceberg/iceberg-topics-aws-glue.adoc index f25ab71625..a530eeec06 100644 --- a/modules/manage/pages/iceberg/iceberg-topics-aws-glue.adoc +++ b/modules/manage/pages/iceberg/iceberg-topics-aws-glue.adoc @@ -130,11 +130,15 @@ To configure your Redpanda cluster to enable Iceberg on a topic and integrate wi . Edit your cluster configuration to set the `iceberg_enabled` property to `true`, and set the catalog integration properties listed in the example below. ifndef::env-cloud[] + +By default, Redpanda creates Iceberg tables in a namespace called `redpanda`. Because AWS Glue provides a single catalog per account, each Redpanda cluster that writes to the same Glue catalog must use a distinct namespace to avoid table name collisions. To set a unique namespace, set config_ref:iceberg_default_catalog_namespace,true,properties/cluster-properties[`iceberg_default_catalog_namespace`] at the same time. This property cannot be changed after Iceberg is enabled.
++ Run `rpk cluster config edit` to update these properties: + [,bash] ---- iceberg_enabled: true +# Set a custom namespace instead of the default "redpanda" +iceberg_default_catalog_namespace: ["<namespace>"] # Glue requires Redpanda Iceberg tables to be manually deleted iceberg_delete: false iceberg_catalog_type: rest diff --git a/modules/manage/pages/iceberg/iceberg-topics-databricks-unity.adoc b/modules/manage/pages/iceberg/iceberg-topics-databricks-unity.adoc index 72a3318b4c..3fcef32425 100644 --- a/modules/manage/pages/iceberg/iceberg-topics-databricks-unity.adoc +++ b/modules/manage/pages/iceberg/iceberg-topics-databricks-unity.adoc @@ -191,8 +191,13 @@ echo "hello world\nfoo bar\nbaz qux" | rpk topic produce --format=' You should see the topic as a table with data in Unity Catalog. The data may take some time to become visible, depending on your config_ref:iceberg_target_lag_ms,true,properties/cluster-properties[`iceberg_target_lag_ms`] setting. +ifndef::env-cloud[] +. In Catalog Explorer, open your catalog. You should see a `redpanda` schema (or the namespace you configured with config_ref:iceberg_default_catalog_namespace,true,properties/cluster-properties[`iceberg_default_catalog_namespace`]), in addition to `default` and `information_schema`. +endif::[] +ifdef::env-cloud[] . In Catalog Explorer, open your catalog. You should see a `redpanda` schema, in addition to `default` and `information_schema`. -. The `redpanda` schema and the table residing within this schema are automatically added for you. The table name is the same as the topic name. +endif::[] +. The schema and the table residing within it are automatically added for you. The table name is the same as the topic name.
== Query Iceberg table using Databricks SQL diff --git a/modules/manage/pages/iceberg/query-iceberg-topics.adoc b/modules/manage/pages/iceberg/query-iceberg-topics.adoc index 413cace9f6..939674dff5 100644 --- a/modules/manage/pages/iceberg/query-iceberg-topics.adoc +++ b/modules/manage/pages/iceberg/query-iceberg-topics.adoc @@ -97,6 +97,10 @@ endif::[] {"user_id": 2324, "event_type": "BUTTON_CLICK", "ts": "2024-11-25T20:23:59.380Z"} ---- +ifndef::env-cloud[] +NOTE: The query examples on this page use `redpanda` as the Iceberg namespace, which is the default. If you configured a different namespace using config_ref:iceberg_default_catalog_namespace,true,properties/cluster-properties[`iceberg_default_catalog_namespace`], replace `redpanda` with your configured namespace. +endif::[] + === Topic with schema (`value_schema_id_prefix` mode) NOTE: The steps in this section also apply to the `value_schema_latest` mode, except the produce step. The `value_schema_latest` mode is not compatible with the Schema Registry wire format. The xref:reference:rpk/rpk-topic/rpk-topic-produce[`rpk topic produce`] command embeds the wire format header, so you must use your own producer code with `value_schema_latest`. diff --git a/modules/manage/pages/iceberg/redpanda-topics-iceberg-snowflake-catalog.adoc b/modules/manage/pages/iceberg/redpanda-topics-iceberg-snowflake-catalog.adoc index bc53d3755f..7488014154 100644 --- a/modules/manage/pages/iceberg/redpanda-topics-iceberg-snowflake-catalog.adoc +++ b/modules/manage/pages/iceberg/redpanda-topics-iceberg-snowflake-catalog.adoc @@ -169,7 +169,12 @@ echo "hello world\nfoo bar\nbaz qux" | rpk topic produce --format=' You should see the topic as a table in Open Catalog. . In Open Catalog, select *Catalogs*, then open your catalog. -. Under your catalog, you should see the `redpanda` namespace, and a table with the name of your topic. The `redpanda` namespace and the table are automatically added for you. +ifndef::env-cloud[] +. 
Under your catalog, you should see the `redpanda` namespace (or the namespace you configured with config_ref:iceberg_default_catalog_namespace,true,properties/cluster-properties[`iceberg_default_catalog_namespace`]), and a table with the name of your topic. The namespace and the table are automatically added for you. +endif::[] +ifdef::env-cloud[] +. Under your catalog, you should see the `redpanda` namespace and a table with the name of your topic. The namespace and the table are automatically added for you. +endif::[] == Query Iceberg table in Snowflake diff --git a/modules/manage/pages/iceberg/specify-iceberg-schema.adoc b/modules/manage/pages/iceberg/specify-iceberg-schema.adoc index b98c0a1793..d8ccd38c37 100644 --- a/modules/manage/pages/iceberg/specify-iceberg-schema.adoc +++ b/modules/manage/pages/iceberg/specify-iceberg-schema.adoc @@ -275,7 +275,7 @@ Requirements: - Only JSON Schema Draft-07 is currently supported. - You must declare the JSON Schema dialect using the `$schema` keyword, for example `"$schema": "http://json-schema.org/draft-07/schema#"`. -- You must use a JSON Schema that constrains JSON documents to a strict type in order for Redpanda to translate to Iceberg; that is, each subschema must use the `type` keyword. +- You must use a JSON Schema that constrains JSON documents to a strict type so Redpanda can translate to Iceberg. In most cases this means each subschema uses the `type` keyword, but a subschema can also use `$ref` if the referenced schema resolves to a strict type. .Valid JSON Schema example [,json] @@ -310,7 +310,7 @@ Requirements: | null | -| The `null` type is not supported except when it is paired with another type to indicate nullability. +| The `null` type is only supported as a nullability marker, either in a `type` array (for example, `["string", "null"]`) or in an exclusive `oneOf` nullable pattern. | number | double @@ -325,9 +325,11 @@ Requirements: | The `format` keyword can be used for custom Iceberg types. 
See <> for details. | object -| struct -| The `properties` keyword must be used to define `struct` fields and constrain their types. The `additionalProperties` keyword is accepted only when it is set to `false`. - +| struct or map +a| * Use `properties` to define `struct` fields and constrain their types. `additionalProperties: false` is supported for closed objects. +* If `additionalProperties` contains a schema, it translates to an Iceberg `map`. +* You cannot combine `properties` and `additionalProperties` in an object if `additionalProperties` is set to a schema. + |=== [[format-translation]] @@ -341,13 +343,20 @@ Requirements: |=== +The following keywords have specific behavior: + +* The `$ref` keyword is supported for internal references resolved from schema resources declared in the same document (using `$id`), including relative and absolute URI forms. References to external resources and references to unknown keywords are not supported. A root-level `$ref` schema is not supported. +* The `oneOf` keyword is supported only for the nullable serializer pattern where exactly one branch is `{"type":"null"}` and the other branch is a non-null schema (`T|null`). +* In Iceberg output, Redpanda writes all fields as nullable regardless of serializer nullability annotations. 
+ The following are not supported for JSON Schema: -* Relative and absolute (including external) references using `$ref` and `$dynamicRef` keywords +* The `$dynamicRef` keyword * The `default` keyword -* Conditional typing (`if`, `then`, `else`, `dependent` keywords) -* Boolean JSON Schema combinations (`allOf`, `anyOf`, `oneOf` keywords) -* Dynamic object members (`patternProperties` and `additionalProperties` (except when it is set to `false`) keywords) +* Conditional typing (`if`, `then`, `else`, `dependencies` keywords) +* Boolean JSON Schema combinations (`allOf`, `anyOf`, and non-nullable `oneOf` patterns) +* Dynamic object members with the `patternProperties` keyword +* The `additionalProperties` keyword when set to `true` -- ====== diff --git a/modules/manage/pages/iceberg/use-iceberg-catalogs.adoc b/modules/manage/pages/iceberg/use-iceberg-catalogs.adoc index b5f615ae13..4aa3f32a27 100644 --- a/modules/manage/pages/iceberg/use-iceberg-catalogs.adoc +++ b/modules/manage/pages/iceberg/use-iceberg-catalogs.adoc @@ -90,6 +90,14 @@ To connect to a REST catalog, set the following cluster configuration properties NOTE: You must set `iceberg_rest_catalog_endpoint` at the same time that you set `iceberg_catalog_type` to `rest`. +ifndef::env-cloud[] +==== Configure table namespace + +Check if your REST catalog provider has specific requirements or recommendations for namespaces. For example, AWS Glue offers only a single global catalog per account, and each cluster that writes to the same Glue catalog must use a distinct namespace to avoid table name collisions. + +By default, Redpanda creates Iceberg tables in a namespace called `redpanda`. To use a unique namespace, configure the config_ref:iceberg_default_catalog_namespace,true,properties/cluster-properties[`iceberg_default_catalog_namespace`] cluster property. You must set this property before enabling the Iceberg integration or at the same time. After you have enabled Iceberg, do not change this property value. 
+endif::[] + ==== Configure authentication To authenticate with the REST catalog, set the following cluster properties: @@ -272,7 +280,10 @@ The Spark engine can use the REST catalog to automatically discover the topic's SELECT * FROM streaming.redpanda.; ---- -The Iceberg table name is the name of your Redpanda topic. Redpanda puts the Iceberg table into a namespace called `redpanda`, creating the namespace if necessary. +The Iceberg table name is the name of your Redpanda topic. +ifndef::env-cloud[] +If you configured a different namespace using config_ref:iceberg_default_catalog_namespace,true,properties/cluster-properties[`iceberg_default_catalog_namespace`], replace `redpanda` with your configured namespace. +endif::[] TIP: You may need to explicitly create a table for the Iceberg data in your query engine. For an example, see xref:manage:iceberg/redpanda-topics-iceberg-snowflake-catalog.adoc[]. diff --git a/modules/manage/pages/kubernetes/k-cloud-topics.adoc b/modules/manage/pages/kubernetes/k-cloud-topics.adoc new file mode 100644 index 0000000000..140bc19ef9 --- /dev/null +++ b/modules/manage/pages/kubernetes/k-cloud-topics.adoc @@ -0,0 +1,505 @@ += Cloud Topics on Kubernetes +:description: Configure Cloud Topics on Kubernetes to optimize for latency-tolerant, high-throughput workloads using object storage as the primary data tier. +:page-categories: Management, Data Replication +:page-topic-type: how-to +:env-kubernetes: true + +Cloud Topics are a storage mode in Redpanda optimized for latency-tolerant, high-throughput workloads where cross-AZ networking costs significantly impact expenses. Instead of replicating data across network links, Cloud Topics use durable object storage (such as Amazon S3, Google Cloud Storage, Azure Blob Storage, or MinIO) as the primary mechanism to replicate data and serve it to consumers. + +This approach can eliminate over 90% of replication costs across network links in multi-AZ deployments. 
+ +After reading this page, you will be able to: + +* [ ] Configure object storage for Cloud Topics on Kubernetes. +* [ ] Enable and create Cloud Topics using the Redpanda Operator or Helm. +* [ ] Verify topic storage mode configuration. + +== How Cloud Topics work + +With standard Redpanda topics, data is replicated across brokers using Raft consensus and stored locally on each replica. Cloud Topics change this model: data is acknowledged only after it is uploaded to object storage, making object storage the source of truth for both replication and consumption. + +Because data isn't acknowledged until uploaded to object storage, produce latency is higher than with standard topics. Expected end-to-end latencies range from 500 ms to 1 second with public cloud object stores. Lower latencies are achievable in certain environments. + +=== Storage modes + +Redpanda supports multiple storage modes that you can set at the cluster or topic level using the `redpanda.storage.mode` property: + +|=== +| Mode | Behavior + +| `unset` +| Follows legacy behavior. + +| `local` +| Data is stored only on local disk. No remote storage is used. + +| `tiered` +| Data is written locally and offloaded to object storage asynchronously using Tiered Storage. + +| `cloud` +| Data is managed primarily in object storage. Local storage acts as a cache. +|=== + +=== Ideal use cases + +Cloud Topics are best suited for latency-tolerant workloads, including: + +- Observability and logging streams +- Offline analytics pipelines +- AI/ML training data ingestion +- Development and staging environments with flexible latency requirements + +=== Limitations + +- Once created, a Cloud Topic cannot be converted back to a standard topic using `local` or `tiered` storage mode. +- Higher produce latency compared to standard topics (500 ms to 1 second with public cloud stores). 
+ +== Prerequisites + +You must have the following: + +- **kubectl**: Ensure you have the https://kubernetes.io/docs/tasks/tools/#kubectl[`kubectl`^] command-line tool installed and configured to communicate with your cluster. +- **Redpanda**: A xref:deploy:deployment-option/self-hosted/kubernetes/kubernetes-deploy.adoc[Redpanda Operator and a Redpanda resource deployed] in your Kubernetes cluster running Redpanda v26.1 or later. +- **Object storage**: A configured object storage backend (Amazon S3, Google Cloud Storage, Azure Blob Storage, or an S3-compatible store such as MinIO). +- **Enterprise license**: A valid Redpanda Enterprise license applied to the cluster. ++ +[NOTE] +==== +include::shared:partial$enterprise-license.adoc[] +==== ++ +To check if you already have a license key applied to your cluster: + +[,bash] +---- +rpk cluster license info +---- + +== Configure object storage + +Cloud Topics use the same object storage configuration as Tiered Storage. If you have already configured object storage for Tiered Storage, you can skip this step and proceed to <>. + +For detailed instructions including IAM role configuration, access key management, and encryption options, see xref:manage:kubernetes/tiered-storage/k-tiered-storage.adoc#configure-object-storage[Configure object storage] in the Tiered Storage documentation. + +The following examples show the minimum required configuration for each cloud provider. 
+ +=== Amazon S3 + +[tabs] +====== +Helm + Operator:: ++ +-- +.`redpanda-cluster.yaml` +[,yaml] +---- +apiVersion: cluster.redpanda.com/v1alpha2 +kind: Redpanda +metadata: + name: redpanda +spec: + clusterSpec: + storage: + tiered: + credentialsSecretRef: + accessKey: + name: storage-secrets + key: access-key + secretKey: + name: storage-secrets + key: secret-key + config: + cloud_storage_enabled: true + cloud_storage_credentials_source: config_file # <1> + cloud_storage_region: + cloud_storage_bucket: +---- +<1> Use `aws_instance_metadata` instead if you are using an IAM role attached to your nodes. +-- +Helm:: ++ +-- +.`cloud-storage.yaml` +[,yaml] +---- +storage: + tiered: + credentialsSecretRef: + accessKey: + name: storage-secrets + key: access-key + secretKey: + name: storage-secrets + key: secret-key + config: + cloud_storage_enabled: true + cloud_storage_credentials_source: config_file # <1> + cloud_storage_region: + cloud_storage_bucket: +---- +<1> Use `aws_instance_metadata` instead if you are using an IAM role attached to your nodes. +-- +====== + +For details on IAM roles, access keys, and AWS KMS encryption, see xref:manage:kubernetes/tiered-storage/k-tiered-storage.adoc#amazon-s3[Amazon S3] in the Tiered Storage documentation. + +=== Google Cloud Storage (GCS) + +[tabs] +====== +Helm + Operator:: ++ +-- +.`redpanda-cluster.yaml` +[,yaml] +---- +apiVersion: cluster.redpanda.com/v1alpha2 +kind: Redpanda +metadata: + name: redpanda +spec: + clusterSpec: + storage: + tiered: + credentialsSecretRef: + accessKey: + name: storage-secrets + key: access-key + secretKey: + name: storage-secrets + key: secret-key + config: + cloud_storage_enabled: true + cloud_storage_credentials_source: config_file # <1> + cloud_storage_api_endpoint: storage.googleapis.com + cloud_storage_region: + cloud_storage_bucket: +---- +<1> Use `gcp_instance_metadata` instead if you are using a GCP service account attached to your nodes. 
+-- +Helm:: ++ +-- +.`cloud-storage.yaml` +[,yaml] +---- +storage: + tiered: + credentialsSecretRef: + accessKey: + name: storage-secrets + key: access-key + secretKey: + name: storage-secrets + key: secret-key + config: + cloud_storage_enabled: true + cloud_storage_credentials_source: config_file # <1> + cloud_storage_api_endpoint: storage.googleapis.com + cloud_storage_region: + cloud_storage_bucket: +---- +<1> Use `gcp_instance_metadata` instead if you are using a GCP service account attached to your nodes. +-- +====== + +For details on IAM roles, HMAC access keys, and customer-managed encryption keys, see xref:manage:kubernetes/tiered-storage/k-tiered-storage.adoc#google-cloud-storage[Google Cloud Storage] in the Tiered Storage documentation. + +=== Azure Blob Storage + +[tabs] +====== +Helm + Operator:: ++ +-- +.`redpanda-cluster.yaml` +[,yaml] +---- +apiVersion: cluster.redpanda.com/v1alpha2 +kind: Redpanda +metadata: + name: redpanda +spec: + clusterSpec: + storage: + tiered: + credentialsSecretRef: + accessKey: + name: storage-secrets + key: shared-key + config: + cloud_storage_enabled: true + cloud_storage_credentials_source: config_file # <1> + cloud_storage_azure_storage_account: + cloud_storage_azure_container: +---- +<1> Use `azure_aks_oidc_federation` instead if you are using an Azure managed identity. When using managed identities, omit `credentialsSecretRef` and configure workload identity annotations on the service account. +-- +Helm:: ++ +-- +.`cloud-storage.yaml` +[,yaml] +---- +storage: + tiered: + credentialsSecretRef: + accessKey: + name: storage-secrets + key: shared-key + config: + cloud_storage_enabled: true + cloud_storage_credentials_source: config_file # <1> + cloud_storage_azure_storage_account: + cloud_storage_azure_container: +---- +<1> Use `azure_aks_oidc_federation` instead if you are using an Azure managed identity. 
When using managed identities, omit `credentialsSecretRef` and configure workload identity annotations on the service account. +-- +====== + +For details on managed identities and account access keys, see xref:manage:kubernetes/tiered-storage/k-tiered-storage.adoc#microsoft-absadls[Microsoft ABS/ADLS] in the Tiered Storage documentation. + +== Enable Cloud Topics + +To enable Cloud Topics, set the `cloud_topics_enabled` cluster property to `true` and set the default storage mode for all new topics to `cloud`. + +[tabs] +====== +Helm + Operator:: ++ +-- + +. Add the following to your Redpanda custom resource to enable Cloud Topics and set the default storage mode: ++ +.`redpanda-cluster.yaml` +[,yaml] +---- +apiVersion: cluster.redpanda.com/v1alpha2 +kind: Redpanda +metadata: + name: redpanda +spec: + clusterSpec: + storage: + tiered: + mountType: none + credentialsSecretRef: + accessKey: + name: cloud-storage-creds + key: access-key + secretKey: + name: cloud-storage-creds + key: secret-key + config: + cloud_storage_enabled: true + cloud_storage_region: + cloud_storage_bucket: + config: + cluster: + cloud_topics_enabled: true + default_redpanda_storage_mode: cloud # <1> +---- +<1> Optional. Set to `cloud` to make all new topics Cloud Topics by default. Omit this to create Cloud Topics individually. + +. Apply the configuration: ++ +[,bash] +---- +kubectl apply -f redpanda-cluster.yaml -n +---- + +. The Redpanda Operator reconciles the configuration. Wait for the cluster to be ready: ++ +[,bash] +---- +kubectl rollout status statefulset redpanda -n --watch +---- + +. Verify that Cloud Topics are enabled: ++ +[,bash] +---- +kubectl exec -n redpanda-0 -c redpanda -- \ + rpk cluster config get cloud_topics_enabled +---- ++ +Expected output: `true` + +-- +Helm:: ++ +-- + +. 
Add the following to your Helm values file: ++ +.`cloud-topics-values.yaml` +[,yaml] +---- +storage: + tiered: + mountType: none + credentialsSecretRef: + accessKey: + name: cloud-storage-creds + key: access-key + secretKey: + name: cloud-storage-creds + key: secret-key + config: + cloud_storage_enabled: true + cloud_storage_region: + cloud_storage_bucket: +config: + cluster: + cloud_topics_enabled: true + default_redpanda_storage_mode: cloud # <1> +---- +<1> Optional. Set to `cloud` to make all new topics Cloud Topics by default. Omit this to create Cloud Topics individually. + +. Deploy or upgrade the Helm chart: ++ +[,bash] +---- +helm upgrade --install redpanda redpanda/redpanda \ + -n --create-namespace \ + -f cloud-topics-values.yaml +---- + +. Wait for the cluster to be ready: ++ +[,bash] +---- +kubectl rollout status statefulset redpanda -n --watch +---- + +. Verify that Cloud Topics are enabled: ++ +[,bash] +---- +kubectl exec -n redpanda-0 -c redpanda -- \ + rpk cluster config get cloud_topics_enabled +---- ++ +Expected output: `true` +-- +====== + +== Create a Cloud Topic + +You can create Cloud Topics either by setting the cluster default storage mode to `cloud`, or by configuring individual topics. + +=== Create Cloud Topics by default + +If you set `default_redpanda_storage_mode` to `cloud` in the cluster configuration, all new topics inherit the `cloud` storage mode automatically. 
+ +Verify the cluster default: + +[,bash] +---- +kubectl exec -n redpanda-0 -c redpanda -- \ + rpk cluster config get default_redpanda_storage_mode +---- + +Expected output: `cloud` + +Any new topic created without an explicit storage mode inherits this default: + +.`cloud-topic.yaml` +[,yaml] +---- +apiVersion: cluster.redpanda.com/v1alpha2 +kind: Topic +metadata: + name: my-cloud-topic + namespace: +spec: + partitions: 3 + replicationFactor: 3 + cluster: + clusterRef: + name: redpanda +---- + +[,bash] +---- +kubectl apply -f cloud-topic.yaml +---- + +=== Create individual Cloud Topics + +If the cluster default storage mode is not set to `cloud`, you can create individual Cloud Topics by setting the `redpanda.storage.mode` property on the topic. + +.`cloud-topic.yaml` +[,yaml] +---- +apiVersion: cluster.redpanda.com/v1alpha2 +kind: Topic +metadata: + name: my-cloud-topic + namespace: +spec: + partitions: 3 + replicationFactor: 3 + additionalConfig: + redpanda.storage.mode: "cloud" + cluster: + clusterRef: + name: redpanda +---- + +[,bash] +---- +kubectl apply -f cloud-topic.yaml +---- + +=== Override the cluster default + +Topic-level storage mode settings override the cluster default. 
For example, if the cluster default is `cloud`, you can create a topic that uses Tiered Storage instead: + +.`tiered-topic.yaml` +[,yaml] +---- +apiVersion: cluster.redpanda.com/v1alpha2 +kind: Topic +metadata: + name: my-tiered-topic + namespace: +spec: + partitions: 3 + replicationFactor: 3 + additionalConfig: + redpanda.storage.mode: "tiered" + cluster: + clusterRef: + name: redpanda +---- + +== Verify topic storage mode + +To verify the storage mode of a topic, inspect its configuration through the Topic resource status: + +[,bash] +---- +kubectl get topic -n -o jsonpath=\ + '{range .status.topicConfiguration[*]}{.name}={.value} ({.source}){"\n"}{end}' \ + | grep storage.mode +---- + +Expected output for a Cloud Topic: + +---- +storage.mode=cloud (DEFAULT_CONFIG) +---- + +Or, if explicitly set on the topic: + +---- +storage.mode=cloud (DYNAMIC_TOPIC_CONFIG) +---- + +The `source` field indicates whether the value was inherited from the cluster default (`DEFAULT_CONFIG`) or explicitly set on the topic (`DYNAMIC_TOPIC_CONFIG`). + +== Suggested reading + +- xref:manage:kubernetes/tiered-storage/k-tiered-storage.adoc[Use Tiered Storage on Kubernetes] +- xref:manage:kubernetes/k-manage-topics.adoc[Manage Topics with the Redpanda Operator] diff --git a/modules/manage/pages/kubernetes/k-decommission-brokers.adoc b/modules/manage/pages/kubernetes/k-decommission-brokers.adoc index bf71604385..fb0410ab7e 100644 --- a/modules/manage/pages/kubernetes/k-decommission-brokers.adoc +++ b/modules/manage/pages/kubernetes/k-decommission-brokers.adoc @@ -15,6 +15,16 @@ You may want to decommission a broker in the following situations: NOTE: When a broker is decommissioned, it cannot rejoin the cluster. If a broker with the same ID tries to rejoin the cluster, it is rejected. 
+== Decommissioning methods + +There are two ways to decommission brokers in Redpanda: + +* Manual decommissioning (described in this guide): Use `rpk` commands or Kubernetes automation to explicitly decommission a broker when you need full control over the timing and selection of brokers to remove. + +* Automatic decommissioning: When xref:manage:cluster-maintenance/continuous-data-balancing.adoc[Continuous Data Balancing] is enabled, you can configure the xref:manage:cluster-maintenance/continuous-data-balancing.adoc#partition_autobalancing_node_autodecommission_timeout_sec[partition_autobalancing_node_autodecommission_timeout_sec] property to automatically decommission brokers that remain unavailable for a specified duration. + +Both methods permanently remove the broker from the cluster. Decommissioned brokers cannot rejoin. + == Prerequisites You must have the following: diff --git a/modules/manage/pages/kubernetes/k-manage-topics.adoc b/modules/manage/pages/kubernetes/k-manage-topics.adoc index aadc9ece3b..394b9c08c8 100644 --- a/modules/manage/pages/kubernetes/k-manage-topics.adoc +++ b/modules/manage/pages/kubernetes/k-manage-topics.adoc @@ -326,21 +326,9 @@ With `write_caching` enabled at the cluster level, Redpanda fsyncs to disk accor To override the cluster-level setting at the topic level, set the topic-level property xref:reference:topic-properties.adoc#writecaching[`write.caching`]: .`example-topic.yaml` -[,yaml,lines=9] +[,yaml,indent=0] ---- -apiVersion: cluster.redpanda.com/v1alpha2 -kind: Topic -metadata: - name: chat-room - namespace: -spec: - cluster: - clusterRef: - name: - partitions: 3 - replicationFactor: 3 - additionalConfig: - write.caching: true +include::manage:example$kubernetes/topic-crds.feature[tags=write-caching-topic-example,indent=0] ---- With `write.caching` enabled at the topic level, Redpanda fsyncs to disk according to xref:reference:topic-properties.adoc#flushms[`flush.ms`] and 
xref:reference:topic-properties.adoc#flushbytes[`flush.bytes`], whichever is reached first. @@ -627,21 +615,9 @@ CAUTION: Do not use `rpk` or any other Kafka clients to edit topics that you cre The following example changes the cleanup policy for a topic: .`example-topic.yaml` -[,yaml,lines=8-9] +[,yaml,indent=0] ---- -apiVersion: cluster.redpanda.com/v1alpha2 -kind: Topic -metadata: - name: - namespace: -spec: - cluster: - clusterRef: - name: - partitions: 3 - replicationFactor: 3 - additionalConfig: - cleanup.policy: "delete" +include::manage:example$kubernetes/topic-crds.feature[tags=cleanup-policy-topic-example,indent=0] ---- [,bash] diff --git a/modules/manage/pages/kubernetes/k-schema-controller.adoc b/modules/manage/pages/kubernetes/k-schema-controller.adoc index e2e5772aba..9a190fd961 100644 --- a/modules/manage/pages/kubernetes/k-schema-controller.adoc +++ b/modules/manage/pages/kubernetes/k-schema-controller.adoc @@ -172,28 +172,9 @@ Compatibility modes determine how schema versions within a subject can evolve wi For example, to set full compatibility, configure the Schema resource with: -[source,yaml] +[,yaml,indent=0] ---- -apiVersion: cluster.redpanda.com/v1alpha2 -kind: Schema -metadata: - name: fully-compatible-schema - namespace: redpanda -spec: - cluster: - clusterRef: - name: basic - schemaType: avro - compatibilityLevel: Full - text: | - { - "type": "record", - "name": "ExampleRecord", - "fields": [ - { "type": "string", "name": "field1" }, - { "type": "int", "name": "field2" } - ] - } +include::manage:example$kubernetes/schema-crds.feature[tags=full-compatibility-schema-manifest,indent=0] ---- Compatibility settings are essential for maintaining data consistency, especially when updating schemas over time. @@ -206,29 +187,9 @@ NOTE: This feature is supported for Avro and Protobuf schemas. Define a schema reference using the `references` field. 
The reference includes the name, subject, and version of the referenced schema: -[source,yaml] +[,yaml,indent=0] ---- -apiVersion: cluster.redpanda.com/v1alpha2 -kind: Schema -metadata: - name: order-schema - namespace: redpanda -spec: - cluster: - clusterRef: - name: basic - references: - - name: product-schema - subject: product - version: 1 - text: | - { - "type": "record", - "name": "Order", - "fields": [ - { "name": "product", "type": "Product" } - ] - } +include::manage:example$kubernetes/schema-crds.feature[tags=schema-references-manifest,indent=0] ---- == Update a schema @@ -278,4 +239,6 @@ internal-rpk registry subject list For more details on using schemas in Redpanda, see: * xref:manage:schema-reg/index.adoc[] +* xref:manage:kubernetes/security/authentication/k-schema-registry-acls.adoc[Manage Schema Registry ACLs (Operator)] +* xref:manage:schema-reg/schema-reg-authorization.adoc[] diff --git a/modules/manage/pages/kubernetes/security/authentication/k-schema-registry-acls.adoc b/modules/manage/pages/kubernetes/security/authentication/k-schema-registry-acls.adoc new file mode 100644 index 0000000000..7f5ee61f42 --- /dev/null +++ b/modules/manage/pages/kubernetes/security/authentication/k-schema-registry-acls.adoc @@ -0,0 +1,133 @@ += Manage Schema Registry ACLs with the Redpanda Operator +:description: Manage Schema Registry ACLs declaratively in Kubernetes using User, RedpandaRole, and Group custom resources with the Redpanda Operator. +:page-categories: Management, Security +:page-topic-type: how-to +:personas: platform_operator +:env-kubernetes: true + +With the Redpanda Operator, you can declaratively manage Schema Registry ACLs alongside standard Kafka ACLs using the existing xref:reference:k-crd.adoc#k8s-api-github-com-redpanda-data-redpanda-operator-operator-api-redpanda-v1alpha2-user[User], xref:reference:k-crd.adoc#k8s-api-github-com-redpanda-data-redpanda-operator-operator-api-redpanda-v1alpha2-role[RedpandaRole], and Group custom resources. 
This allows you to control which users and roles can perform specific operations within Schema Registry. + +For Schema Registry Authorization concepts and the available operations, see xref:manage:schema-reg/schema-reg-authorization.adoc[]. + +== Prerequisites + +You must have the following: + +* *kubectl*: The https://kubernetes.io/docs/tasks/tools/#kubectl[kubectl^] command-line tool, installed and configured to communicate with your cluster. +* *Redpanda Operator v25.3 or later*: See xref:deploy:deployment-option/self-hosted/kubernetes/k-production-deployment.adoc[]. +* *Redpanda cluster with SASL enabled*: See xref:manage:kubernetes/security/authentication/k-authentication.adoc#enable[Enable SASL authentication]. +* *Schema Registry Authorization enabled*: See xref:manage:schema-reg/schema-reg-authorization.adoc#enable-schema-registry-authorization[Enable Schema Registry Authorization]. + +== Schema Registry ACL resource types + +The Redpanda Operator supports two Schema Registry ACL resource types in addition to the standard Kafka ACL resource types (`topic`, `group`, `cluster`, `transactionalId`): + +* `subject`: Controls access to specific Schema Registry subjects. Specify the subject name in `resource.name`. Supports both `literal` and `prefixed` pattern types. +* `registry`: Controls access to global Schema Registry operations. The `registry` resource type does not require a `name` because it applies to all global registry operations. + +For a full list of supported operations by resource type, see xref:manage:schema-reg/schema-reg-authorization.adoc#supported-operations[Supported operations]. + +== Define Schema Registry ACLs in a User resource + +The xref:manage:kubernetes/security/authentication/k-user-controller.adoc[User resource] supports Schema Registry ACLs alongside standard Kafka ACLs. 
+ +.`user-with-sr-acls.yaml` +[,yaml,indent=0] +---- +include::manage:example$kubernetes/user-crds.feature[tags=manage-authz-only-manifest,indent=0] +---- + +In this example, the User resource creates ACLs for an existing user called `travis` in the cluster called `sasl`. The first ACL rule grants read access to all topics whose names start with `some-topic` using a `prefixed` pattern type. The second ACL rule grants read access to Schema Registry subjects matching the same prefix. + +When both Kafka and Schema Registry ACLs are defined in the same User resource, the operator syncs them independently. Kafka ACLs are applied through the Kafka API and Schema Registry ACLs are applied through the Schema Registry API. + +== Define Schema Registry ACLs in a RedpandaRole resource + +The xref:manage:kubernetes/security/authorization/k-role-controller.adoc[RedpandaRole resource] groups Schema Registry ACLs into reusable permission sets for multiple users. + +.`role-with-sr-acls.yaml` +[,yaml,indent=0] +---- +include::manage:example$kubernetes/role-crds.feature[tags=manage-roles-with-authorization,indent=0] +---- + +In this example, a RedpandaRole called `read-only-role` is created in the cluster called `sasl`. The user `charlie` is assigned as a principal. The authorization rules grant `Read` and `Describe` access to all topics with names starting with `public-` using a `prefixed` pattern type, and the same `Read` and `Describe` access to Schema Registry subjects matching the same prefix. + +== Define Schema Registry ACLs in a Group resource + +The Group resource supports Schema Registry ACLs for OIDC groups. + +In this example, ACLs are created for an OIDC group called `engineering` in the cluster called `sasl`. The authorization rules grant `Read` and `Describe` access to all topics with names starting with `team-` using a `prefixed` pattern type, and the same `Read` and `Describe` access to Schema Registry subjects matching the same prefix. 
+ +.`group-with-sr-acls.yaml` +[,yaml,indent=0] +---- +include::manage:example$kubernetes/group-crds.feature[tags=manage-group-acls,indent=0] +---- + +== Common use cases + +The following examples show common patterns for configuring Schema Registry ACLs using the User resource. + +=== Grant a user read access to a subject + +This example gives a consumer application read access to the `orders` topic and its associated Schema Registry subject `orders-value`. Both ACLs use a `literal` pattern type to match exact resource names. + +.`consumer-app.yaml` +[,yaml,indent=0] +---- +include::manage:example$kubernetes/user-crds.feature[tags=grant-user-read-access,indent=0] +---- + +=== Grant a producer write access using prefix patterns + +This example creates a user called `producer-app` with both authentication credentials and authorization rules. The ACLs grant `Write` and `Describe` access to all topics and Schema Registry subjects whose names start with `events-` using a `prefixed` pattern type. This allows the producer to register new schema versions for any subject matching the prefix. + +.`producer-app.yaml` +[,yaml,indent=0] +---- +include::manage:example$kubernetes/user-crds.feature[tags=grant-producer-write-access,indent=0] +---- + +=== Grant global Schema Registry access + +This example gives a schema administrator full access to all Schema Registry operations. The first ACL rule uses the `registry` resource type, which applies to global operations such as getting or setting the global compatibility level. The `registry` resource type does not require a `name` field. The second ACL rule uses a `subject` resource type with an empty name and `prefixed` pattern type to match all subjects. + +.`schema-admin.yaml` +[,yaml,indent=0] +---- +include::manage:example$kubernetes/user-crds.feature[tags=grant-global-sr-access,indent=0] +---- + +== Partial sync behavior + +When a resource includes both Kafka and Schema Registry ACLs, the operator syncs them independently. 
If the Kafka ACLs sync successfully but the Schema Registry ACLs fail (for example, if Schema Registry Authorization is not enabled), the resource enters a `PartiallySynced` state. Check the resource status conditions for details: + +[,bash] +---- +kubectl get user <username> -o jsonpath='{.status.conditions}' --namespace <namespace> +---- + +== Deploy and verify + +To deploy a resource with Schema Registry ACLs, apply the manifest to the same namespace as your Redpanda cluster: + +[,bash] +---- +kubectl apply -f <manifest-filename>.yaml --namespace <namespace> +---- + +After deploying, verify that the Redpanda Operator reconciled the resource: + +[,bash] +---- +kubectl logs -l app.kubernetes.io/name=operator -c manager --namespace <namespace> +---- + +== Next steps + +* xref:manage:schema-reg/schema-reg-authorization.adoc[] +* xref:manage:kubernetes/security/authentication/k-user-controller.adoc[] +* xref:manage:kubernetes/security/authorization/k-role-controller.adoc[] +* xref:reference:k-crd.adoc#k8s-api-github-com-redpanda-data-redpanda-operator-operator-api-redpanda-v1alpha2-aclresourcespec[ACLResourceSpec] +* xref:manage:security/authorization/acl.adoc[] diff --git a/modules/manage/pages/security/authorization/acl.adoc b/modules/manage/pages/security/authorization/acl.adoc index e371c402a3..35fec7a6f3 100644 --- a/modules/manage/pages/security/authorization/acl.adoc +++ b/modules/manage/pages/security/authorization/acl.adoc @@ -62,8 +62,8 @@ Understanding these terms helps you configure least-privilege access. | Term | Definition | Example | Principal -| The entity (user or role) requesting access -| `User:analytics-user`, `RedpandaRole:data-engineers` +| The entity (user, role, or group) requesting access +| `User:analytics-user`, `RedpandaRole:data-engineers`, `Group:engineering` | Resource | The Redpanda component being accessed (cluster, topic, consumer group, transactional ID, Schema Registry glossterm:subject[], and Schema Registry operation) @@ -91,7 +91,13 @@ ACL commands work on a multiplicative basis. 
If you specify two principals and t [[principals]] === Principals -All ACLs require a principal. A principal is composed of two parts: the type, and the name. Redpanda supports the types "User" and "RedpandaRole". When you create user "bar", Redpanda expects you to add ACLs for "User:bar". +All ACLs require a principal. A principal is composed of two parts: the type, and the name. Redpanda supports the types "User", "RedpandaRole", and "Group". When you create user "bar", Redpanda expects you to add ACLs for "User:bar". To grant permissions to an OIDC group, use the `Group:` prefix (for example, `Group:engineering`). +ifndef::env-cloud[] +See xref:manage:security/authorization/gbac.adoc[]. +endif::[] +ifdef::env-cloud[] +See xref:security:authorization/gbac.adoc[]. +endif::[] The `--allow-principal` and `--deny-principal` flags add this prefix for you, if necessary. diff --git a/modules/manage/pages/security/authorization/gbac.adoc b/modules/manage/pages/security/authorization/gbac.adoc new file mode 100644 index 0000000000..8bc8f876b8 --- /dev/null +++ b/modules/manage/pages/security/authorization/gbac.adoc @@ -0,0 +1,17 @@ += Configure Group-Based Access Control +:description: Manage Redpanda permissions at scale using your identity provider's groups. Define access once per group and let your IdP control membership, with no per-user configuration in Redpanda. 
+:page-topic-type: how-to +:page-categories: Management, Security +:personas: security_engineer, platform_engineer +:learning-objective-1: Configure the cluster properties that enable GBAC +:learning-objective-2: Assign an OIDC group to an RBAC role +:learning-objective-3: Create a group-based ACL using the Group: principal prefix + +ifndef::env-cloud[] +[NOTE] +==== +include::shared:partial$enterprise-license.adoc[] +==== +endif::[] + +include::manage:partial$gbac-dp.adoc[] diff --git a/modules/manage/pages/security/authorization/index.adoc b/modules/manage/pages/security/authorization/index.adoc index 16ff7a1f26..d426846189 100644 --- a/modules/manage/pages/security/authorization/index.adoc +++ b/modules/manage/pages/security/authorization/index.adoc @@ -1,8 +1,8 @@ = Configure Authorization -:description: Redpanda provides two mechanisms for controlling user permissions. +:description: Redpanda provides mechanisms for controlling user permissions, including ACLs, role-based access control, and group-based access control. :page-aliases: security:authorization/index.adoc, manage:security/authorization.adoc :page-categories: Management, Security :page-layout: index -Authorization works in tandem with xref:security/authentication.adoc[authentication]. Authentication grants permission to interact with Redpanda resources while authorization controls what a principal is permitted to do once authenticated. +Authorization works in tandem with xref:security/authentication.adoc[authentication]. Authentication verifies who a principal is. Authorization controls what that principal can do once authenticated. 
diff --git a/modules/manage/pages/security/fips-compliance.adoc b/modules/manage/pages/security/fips-compliance.adoc index 208af643d6..3e2d268c6e 100644 --- a/modules/manage/pages/security/fips-compliance.adoc +++ b/modules/manage/pages/security/fips-compliance.adoc @@ -1,8 +1,19 @@ -= Configure Redpanda for FIPS -:description: Configure Redpanda to operate in FIPS compliance mode. += Configure Redpanda for FIPS +:description: Configure Redpanda to operate in FIPS-compliant mode. :page-aliases: security:fips-compliance.adoc +:page-topic-type: how-to +:personas: platform_operator +:learning-objective-1: Configure a Redpanda broker to run in FIPS-compliant mode +:learning-objective-2: Set the required OpenSSL properties for FIPS mode +:learning-objective-3: Deploy Redpanda in FIPS-compliant mode using Docker -Redpanda provides FIPS-compliant cipher enforcement for brokers using OpenSSL 3.0.9, which is https://csrc.nist.gov/projects/cryptographic-module-validation-program/certificate/4282[validated^] for https://csrc.nist.gov/pubs/fips/140-2/upd2/final[140-2^] and is undergoing validation by NIST for https://csrc.nist.gov/pubs/fips/140-3/final[140-3^]. Both Redpanda and `rpk` leverage validated OpenSSL libraries for all security-related cryptography operations. +Redpanda provides Federal Information Processing Standards (FIPS)-compliant cipher enforcement for brokers using a https://csrc.nist.gov/pubs/fips/140-3/final[FIPS 140-3^]-validated OpenSSL cryptographic module. Redpanda and `rpk` both use the OpenSSL library for security-related cryptographic operations. 
+ +After reading this page, you will be able to: + +* [ ] {learning-objective-1} +* [ ] {learning-objective-2} +* [ ] {learning-objective-3} [NOTE] ==== @@ -18,19 +29,27 @@ rpk cluster license info == Prerequisites -Before configuring brokers to run in FIPS compliance mode (FIPS mode), check to make sure the `redpanda-rpk-fips` and `redpanda-fips` packages are xref:deploy:deployment-option/self-hosted/manual/production/production-deployment.adoc#install-redpanda-for-fips-compliance[installed]. These packages are required by both the `redpanda` and `redpanda-tuner` install packages. +Before configuring brokers to run in FIPS mode on Linux, install the `redpanda-rpk-fips` and `redpanda-fips` xref:deploy:redpanda/manual/production/production-deployment.adoc#install-redpanda-for-fips-compliance[packages]. + +For Docker deployments, use the FIPS-specific image instead: `docker.redpanda.com/redpandadata/redpanda:-fips`. + +[WARNING] +==== +Before upgrading to Redpanda 26.1 with FIPS mode enabled, change any SASL/SCRAM user passwords shorter than 14 characters to at least 14 characters. FIPS 140-3 enforces stricter HMAC key size requirements than FIPS 140-2. Because Redpanda stores passwords in encrypted form, it cannot check the length of existing passwords. Clients with passwords shorter than 14 characters will fail to authenticate after the upgrade. +==== == Limitations -- Redpanda is not fully FIPS-compliant when used with the Redpanda Helm chart and Operator in a Kubernetes deployment. +- Redpanda FIPS mode requires a FIPS-enabled host when deployed with the Redpanda Helm chart or Operator. - Redpanda Console is not FIPS-compliant. -- PKCS#12 keys for xref:manage:security/encryption.adoc[TLS encryption] are not supported when FIPS mode is enabled in Redpanda. The PKCS12KDF algorithm used in PKCS#12 is not FIPS-compliant. To use Redpanda in FIPS mode with TLS enabled, configure your certificates and keys in PEM format instead. 
+- Redpanda does not support PKCS#12 keys for xref:manage:security/encryption.adoc[TLS encryption] when FIPS mode is enabled. The PKCS12KDF algorithm used in PKCS#12 is not FIPS-compliant. To use Redpanda in FIPS mode with TLS enabled, configure your certificates and keys in PEM format instead. +- When FIPS mode is `enabled` or `permissive`, SASL/SCRAM passwords must be at least 14 characters. == Configure FIPS mode When you configure a broker to run in FIPS mode: -* FIPS compliance is enforced _immediately_ upon the startup of Redpanda. +* Redpanda enforces FIPS compliance _immediately_ on startup. * Redpanda and its dependencies only use FIPS-validated cryptographic modules for all cryptographic algorithms used in a security context. @@ -42,43 +61,100 @@ Redpanda logs an error and exits immediately if: * It cannot detect a FIPS-validated library. -To place a broker in FIPS compliance mode, enable xref:reference:properties/broker-properties.adoc#fips_mode[`fips_mode`] in the Redpanda broker configuration file (typically located in `/etc/redpanda/redpanda.yaml`). All fields are within the `redpanda` object: +To place a broker in FIPS-compliant mode, enable xref:reference:properties/broker-properties.adoc#fips_mode[`fips_mode`] in the Redpanda broker configuration file (typically located in `/etc/redpanda/redpanda.yaml`). All fields are within the `redpanda` object: -```yaml +[,yaml] +---- redpanda: # .... fips_mode: enabled -``` +---- Available `fips_mode` values are: -* `disabled`: Redpanda is not running in FIPS compliance mode. +* `disabled`: Redpanda is not running in FIPS-compliant mode. * `enabled`: When Redpanda starts up, it looks for a value of `1` in the file `/proc/sys/crypto/fips_enabled`. If the file doesn't exist or doesn't contain `1`, Redpanda logs an error and exits immediately. -* `permissive`: This setting is a safety value option only, and _should not be used in a production environment_. 
If specified, Redpanda logs a WARNING, but continues operations even if the underlying operating system is not configured for FIPS. If set, your Redpanda instance is _not_ running in FIPS compliance mode. +* `permissive`: This setting is a safety valve option only. Do not use it in a production environment. If specified, Redpanda logs a WARNING, but continues operations even if the underlying operating system is not configured for FIPS. If set, your Redpanda instance is _not_ running in FIPS-compliant mode. + +You must also configure OpenSSL properties for FIPS mode. + === FIPS OpenSSL configuration -You must specify the following SSL configurations for brokers you want to run in FIPS compliance mode: +You must specify the following SSL configurations for brokers you want to run in FIPS-compliant mode: -* xref:reference:properties/broker-properties.adoc#openssl_config_file[`openssl_config_file`]: Specifies the path to the OpenSSL configuration file that was created as part of the `redpanda-fips` package installation. This file is used when OpenSSL is initialized to find the `fipsmodule.cnf` file that was created by the `openssl fipsinstall` command. Typically, this value should be `/opt/redpanda/openssl/openssl.cnf`. +* xref:reference:properties/broker-properties.adoc#openssl_config_file[`openssl_config_file`]: Specifies the path to the OpenSSL configuration file created during `redpanda-fips` package installation. OpenSSL uses this file during initialization to find the `fipsmodule.cnf` file that `openssl fipsinstall` creates. Typically, this value is `/opt/redpanda/openssl/openssl.cnf`. -* xref:reference:properties/broker-properties.adoc#openssl_module_directory[`openssl_module_directory`]: Specifies the path to the directory that contains the `fips.so` cryptographic provider. Typically, this value should be: `/opt/redpanda/lib/ossl-modules/`. 
+* xref:reference:properties/broker-properties.adoc#openssl_module_directory[`openssl_module_directory`]: Specifies the path to the directory that contains the `fips.so` cryptographic provider. Typically, this value is: `/opt/redpanda/lib/ossl-modules/`. + -The following configuration starts Redpanda in FIPS mode: +The following configuration starts Redpanda in FIPS mode: + -```yaml +[,yaml] +---- redpanda: # .... fips_mode: enabled openssl_config_file: /opt/redpanda/openssl/openssl.cnf openssl_module_directory: /opt/redpanda/lib/ossl-modules/ -``` - -== Suggested reading - -* xref:deploy:deployment-option/self-hosted/manual/production/production-deployment.adoc#install-redpanda-for-fips-compliance[Install Redpanda for FIPS Compliance] -* https://github.com/openssl/openssl/blob/openssl-3.0.9/README-FIPS.md[OpenSSL FIPS Readme^] -* https://www.openssl.org/docs/man3.0/man7/fips_module.html[OpenSSL fips_module^] -* https://csrc.nist.gov/CSRC/media/projects/cryptographic-module-validation-program/documents/security-policies/140sp4282.pdf[OpenSSL FIPS Security Policy^] +---- + +== Configure FIPS mode with Docker + +The Redpanda FIPS Docker image (`docker.redpanda.com/redpandadata/redpanda:-fips`) is available for `amd64` and `arm64` architectures. The image includes the required OpenSSL files, pre-configured. + +Pass the FIPS broker configuration to the container the same way as any other Redpanda Docker deployment: either by mounting a configuration file or by passing settings as flags. + +[tabs] +====== +Mount a configuration file:: ++ +-- +. Create a `redpanda.yaml` with the required FIPS settings: ++ +[,yaml] +---- +redpanda: + fips_mode: enabled + openssl_config_file: /opt/redpanda/openssl/openssl.cnf + openssl_module_directory: /opt/redpanda/lib/ossl-modules/ +---- + +. 
Mount the file when starting the container: ++ +[,bash] +---- +docker run -d \ + --name=redpanda \ + -p 9092:9092 \ + -p 9644:9644 \ + -v /path/to/redpanda.yaml:/etc/redpanda/redpanda.yaml \ + docker.redpanda.com/redpandadata/redpanda:-fips \ + redpanda start --overprovisioned --smp 1 +---- +-- +Pass settings as flags:: ++ +-- +Pass the FIPS settings directly to `redpanda start`: + +[,bash] +---- +docker run -d \ + --name=redpanda \ + -p 9092:9092 \ + -p 9644:9644 \ + docker.redpanda.com/redpandadata/redpanda:-fips \ + redpanda start --overprovisioned --smp 1 \ + --set redpanda.fips_mode=enabled \ + --set redpanda.openssl_config_file=/opt/redpanda/openssl/openssl.cnf \ + --set redpanda.openssl_module_directory=/opt/redpanda/lib/ossl-modules/ +---- +-- +====== + +== Next steps + +* xref:deploy:redpanda/manual/production/production-deployment.adoc#install-redpanda-for-fips-compliance[Install Redpanda for FIPS Compliance] +// TODO: Confirm OpenSSL version and FIPS 140-3 certificate number with engineering (ENG-307). Update links below accordingly. +* https://github.com/openssl/openssl/blob/master/README-FIPS.md[OpenSSL FIPS Readme^] diff --git a/modules/manage/partials/audit-logging.adoc b/modules/manage/partials/audit-logging.adoc index ad1fe36a02..52020f974a 100644 --- a/modules/manage/partials/audit-logging.adoc +++ b/modules/manage/partials/audit-logging.adoc @@ -159,10 +159,11 @@ NOTE: The Included column captures whether the event itself is included (for exa |=== |Data Logging Level |Audit Event |Included? |Details -.11+|System Level +.12+|System Level |Date and time stamp for each entry |Yes |`time` field on each event |Successful and failed access attempts |Yes |The `status_id` field shows success/failure for all access attempts for which auditing is enabled |User ID |Yes |`user.name` +|User group memberships |Yes |`user.groups` field with type `idp_group`. Included in authentication events for OIDC users and in authorization events when a group ACL matches. 
See xref:manage:security/authorization/gbac.adoc[]. |User connect and disconnect time |No |Connect and disconnect time may be inferred from the presence or absence of activity. |Password change |Yes |For SCRAM users managed through Redpanda core, the Admin API call associated with the password change is logged. Note that this does not cover users synced from external IdPs, such as through OIDC. |Changes of security settings |Yes |For example, ACL creation is logged (kafka `create_acls`), and cluster configuration changes are logged (Admin API events) @@ -433,6 +434,6 @@ xref:manage:audit-logging/audit-log-samples.adoc[See samples of audit log messag include::shared:partial$suggested-reading.adoc[] - xref:reference:topic-properties.adoc[] -- xref:develop:config-topics.adoc[] +- xref:develop:manage-topics/config-topics.adoc[] endif::[] diff --git a/modules/manage/partials/authentication.adoc b/modules/manage/partials/authentication.adoc index ef2ae49d0e..aac5fdbe74 100644 --- a/modules/manage/partials/authentication.adoc +++ b/modules/manage/partials/authentication.adoc @@ -867,6 +867,8 @@ but can instead rely on the trusted authentication capabilities of established I Redpanda's implementation of OIDC provides SASL/OAUTHBEARER support for the Kafka API, and supports standard OIDC authentication across all other HTTP APIs, including Schema Registry, HTTP Proxy, and the Admin API. +TIP: With OIDC enabled, you can also use xref:manage:security/authorization/gbac.adoc[group-based access control (GBAC)] to assign Redpanda permissions to OIDC groups instead of individual users. To use GBAC, configure your IdP to include group claims in the access token (for example, a `groups` claim). See your IdP's documentation for how to add group claims to tokens. 
+ include::manage:partial$security/oidc/limitations.adoc[leveloffset=+3] ===== OIDC credentials flow and access token validation diff --git a/modules/manage/partials/gbac-assign-group-role.adoc b/modules/manage/partials/gbac-assign-group-role.adoc new file mode 100644 index 0000000000..fb94b541be --- /dev/null +++ b/modules/manage/partials/gbac-assign-group-role.adoc @@ -0,0 +1,23 @@ +To assign a group to a role in {ui}: + +. From *Security* on the left navigation menu, select the *Roles* tab. + +. Select the role you want to assign the group to. + +. Click *Edit*. + +. In the *Principals* section, enter the group name using the `Group:` format. For example, `Group:engineering`. + +. Click *Update*. + +To remove a group from a role: + +. From *Security* on the left navigation menu, select the *Roles* tab. + +. Select the role that has the group assignment you want to remove. + +. Click *Edit*. + +. In the *Principals* section, remove the `Group:` entry. + +. Click *Update*. diff --git a/modules/manage/partials/gbac-create-group-acl.adoc b/modules/manage/partials/gbac-create-group-acl.adoc new file mode 100644 index 0000000000..6d80c65ad2 --- /dev/null +++ b/modules/manage/partials/gbac-create-group-acl.adoc @@ -0,0 +1,13 @@ +In {ui}, group-based ACLs are managed through roles. To create an ACL for an OIDC group: + +. From *Security* on the left navigation menu, select the *Roles* tab. + +. Click *Create role* to open the role creation form, or select an existing role and click *Edit*. + +. In the *Principals* field, enter the group principal using the `Group:` format. For example, `Group:engineering`. + +. Define the permissions (ACLs) you want to grant to users in the group. You can configure ACLs for clusters, topics, consumer groups, transactional IDs, Schema Registry subjects, and Schema Registry operations. + +. Click *Create* (or *Update* if editing an existing role). + +NOTE: {ui} assigns ACLs through roles. 
To grant permissions to a group, create a role for that group, add the group as a principal, and define the ACLs on the role. To create ACLs with a `Group:` principal directly (without creating a role), use `rpk`. diff --git a/modules/manage/partials/gbac-dp.adoc b/modules/manage/partials/gbac-dp.adoc new file mode 100644 index 0000000000..cfa7d0875d --- /dev/null +++ b/modules/manage/partials/gbac-dp.adoc @@ -0,0 +1,554 @@ +// tag::single-source[] +ifdef::env-cloud[:oidc-doc: security:cloud-authentication.adoc#single-sign-on] +ifndef::env-cloud[:oidc-doc: manage:security/authentication.adoc#oidc] +ifdef::env-cloud[:acl-doc: security:authorization/acl.adoc] +ifndef::env-cloud[:acl-doc: manage:security/authorization/acl.adoc] +ifdef::env-cloud[:rbac-doc: security:authorization/rbac/index.adoc] +ifndef::env-cloud[:rbac-doc: manage:security/authorization/rbac.adoc] + +Group-based access control (GBAC) lets you manage Redpanda permissions at scale using the groups that already exist in your identity provider (IdP). Instead of creating and maintaining per-user permissions in Redpanda, you define access once for a group and your IdP controls who belongs to it. When users join or leave a team, their Redpanda access updates automatically at next login with no changes needed in Redpanda. + +GBAC extends xref:{oidc-doc}[OIDC authentication] and supports two ways to grant permissions to groups: create xref:{acl-doc}[ACLs] with `Group:` principals, or assign groups as members of xref:{rbac-doc}[RBAC] roles. Both approaches can be used independently or together. + +After reading this page, you will be able to: + +* [ ] {learning-objective-1} +* [ ] {learning-objective-2} +* [ ] {learning-objective-3} + +== Prerequisites + +To use GBAC, you need: + +ifndef::env-cloud[] +* An xref:get-started:licensing/overview.adoc[Enterprise Edition] license applied to your cluster. +* Superuser access to configure cluster properties and manage ACLs. 
+endif::[] +* xref:{oidc-doc}[OIDC authentication] configured and enabled on your cluster. +* Your IdP configured to include group claims in the OIDC access token (for example, a `groups` claim). + +== How GBAC works + +When a user authenticates with OIDC, Redpanda reads a configurable claim from the JWT access token (for example, `$.groups`) and extracts the list of groups the user belongs to. Redpanda then matches those group names against `Group:` principals in its ACLs and role assignments. + +Group membership is managed entirely by your IdP. Redpanda never stores or manages group membership directly. It reads group information from the OIDC token at authentication time. Changes you make in the IdP (adding or removing group memberships) take effect at the user's next authentication, when a new token is issued. + +GBAC works across the following Redpanda APIs: + +* Kafka API +* Schema Registry +* HTTP Proxy + +=== Authorization patterns + +GBAC supports two usage patterns: + +* Group as an ACL principal: Create an ACL with a `Group:` principal. Users in that group receive that permission directly. +* Group assigned to a role: Assign a group as a member of a role-based access control (RBAC) role. All users in the group inherit the role's ACLs. + +Both patterns can be used together. When a user belongs to multiple groups, they inherit the combined permissions of all groups. + +Redpanda evaluates all authorization sources (user ACLs, role ACLs, group ACLs, and group-to-role ACLs) in a single unified flow. Deny rules are checked first across all sources. If any source produces a deny, Redpanda rejects the request regardless of allows from other sources. If no deny is found, Redpanda checks for an allow across all sources. If no allow is found, Redpanda denies the request by default. + +.Authorization evaluation flow +[mermaid,width=100%] +.... 
+flowchart LR + A[Request] --> B{"Check all sources\nfor deny"} + + B -- "Deny found" --> DENY["❌ Deny"] + B -- "No deny found" --> C{"Check all sources\nfor allow"} + + C -- "Allow found" --> ALLOW["✅ Allow"] + C -- "No allow found" --> DEFAULT["❌ Default deny"] + + style DENY fill:#f44,color:#fff + style ALLOW fill:#4a4,color:#fff + style DEFAULT fill:#f44,color:#fff + + subgraph sources [" "] + direction LR + S1["User ACLs"] + S2["Role ACLs\n(RBAC)"] + S3["Group ACLs"] + S4["Group→Role\nACLs"] + end +.... + +== Supported identity providers + +GBAC works with any OIDC-compliant identity provider. These providers are commonly used with Redpanda: + +* https://auth0.com/docs/secure/tokens/json-web-tokens/create-custom-claims[Auth0^]: Configure group claims in Auth0 Actions or Rules. +* https://developer.okta.com/docs/concepts/universal-directory/[Okta^]: Assign groups to applications and include them in token claims. +* https://learn.microsoft.com/en-us/entra/identity/hybrid/connect/how-to-connect-fed-group-claims[Microsoft Entra ID (Azure AD)^]: Configure group claims in the application manifest. + +For IdP-specific configuration steps, see your provider's documentation. + +== Limitations + +* Azure AD group limit: Users with more than 200 group memberships in Azure AD receive a URL reference in their token instead of a list of group names. Redpanda does not follow that URL and cannot resolve groups in this case. ++ +Mitigation: Filter token claims to include only the groups relevant to Redpanda. + +* Nested groups: Redpanda does not recursively resolve nested group hierarchies. If group A contains group B, only the direct memberships reported in the token are used. Use xref:reference:properties/cluster-properties.adoc#nested_group_behavior[`nested_group_behavior: suffix`] to extract the last path segment from hierarchical group names when needed. + +* No wildcard ACLs for groups: ACL matching for `Group:` principals uses literal string comparison only. 
Wildcard patterns are not supported. + +ifdef::env-cloud[] +== Register groups in Redpanda Cloud + +To assign an IdP group to a role or ACL, you must first register the group: + +[tabs] +==== +Cloud UI:: ++ +-- +. In the left navigation menu, select *Organization IAM*, then select the *Groups* tab. +. Click *Create group*. +. Enter a *Name* that matches the group in your IdP exactly (for example, `engineering`). +. Optionally, enter a *Description*, and configure a *Role binding* to assign the group to a role with a specific scope and resource. +. Click *Create*. +-- + +Control Plane API:: ++ +-- +Make a link:/api/doc/cloud-controlplane/operation/operation-groupservice_creategroup[`POST /v1/groups`] request to the xref:redpanda-cloud:manage:api/cloud-byoc-controlplane-api.adoc[Control Plane API]: + +[,bash] +---- +curl -X POST 'https://api.redpanda.com/v1/groups' \ + -H 'Content-Type: application/json' \ + -H 'Authorization: Bearer ' \ + -d '{ + "group": { + "name": "", + "description": "" + } + }' +---- + +Replace `` with the name that matches the group in your IdP (for example, `engineering`). The name must match exactly for GBAC to map the group correctly. +-- +==== +endif::[] + +== Create group-based ACLs + +You can grant permissions directly to a group by creating an xref:{acl-doc}[ACL] with a `Group:` principal. This works the same as creating an ACL for a user, but uses the `Group:` prefix instead of `User:`. 
+ +[tabs] +==== +rpk:: ++ +-- +To grant cluster-level access to the `engineering` group: + +[,bash] +---- +rpk security acl create --allow-principal Group:engineering --operation describe --cluster +---- + +To grant topic-level access: + +[,bash] +---- +rpk security acl create \ + --allow-principal Group:engineering \ + --operation read,describe \ + --topic 'analytics-' \ + --resource-pattern-type prefixed +---- + +-- +{ui}:: ++ +-- +include::manage:partial$gbac-create-group-acl.adoc[] +-- +ifdef::env-cloud[] +Data Plane API:: ++ +-- +Make a link:/api/doc/cloud-dataplane/operation/operation-aclservice_createacl[`POST /v1/acls`] request with a `Group:` principal. For example, to grant the `engineering` group read access to a topic: + +[,bash] +---- +curl -X POST "${DATAPLANE_API_URL}/v1/acls" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "resource_type": "RESOURCE_TYPE_TOPIC", + "resource_name": "analytics-events", + "resource_pattern_type": "RESOURCE_PATTERN_TYPE_LITERAL", + "principal": "Group:engineering", + "host": "*", + "operation": "OPERATION_READ", + "permission_type": "PERMISSION_TYPE_ALLOW" + }' +---- +-- +endif::[] +==== + +== Assign groups to roles + +To manage permissions at scale, assign a group to an xref:{rbac-doc}[RBAC] role. All users in the group inherit the role's ACLs automatically. 
+ +[tabs] +==== +rpk:: ++ +-- +To assign a group to a role: + +[,bash] +---- +rpk security role assign <role-name> --principal Group:<group-name> +---- + +For example, to assign the `engineering` group to the `DataEngineers` role: + +[,bash] +---- +rpk security role assign DataEngineers --principal Group:engineering +---- + +To remove a group from a role: + +[,bash] +---- +rpk security role unassign <role-name> --principal Group:<group-name> +---- + +For example: + +[,bash] +---- +rpk security role unassign DataEngineers --principal Group:engineering +---- +-- +{ui}:: ++ +-- +include::manage:partial$gbac-assign-group-role.adoc[] +-- +ifdef::env-cloud[] +Data Plane API:: ++ +-- +First, retrieve your cluster's Data Plane API URL: + +[,bash] +---- +export DATAPLANE_API_URL=$(curl -s https://api.redpanda.com/v1/clusters/<cluster-id> \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer <token>" | jq -r .cluster.dataplane_api) +---- + +Make a link:/api/doc/cloud-dataplane/operation/operation-securityservice_updaterolemembership[`PUT /v1/roles/\{role_name}`] request to assign a group to a role: + +[,bash] +---- +curl -X PUT "${DATAPLANE_API_URL}/v1/roles/DataEngineers" \ + -H "Authorization: Bearer <token>" \ + -H "Content-Type: application/json" \ + -d '{ + "add": [{"principal": "Group:engineering"}] + }' +---- + +To remove a group from a role, use the `remove` field: + +[,bash] +---- +curl -X PUT "${DATAPLANE_API_URL}/v1/roles/DataEngineers" \ + -H "Authorization: Bearer <token>" \ + -H "Content-Type: application/json" \ + -d '{ + "remove": [{"principal": "Group:engineering"}] + }' +---- +-- +endif::[] +ifndef::env-cloud[] +Admin API:: ++ +-- +Use the Admin API link:/api/doc/admin/v2/group/endpoint-securityservice[`SecurityService`] operations to manage group-to-role assignments. Send all requests as `POST` with a JSON body. For guidance on using the Admin API (ConnectRPC), see xref:manage:use-admin-api.adoc[]. 
+ +To assign a group to a role, make a link:/api/doc/admin/v2/operation/operation-redpanda-core-admin-v2-securityservice-addrolemembers[`POST AddRoleMembers`] request: + +[,bash] +---- +curl -u : \ + --request POST 'http://localhost:9644/redpanda.core.admin.v2.SecurityService/AddRoleMembers' \ + --header 'Content-Type: application/json' \ + --data '{ + "roleName": "DataEngineers", + "members": [{"group": {"name": "engineering"}}] + }' +---- + +To remove a group from a role, make a link:/api/doc/admin/v2/operation/operation-redpanda-core-admin-v2-securityservice-removerolemembers[`POST RemoveRoleMembers`] request: + +[,bash] +---- +curl -u : \ + --request POST 'http://localhost:9644/redpanda.core.admin.v2.SecurityService/RemoveRoleMembers' \ + --header 'Content-Type: application/json' \ + --data '{ + "roleName": "DataEngineers", + "members": [{"group": {"name": "engineering"}}] + }' +---- +-- +endif::[] +==== + +== View groups and roles + +Use the following commands to inspect group assignments and role memberships. + +=== List groups assigned to a role + +[tabs] +==== +rpk:: ++ +-- +To see which groups are assigned to a role, use `--print-members`. Groups are listed alongside other principals such as `User:` and appear as `Group:` entries: + +[,bash] +---- +rpk security role describe --print-members +---- + +For example: + +[,bash] +---- +rpk security role describe DataEngineers --print-members +---- + +To list all roles assigned to a specific group: + +[,bash] +---- +rpk security role list --principal Group: +---- + +For example: + +[,bash] +---- +rpk security role list --principal Group:engineering +---- +-- +{ui}:: ++ +-- +To view groups assigned to a role in {ui}: + +. From *Security* on the left navigation menu, select the *Roles* tab. + +. Select the role you want to inspect. + +. The role details page lists all principals, including any `Group:` entries. 
+-- +ifdef::env-cloud[] +Data Plane API:: ++ +-- +To list all members of a role (including groups), make a link:/api/doc/cloud-dataplane/operation/operation-securityservice_listrolemembers[`GET /v1/roles/\{role_name}/members`] request: + +[,bash] +---- +curl -X GET "${DATAPLANE_API_URL}/v1/roles/DataEngineers/members" \ + -H "Authorization: Bearer " +---- + +The response includes a `members` array. Group members appear with the `Group:` prefix in the `principal` field. + +To list all roles assigned to a specific group, make a link:/api/doc/cloud-dataplane/operation/operation-securityservice_listroles[`GET /v1/roles`] request with a principal filter: + +[,bash] +---- +curl -X GET "${DATAPLANE_API_URL}/v1/roles?filter.principal=Group:engineering" \ + -H "Authorization: Bearer " +---- +-- +endif::[] +ifndef::env-cloud[] +Admin API:: ++ +-- +These operations use the link:/api/doc/admin/v2/group/endpoint-securityservice[Admin API v2] `SecurityService`. Send all requests as `POST` with a JSON body. + +To retrieve a role's details including all members (users and groups), make a link:/api/doc/admin/v2/operation/operation-redpanda-core-admin-v2-securityservice-getrole[`POST GetRole`] request: + +[,bash] +---- +curl -u : \ + --request POST 'http://localhost:9644/redpanda.core.admin.v2.SecurityService/GetRole' \ + --header 'Content-Type: application/json' \ + --data '{"name": "DataEngineers"}' +---- + +The response includes a `members` array with both `user` and `group` entries. 
+ +To list all roles, make a link:/api/doc/admin/v2/operation/operation-redpanda-core-admin-v2-securityservice-listroles[`POST ListRoles`] request: + +[,bash] +---- +curl -u : \ + --request POST 'http://localhost:9644/redpanda.core.admin.v2.SecurityService/ListRoles' \ + --header 'Content-Type: application/json' \ + --data '{}' +---- + +To verify how Redpanda resolves groups from an OIDC token, make a link:/api/doc/admin/v2/operation/operation-redpanda-core-admin-v2-securityservice-resolveoidcidentity[`POST ResolveOidcIdentity`] request. Pass the token in the `Authorization` header. The response includes the resolved `principal`, token expiry, and a `groups` field listing all groups extracted from the token: + +[,bash] +---- +curl \ + --request POST 'http://localhost:9644/redpanda.core.admin.v2.SecurityService/ResolveOidcIdentity' \ + --header 'Content-Type: application/json' \ + --header 'Authorization: Bearer ' \ + --data '{}' +---- +-- +endif::[] +==== + +== Customize token claim extraction + +Different identity providers store group information in different locations within the JWT token. + +ifdef::env-cloud[] +In Redpanda Cloud, group claim extraction is configured through your SSO connection settings. + +. In the Cloud UI, navigate to *Organization IAM > Single sign-on*, then select your IdP connection. +. Ensure the mapping mode is set to *use_map*. +. Configure *Attributes (JSON)* to map attribute names to claim paths, including `federated_groups` for group claims. ++ +A claim path is a https://goessner.net/articles/JsonPath/[JSON path^] expression that tells Redpanda where to find group information in the OIDC token. The appropriate claim path for each attribute may vary per IdP. ++ +For example, Okta exposes group claims in `${context.userinfo.groups}`. In this case, you must also include `groups` in *Userinfo scope*. 
+endif::[] + +ifndef::env-cloud[] +Two cluster properties control how Redpanda extracts group names: + +* xref:reference:properties/cluster-properties.adoc#oidc_group_claim_path[`oidc_group_claim_path`]: A https://goessner.net/articles/JsonPath/[JSON path^] expression that tells Redpanda where to find group information in the OIDC token. For example, Auth0 and Okta typically use a top-level `groups` claim (`$.groups`), while Keycloak nests roles inside `realm_access` (`$.realm_access.roles`). Default: `$.groups`. +* xref:reference:properties/cluster-properties.adoc#nested_group_behavior[`nested_group_behavior`]: Controls how Redpanda handles group names that use path-style notation (for example, `/departments/eng/platform`). Set to `none` to use the full path as-is, or `suffix` to extract only the last segment. Default: `none`. ++ +NOTE: When `nested_group_behavior` is set to `suffix`, groups that share a leaf name (for example, `/departments/eng/groupA` and `/departments/sales/groupA`) both resolve to `Group:groupA`. ACLs or role assignments for that principal apply to members of both groups. Design your group naming conventions to avoid unintended collisions. + +To update these properties, use xref:manage:cluster-maintenance/cluster-property-configuration.adoc[any configuration method] (`rpk cluster config set`, the Admin API, or Redpanda Console). Changes take effect immediately without a restart. +endif::[] + +=== Token structure examples + +The following examples show how Redpanda extracts group principals from different token formats. + +==== Flat group values (default) + +With `oidc_group_claim_path: "$.groups"`, Redpanda extracts principals `Group:engineering` and `Group:analytics` from the token. + +[,json] +---- +{"groups": ["engineering", "analytics"]} +---- + +==== Nested claim + +With `oidc_group_claim_path: "$.realm_access.roles"`, Redpanda extracts principals `Group:eng` and `Group:fin` from the token. 
+ +[,json] +---- +{"realm_access": {"roles": ["eng", "fin"]}} +---- + +==== Path-style group names with no suffix extraction (default) + +With `nested_group_behavior: "none"` (the default), Redpanda maps the full path to principals `Group:/departments/eng/platform` and `Group:/departments/eng/infra`. + +[,json] +---- +{"groups": ["/departments/eng/platform", "/departments/eng/infra"]} +---- + +// Not supported in Cloud +ifndef::env-cloud[] +==== Path-style group names with suffix extraction + +When xref:reference:properties/cluster-properties.adoc#nested_group_behavior[`nested_group_behavior`] is set to `suffix`, Redpanda maps the last path segment to principals `Group:platform` and `Group:infra`. + +[,json] +---- +{"groups": ["/departments/eng/platform", "/departments/eng/infra"]} +---- +endif::[] + +==== CSV-formatted group claim + +Some identity providers return group claims as a single comma-separated string instead of an array. + +[,json] +---- +{"groups": "engineering,analytics,finance"} +---- + +Redpanda automatically splits comma-separated values and extracts principals `Group:engineering`, `Group:analytics`, and `Group:finance`. + +ifndef::env-cloud[] +== Troubleshoot GBAC + +If group-based permissions are not working as expected: + +* Decode the JWT access token from your IdP and verify that the expected group claims are present: ++ +[,bash] +---- +echo $ACCESS_TOKEN | cut -d. -f2 | base64 -d 2>/dev/null | jq . +---- ++ +Look for the claim that matches your xref:reference:properties/cluster-properties.adoc#oidc_group_claim_path[`oidc_group_claim_path`] configuration (default: `$.groups`). +* Use the `ResolveOidcIdentity` Admin API endpoint to verify which groups Redpanda extracts from a token. See <>. 
+* Verify that your cluster configuration matches the token structure: ++ +[,bash] +---- +rpk cluster config get oidc_group_claim_path +rpk cluster config get nested_group_behavior +---- +* Temporarily enable debug logging for the `security` logger to see all claims in the validated JWT: ++ +[,bash] +---- +rpk redpanda admin config log-level set security --level debug +---- ++ +This helps diagnose incorrect claim paths, missing groups, or token content issues. The debug level reverts automatically after the expiry period (default: 300 seconds). +endif::[] + +== Audit logging + +When xref:manage:audit-logging.adoc[audit logging] is enabled, Redpanda includes group information in the following event types: + +ifndef::env-cloud[] +* Authentication events: Events across Kafka API, HTTP Proxy, Schema Registry, and Admin API include the user's IdP group memberships in the `user.groups` field with type `idp_group`. +endif::[] +ifdef::env-cloud[] +* Authentication events: Events across Kafka API, HTTP Proxy, and Schema Registry include the user's IdP group memberships in the `user.groups` field with type `idp_group`. +endif::[] +* Authorization events: When an authorization decision matches a group ACL, the matched group appears in the `actor.user.groups` field with type `idp_group`. + +== Next steps + +* xref:manage:audit-logging.adoc[Set up audit logging] to monitor group-based access events. + +// end::single-source[] diff --git a/modules/manage/partials/remote-read-replicas.adoc b/modules/manage/partials/remote-read-replicas.adoc index ab0ee72961..027a59235b 100644 --- a/modules/manage/partials/remote-read-replicas.adoc +++ b/modules/manage/partials/remote-read-replicas.adoc @@ -36,9 +36,9 @@ You need the following: * An origin cluster with xref:{tiered-storage-link}#set-up-tiered-storage[Tiered Storage] set up. Multi-region buckets or containers are not supported. * A topic on the origin cluster, which you can use as a Remote Read Replica topic on the remote cluster. 
* A separate remote cluster. -** AWS: The remote cluster must be in the same region as the origin cluster's storage bucket/container. +** AWS: The remote cluster can be in the same or a different region as the origin cluster's S3 bucket. For cross-region Remote Read Replica topics, see <>. ** GCP: The remote cluster can be in the same or a different region as the bucket/container. -** Azure: Remote read replicas are not supported. +** Azure: Remote read replicas are not supported. include::shared:partial$enterprise-license.adoc[] @@ -56,7 +56,7 @@ You must configure access to the same object storage as the origin cluster. ifndef::env-kubernetes[] To set up a Remote Read Replica topic on a separate remote cluster: -. Create a remote cluster for the Remote Read Replica topic. For AWS, the remote cluster must be in the same region as the origin cluster's storage bucket/container. For GCP, the remote cluster can be in the same or a different region as the bucket/container. +. Create a remote cluster for the Remote Read Replica topic. For GCP, the remote cluster can be in the same or a different region as the bucket/container. For AWS, the remote cluster can be in the same or a different region, but cross-region Remote Read Replica topics require additional configuration. See <>. . Run `rpk cluster config edit`, and then specify properties specific to your object storage provider (your cluster will require a restart after any changes to these properties): + @@ -437,6 +437,50 @@ rpk topic create -c redpanda.remote.readreplica= * Do not use `redpanda.remote.read` or `redpanda.remote.write` with `redpanda.remote.readreplica`. Redpanda ignores the values for remote read and remote write properties on read replica topics. ==== +[[create-cross-region-rrr-topic]] +=== Create a cross-region Remote Read Replica topic on AWS + +Use this configuration only when the remote cluster is in a *different AWS region* than the origin cluster's S3 bucket. 
For same-region AWS or GCP deployments, use the standard <>. + +==== Prerequisites + +You must explicitly set the xref:reference:properties/object-storage-properties.adoc#cloud_storage_url_style[`cloud_storage_url_style`] cluster property to `virtual_host` or `path` on the remote cluster. The default value does not support cross-region Remote Read Replicas. + +==== Create the topic + +To create a cross-region Remote Read Replica topic, append `region` and `endpoint` query-string parameters to the bucket name. + +In the following example, replace the placeholders: + +- `<topic-name>`: The name of the topic in the cluster hosting the Remote Read Replica. +- `<bucket-name>`: The S3 bucket configured on the origin cluster (`cloud_storage_bucket`). +- `<region>`: The AWS region of the origin cluster's S3 bucket (not the remote cluster's region). + +[,bash] +---- +rpk topic create <topic-name> \ + -c 'redpanda.remote.readreplica=<bucket-name>?region=<region>&endpoint=s3.<region>.amazonaws.com' +---- + +Quote the `-c` value so the shell does not interpret the `?` and `&` characters. + +For example, if the origin cluster stores data in a bucket called `my-bucket` in `us-east-1`: + +[,bash] +---- +rpk topic create my-topic \ + -c 'redpanda.remote.readreplica=my-bucket?region=us-east-1&endpoint=s3.us-east-1.amazonaws.com' +---- + +NOTE: The `endpoint` value must not include the bucket name. When using `virtual_host` URL style, Redpanda automatically prepends the bucket name to the endpoint. When using `path` URL style, Redpanda appends the bucket name as a path segment. + +==== Limits + +Each unique combination of region and endpoint creates a separate object storage target on the remote cluster. A cluster supports a maximum of 10 targets. + +How targets are counted depends on `cloud_storage_url_style`: + +- `virtual_host`: Each unique combination of bucket, region, and endpoint counts as one target. You can create up to 10 distinct cross-region Remote Read Replica topics for each cluster. +- `path`: Each unique combination of region and endpoint counts as one target (the bucket name is not part of the key). 
You can create cross-region Remote Read Replica topics for multiple buckets using the same region/endpoint combination, with a maximum of 10 distinct region/endpoint combinations for each cluster. + == Reduce lag in data availability :config-ref: cloud_storage_segment_max_upload_interval_sec diff --git a/modules/manage/partials/tiered-storage.adoc b/modules/manage/partials/tiered-storage.adoc index d2434598bc..a3f8fc6dc8 100644 --- a/modules/manage/partials/tiered-storage.adoc +++ b/modules/manage/partials/tiered-storage.adoc @@ -869,32 +869,50 @@ To enable Tiered Storage for a cluster (in addition to setting `cloud_storage_en * config_ref:cloud_storage_enable_remote_write,true,properties/object-storage-properties[] * config_ref:cloud_storage_enable_remote_read,true,properties/object-storage-properties[] -When you enable Tiered Storage for a cluster, you enable it for all existing topics in the cluster. When cluster-level properties are changed, the changes apply only to new topics, not existing topics. You must restart your cluster after enabling Tiered Storage. +When you enable Tiered Storage for a cluster, you enable it for all existing topics in the cluster. You must restart your cluster after enabling Tiered Storage. -NOTE: The `cloud_storage_enable_remote_write` and `cloud_storage_enable_remote_read` cluster-level properties are essentially creation-time defaults for the `redpanda.remote.write` and `redpanda.remote.read` topic-level properties. +NOTE: `cloud_storage_enable_remote_write` and `cloud_storage_enable_remote_read` act as creation-time defaults only for topics with `redpanda.storage.mode=unset`. They have no effect on topics where `redpanda.storage.mode` is explicitly set to `local`, `tiered`, or `cloud`. To apply a default storage mode to all new topics, use the config_ref:default_redpanda_storage_mode,true,properties/cluster-properties[] cluster property instead. 
==== Enable Tiered Storage for specific topics -To enable Tiered Storage for a new or existing topic (in addition to setting `cloud_storage_enabled` to `true`), set the following topic-level properties to `true`: +Starting in Redpanda v26.1, the recommended way to enable Tiered Storage for a topic is to set the `redpanda.storage.mode` topic property to `tiered`: -* `redpanda.remote.write` -* `redpanda.remote.read` +[,bash] +---- +rpk topic create -c redpanda.storage.mode=tiered +---- -For example, to create a new topic with Tiered Storage: +To enable Tiered Storage on an existing topic that was created in `local` mode: + +[,bash] +---- +rpk topic alter-config --set redpanda.storage.mode=tiered +---- + +When `redpanda.storage.mode=tiered` is set, Tiered Storage is fully enabled for the topic. The `redpanda.remote.read` and `redpanda.remote.write` topic properties have no effect on a topic's storage when `redpanda.storage.mode` is set to any value other than `unset`. + +==== Configure Tiered Storage using legacy topic properties + +For topics with `redpanda.storage.mode=unset` (the default when `default_redpanda_storage_mode` is not configured), Tiered Storage is controlled by the `redpanda.remote.read` and `redpanda.remote.write` topic properties: + +* `redpanda.remote.write`: Uploads data from Redpanda to object storage. +* `redpanda.remote.read`: Fetches data from object storage to Redpanda. + +For example, to create a topic using the legacy properties when the storage mode is `unset`: [,bash] ---- rpk topic create -c redpanda.remote.read=true -c redpanda.remote.write=true ---- -To enable Tiered Storage on an existing topic, run: +To enable Tiered Storage on an existing topic where the storage mode is `unset`: [,bash] ---- rpk topic alter-config --set redpanda.remote.read=true --set redpanda.remote.write=true ---- -Topic-level properties override cluster-level properties. 
For example, for new topics, if `cloud_storage_enable_remote_write` is set to `true`, you can set `redpanda.remote.write` to `false` to turn it off for a particular topic. +For newly-created unset topics, the cluster-level `cloud_storage_enable_remote_write` and `cloud_storage_enable_remote_read` properties dictate the topic-level properties `redpanda.remote.write` and `redpanda.remote.read` at topic creation time, respectively. Altering the cluster properties has no effect on existing topics, only newly-created ones. To alter the permissions for existing topics, you can set these topic properties directly. For example, `redpanda.remote.write=false` to disable uploads for a specific topic. Tiered Storage topic-level properties: @@ -902,23 +920,25 @@ Tiered Storage topic-level properties: | Property | Description | `redpanda.remote.write` -| Uploads data from Redpanda to object storage. Overrides the cluster-level `cloud_storage_enable_remote_write` configuration for the topic. +| Uploads data from Redpanda to object storage. For topics with `redpanda.storage.mode=unset`, overrides the cluster-level `cloud_storage_enable_remote_write` setting. Has no effect on topics with `redpanda.storage.mode` set to `local`, `tiered`, or `cloud`. | `redpanda.remote.read` -| Fetches data from object storage to Redpanda. Overrides the cluster-level `cloud_storage_enable_remote_read` configuration for the topic. +| Fetches data from object storage to Redpanda. For topics with `redpanda.storage.mode=unset`, overrides the cluster-level `cloud_storage_enable_remote_read` setting. Has no effect on topics with `redpanda.storage.mode` set to `local`, `tiered`, or `cloud`. | `redpanda.remote.recovery` | Recovers or reproduces a topic from object storage. Use this property during topic creation. It does not apply to existing topics. | `redpanda.remote.delete` -| When set to `true`, deleting a topic also deletes its objects in object storage. 
Both `redpanda.remote.write` and `redpanda.remote.read` must be enabled, and the topic must not be a Remote Read Replica topic. +| When set to `true`, deleting a topic also deletes its objects in object storage. Applies to both Tiered Storage topics and Cloud Topics. + +For Tiered Storage topics, the topic must not be a Remote Read Replica topic. When set to `false`, deleting a topic does not delete its objects in object storage. Default is `true` for new topics. |=== -The following tables list outcomes for combinations of cluster-level and topic-level configurations: +The following tables show outcomes for combinations of cluster-level and topic-level configurations for topics with `redpanda.storage.mode=unset` at topic creation time: |=== | Cluster-level configuration with `cloud_storage_enable_remote_write` | Topic-level configuration with `redpanda.remote.write` | Outcome: whether remote write is enabled or disabled on the topic diff --git a/modules/reference/attachments/redpanda-properties-v26.1.1-rc2.json b/modules/reference/attachments/redpanda-properties-v26.1.1-rc2.json new file mode 100644 index 0000000000..48a66e5441 --- /dev/null +++ b/modules/reference/attachments/redpanda-properties-v26.1.1-rc2.json @@ -0,0 +1,13775 @@ +{ + "definitions": { + "config::broker_authn_endpoint": { + "defined_in": "config/broker_authn_endpoint.h", + "properties": { + "address": { + "type": "object" + }, + "authn_method": { + "type": "string" + }, + "name": { + "type": "string" + } + }, + "type": "object" + }, + "config::endpoint_tls_config": { + "defined_in": "config/endpoint_tls_config.h", + "properties": { + "config": { + "type": "string" + }, + "err": { + "type": "string" + }, + "name": { + "type": "string" + }, + "o": { + "type": "string" + } + }, + "type": "object" + }, + "config::leaders_preference": { + "defined_in": "config/leaders_preference.h", + "properties": { + "none_str": { + "type": "string" + }, + "ordered_racks_prefix": { + "type": "string" + }, + 
"ordered_racks_str": { + "type": "string" + }, + "racks": { + "type": "array" + }, + "racks_prefix": { + "type": "string" + }, + "racks_str": { + "type": "string" + } + }, + "type": "object" + }, + "config::node_id_override": { + "defined_in": "config/node_overrides.h", + "properties": { + "id": { + "type": "object" + }, + "ignore_existing_node_id": { + "type": "boolean" + }, + "key": { + "type": "object" + }, + "uuid": { + "type": "object" + } + }, + "type": "object" + }, + "config::rest_authn_endpoint": { + "defined_in": "config/rest_authn_endpoint.h", + "properties": { + "address": { + "type": "object" + }, + "authn_method": { + "type": "string" + }, + "name": { + "type": "string" + } + }, + "type": "object" + }, + "config::sasl_mechanisms_override": { + "defined_in": "config/sasl_mechanisms.h", + "properties": { + "listener": { + "type": "string" + }, + "sasl_mechanisms": { + "type": "string" + } + }, + "type": "object" + }, + "model::broker_endpoint": { + "defined_in": "model/metadata.h", + "properties": { + "address": { + "type": "object" + }, + "err": { + "type": "string" + }, + "false": { + "type": "string" + }, + "name": { + "type": "string" + } + }, + "type": "object" + }, + "model::cleanup_policy_bitflags": { + "defined_in": "model/fundamental.h", + "enum": [ + "none", + "delete", + "compact" + ], + "enum_string_mappings": { + "compaction": "compact", + "deletion": "delete" + }, + "type": "enum" + }, + "model::cloud_credentials_source": { + "defined_in": "model/metadata.h", + "enum": [ + "config_file", + "aws_instance_metadata", + "sts", + "gcp_instance_metadata", + "azure_aks_oidc_federation", + "azure_vm_instance_metadata" + ], + "type": "enum" + }, + "model::cloud_storage_backend": { + "defined_in": "model/metadata.h", + "enum": [ + "aws", + "google_s3_compat", + "azure", + "minio", + "oracle_s3_compat", + "linode_s3_compat", + "unknown" + ], + "enum_string_mappings": { + "aws": "aws", + "azure": "azure", + "google_s3_compat": "google_s3_compat", + 
"linode_s3_compat": "linode_s3_compat", + "minio": "minio", + "oracle_s3_compat": "oracle_s3_compat", + "unknown": "unknown" + }, + "type": "enum" + }, + "model::cloud_storage_chunk_eviction_strategy": { + "defined_in": "model/metadata.h", + "enum": [ + "eager", + "capped", + "predictive" + ], + "enum_string_mappings": { + "capped": "capped", + "eager": "eager", + "predictive": "predictive" + }, + "type": "enum" + }, + "model::compression": { + "defined_in": "model/compression.h", + "enum": [ + "none", + "gzip", + "snappy", + "lz4", + "zstd", + "count", + "producer" + ], + "type": "enum" + }, + "model::fetch_read_strategy": { + "defined_in": "model/metadata.h", + "enum": [ + "polling", + "non_polling", + "non_polling_with_debounce", + "non_polling_with_pid" + ], + "enum_string_mappings": { + "non_polling": "non_polling", + "non_polling_with_debounce": "non_polling_with_debounce", + "non_polling_with_pid": "non_polling_with_pid", + "polling": "polling" + }, + "type": "enum" + }, + "model::iceberg_invalid_record_action": { + "defined_in": "model/metadata.h", + "enum": [ + "drop", + "dlq_table" + ], + "enum_string_mappings": { + "dlq_table": "dlq_table", + "drop": "drop" + }, + "type": "enum" + }, + "model::kafka_batch_validation_mode": { + "defined_in": "model/metadata.h", + "enum": [ + "legacy", + "relaxed", + "strict" + ], + "enum_string_mappings": { + "legacy": "legacy", + "relaxed": "relaxed", + "strict": "strict" + }, + "type": "enum" + }, + "model::leader_balancer_mode": { + "defined_in": "model/metadata.h", + "enum": [ + "calibrated", + "random" + ], + "enum_string_mappings": { + "calibrated": "calibrated", + "random": "random" + }, + "type": "enum" + }, + "model::node_id": { + "alias_for": "named_type", + "defined_in": "model/fundamental.h", + "maximum": 2147483647, + "minimum": -2147483648, + "type": "integer" + }, + "model::partition_autobalancing_mode": { + "defined_in": "model/metadata.h", + "enum": [ + "off", + "node_add", + "continuous" + ], + "type": 
"enum" + }, + "model::rack_id": { + "alias_for": "named_type", + "defined_in": "model/metadata.h", + "type": "string" + }, + "model::recovery_validation_mode": { + "defined_in": "model/metadata.h", + "enum": [ + "check_manifest_existence", + "check_manifest_and_segment_metadata", + "no_check" + ], + "enum_string_mappings": { + "check_manifest_and_segment_metadata": "check_manifest_and_segment_metadata", + "check_manifest_existence": "check_manifest_existence", + "no_check": "no_check" + }, + "type": "enum" + }, + "model::redpanda_storage_mode": { + "defined_in": "model/metadata.h", + "enum": [ + "local", + "tiered", + "cloud", + "unset" + ], + "enum_string_mappings": { + "cloud": "cloud", + "local": "local", + "tiered": "tiered", + "unset": "unset" + }, + "type": "enum" + }, + "model::timestamp_type": { + "defined_in": "model/timestamp.h", + "enum": [ + "CreateTime", + "LogAppendTime" + ], + "enum_string_mappings": { + "append_time": "LogAppendTime", + "create_time": "CreateTime" + }, + "type": "enum" + }, + "model::write_caching_mode": { + "defined_in": "model/metadata.h", + "enum": [ + "true", + "false", + "disabled" + ], + "enum_string_mappings": { + "default_false": "false", + "default_true": "true", + "disabled": "disabled" + }, + "type": "enum" + }, + "net::unresolved_address": { + "defined_in": "utils/unresolved_address.h", + "properties": { + "family": { + "type": "string" + }, + "host": { + "type": "string" + }, + "o": { + "type": "string" + }, + "port": { + "type": "string" + } + }, + "type": "object" + } + }, + "properties": { + "abort_index_segment_size": { + "c_type": "uint32_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 50000, + "defined_in": "src/v/config/configuration.cc", + "description": "Capacity (in number of txns) of an abort index segment.\n\nEach partition tracks the aborted transaction offset ranges to help service client requests. 
If the number of transactions increases beyond this threshold, they are flushed to disk to ease memory pressure. Then they're loaded on demand. This configuration controls the maximum number of aborted transactions before they are flushed to disk.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 4294967295, + "minimum": 0, + "name": "abort_index_segment_size", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "abort_timed_out_transactions_interval_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 10000, + "default_human_readable": "10 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Interval, in milliseconds, at which Redpanda looks for inactive transactions and aborts them.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "abort_timed_out_transactions_interval_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "admin": { + "c_type": "model::broker_endpoint", + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": { + "address": "127.0.0.1", + "port": 9644 + }, + "defined_in": "src/v/config/node_config.cc", + "description": "Network address for the glossterm:Admin API[] server.", + "example": "[,yaml]\n----\nredpanda:\n admin:\n - name: \n address: \n port: \n----\n\nReplace the following placeholders with your values:\n\n* ``: Name for the Admin API listener (TLS configuration is handled separately in the <> broker property)\n* ``: The externally accessible hostname or IP address that clients use to connect to this broker\n* ``: The port number for the Admin API endpoint", + 
"is_deprecated": false, + "is_enterprise": false, + "items": { + "type": "object" + }, + "name": "admin", + "needs_restart": true, + "nullable": false, + "type": "object", + "visibility": "user" + }, + "admin_api_doc_dir": { + "c_type": "ss::sstring", + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "/usr/share/redpanda/admin-api-doc", + "defined_in": "src/v/config/node_config.cc", + "description": "Path to the API specifications for the Admin API.", + "is_deprecated": false, + "is_enterprise": false, + "name": "admin_api_doc_dir", + "needs_restart": true, + "nullable": false, + "type": "string", + "visibility": "user" + }, + "admin_api_require_auth": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Whether Admin API clients must provide HTTP basic authentication headers.", + "is_deprecated": false, + "is_enterprise": false, + "name": "admin_api_require_auth", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "admin_api_tls": { + "c_type": "endpoint_tls_config", + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": [], + "defined_in": "src/v/config/node_config.cc", + "description": "Specifies the TLS configuration for the HTTP Admin API.", + "example": "[,yaml]\n----\nredpanda:\n admin_api_tls:\n - name: \n enabled: true\n cert_file: \n key_file: \n truststore_file: \n require_client_auth: true\n----\n\nReplace the following placeholders with your values:\n\n* ``: Name that matches your Admin API listener (defined in the <> broker property)\n* ``: Full path to the TLS 
certificate file\n* ``: Full path to the TLS private key file\n* ``: Full path to the Certificate Authority file", + "is_deprecated": false, + "is_enterprise": false, + "items": { + "type": "endpoint_tls_config" + }, + "name": "admin_api_tls", + "needs_restart": true, + "nullable": false, + "type": "array", + "visibility": "user" + }, + "advertised_kafka_api": { + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "override", + "description": "Address of the Kafka API published to the clients. If not set, the <> broker property is used. When behind a load balancer or in containerized environments, this should be the externally-accessible address that clients use to connect.", + "example": "[,yaml]\n----\nredpanda:\n advertised_kafka_api:\n - name: \n address: \n port: \n----\n\nReplace the following placeholders with your values:\n\n* ``: Name that matches your Kafka API listener (defined in the <> broker property)\n* ``: The externally accessible hostname or IP address that clients use to connect to this broker\n* ``: The port number for the Kafka API endpoint", + "is_deprecated": false, + "is_topic_property": false, + "name": "advertised_kafka_api", + "type": "string", + "visibility": "user" + }, + "advertised_pandaproxy_api": { + "c_type": "model::broker_endpoint", + "category": "pandaproxy", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": [], + "defined_in": "src/v/pandaproxy/rest/configuration.cc", + "description": "Network address for the HTTP Proxy API server to publish to clients.", + "is_deprecated": false, + "is_enterprise": false, + "items": { + "type": "object" + }, + "name": "advertised_pandaproxy_api", + "needs_restart": true, + "nullable": false, + "type": "array" + }, + "advertised_rpc_api": { + 
"category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "override", + "description": "Address of RPC endpoint published to other cluster members. If not set, the <> broker property is used. This should be the address other brokers can use to communicate with this broker.", + "example": "[,yaml]\n----\nredpanda:\n advertised_rpc_api:\n address: \n port: \n----\n\nReplace the following placeholders with your values:\n\n* ``: The externally accessible hostname or IP address that other brokers use to communicate with this broker\n* ``: The port number for the RPC endpoint (default is 33145)", + "is_deprecated": false, + "is_topic_property": false, + "name": "advertised_rpc_api", + "type": "string", + "visibility": "user" + }, + "aggregate_metrics": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Enable aggregation of metrics returned by the xref:reference:internal-metrics-reference.adoc[`/metrics`] endpoint. Aggregation can simplify monitoring by providing summarized data instead of raw, per-instance metrics. 
Metric aggregation is performed by summing the values of samples by labels and is done when it makes sense by the shard and/or partition labels.", + "is_deprecated": false, + "is_enterprise": false, + "name": "aggregate_metrics", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:reference:internal-metrics-reference.adoc[`/metrics`]" + ], + "type": "boolean" + }, + "alive_timeout_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 5000, + "default_human_readable": "5 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "The amount of time since the last broker status heartbeat. After this time, a broker is considered offline and not alive.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "alive_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "api_doc_dir": { + "c_type": "ss::sstring", + "category": "pandaproxy", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "/usr/share/redpanda/proxy-api-doc", + "defined_in": "src/v/pandaproxy/rest/configuration.cc", + "description": "Path to the API specifications directory. 
This directory contains API documentation for both the HTTP Proxy API and Schema Registry API.", + "is_deprecated": false, + "is_enterprise": false, + "name": "api_doc_dir", + "needs_restart": true, + "nullable": false, + "type": "string" + }, + "append_chunk_size": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 16384, + "defined_in": "src/v/config/configuration.cc", + "description": "Size of direct write operations to disk in bytes. A larger chunk size can improve performance for write-heavy workloads, but increase latency for these writes as more data is collected before each write operation. A smaller chunk size can decrease write latency, but potentially increase the number of disk I/O operations.", + "example": "`32768`", + "is_deprecated": false, + "is_enterprise": false, + "name": "append_chunk_size", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "audit_client_max_buffer_size": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 16777216, + "defined_in": "src/v/config/configuration.cc", + "description": "Defines the number of bytes allocated by the internal audit client for audit messages. When changing this, you must disable audit logging and then re-enable it for the change to take effect. 
Consider increasing this if your system generates a very large number of audit records in a short amount of time.", + "is_deprecated": false, + "is_enterprise": false, + "name": "audit_client_max_buffer_size", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "audit_enabled": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Enables or disables audit logging. When you set this to true, Redpanda checks for an existing topic named `_redpanda.audit_log`. If none is found, Redpanda automatically creates one for you.", + "enterprise_constructor": "restricted_only", + "enterprise_restricted_value": [ + "true" + ], + "enterprise_value": [ + "true" + ], + "is_deprecated": false, + "is_enterprise": true, + "name": "audit_enabled", + "needs_restart": false, + "nullable": false, + "related_topics": [], + "type": "boolean", + "visibility": "user" + }, + "audit_enabled_event_types": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": [ + "management", + "authenticate", + "admin" + ], + "defined_in": "src/v/config/configuration.cc", + "description": "List of strings in JSON style identifying the event types to include in the audit log. 
This may include any of the following: `management, produce, consume, describe, heartbeat, authenticate, schema_registry, admin`.", + "example": "`[\"management\", \"describe\"]`", + "is_deprecated": false, + "is_enterprise": false, + "items": { + "type": "string" + }, + "name": "audit_enabled_event_types", + "needs_restart": false, + "nullable": false, + "type": "array", + "visibility": "user" + }, + "audit_excluded_principals": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": [], + "defined_in": "src/v/config/configuration.cc", + "description": "List of user principals to exclude from auditing.", + "example": "`[\"User:principal1\",\"User:principal2\"]`", + "is_deprecated": false, + "is_enterprise": false, + "items": { + "type": "string" + }, + "name": "audit_excluded_principals", + "needs_restart": false, + "nullable": false, + "type": "array", + "visibility": "user" + }, + "audit_excluded_topics": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": [], + "defined_in": "src/v/config/configuration.cc", + "description": "List of topics to exclude from auditing.", + "example": "`[\"topic1\",\"topic2\"]`", + "is_deprecated": false, + "is_enterprise": false, + "items": { + "type": "string" + }, + "name": "audit_excluded_topics", + "needs_restart": false, + "nullable": false, + "type": "array", + "visibility": "user" + }, + "audit_failure_policy": { + "c_type": "audit_failure_policy", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "reject", + "defined_in": "src/v/config/configuration.cc", + "description": "Defines the policy for rejecting audit log messages when the audit log queue is full. 
If set to 'permit', then new audit messages are dropped and the operation is permitted. If set to 'reject', then the operation is rejected.", + "enum": [ + "reject", + "permit" + ], + "is_deprecated": false, + "is_enterprise": false, + "name": "audit_failure_policy", + "needs_restart": false, + "nullable": false, + "type": "string", + "visibility": "user" + }, + "audit_log_num_partitions": { + "c_type": "int32_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": true, + "cloud_supported": true, + "config_scope": "cluster", + "default": 12, + "defined_in": "src/v/config/configuration.cc", + "description": "Defines the number of partitions used by a newly-created audit topic. This configuration applies only to the audit log topic and may be different from the cluster or other topic configurations. This cannot be altered for existing audit log topics.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 2147483647, + "minimum": -2147483648, + "name": "audit_log_num_partitions", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "audit_log_replication_factor": { + "c_type": "int16_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Defines the replication factor for a newly-created audit log topic. This configuration applies only to the audit log topic and may be different from the cluster or other topic configurations. This cannot be altered for existing audit log topics. Setting this value is optional. 
If a value is not provided, Redpanda will use the value specified for `internal_topic_replication_factor`.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 32767, + "minimum": -32768, + "name": "audit_log_replication_factor", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "user" + }, + "audit_queue_drain_interval_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 500, + "default_human_readable": "500 milliseconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Interval, in milliseconds, at which Redpanda flushes the queued audit log messages to the audit log topic. Longer intervals may help prevent duplicate messages, especially in high throughput scenarios, but they also increase the risk of data loss during shutdowns where the queue is lost.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "audit_queue_drain_interval_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "audit_queue_max_buffer_size_per_shard": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1048576, + "defined_in": "src/v/config/configuration.cc", + "description": "Defines the maximum amount of memory in bytes used by the audit buffer in each shard. Once this size is reached, requests to log additional audit messages will return a non-retryable error. 
Limiting the buffer size per shard helps prevent any single shard from consuming excessive memory due to audit log messages.", + "is_deprecated": false, + "is_enterprise": false, + "name": "audit_queue_max_buffer_size_per_shard", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "audit_use_rpc": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "Use Redpanda's internal communication system to write audit logs. When disabled, Redpanda uses a Kafka client to write audit logs instead.", + "is_deprecated": false, + "is_enterprise": false, + "name": "audit_use_rpc", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "auto_create_topics_enabled": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Allow automatic topic creation. To prevent excess topics, this property is not supported on Redpanda Cloud BYOC and Dedicated clusters. 
You should explicitly manage topic creation for these Redpanda Cloud clusters.\n\nIf you produce to a topic that doesn't exist, the topic will be created with defaults if this property is enabled.", + "is_deprecated": false, + "is_enterprise": false, + "name": "auto_create_topics_enabled", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "broker_tls": { + "c_type": "config::tls_config", + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": { + "crl_file": null, + "enable_renegotiation": null, + "enabled": null, + "key_cert": null, + "min_tls_version": null, + "require_client_auth": null, + "tls_v1_2_cipher_suites": null, + "tls_v1_3_cipher_suites": null, + "truststore_file": null + }, + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "TLS configuration for the Kafka API servers to which the HTTP Proxy client should connect.", + "is_deprecated": false, + "is_enterprise": false, + "name": "broker_tls", + "needs_restart": true, + "nullable": false, + "type": "object" + }, + "brokers": { + "c_type": "net::unresolved_address", + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": [], + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Network addresses of the Kafka API servers to which the HTTP Proxy client should connect.", + "is_deprecated": false, + "is_enterprise": false, + "items": { + "type": "object" + }, + "name": "brokers", + "needs_restart": true, + "nullable": false, + "type": "array" + }, + "cleanup.policy": { + "acceptable_values": "[`delete`, `compact`, `compact,delete`]", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "retention-compaction", + 
"cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "cluster-properties.adoc", + "config_scope": "topic", + "corresponding_cluster_property": "log_cleanup_policy", + "default": "delete", + "defined_in": "src/v/kafka/protocol/topic_properties.h", + "description": "The cleanup policy to apply for log segments of a topic.\nWhen `cleanup.policy` is set, it overrides the cluster property xref:cluster-properties.adoc#log_cleanup_policy[`log_cleanup_policy`] for the topic.\n\n**Values**:\n\n- `delete` - Deletes data according to size-based or time-based retention limits, or both.\n- `compact` - Deletes data according to a key-based retention policy, discarding all but the latest value for each key.\n- `compact,delete` - The latest values are kept for each key, while the remaining data is deleted according to retention limits.", + "enum": [ + "none", + "delete", + "compact" + ], + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "cleanup.policy", + "needs_restart": false, + "related_topics": [ + "xref:cluster-properties.adoc#log_cleanup_policy[`log_cleanup_policy`]", + "xref:manage:cluster-maintenance/disk-utilization.adoc#configure-segment-size[Configure segment size]", + "xref:manage:tiered-storage.adoc#compacted-topics-in-tiered-storage[Compacted topics in Tiered Storage]", + "xref:reference:properties/cluster-properties.adoc#log_cleanup_policy[`log_cleanup_policy`]" + ], + "type": "string" + }, + "client_cache_max_size": { + "c_type": "size_t", + "category": "pandaproxy", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": 10, + "defined_in": "src/v/pandaproxy/rest/configuration.cc", + "description": "The maximum number of Kafka client connections that Redpanda can cache in the LRU (least recently used) cache. 
The LRU cache helps optimize resource utilization by keeping the most recently used clients in memory, facilitating quicker reconnections for frequent clients while limiting memory usage.", + "is_deprecated": false, + "is_enterprise": false, + "name": "client_cache_max_size", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "client_identifier": { + "c_type": "ss::sstring", + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "test_client", + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Custom identifier to include in the Kafka request header for the HTTP Proxy client. This identifier can help debug or monitor client activities.", + "is_deprecated": false, + "is_enterprise": false, + "name": "client_identifier", + "needs_restart": true, + "nullable": true, + "type": "string" + }, + "client_keep_alive": { + "c_type": "std::chrono::milliseconds", + "category": "pandaproxy", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": 300000, + "default_human_readable": "5 minutes", + "defined_in": "src/v/pandaproxy/rest/configuration.cc", + "description": "Time, in milliseconds, that an idle client connection may remain open to the HTTP Proxy API.", + "example": "`300000`", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "client_keep_alive", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "cloud_storage_access_key": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "AWS or GCP access key. 
This access key is part of the credentials that Redpanda requires to authenticate with object storage services for Tiered Storage. This access key is used with the <> to form the complete credentials required for authentication.\nTo authenticate using IAM roles, see <>.", + "gets_restored": false, + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_access_key", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "cloud_storage_api_endpoint": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Optional API endpoint. The only instance in which you must set this value is when using a custom domain with your object storage service.\n\n- AWS: If not set, this is automatically generated using <> and <>. Otherwise, this uses the value assigned.\n- GCP: If not set, this is automatically generated using `storage.googleapis.com` and <>.\n- Azure: If not set, this is automatically generated using `blob.core.windows.net` and <>. 
If you have enabled hierarchical namespaces for your storage account and use a custom endpoint, use <>.", + "gets_restored": false, + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_api_endpoint", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "cloud_storage_api_endpoint_port": { + "c_type": "int16_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 443, + "defined_in": "src/v/config/configuration.cc", + "description": "TLS port override.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 32767, + "minimum": -32768, + "name": "cloud_storage_api_endpoint_port", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "cloud_storage_attempt_cluster_restore_on_bootstrap": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "When set to `true`, Redpanda automatically retrieves cluster metadata from a specified object storage bucket at the cluster's first startup. This option is ideal for orchestrated deployments, such as Kubernetes. 
Ensure any previous cluster linked to the bucket is fully decommissioned to prevent conflicts between Tiered Storage subsystems.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_attempt_cluster_restore_on_bootstrap", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_azure_adls_endpoint": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Azure Data Lake Storage v2 endpoint override. Use when hierarchical namespaces are enabled on your storage account and you have set up a custom endpoint.\n\nIf not set, this is automatically generated using `dfs.core.windows.net` and <>.", + "gets_restored": false, + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_azure_adls_endpoint", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "cloud_storage_azure_adls_port": { + "c_type": "uint16_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Azure Data Lake Storage v2 port override. See also: <>. 
Use when hierarchical namespaces are enabled on your storage account and you have set up a custom endpoint.", + "gets_restored": false, + "is_deprecated": false, + "is_enterprise": false, + "maximum": 65535, + "minimum": 0, + "name": "cloud_storage_azure_adls_port", + "needs_restart": true, + "nullable": true, + "type": "integer", + "visibility": "user" + }, + "cloud_storage_azure_container": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": true, + "cloud_supported": true, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "The name of the Azure container to use with Tiered Storage. If `null`, the property is disabled.", + "gets_restored": false, + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_azure_container", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "cloud_storage_azure_hierarchical_namespace_enabled": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Force Redpanda to use or not use an Azure Data Lake Storage (ADLS) Gen2 hierarchical namespace-compliant client in <>. \n\nWhen this property is not set, <> must be set, and each broker checks at startup if a hierarchical namespace is enabled. \n\nWhen set to `true`, this property disables the check and assumes a hierarchical namespace is enabled. \n\nWhen set to `false`, this property disables the check and assumes a hierarchical namespace is not enabled. 
\n\nThis setting should be used only in emergencies where Redpanda fails to detect the correct hierarchical namespace status.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_azure_hierarchical_namespace_enabled", + "needs_restart": true, + "nullable": true, + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_azure_managed_identity_id": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "The managed identity ID to use for access to the Azure storage account. To use Azure managed identities, you must set <> to `azure_vm_instance_metadata`. See xref:manage:security/iam-roles.adoc[IAM Roles] for more information on managed identities.\n\n*Type*: string\n\n*Default*: null\n\n*Requires restart*: No\n\n*Supported versions*: Redpanda v24.1 or later\n\n---", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_azure_managed_identity_id", + "needs_restart": false, + "nullable": true, + "related_topics": [ + "xref:manage:security/iam-roles.adoc[IAM Roles]" + ], + "type": "string", + "visibility": "user" + }, + "cloud_storage_azure_shared_key": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "The account access key to be used for Azure Shared Key authentication with the Azure storage account configured by <>. 
If `null`, the property is disabled.\n\nNOTE: Redpanda expects this key string to be Base64 encoded.\n\n*Requires restart*: No", + "gets_restored": false, + "is_deprecated": false, + "is_enterprise": false, + "is_secret": true, + "name": "cloud_storage_azure_shared_key", + "needs_restart": false, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "cloud_storage_azure_storage_account": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": true, + "cloud_supported": true, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "The name of the Azure storage account to use with Tiered Storage. If `null`, the property is disabled.", + "gets_restored": false, + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_azure_storage_account", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "cloud_storage_backend": { + "c_type": "model::cloud_storage_backend", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "unknown", + "defined_in": "src/v/config/configuration.cc", + "description": "Optional object storage backend variant used to select API capabilities. 
If not supplied, this will be inferred from other configuration properties.", + "enum": [ + "aws", + "google_s3_compat", + "azure", + "minio", + "oracle_s3_compat", + "linode_s3_compat", + "unknown" + ], + "example": "`aws`", + "gets_restored": false, + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_backend", + "needs_restart": true, + "nullable": false, + "type": "string", + "visibility": "user" + }, + "cloud_storage_background_jobs_quota": { + "c_type": "int32_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 5000, + "defined_in": "src/v/config/configuration.cc", + "description": "The total number of requests the object storage background jobs can make during one background housekeeping run. This is a per-shard limit. Adjusting this limit can optimize object storage traffic and impact shard performance.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 2147483647, + "minimum": -2147483648, + "name": "cloud_storage_background_jobs_quota", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_bucket": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "AWS or GCP bucket that should be used to store data.\n\nWARNING: Modifying this property after writing data to a bucket could cause data loss.", + "gets_restored": false, + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_bucket", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "cloud_storage_cache_check_interval_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": 
false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 5000, + "default_human_readable": "5 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Minimum interval between Tiered Storage cache trims, measured in milliseconds. This setting dictates the cooldown period after a cache trim operation before another trim can occur. If a cache fetch operation requests a trim but the interval since the last trim has not yet passed, the trim will be postponed until this cooldown expires. Adjusting this interval helps manage the balance between cache size and retrieval performance.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_cache_check_interval_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_cache_chunk_size": { + "c_type": "uint64_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 16777216, + "defined_in": "src/v/config/configuration.cc", + "description": "Size of chunks of segments downloaded into object storage cache. Reduces space usage by only downloading the necessary chunk from a segment.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 18446744073709551615, + "minimum": 0, + "name": "cloud_storage_cache_chunk_size", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_cache_directory": { + "c_type": "ss::sstring", + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "src/v/config/node_config.cc", + "description": "Directory for archival cache. 
Set when the xref:reference:properties/cluster-properties.adoc#cloud_storage_enabled[`cloud_storage_enabled`] cluster property is enabled. If not specified, Redpanda uses a default path within the data directory.", + "example": "[,yaml]\n----\nredpanda:\n cloud_storage_cache_directory: \n----\n\n\nReplace `` with the full path to your desired cache directory.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_cache_directory", + "needs_restart": true, + "nullable": true, + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#cloud_storage_enabled[`cloud_storage_enabled`]" + ], + "type": "string", + "visibility": "user" + }, + "cloud_storage_cache_max_objects": { + "c_type": "uint32_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 100000, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of objects that may be held in the Tiered Storage cache. This applies simultaneously with <>, and whichever limit is hit first will trigger trimming of the cache.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 4294967295, + "minimum": 0, + "name": "cloud_storage_cache_max_objects", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_cache_num_buckets": { + "c_type": "uint32_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 0, + "defined_in": "src/v/config/configuration.cc", + "description": "Divide the object storage cache across the specified number of buckets. This only works for objects with randomized prefixes. 
The names are not changed when the value is set to zero.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 4294967295, + "minimum": 0, + "name": "cloud_storage_cache_num_buckets", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_cache_size": { + "c_type": "uint64_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 0, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum size of the object storage cache, in bytes.\n\nThis property works together with <> to define cache behavior:\n\n- When both properties are set, Redpanda uses the smaller calculated value of the two, in bytes.\n\n- If one of these properties is set to `0`, Redpanda uses the non-zero value.\n\n- These properties cannot both be `0`.\n\n- `cloud_storage_cache_size` cannot be `0` while `cloud_storage_cache_size_percent` is `null`.", + "gets_restored": false, + "is_deprecated": false, + "is_enterprise": false, + "maximum": 18446744073709551615, + "minimum": 0, + "name": "cloud_storage_cache_size", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "cloud_storage_cache_size_percent": { + "c_type": "double", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 20.0, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum size of the cache as a percentage, minus the space that Redpanda avoids using defined by the xref:reference:cluster-properties.adoc#disk_reservation_percent[`disk_reservation_percent`] cluster property. 
This is calculated at startup and dynamically updated if either this property, `disk_reservation_percent`, or <> changes.\n\nThis property works together with <> to define cache behavior:\n\n- When both properties are set, Redpanda uses the smaller calculated value of the two, in bytes.\n\n- If one of these properties is set to `0`, Redpanda uses the non-zero value.\n\n- These properties cannot both be `0`.\n\n- `cloud_storage_cache_size` cannot be `0` while `cloud_storage_cache_size_percent` is `null`.", + "example": "`20.0`", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_cache_size_percent", + "needs_restart": false, + "nullable": true, + "related_topics": [ + "xref:reference:cluster-properties.adoc#disk_reservation_percent[`disk_reservation_percent`]" + ], + "type": "number", + "visibility": "user" + }, + "cloud_storage_cache_trim_carryover_bytes": { + "c_type": "uint32_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 0, + "defined_in": "src/v/config/configuration.cc", + "description": "The cache performs a recursive directory inspection during the cache trim. The information obtained during the inspection can be carried over to the next trim operation. 
This parameter sets a limit on the memory occupied by objects that can be carried over from one trim to the next, and allows the cache to quickly unblock readers before starting the directory inspection (deprecated).", + "is_deprecated": true, + "is_enterprise": false, + "maximum": 4294967295, + "minimum": 0, + "name": "cloud_storage_cache_trim_carryover_bytes", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "deprecated" + }, + "cloud_storage_cache_trim_threshold_percent_objects": { + "c_type": "double", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Cache trimming is triggered when the number of objects in the cache reaches this percentage relative to its maximum object count. If unset, the default behavior is to start trimming when the cache is full.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_cache_trim_threshold_percent_objects", + "needs_restart": false, + "nullable": true, + "type": "number", + "version": "24.1.10", + "visibility": "tunable" + }, + "cloud_storage_cache_trim_threshold_percent_size": { + "c_type": "double", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Cache trimming is triggered when the cache size reaches this percentage relative to its maximum capacity. 
If unset, the default behavior is to start trimming when the cache is full.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_cache_trim_threshold_percent_size", + "needs_restart": false, + "nullable": true, + "type": "number", + "version": "24.1.10", + "visibility": "tunable" + }, + "cloud_storage_cache_trim_walk_concurrency": { + "c_type": "uint16_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1, + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum number of concurrent tasks launched for traversing the directory structure during cache trimming. A higher number allows cache trimming to run faster but can cause latency spikes due to increased pressure on I/O subsystem and syscall threads.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 65535, + "minimum": 0, + "name": "cloud_storage_cache_trim_walk_concurrency", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_chunk_eviction_strategy": { + "c_type": "model::cloud_storage_chunk_eviction_strategy", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "eager", + "defined_in": "src/v/config/configuration.cc", + "description": "Selects a strategy for evicting unused cache chunks.", + "enum": [ + "eager", + "capped", + "predictive" + ], + "example": "`eager`", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_chunk_eviction_strategy", + "needs_restart": false, + "nullable": false, + "type": "string", + "visibility": "tunable" + }, + "cloud_storage_chunk_prefetch": { + "c_type": "uint16_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 0, + 
"defined_in": "src/v/config/configuration.cc", + "description": "Number of chunks to prefetch ahead of every downloaded chunk. Prefetching additional chunks can enhance read performance by reducing wait times for sequential data access. A value of `0` disables prefetching, relying solely on on-demand downloads. Adjusting this property allows for tuning the balance between improved read performance and increased network and storage I/O.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 65535, + "minimum": 0, + "name": "cloud_storage_chunk_prefetch", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_client_lease_timeout_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 900000, + "default_human_readable": "15 minutes", + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum time Redpanda holds a connection to object storage before closing it. After this timeout, any active connection is immediately closed and must be re-established for subsequent operations.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_client_lease_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_cluster_metadata_num_consumer_groups_per_upload": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1000, + "defined_in": "src/v/config/configuration.cc", + "description": "Number of groups to upload in a single snapshot object during consumer offsets upload. 
Setting a lower value will mean a larger number of smaller snapshots are uploaded.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_cluster_metadata_num_consumer_groups_per_upload", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_cluster_metadata_retries": { + "c_type": "int16_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 5, + "defined_in": "src/v/config/configuration.cc", + "description": "Number of attempts metadata operations may be retried.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 32767, + "minimum": -32768, + "name": "cloud_storage_cluster_metadata_retries", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_cluster_metadata_upload_interval_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 3600000, + "default_human_readable": "1 hour", + "defined_in": "src/v/config/configuration.cc", + "description": "Time interval to wait between cluster metadata uploads.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_cluster_metadata_upload_interval_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_cluster_metadata_upload_timeout_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 60000, + "default_human_readable": "1 minute", + "defined_in": "src/v/config/configuration.cc", + "description": "Timeout for cluster metadata uploads.", 
+ "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_cluster_metadata_upload_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_cluster_name": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "A unique name for this cluster's metadata in object storage. Use this when multiple clusters share the same storage bucket (for example, for xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]). The name must be unique within the bucket, 1-64 characters, and use only letters, numbers, underscores, and hyphens. Don't change this value once set.\n\nThis is an internal-only configuration and should be enabled only after consulting with Redpanda support.", + "gets_restored": false, + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_cluster_name", + "needs_restart": false, + "nullable": true, + "related_topics": [ + "xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]" + ], + "type": "string", + "visibility": "user" + }, + "cloud_storage_credentials_host": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "The hostname to connect to for retrieving role based credentials. Derived from <> if not set. Only required when using IAM role based access. 
To authenticate using access keys, see <>.", + "gets_restored": false, + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_credentials_host", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "tunable" + }, + "cloud_storage_credentials_source": { + "c_type": "model::cloud_credentials_source", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "config_file", + "defined_in": "src/v/config/configuration.cc", + "description": "The source of credentials used to authenticate to object storage services.\nRequired for AWS or GCP authentication with IAM roles.\n\nTo authenticate using access keys, see <>.", + "enum": [ + "config_file", + "aws_instance_metadata", + "sts", + "gcp_instance_metadata", + "azure_aks_oidc_federation", + "azure_vm_instance_metadata" + ], + "example": "`config_file`", + "gets_restored": false, + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_credentials_source", + "needs_restart": true, + "nullable": false, + "type": "string", + "visibility": "user" + }, + "cloud_storage_crl_file": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Path to certificate revocation list for <>.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_crl_file", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "cloud_storage_disable_archival_stm_rw_fence": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Disables the 
concurrency control mechanism in Tiered Storage. This safety feature keeps data organized and correct when multiple processes access it simultaneously. Disabling it can cause data consistency problems, so use this setting only for testing, never in production systems.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_disable_archival_stm_rw_fence", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_disable_archiver_manager": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "Use legacy upload mode and do not start archiver_manager.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_disable_archiver_manager", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "cloud_storage_disable_chunk_reads": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Disable chunk reads and switch back to legacy mode where full segments are downloaded. When set to `true`, this option disables the more efficient chunk-based reads, causing Redpanda to download entire segments. 
This legacy behavior might be useful in specific scenarios where chunk-based fetching is not optimal.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_disable_chunk_reads", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_disable_read_replica_loop_for_tests": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Begins the read replica sync loop in topic partitions with Tiered Storage enabled. The property exists to simplify testing and shouldn't be set in production.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_disable_read_replica_loop_for_tests", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_disable_remote_labels_for_tests": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "If `true`, Redpanda disables remote labels and falls back on the hash-based object naming scheme for new topics. 
\n\nCAUTION: This property exists to simplify testing and shouldn't be set in production.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_disable_remote_labels_for_tests", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_disable_tls": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Disable TLS for all object storage connections.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_disable_tls", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "cloud_storage_disable_upload_consistency_checks": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Disable all upload consistency checks to allow Redpanda to upload logs with gaps and replicate metadata with consistency violations. Do not change the default value unless requested by Redpanda Support.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_disable_upload_consistency_checks", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_disable_upload_loop_for_tests": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Begins the upload loop in topic partitions with Tiered Storage enabled. 
The property exists to simplify testing and shouldn't be set in production.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_disable_upload_loop_for_tests", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_enable_compacted_topic_reupload": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "Enable re-uploading data for compacted topics.\nWhen set to `true`, Redpanda can re-upload data for compacted topics to object storage, ensuring that the most current state of compacted topics is available in the cloud. Disabling this property (`false`) may reduce storage and network overhead but at the risk of not having the latest compacted data state in object storage.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_enable_compacted_topic_reupload", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_enable_remote_allow_gaps": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Controls the eviction of locally stored log segments when Tiered Storage uploads are paused. Set to `false` to only evict data that has already been uploaded to object storage. If the retained data fills the local volume, Redpanda throttles producers. 
Set to `true` to allow the eviction of locally stored log segments, which may create gaps in offsets.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_enable_remote_allow_gaps", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_enable_remote_read": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Default remote read config value for new topics.\nWhen set to `true`, new topics are by default configured to allow reading data directly from object storage, facilitating access to older data that might have been offloaded as part of Tiered Storage. With the default set to `false`, remote reads must be explicitly enabled at the topic level.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_enable_remote_read", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_enable_remote_write": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Default remote write value for new topics.\nWhen set to `true`, new topics are by default configured to upload data to object storage. 
With the default set to `false`, remote write must be explicitly enabled at the topic level.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_enable_remote_write", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_enable_scrubbing": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Enable routine checks (scrubbing) of object storage partitions. The scrubber validates the integrity of data and metadata uploaded to object storage.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_enable_scrubbing", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_enable_segment_merging": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "Enables adjacent segment merging. 
Segments are re-uploaded when there is an opportunity to do so and the re-upload will improve Tiered Storage performance.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_enable_segment_merging", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:manage:tiered-storage.adoc#object-storage-housekeeping[Object storage housekeeping]" + ], + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_enable_segment_uploads": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "Controls the upload of log segments to Tiered Storage. If set to `false`, this property temporarily pauses all log segment uploads from the Redpanda cluster. When the uploads are paused, the `cloud_storage_enable_remote_allow_gaps` cluster configuration and `redpanda.remote.allowgaps` topic properties control local retention behavior.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_enable_segment_uploads", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:properties/topic-properties.adoc#redpandaremoteallowgaps[`redpanda.remote.allowgaps`]" + ], + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_enabled": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Enable object storage. 
Must be set to `true` to use Tiered Storage or Remote Read Replicas.", + "enterprise_constructor": "restricted_only", + "enterprise_restricted_value": [ + "true" + ], + "enterprise_value": [ + "true" + ], + "is_deprecated": false, + "is_enterprise": true, + "name": "cloud_storage_enabled", + "needs_restart": true, + "nullable": false, + "related_topics": [], + "type": "boolean", + "visibility": "user" + }, + "cloud_storage_full_scrub_interval_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 43200000, + "default_human_readable": "12 hours", + "defined_in": "src/v/config/configuration.cc", + "description": "Interval, in milliseconds, between a final scrub and the next scrub.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_full_scrub_interval_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_garbage_collect_timeout_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 30000, + "default_human_readable": "30 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Timeout for running the cloud storage garbage collection, in milliseconds.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_garbage_collect_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_graceful_transfer_timeout_ms": { + "aliases": [ + "cloud_storage_graceful_transfer_timeout" + ], + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": 
false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 5000, + "default_human_readable": "5 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Time limit on waiting for uploads to complete before a leadership transfer. If this is `null`, leadership transfers proceed without waiting.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_graceful_transfer_timeout_ms", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_housekeeping_interval_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 300000, + "default_human_readable": "5 minutes", + "defined_in": "src/v/config/configuration.cc", + "description": "Interval, in milliseconds, between object storage housekeeping tasks.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_housekeeping_interval_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_hydrated_chunks_per_segment_ratio": { + "c_type": "double", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 0.7, + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum number of chunks per segment that can be hydrated at a time. Above this number, unused chunks are trimmed.\n\nA segment is divided into chunks. Chunk hydration means downloading the chunk (which is a small part of a full segment) from cloud storage and placing it in the local disk cache. Redpanda periodically removes old, unused chunks from your local disk. 
This process is called chunk eviction. This property controls how many chunks can be present for a given segment in local disk at a time, before eviction is triggered, removing the oldest ones from disk. Note that this property is not used for the default eviction strategy which simply removes all unused chunks.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_hydrated_chunks_per_segment_ratio", + "needs_restart": false, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "cloud_storage_hydration_timeout_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 600000, + "default_human_readable": "10 minutes", + "defined_in": "src/v/config/configuration.cc", + "description": "Time to wait for a hydration request to be fulfilled. If hydration is not completed within this time, the consumer is notified with a timeout error.\n\nThis property has no enforced minimum or maximum bounds, so avoid setting negative or excessively large values.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_hydration_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_idle_threshold_rps": { + "c_type": "double", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 10.0, + "defined_in": "src/v/config/configuration.cc", + "description": "The object storage request rate threshold for idle state detection. 
If the average request rate for the configured period is lower than this threshold, the object storage is considered idle.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_idle_threshold_rps", + "needs_restart": false, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "cloud_storage_idle_timeout_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 10000, + "default_human_readable": "10 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "The timeout, in milliseconds, used to detect the idle state of the object storage API. If the average object storage request rate is below this threshold for a configured amount of time, the object storage is considered idle and the housekeeping jobs are started.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_idle_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_initial_backoff_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 100, + "default_human_readable": "100 milliseconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Initial backoff time for exponential backoff algorithm (ms).", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_initial_backoff_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_inventory_based_scrub_enabled": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + 
"cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Scrubber uses the latest cloud storage inventory report, if available, to check if the required objects exist in the bucket or container.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_inventory_based_scrub_enabled", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_inventory_hash_path_directory": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "override", + "description": "Directory to store inventory report hashes for use by cloud storage scrubber. If not specified, Redpanda uses a default path within the data directory.", + "example": "[,yaml]\n----\nredpanda:\n cloud_storage_inventory_hash_store: <path>\n----\n\nReplace `<path>` with the full path to your desired inventory hash storage directory.", + "is_deprecated": false, + "is_topic_property": false, + "name": "cloud_storage_inventory_hash_path_directory", + "type": "string", + "visibility": "user" + }, + "cloud_storage_inventory_hash_store": { + "c_type": "ss::sstring", + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "src/v/config/node_config.cc", + "description": "Directory to store inventory report hashes for use by cloud storage scrubber. 
If not specified, Redpanda uses a default path within the data directory.", + "example": "[,yaml]\n----\nredpanda:\n cloud_storage_inventory_hash_store: \n----", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_inventory_hash_store", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "cloud_storage_inventory_id": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "redpanda_scrubber_inventory", + "defined_in": "src/v/config/configuration.cc", + "description": "The name of the scheduled inventory job created by Redpanda to generate bucket or container inventory reports.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_inventory_id", + "needs_restart": true, + "nullable": false, + "type": "string", + "visibility": "tunable" + }, + "cloud_storage_inventory_max_hash_size_during_parse": { + "c_type": "uint64_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 67108864, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum bytes of hashes held in memory before writing data to disk during inventory report parsing. This affects the number of files written to disk during inventory report parsing. 
When this limit is reached, new files are written to disk.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 18446744073709551615, + "minimum": 0, + "name": "cloud_storage_inventory_max_hash_size_during_parse", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_inventory_report_check_interval_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 21600000, + "default_human_readable": "6 hours", + "defined_in": "src/v/config/configuration.cc", + "description": "Time interval between checks for a new inventory report in the cloud storage bucket or container.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_inventory_report_check_interval_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_inventory_reports_prefix": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "redpanda_scrubber_inventory", + "defined_in": "src/v/config/configuration.cc", + "description": "The prefix to the path in the cloud storage bucket or container where inventory reports will be placed.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_inventory_reports_prefix", + "needs_restart": true, + "nullable": false, + "type": "string", + "visibility": "tunable" + }, + "cloud_storage_inventory_self_managed_report_config": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "If 
enabled, Redpanda will not attempt to create the scheduled report configuration using cloud storage APIs. The scrubbing process will look for reports in the expected paths in the bucket or container, and use the latest report found. Primarily intended for use in testing and on backends where scheduled inventory reports are not supported.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_inventory_self_managed_report_config", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "cloud_storage_manifest_cache_size": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1048576, + "defined_in": "src/v/config/configuration.cc", + "description": "Amount of memory that can be used to handle Tiered Storage metadata.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_manifest_cache_size", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_manifest_cache_ttl_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 10000, + "default_human_readable": "10 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "The time interval that determines how long the materialized manifest can stay in cache under contention. This parameter is used for performance tuning. When the spillover manifest is materialized and stored in cache and the cache needs to evict it it will use 'cloud_storage_materialized_manifest_ttl_ms' value as a timeout. The cursor that uses the spillover manifest uses this value as a TTL interval after which it stops referencing the manifest making it available for eviction. 
This only affects spillover manifests under contention.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_manifest_cache_ttl_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_manifest_max_upload_interval_sec": { + "c_type": "std::chrono::seconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 60, + "default_human_readable": "1 minute", + "defined_in": "src/v/config/configuration.cc", + "description": "Minimum interval, in seconds, between partition manifest uploads. Actual time between uploads may be greater than this interval. If this is `null`, metadata is updated after each segment upload.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17179869183, + "minimum": -17179869184, + "name": "cloud_storage_manifest_max_upload_interval_sec", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_manifest_upload_timeout_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 30000, + "default_human_readable": "30 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Manifest upload timeout, in milliseconds.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_manifest_upload_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_materialized_manifest_ttl_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 
null, + "defined_in": "override", + "description": "The interval, in milliseconds, determines how long the materialized manifest can stay in the cache under contention. This setting is used for performance tuning. When the spillover manifest is materialized and stored in the cache, and the cache needs to evict it, it uses this value as a timeout. The cursor that uses the spillover manifest uses this value as a TTL interval, after which it stops referencing the manifest making it available for eviction. This only affects spillover manifests under contention.", + "is_deprecated": false, + "is_topic_property": false, + "name": "cloud_storage_materialized_manifest_ttl_ms", + "type": "string", + "visibility": "user" + }, + "cloud_storage_max_concurrent_hydrations_per_shard": { + "c_type": "uint32_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum concurrent segment hydrations of remote data per CPU core. If unset, value of `cloud_storage_max_connections / 2` is used, which means that half of available object storage bandwidth could be used to download data from object storage. If the cloud storage cache is empty every new segment reader will require a download. This will lead to 1:1 mapping between number of partitions scanned by the fetch request and number of parallel downloads. If this value is too large the downloads can affect other workloads. In case of any problem caused by the tiered-storage reads this value can be lowered. This will only affect segment hydrations (downloads) but won't affect cached segments. 
If fetch request is reading from the tiered-storage cache its concurrency will only be limited by available memory.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 4294967295, + "minimum": 0, + "name": "cloud_storage_max_concurrent_hydrations_per_shard", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_max_connection_idle_time_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 5000, + "default_human_readable": "5 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Defines the maximum duration an HTTPS connection to object storage can stay idle, in milliseconds, before being terminated.\nThis setting reduces resource utilization by closing inactive connections. Adjust this property to balance keeping connections ready for subsequent requests and freeing resources associated with idle connections.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_max_connection_idle_time_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_max_connections": { + "c_type": "int16_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 20, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum simultaneous object storage connections per shard, applicable to upload and download activities.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 32767, + "minimum": -32768, + "name": "cloud_storage_max_connections", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + 
"cloud_storage_max_materialized_segments_per_shard": { + "c_type": "uint32_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum concurrent readers of remote data per CPU core. If unset, value of `topic_partitions_per_shard` multiplied by 2 is used.", + "is_deprecated": true, + "is_enterprise": false, + "maximum": 4294967295, + "minimum": 0, + "name": "cloud_storage_max_materialized_segments_per_shard", + "needs_restart": true, + "nullable": true, + "type": "integer", + "visibility": "deprecated" + }, + "cloud_storage_max_partition_readers_per_shard": { + "c_type": "uint32_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum partition readers per shard (deprecated)", + "is_deprecated": true, + "is_enterprise": false, + "maximum": 4294967295, + "minimum": 0, + "name": "cloud_storage_max_partition_readers_per_shard", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "deprecated" + }, + "cloud_storage_max_segment_readers_per_shard": { + "aliases": [ + "cloud_storage_max_readers_per_shard" + ], + "c_type": "uint32_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum concurrent I/O cursors of materialized remote segments per CPU core. If unset, the value of `topic_partitions_per_shard` is used, where one segment reader per partition is used if the shard is at its maximum partition capacity. 
These readers are cached across Kafka consume requests and store a readahead buffer.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 4294967295, + "minimum": 0, + "name": "cloud_storage_max_segment_readers_per_shard", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_max_segments_pending_deletion_per_partition": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 5000, + "defined_in": "src/v/config/configuration.cc", + "description": "The per-partition limit for the number of segments pending deletion from the cloud. Segments can be deleted due to retention or compaction. If this limit is breached and deletion fails, then segments are orphaned in the cloud and must be removed manually.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_max_segments_pending_deletion_per_partition", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_max_throughput_per_shard": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1073741824, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum bandwidth allocated to Tiered Storage operations per shard, in bytes per second.\nThis setting limits the Tiered Storage subsystem's throughput per shard, facilitating precise control over bandwidth usage in testing scenarios. 
In production environments, use `cloud_storage_throughput_limit_percent` for more dynamic throughput management based on actual storage capabilities.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_max_throughput_per_shard", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_metadata_sync_timeout_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 10000, + "default_human_readable": "10 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Timeout for xref:manage:tiered-storage.adoc[] metadata synchronization.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_metadata_sync_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_min_chunks_per_segment_threshold": { + "c_type": "uint64_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 5, + "defined_in": "src/v/config/configuration.cc", + "description": "The minimum number of chunks per segment for trimming to be enabled. 
If the number of chunks in a segment is below this threshold, the segment is small enough that all chunks in it can be hydrated at any given time.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 18446744073709551615, + "minimum": 0, + "name": "cloud_storage_min_chunks_per_segment_threshold", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_partial_scrub_interval_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 3600000, + "default_human_readable": "1 hour", + "defined_in": "src/v/config/configuration.cc", + "description": "Time interval between two partial scrubs of the same partition.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_partial_scrub_interval_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_readreplica_manifest_sync_timeout_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 30000, + "default_human_readable": "30 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Timeout to check if new data is available for partitions in object storage for read replicas.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_readreplica_manifest_sync_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_recovery_temporary_retention_bytes_default": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, 
+ "cloud_supported": false, + "config_scope": "cluster", + "default": 1073741824, + "defined_in": "src/v/config/configuration.cc", + "description": "Retention in bytes for topics created during automated recovery.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_recovery_temporary_retention_bytes_default", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_recovery_topic_validation_depth": { + "c_type": "uint32_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 10, + "defined_in": "src/v/config/configuration.cc", + "description": "Number of metadata segments to validate, from newest to oldest, when `cloud_storage_recovery_topic_validation_mode` is set to `check_manifest_and_segment_metadata`.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 4294967295, + "minimum": 0, + "name": "cloud_storage_recovery_topic_validation_depth", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_recovery_topic_validation_mode": { + "c_type": "model::recovery_validation_mode", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "check_manifest_existence", + "defined_in": "src/v/config/configuration.cc", + "description": "Validation performed before recovering a topic from object storage. In case of failure, the reason for the failure appears as `ERROR` lines in the Redpanda application log. For each topic, this reports errors for all partitions, but for each partition, only the first error is reported.\n\nThis property accepts the following parameters:\n\n- `no_check`: Skips the checks for topic recovery.\n- `check_manifest_existence`: Runs an existence check on each `partition_manifest`. 
Fails if there are connection issues to the object storage.\n- `check_manifest_and_segment_metadata`: Downloads the manifest and runs a consistency check, comparing the metadata with the cloud storage objects. The process fails if metadata references any missing cloud storage objects.\n\nExample: Redpanda validates the topic `kafka/panda-topic-recovery-NOT-OK` and stops due to a fatal error on partition 0:\n\n```bash\nERROR 2024-04-24 21:29:08,166 [shard 1:main] cluster - [fiber11|0|299996ms recovery validation of {kafka/panda-topic-recovery-NOT-OK/0}/24] - manifest metadata check: missing segment, validation not ok\nERROR 2024-04-24 21:29:08,166 [shard 1:main] cluster - topics_frontend.cc:519 - Stopping recovery of {kafka/panda-topic-recovery-NOT-OK} due to validation error\n```\n\nEach failing partition error message has the following format:\n\n```bash\nERROR .... [... recovery validation of {}...] - , validation not ok\n```\n\nAt the end of the process, Redpanda outputs a final ERROR message: \n\n```bash\nERROR ... ... 
- Stopping recovery of {} due to validation error\n```", + "enum": [ + "check_manifest_existence", + "check_manifest_and_segment_metadata", + "no_check" + ], + "example": "`check_manifest_existence`", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_recovery_topic_validation_mode", + "needs_restart": false, + "nullable": false, + "type": "string", + "visibility": "tunable" + }, + "cloud_storage_region": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Cloud provider region that houses the bucket or container used for storage.", + "gets_restored": false, + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_region", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "cloud_storage_roles_operation_timeout_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 30000, + "default_human_readable": "30 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Timeout for IAM role related operations (ms).", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_roles_operation_timeout_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_scrubbing_interval_jitter_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 600000, + "default_human_readable": "10 minutes", + "defined_in": "src/v/config/configuration.cc", + "description": 
"Jitter applied to the object storage scrubbing interval.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_scrubbing_interval_jitter_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_secret_key": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Cloud provider secret key.", + "gets_restored": false, + "is_deprecated": false, + "is_enterprise": false, + "is_secret": true, + "name": "cloud_storage_secret_key", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "cloud_storage_segment_max_upload_interval_sec": { + "c_type": "std::chrono::seconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 3600, + "default_human_readable": "1 hour", + "defined_in": "src/v/config/configuration.cc", + "description": "Time that a segment can be kept locally without uploading it to the object storage, in seconds.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17179869183, + "minimum": -17179869184, + "name": "cloud_storage_segment_max_upload_interval_sec", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_segment_size_min": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Smallest acceptable segment size in the object storage. 
Default: `cloud_storage_segment_size_target`/2.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_segment_size_min", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_segment_size_target": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Desired segment size in the object storage. The default is set in the topic-level `segment.bytes` property.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_segment_size_target", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_segment_upload_timeout_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 90000, + "default_human_readable": "90 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Log segment upload timeout, in milliseconds.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_segment_upload_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_spillover_manifest_max_segments": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of segments in the spillover manifest that can be offloaded to the object storage. 
This setting serves as a threshold for triggering data offload based on the number of segments, rather than the total size of the manifest. It is designed for use in testing environments to control the offload behavior more granularly. In production settings, manage offloads based on the manifest size through `cloud_storage_spillover_manifest_size` for more predictable outcomes.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_spillover_manifest_max_segments", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_spillover_manifest_size": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 65536, + "defined_in": "src/v/config/configuration.cc", + "description": "The size of the manifest which can be offloaded to the cloud. If the size of the local manifest stored in Redpanda exceeds `cloud_storage_spillover_manifest_size` by two times, the spillover mechanism splits the manifest into two parts and uploads one part to object storage.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_spillover_manifest_size", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_throughput_limit_percent": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 50, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum throughput used by Tiered Storage per broker expressed as a percentage of the disk bandwidth. If the server has several disks, Redpanda uses the one that stores the Tiered Storage cache. Even if Tiered Storage is allowed to use the full bandwidth of the disk (100%), it won't necessarily use it in full. 
The actual usage depends on your workload and the state of the Tiered Storage cache. This setting is a safeguard that prevents Tiered Storage from using too many system resources: it is not a performance tuning knob.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_throughput_limit_percent", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_topic_purge_grace_period_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 30000, + "default_human_readable": "30 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Grace period during which the purger refuses to purge the topic.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_topic_purge_grace_period_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_trust_file": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Path to certificate that should be used to validate server certificate during TLS handshake.", + "gets_restored": false, + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_trust_file", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "cloud_storage_upload_ctrl_d_coeff": { + "c_type": "double", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 0.0, + "defined_in": "src/v/config/configuration.cc", + "description": "Derivative 
coefficient for upload PID controller.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_upload_ctrl_d_coeff", + "needs_restart": false, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "cloud_storage_upload_ctrl_max_shares": { + "c_type": "int16_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1000, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of I/O and CPU shares that archival upload can use.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 32767, + "minimum": -32768, + "name": "cloud_storage_upload_ctrl_max_shares", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_upload_ctrl_min_shares": { + "c_type": "int16_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 100, + "defined_in": "src/v/config/configuration.cc", + "description": "Minimum number of I/O and CPU shares that archival upload can use.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 32767, + "minimum": -32768, + "name": "cloud_storage_upload_ctrl_min_shares", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_upload_ctrl_p_coeff": { + "c_type": "double", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": -2.0, + "defined_in": "src/v/config/configuration.cc", + "description": "Proportional coefficient for upload PID controller.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_upload_ctrl_p_coeff", + "needs_restart": false, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + 
"cloud_storage_upload_ctrl_update_interval_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 60000, + "default_human_readable": "1 minute", + "defined_in": "src/v/config/configuration.cc", + "description": "The interval (in milliseconds) for updating the controller that manages the priority of Tiered Storage uploads. This property determines how frequently the system recalculates and adjusts the work scheduling for uploads to object storage.\n\nThis is an internal-only configuration and should be enabled only after consulting with Redpanda support.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_upload_ctrl_update_interval_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_upload_loop_initial_backoff_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 100, + "default_human_readable": "100 milliseconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Initial backoff interval when there is nothing to upload for a partition, in milliseconds.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_upload_loop_initial_backoff_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_upload_loop_max_backoff_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 10000, + "default_human_readable": "10 seconds", + 
"defined_in": "src/v/config/configuration.cc", + "description": "Maximum backoff interval when there is nothing to upload for a partition, in milliseconds.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_storage_upload_loop_max_backoff_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_storage_url_style": { + "c_type": "s3_url_style", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Configure the addressing style that controls how Redpanda formats bucket URLs for S3-compatible object storage.\n\nLeave this property unset (`null`) to use automatic configuration:\n\n* For AWS S3: Redpanda attempts `virtual_host` addressing first, then falls back to `path` style if needed\n* For MinIO: Redpanda automatically uses `path` style regardless of `MINIO_DOMAIN` configuration\n\nSet this property explicitly to override automatic configuration, ensure consistent behavior across deployments, or when using S3-compatible storage that requires a specific URL format.\n\nCAUTION: AWS requires virtual-hosted addressing for buckets created after September 30, 2020. If you use AWS S3 with buckets created after this date, use `virtual_host` addressing.\n\nNOTE: For MinIO deployments, Redpanda defaults to `path` style when this property is unset. To use `virtual_host` addressing with a configured `MINIO_DOMAIN`, set this property explicitly to `virtual_host`. 
For other S3-compatible storage backends, consult your provider's documentation to determine the required URL style.", + "enum": [ + "virtual_host", + "path" + ], + "example": "`virtual_host`", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_storage_url_style", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "cloud_topics_compaction_interval_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 30000, + "default_human_readable": "30 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "How often to trigger background compaction for cloud topics.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_topics_compaction_interval_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "version": "v26.1.1-rc2", + "visibility": "tunable" + }, + "cloud_topics_compaction_key_map_memory": { + "c_type": "uint64_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 134217728, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of bytes that may be used on each shard by cloud topics compaction key-offset maps.", + "example": "`134217728`", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 18446744073709551615, + "minimum": 0, + "name": "cloud_topics_compaction_key_map_memory", + "needs_restart": true, + "nullable": false, + "type": "integer", + "version": "v26.1.1-rc2", + "visibility": "tunable" + }, + "cloud_topics_compaction_max_object_size": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + 
"default": 134217728, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum size in bytes for L1 objects produced by cloud topics compaction.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_topics_compaction_max_object_size", + "needs_restart": false, + "nullable": false, + "type": "integer", + "version": "v26.1.1-rc2", + "visibility": "tunable" + }, + "cloud_topics_disable_level_zero_gc_for_tests": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Disables the level-zero garbage collector in cloud topics. This property exists to simplify testing and shouldn't be set in production.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_topics_disable_level_zero_gc_for_tests", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "version": "v26.1.1-rc2", + "visibility": "tunable" + }, + "cloud_topics_disable_metastore_flush_loop_for_tests": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Disables the metastore flush loop in cloud topics. 
The property exists to simplify testing of read replicas and shouldn't be set in production.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_topics_disable_metastore_flush_loop_for_tests", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "version": "v26.1.1-rc2", + "visibility": "tunable" + }, + "cloud_topics_disable_reconciliation_loop": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Disables the cloud topics reconciliation loop. Disabling the loop can negatively impact performance and stability of the cluster.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_topics_disable_reconciliation_loop", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "cloud_topics_enabled": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Enable Cloud Topics for the cluster. 
Cloud Topics are optimized for high-throughput, cost-sensitive workloads that can tolerate higher latencies compared to standard Kafka topics.", + "enterprise_constructor": "restricted_only", + "enterprise_restricted_value": [ + "true" + ], + "enterprise_value": [ + "true" + ], + "is_deprecated": false, + "is_enterprise": true, + "name": "cloud_topics_enabled", + "needs_restart": true, + "nullable": false, + "related_topics": [ + "xref:develop:manage-topics/cloud-topics.adoc[Cloud Topics]" + ], + "type": "boolean", + "visibility": "user" + }, + "cloud_topics_epoch_service_epoch_increment_interval": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 600000, + "default_human_readable": "10 minutes", + "defined_in": "src/v/config/configuration.cc", + "description": "The interval, in milliseconds, at which the cluster epoch is incremented.\n\nThe cluster epoch is a frozen point in time of the committed offset of the controller log, used to coordinate partition creation and track changes in Tiered Storage. This property controls how frequently the epoch is refreshed. 
More frequent updates provide finer-grained coordination but may increase overhead.\n\nDecrease this interval if you need more frequent epoch updates for faster coordination in Tiered Storage operations, or increase it to reduce coordination overhead in stable clusters.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_topics_epoch_service_epoch_increment_interval", + "needs_restart": false, + "nullable": false, + "type": "integer", + "version": "v25.3.3", + "visibility": "tunable" + }, + "cloud_topics_epoch_service_local_epoch_cache_duration": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 60000, + "default_human_readable": "1 minute", + "defined_in": "src/v/config/configuration.cc", + "description": "The duration, in milliseconds, for which a cluster-wide epoch is cached locally on each broker.\n\nCaching the epoch locally reduces the need for frequent coordination with the controller. This property controls how long each broker can use a cached epoch value before fetching the latest value.\n\nIncrease this value to reduce coordination overhead in clusters with stable workloads. 
Decrease it if you need brokers to react more quickly to epoch changes in Tiered Storage.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_topics_epoch_service_local_epoch_cache_duration", + "needs_restart": false, + "nullable": false, + "type": "integer", + "version": "v25.3.3", + "visibility": "tunable" + }, + "cloud_topics_fetch_debounce_enabled": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "Enables fetch debouncing in cloud topics. This mechanism guarantees that the broker fetches every object only once improving the performance and lowering the cost.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_topics_fetch_debounce_enabled", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "version": "v26.1.1-rc2", + "visibility": "user" + }, + "cloud_topics_l1_indexing_interval": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 4194304, + "defined_in": "src/v/config/configuration.cc", + "description": "The byte interval at which index entries are created within long term storage objects for cloud topics. Index entries are stored in the object metadata and enable efficient seeking by offset or timestamp within a partition. 
Lower values produce more index entries (better seek granularity) at the cost of a larger footer.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_topics_l1_indexing_interval", + "needs_restart": false, + "nullable": false, + "type": "integer", + "version": "v26.1.1-rc2", + "visibility": "tunable" + }, + "cloud_topics_long_term_flush_interval": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 600000, + "default_human_readable": "10 minutes", + "defined_in": "src/v/config/configuration.cc", + "description": "Time interval at which long term storage metadata is flushed to object storage.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_topics_long_term_flush_interval", + "needs_restart": false, + "nullable": false, + "type": "integer", + "version": "v26.1.1-rc2", + "visibility": "tunable" + }, + "cloud_topics_long_term_garbage_collection_interval": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 300000, + "default_human_readable": "5 minutes", + "defined_in": "src/v/config/configuration.cc", + "description": "Time interval after which data is garbage collected from long term storage.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_topics_long_term_garbage_collection_interval", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "cloud_topics_parallel_fetch_enabled": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + 
"defined_in": "src/v/config/configuration.cc", + "description": "Enable parallel fetching in cloud topics. This mechanism improves the throughput by allowing the broker to download data needed by the fetch request using multiple shards.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_topics_parallel_fetch_enabled", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "version": "v26.1.1-rc2", + "visibility": "user" + }, + "cloud_topics_preregistered_object_ttl": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 3600000, + "default_human_readable": "1 hour", + "defined_in": "src/v/config/configuration.cc", + "description": "Time-to-live for pre-registered L1 objects before they are expired.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_topics_preregistered_object_ttl", + "needs_restart": false, + "nullable": false, + "type": "integer", + "version": "v26.1.1-rc2", + "visibility": "tunable" + }, + "cloud_topics_produce_batching_size_threshold": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 4194304, + "defined_in": "src/v/config/configuration.cc", + "description": "The size limit for the object size in cloud topics. 
When the amount of data on a shard reaches this limit, an upload is triggered.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_topics_produce_batching_size_threshold", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "cloud_topics_produce_cardinality_threshold": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1000, + "defined_in": "src/v/config/configuration.cc", + "description": "Threshold for the object cardinality in cloud topics. When the number of partitions waiting for upload reaches this limit, an upload is triggered.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_topics_produce_cardinality_threshold", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "cloud_topics_produce_upload_interval": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 250, + "default_human_readable": "250 milliseconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Time interval after which data is uploaded to object storage for Cloud Topics. When this time threshold is reached, Redpanda triggers an upload of buffered data to the object storage backend (S3, GCS, or MinIO), regardless of whether the size or cardinality thresholds have been met.\n\nThis property works together with `cloud_topics_produce_batching_size_threshold` and `cloud_topics_produce_cardinality_threshold` to control when uploads occur. 
An upload is triggered when any of these three thresholds is reached.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_topics_produce_upload_interval", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:develop:manage-topics/cloud-topics.adoc[Cloud Topics]", + "xref:reference:properties/cluster-properties.adoc#cloud_topics_produce_batching_size_threshold[`cloud_topics_produce_batching_size_threshold`]", + "xref:reference:properties/cluster-properties.adoc#cloud_topics_produce_cardinality_threshold[`cloud_topics_produce_cardinality_threshold`]" + ], + "type": "integer", + "visibility": "user" + }, + "cloud_topics_reconciliation_interval": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "override", + "description": "Time interval at which Redpanda reconciles data between short-term local storage and long-term object storage for Cloud Topics. During this reconciliation process, Redpanda optimizes the storage layout of data in short-term storage to improve the cost and performance associated with accessing data. 
After the reconciliation process has moved data into long-term storage, the data in short-term storage is subject to removal by a garbage collection process.", + "is_deprecated": false, + "is_topic_property": false, + "name": "cloud_topics_reconciliation_interval", + "related_topics": [ + "xref:develop:manage-topics/cloud-topics.adoc[Cloud Topics]", + "xref:reference:properties/cluster-properties.adoc#cloud_topics_long_term_garbage_collection_interval[`cloud_topics_long_term_garbage_collection_interval`]" + ], + "type": "string", + "visibility": "user" + }, + "cloud_topics_reconciliation_max_interval": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 10000, + "default_human_readable": "10 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum reconciliation interval for adaptive scheduling.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_topics_reconciliation_max_interval", + "needs_restart": false, + "nullable": false, + "type": "integer", + "version": "v26.1.1-rc2", + "visibility": "tunable" + }, + "cloud_topics_reconciliation_max_object_size": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 83886080, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum size in bytes for L1 objects produced by the reconciler. 
With the default target fill ratio of 0.8, this gives an effective target object size of 64 MiB.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_topics_reconciliation_max_object_size", + "needs_restart": false, + "nullable": false, + "type": "integer", + "version": "v26.1.1-rc2", + "visibility": "tunable" + }, + "cloud_topics_reconciliation_min_interval": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 250, + "default_human_readable": "250 milliseconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Minimum reconciliation interval for adaptive scheduling. The reconciler will not run more frequently than this.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_topics_reconciliation_min_interval", + "needs_restart": false, + "nullable": false, + "type": "integer", + "version": "v26.1.1-rc2", + "visibility": "tunable" + }, + "cloud_topics_reconciliation_parallelism": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 8, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number, per shard, of concurrent objects built by reconciliation", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_topics_reconciliation_parallelism", + "needs_restart": true, + "nullable": false, + "type": "integer", + "version": "v26.1.1-rc2", + "visibility": "tunable" + }, + "cloud_topics_reconciliation_slowdown_blend": { + "c_type": "double", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 0.4, + "defined_in": "src/v/config/configuration.cc", + "description": 
"Blend factor for slowing down reconciliation (0.0 to 1.0). Higher values mean reconciliation lowers its frequency faster when trying to find a frequency that produces well-sized objects. Generally this should be lower than the speedup blend, because reconciliation has less opportunities to adapt its frequency when it runs less frequently.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_topics_reconciliation_slowdown_blend", + "needs_restart": false, + "nullable": false, + "type": "number", + "version": "v26.1.1-rc2", + "visibility": "tunable" + }, + "cloud_topics_reconciliation_speedup_blend": { + "c_type": "double", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 0.9, + "defined_in": "src/v/config/configuration.cc", + "description": "Blend factor for speeding up reconciliation (0.0 to 1.0). Higher values mean reconciliation increases its frequency faster when trying to find a frequency that produces well-sized objects.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_topics_reconciliation_speedup_blend", + "needs_restart": false, + "nullable": false, + "type": "number", + "version": "v26.1.1-rc2", + "visibility": "tunable" + }, + "cloud_topics_reconciliation_target_fill_ratio": { + "c_type": "double", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 0.8, + "defined_in": "src/v/config/configuration.cc", + "description": "Target fill ratio for L1 objects. 
The reconciler adapts its interval to produce objects at approximately this fill level (0.0 to 1.0).", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_topics_reconciliation_target_fill_ratio", + "needs_restart": false, + "nullable": false, + "type": "number", + "version": "v26.1.1-rc2", + "visibility": "tunable" + }, + "cloud_topics_short_term_gc_backoff_interval": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 60000, + "default_human_readable": "1 minute", + "defined_in": "src/v/config/configuration.cc", + "description": "The interval, in milliseconds, between invocations of the L0 garbage collection work loop when no progress is being made or errors are occurring.\n\nL0 (level-zero) objects are short-term data objects in Tiered Storage that are periodically garbage collected. When GC encounters errors or cannot make progress (for example, if there are no objects eligible for deletion), this backoff interval prevents excessive retries.\n\nIncrease this value to reduce system load when GC cannot make progress. 
Decrease it if you need faster retry attempts after transient errors.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_topics_short_term_gc_backoff_interval", + "needs_restart": false, + "nullable": false, + "type": "integer", + "version": "v25.3.3", + "visibility": "tunable" + }, + "cloud_topics_short_term_gc_interval": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 10000, + "default_human_readable": "10 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "The interval, in milliseconds, between invocations of the L0 (level-zero) garbage collection work loop when progress is being made.\n\nL0 objects are short-term data objects in Tiered Storage associated with global epochs. This property controls how frequently GC runs when it successfully deletes objects. Lower values increase GC frequency, which can help maintain lower object counts but may increase S3 API usage.\n\nDecrease this value if L0 object counts are growing too quickly and you need more aggressive garbage collection. 
Increase it to reduce S3 API costs in clusters with lower ingestion rates.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_topics_short_term_gc_interval", + "needs_restart": false, + "nullable": false, + "type": "integer", + "version": "v25.3.3", + "visibility": "tunable" + }, + "cloud_topics_short_term_gc_minimum_object_age": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 43200000, + "default_human_readable": "12 hours", + "defined_in": "src/v/config/configuration.cc", + "description": "The minimum age, in milliseconds, of an L0 (level-zero) object before it becomes eligible for garbage collection.\n\nThis grace period delays deletion of L0 objects even after they become eligible based on epoch. The delay provides a safety buffer that can support recovery in cases involving accidental deletion or other operational issues.\n\nIncrease this value to extend the retention window for L0 objects, providing more time for recovery from operational errors. Decrease it to free up object storage space more quickly, but with less protection against accidental deletion.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cloud_topics_short_term_gc_minimum_object_age", + "needs_restart": false, + "nullable": false, + "type": "integer", + "version": "v25.3.3", + "visibility": "tunable" + }, + "cloud_topics_upload_part_size": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 16777216, + "defined_in": "src/v/config/configuration.cc", + "description": "The part size in bytes used for multipart uploads. 
The minimum of 5 MiB is the smallest non-terminal part size allowed by cloud object storage providers.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cloud_topics_upload_part_size", + "needs_restart": true, + "nullable": false, + "type": "integer", + "version": "v26.1.1-rc2", + "visibility": "tunable" + }, + "cluster_id": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Cluster identifier.", + "gets_restored": false, + "is_deprecated": false, + "is_enterprise": false, + "name": "cluster_id", + "needs_restart": false, + "nullable": true, + "type": "string" + }, + "compacted_log_segment_size": { + "c_type": "uint64_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 268435456, + "defined_in": "src/v/config/configuration.cc", + "description": "Size (in bytes) for each compacted log segment.", + "example": "`268435456`", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 18446744073709551615, + "minimum": 0, + "name": "compacted_log_segment_size", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "compaction.strategy": { + "acceptable_values": "", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "retention-compaction", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "", + "config_scope": "topic", + "corresponding_cluster_property": null, + "default": null, + "defined_in": "src/v/kafka/server/handlers/topics/types.h", + "description": "Specifies the strategy used to determine which records to remove during log compaction. 
The compaction strategy controls how Redpanda identifies and removes duplicate records while preserving the latest value for each key.", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "compaction.strategy", + "needs_restart": false, + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#compaction_strategy[`compaction_strategy`]" + ], + "type": "string" + }, + "compaction_ctrl_backlog_size": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Target backlog size for compaction controller. If not set the max backlog size is configured to 80% of total disk space available.", + "is_deprecated": false, + "is_enterprise": false, + "name": "compaction_ctrl_backlog_size", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "compaction_ctrl_d_coeff": { + "c_type": "double", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 0.2, + "defined_in": "src/v/config/configuration.cc", + "description": "Derivative coefficient for compaction PID controller.", + "is_deprecated": false, + "is_enterprise": false, + "name": "compaction_ctrl_d_coeff", + "needs_restart": false, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "compaction_ctrl_i_coeff": { + "c_type": "double", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 0.0, + "defined_in": "src/v/config/configuration.cc", + "description": "Integral coefficient for compaction PID controller.", + "is_deprecated": false, + "is_enterprise": false, + "name": "compaction_ctrl_i_coeff", + "needs_restart": false, 
+ "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "compaction_ctrl_max_shares": { + "c_type": "int16_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1000, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of I/O and CPU shares that compaction process can use.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 32767, + "minimum": -32768, + "name": "compaction_ctrl_max_shares", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "compaction_ctrl_min_shares": { + "c_type": "int16_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 10, + "defined_in": "src/v/config/configuration.cc", + "description": "Minimum number of I/O and CPU shares that compaction process can use.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 32767, + "minimum": -32768, + "name": "compaction_ctrl_min_shares", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "compaction_ctrl_p_coeff": { + "c_type": "double", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": -12.5, + "defined_in": "src/v/config/configuration.cc", + "description": "Proportional coefficient for compaction PID controller. 
This must be negative, because the compaction backlog should decrease when the number of compaction shares increases.", + "is_deprecated": false, + "is_enterprise": false, + "name": "compaction_ctrl_p_coeff", + "needs_restart": false, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "compaction_ctrl_update_interval_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 30000, + "default_human_readable": "30 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "The interval (in milliseconds) for updating the controller responsible for compaction tasks. The controller uses this interval to decide how to prioritize background compaction work, which is essential for maintaining efficient storage use.\n\nThis is an internal-only configuration and should be enabled only after consulting with Redpanda support.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "compaction_ctrl_update_interval_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "compression.type": { + "acceptable_values": "[`producer`, `none`, `gzip`, `snappy`, `lz4`, `zstd`]", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "segment-message", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "cluster-properties.adoc", + "config_scope": "topic", + "corresponding_cluster_property": "log_compression_type", + "default": "producer", + "defined_in": "src/v/kafka/protocol/topic_properties.h", + "description": "Redpanda ignores this property and always uses producer compression semantics. If producers send compressed data, Redpanda stores and serves it as-is. 
If producers send uncompressed data, Redpanda stores it uncompressed.\n\nThis property exists for Apache Kafka compatibility. Configure compression in your producers instead of using this topic property.\n\nCompression reduces message size and improves throughput, but increases CPU utilization. Enable producer batching to increase compression efficiency.\n\nWhen set, this property overrides the cluster property xref:./cluster-properties.adoc#log_compression_type[`log_compression_type`] for the topic.", + "enum": [ + "none", + "gzip", + "snappy", + "lz4", + "zstd", + "count", + "producer" + ], + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "compression.type", + "needs_restart": false, + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#log_compression_type[`log_compression_type`]", + "xref:develop:produce-data/configure-producers.adoc#message-batching[Message batching]", + "xref:develop:produce-data/configure-producers.adoc#commonly-used-producer-configuration-options[Common producer configuration options]" + ], + "type": "string" + }, + "confluent.key.schema.validation": { + "acceptable_values": "", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "schema-registry", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "", + "config_scope": "topic", + "corresponding_cluster_property": null, + "default": null, + "defined_in": "src/v/kafka/server/handlers/topics/types.h", + "description": "Enable validation of the schema ID for keys on a record. This is a compatibility alias for `redpanda.key.schema.id.validation`. 
When enabled, Redpanda validates that the schema ID encoded in the record's key is registered in the Schema Registry according to the configured subject name strategy.", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "confluent.key.schema.validation", + "needs_restart": false, + "related_topics": [ + "xref:manage:schema-reg/schema-id-validation.adoc[Server-Side Schema ID Validation]" + ], + "type": "string" + }, + "confluent.key.subject.name.strategy": { + "acceptable_values": "", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "schema-registry", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "", + "config_scope": "topic", + "corresponding_cluster_property": null, + "default": null, + "defined_in": "src/v/kafka/server/handlers/topics/types.h", + "description": "The subject name strategy for keys when `confluent.key.schema.validation` is enabled. 
This is a compatibility alias for `redpanda.key.subject.name.strategy` that determines how the topic and schema are mapped to a subject name in the Schema Registry.", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "confluent.key.subject.name.strategy", + "needs_restart": false, + "related_topics": [ + "xref:manage:schema-reg/schema-id-validation.adoc[Server-Side Schema ID Validation]" + ], + "type": "string" + }, + "confluent.value.schema.validation": { + "acceptable_values": "", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "schema-registry", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "", + "config_scope": "topic", + "corresponding_cluster_property": null, + "default": null, + "defined_in": "src/v/kafka/server/handlers/topics/types.h", + "description": "Enable validation of the schema ID for values on a record. This is a compatibility alias for `redpanda.value.schema.id.validation`. 
When enabled, Redpanda validates that the schema ID encoded in the record's value is registered in the Schema Registry according to the configured subject name strategy.", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "confluent.value.schema.validation", + "needs_restart": false, + "related_topics": [ + "xref:manage:schema-reg/schema-id-validation.adoc[Server-Side Schema ID Validation]" + ], + "type": "string" + }, + "confluent.value.subject.name.strategy": { + "acceptable_values": "", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "schema-registry", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "", + "config_scope": "topic", + "corresponding_cluster_property": null, + "default": null, + "defined_in": "src/v/kafka/server/handlers/topics/types.h", + "description": "The subject name strategy for values when `confluent.value.schema.validation` is enabled. This is a compatibility alias for `redpanda.value.subject.name.strategy`. This determines how the topic and schema are mapped to a subject name in the Schema Registry.", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "confluent.value.subject.name.strategy", + "needs_restart": false, + "related_topics": [ + "xref:manage:schema-reg/schema-id-validation.adoc[Server-Side Schema ID Validation]" + ], + "type": "string" + }, + "consumer_group_lag_collection_interval": { + "c_type": "std::chrono::seconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 60, + "default_human_readable": "1 minute", + "defined_in": "src/v/config/configuration.cc", + "description": "How often Redpanda runs the collection loop when `enable_consumer_group_metrics` is set to `consumer_lag`. 
Updates will not be more frequent than `health_monitor_max_metadata_age`.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17179869183, + "minimum": -17179869184, + "name": "consumer_group_lag_collection_interval", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "consumer_group_lag_collection_interval_sec": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "override", + "description": "How often to run the collection loop when `enable_consumer_group_metrics` contains `consumer_lag`.\n\nReducing the value of `consumer_group_lag_collection_interval_sec` increases the metric collection frequency, which may raise resource utilization. In most environments, this impact is minimal, but it's best practice to monitor broker resource usage in high-scale settings.", + "is_deprecated": false, + "is_topic_property": false, + "name": "consumer_group_lag_collection_interval_sec", + "type": "string", + "visibility": "user" + }, + "consumer_heartbeat_interval": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": 500, + "default_human_readable": "500 milliseconds", + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Interval (in milliseconds) for consumer heartbeats.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "consumer_heartbeat_interval", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "consumer_heartbeat_interval_ms": { + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "override", + "description": 
"Interval (in milliseconds) for consumer heartbeats.", + "is_deprecated": false, + "is_topic_property": false, + "name": "consumer_heartbeat_interval_ms", + "type": "string", + "visibility": "user" + }, + "consumer_instance_timeout": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "5 minutes", + "defined_in": "src/v/pandaproxy/rest/configuration.cc", + "description": "How long to wait for an idle consumer before removing it. A consumer is considered idle when it's not making requests or heartbeats.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "consumer_instance_timeout", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "consumer_instance_timeout_ms": { + "category": "pandaproxy", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "override", + "description": "How long to wait for an idle consumer before removing it. A consumer is considered idle when it's not making requests or heartbeats.", + "is_deprecated": false, + "is_topic_property": false, + "name": "consumer_instance_timeout_ms", + "type": "string", + "visibility": "user" + }, + "consumer_offsets_topic_batch_cache_enabled": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "This property lets you enable the batch cache for the consumer offsets topic. By default, the cache for consumer offsets topic is disabled. Changing this property is not recommended in production systems, as it may affect performance. 
The change is applied only after the restart.", + "is_deprecated": false, + "is_enterprise": false, + "name": "consumer_offsets_topic_batch_cache_enabled", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "consumer_rebalance_timeout": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": 2000, + "default_human_readable": "2 seconds", + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Timeout (in milliseconds) for consumer rebalance.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "consumer_rebalance_timeout", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "consumer_rebalance_timeout_ms": { + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "override", + "description": "Timeout (in milliseconds) for consumer rebalance.", + "is_deprecated": false, + "is_topic_property": false, + "name": "consumer_rebalance_timeout_ms", + "type": "string", + "visibility": "user" + }, + "consumer_request_max_bytes": { + "c_type": "int32_t", + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": 1048576, + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Maximum bytes to fetch per request.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 2147483647, + "minimum": -2147483648, + "name": "consumer_request_max_bytes", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "consumer_request_min_bytes": { + "c_type": "int32_t", + "category": 
"pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": 1, + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Minimum bytes to fetch per request.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 2147483647, + "minimum": -2147483648, + "name": "consumer_request_min_bytes", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "consumer_request_timeout": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": 100, + "default_human_readable": "100 milliseconds", + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Interval (in milliseconds) for consumer request timeout.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "consumer_request_timeout", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "consumer_request_timeout_ms": { + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "override", + "description": "Interval (in milliseconds) for consumer request timeout.", + "is_deprecated": false, + "is_topic_property": false, + "name": "consumer_request_timeout_ms", + "type": "string", + "visibility": "user" + }, + "consumer_session_timeout": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": 10000, + "default_human_readable": "10 seconds", + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Timeout (in milliseconds) for consumer 
session.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "consumer_session_timeout", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "consumer_session_timeout_ms": { + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "override", + "description": "Timeout (in milliseconds) for consumer session.", + "is_deprecated": false, + "is_topic_property": false, + "name": "consumer_session_timeout_ms", + "type": "string", + "visibility": "user" + }, + "controller_backend_housekeeping_interval_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1000, + "default_human_readable": "1 second", + "defined_in": "src/v/config/configuration.cc", + "description": "Interval between iterations of controller backend housekeeping loop.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "controller_backend_housekeeping_interval_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "controller_backend_reconciliation_concurrency": { + "c_type": "uint32_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1024, + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum number of cluster updates the controller can process at the same time. 
Higher values speed up cluster changes but use more resources.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 4294967295, + "minimum": 0, + "name": "controller_backend_reconciliation_concurrency", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "controller_log_accummulation_rps_capacity_acls_and_users_operations": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum capacity of rate limit accumulation in controller ACLs and users operations limit.", + "is_deprecated": false, + "is_enterprise": false, + "name": "controller_log_accummulation_rps_capacity_acls_and_users_operations", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "controller_log_accummulation_rps_capacity_configuration_operations": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum capacity of rate limit accumulation in controller configuration operations limit.", + "is_deprecated": false, + "is_enterprise": false, + "name": "controller_log_accummulation_rps_capacity_configuration_operations", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "controller_log_accummulation_rps_capacity_move_operations": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum capacity of rate limit accumulation in controller move operations limit.", 
+ "is_deprecated": false, + "is_enterprise": false, + "name": "controller_log_accummulation_rps_capacity_move_operations", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "controller_log_accummulation_rps_capacity_node_management_operations": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum capacity of rate limit accumulation in controller node management operations limit.", + "is_deprecated": false, + "is_enterprise": false, + "name": "controller_log_accummulation_rps_capacity_node_management_operations", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "controller_log_accummulation_rps_capacity_topic_operations": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum capacity of rate limit accumulation in controller topic operations limit.", + "is_deprecated": false, + "is_enterprise": false, + "name": "controller_log_accummulation_rps_capacity_topic_operations", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "controller_snapshot_max_age_sec": { + "c_type": "std::chrono::seconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 60, + "default_human_readable": "1 minute", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum amount of time before Redpanda attempts to create a controller snapshot after a new controller command appears.", + "is_deprecated": false, + "is_enterprise": 
false, + "maximum": 17179869183, + "minimum": -17179869184, + "name": "controller_snapshot_max_age_sec", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "coproc_supervisor_server": { + "c_type": "deprecated_property", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "defined_in": "src/v/config/node_config.cc", + "description": null, + "is_deprecated": true, + "is_enterprise": false, + "name": "coproc_supervisor_server", + "needs_restart": true, + "nullable": false, + "type": "object" + }, + "core_balancing_continuous": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "If set to `true`, move partitions between cores in runtime to maintain balanced partition distribution.", + "enterprise_constructor": "restricted_with_sanctioned", + "enterprise_restricted_value": [ + "true" + ], + "enterprise_sanctioned_value": [ + "false" + ], + "enterprise_value": [ + "true" + ], + "is_deprecated": false, + "is_enterprise": true, + "name": "core_balancing_continuous", + "needs_restart": false, + "nullable": false, + "related_topics": [], + "type": "boolean", + "visibility": "user" + }, + "core_balancing_debounce_timeout": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 10000, + "default_human_readable": "10 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Interval, in milliseconds, between trigger and invocation of core balancing.\n\n*Unit*: milliseconds", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": 
"core_balancing_debounce_timeout", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "core_balancing_on_core_count_change": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "If set to `true`, and if after a restart the number of cores changes, Redpanda will move partitions between cores to maintain balanced partition distribution.", + "is_deprecated": false, + "is_enterprise": false, + "name": "core_balancing_on_core_count_change", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "cpu_profiler_enabled": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Enables CPU profiling for Redpanda.", + "is_deprecated": false, + "is_enterprise": false, + "name": "cpu_profiler_enabled", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "cpu_profiler_sample_period_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 100, + "default_human_readable": "100 milliseconds", + "defined_in": "src/v/config/configuration.cc", + "description": "The sample period for the CPU profiler.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "cpu_profiler_sample_period_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "crash_loop_limit": { + "c_type": "uint32_t", + "category": "redpanda", + 
"cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": 5, + "defined_in": "src/v/config/node_config.cc", + "description": "A limit on the number of consecutive times a broker can crash within one hour before its crash-tracking logic is reset. This limit prevents a broker from getting stuck in an infinite cycle of crashes.\n\nIf `null`, the property is disabled and no limit is applied.\n\nThe crash-tracking logic is reset (to zero consecutive crashes) by any of the following conditions:\n\n* The broker shuts down cleanly.\n* One hour passes since the last crash.\n* The `redpanda.yaml` broker configuration file is updated.\n* The `startup_log` file in the broker's <> broker property is manually deleted.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 4294967295, + "minimum": 0, + "name": "crash_loop_limit", + "needs_restart": true, + "nullable": true, + "type": "integer", + "visibility": "user" + }, + "crash_loop_sleep_sec": { + "c_type": "std::chrono::seconds", + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "src/v/config/node_config.cc", + "description": "The amount of time the broker sleeps before terminating when the limit on consecutive broker crashes (<>) is reached. 
This property provides a debugging window for you to access the broker before it terminates, and is particularly useful in Kubernetes environments.\n\nIf `null`, the property is disabled, and the broker terminates immediately after reaching the crash loop limit.\n\nFor information about how to reset the crash loop limit, see the <> broker property.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17179869183, + "minimum": -17179869184, + "name": "crash_loop_sleep_sec", + "needs_restart": true, + "nullable": true, + "type": "integer", + "version": "v24.3.4", + "visibility": "user" + }, + "dashboard_dir": { + "c_type": "deprecated_property", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "defined_in": "src/v/config/node_config.cc", + "description": null, + "is_deprecated": true, + "is_enterprise": false, + "name": "dashboard_dir", + "needs_restart": true, + "nullable": false, + "type": "object" + }, + "data_directory": { + "c_type": "data_directory_path", + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "defined_in": "src/v/config/node_config.cc", + "description": "Path to the directory for storing Redpanda's streaming data files.", + "is_deprecated": false, + "is_enterprise": false, + "name": "data_directory", + "needs_restart": true, + "nullable": false, + "type": "string", + "visibility": "user" + }, + "data_transforms_binary_max_size": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": true, + "cloud_supported": true, + "config_scope": "cluster", + "default": 10485760, + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum size for a deployable WebAssembly binary that the broker can store.", + "is_deprecated": false, + "is_enterprise": false, + "name": 
"data_transforms_binary_max_size", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "data_transforms_commit_interval_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 3000, + "default_human_readable": "3 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "The commit interval at which data transforms progress.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "data_transforms_commit_interval_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "data_transforms_enabled": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Enables WebAssembly-powered data transforms directly in the broker. When `data_transforms_enabled` is set to `true`, Redpanda reserves memory for data transforms, even if no transform functions are currently deployed. 
This memory reservation ensures that adequate resources are available for transform functions when they are needed, but it also means that some memory is allocated regardless of usage.", + "is_deprecated": false, + "is_enterprise": false, + "name": "data_transforms_enabled", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "data_transforms_logging_buffer_capacity_bytes": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 512000, + "defined_in": "src/v/config/configuration.cc", + "description": "Buffer capacity for transform logs, per shard. Buffer occupancy is calculated as the total size of buffered log messages; that is, logs emitted but not yet produced.", + "is_deprecated": false, + "is_enterprise": false, + "name": "data_transforms_logging_buffer_capacity_bytes", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "data_transforms_logging_flush_interval_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 500, + "default_human_readable": "500 milliseconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Flush interval for transform logs. 
When a timer expires, pending logs are collected and published to the `transform_logs` topic.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "data_transforms_logging_flush_interval_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "data_transforms_logging_line_max_bytes": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": 1024, + "defined_in": "src/v/config/configuration.cc", + "description": "Transform log lines truncate to this length. Truncation occurs after any character escaping.", + "is_deprecated": false, + "is_enterprise": false, + "name": "data_transforms_logging_line_max_bytes", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "data_transforms_per_core_memory_reservation": { + "aliases": [ + "wasm_per_core_memory_reservation" + ], + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": true, + "cloud_supported": true, + "config_scope": "cluster", + "default": 20971520, + "defined_in": "src/v/config/configuration.cc", + "description": "The amount of memory to reserve per core for data transform (Wasm) virtual machines. Memory is reserved on boot. 
The maximum number of functions that can be deployed to a cluster is equal to `data_transforms_per_core_memory_reservation` / `data_transforms_per_function_memory_limit`.", + "example": "`26214400`", + "is_deprecated": false, + "is_enterprise": false, + "name": "data_transforms_per_core_memory_reservation", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "data_transforms_per_function_memory_limit": { + "aliases": [ + "wasm_per_function_memory_limit" + ], + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": true, + "cloud_supported": true, + "config_scope": "cluster", + "default": 2097152, + "defined_in": "src/v/config/configuration.cc", + "description": "The amount of memory to give an instance of a data transform (Wasm) virtual machine. The maximum number of functions that can be deployed to a cluster is equal to `data_transforms_per_core_memory_reservation` / `data_transforms_per_function_memory_limit`.", + "example": "`5242880`", + "is_deprecated": false, + "is_enterprise": false, + "name": "data_transforms_per_function_memory_limit", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "data_transforms_read_buffer_memory_percentage": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 45, + "defined_in": "src/v/config/configuration.cc", + "description": "include::reference:partial$internal-use-property.adoc[]\n\nThe percentage of available memory in the transform subsystem to use for read buffers.", + "example": "`25`", + "is_deprecated": false, + "is_enterprise": false, + "name": "data_transforms_read_buffer_memory_percentage", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "data_transforms_runtime_limit_ms": { + "c_type": "std::chrono::milliseconds", 
+ "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 3000, + "default_human_readable": "3 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum amount of runtime to start up a data transform, and the time it takes for a single record to be transformed.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "data_transforms_runtime_limit_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "data_transforms_write_buffer_memory_percentage": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 45, + "defined_in": "src/v/config/configuration.cc", + "description": "include::reference:partial$internal-use-property.adoc[]\n\nThe percentage of available memory in the transform subsystem to use for write buffers.", + "example": "`25`", + "is_deprecated": false, + "is_enterprise": false, + "name": "data_transforms_write_buffer_memory_percentage", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "datalake_coordinator_snapshot_max_delay_secs": { + "c_type": "std::chrono::seconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "15 minutes", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum amount of time the coordinator waits to snapshot after a command appears in the log.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17179869183, + "minimum": -17179869184, + "name": "datalake_coordinator_snapshot_max_delay_secs", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + 
}, + "datalake_disk_space_monitor_enable": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "Option to explicitly disable enforcement of datalake disk space usage.", + "is_deprecated": false, + "is_enterprise": false, + "name": "datalake_disk_space_monitor_enable", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "datalake_disk_usage_overage_coeff": { + "c_type": "double", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 2.0, + "defined_in": "src/v/config/configuration.cc", + "description": "The datalake disk usage monitor reclaims the overage multiplied by this coefficient to compensate for data that is written during the idle period between control loop invocations.", + "example": "`1.8`", + "is_deprecated": false, + "is_enterprise": false, + "name": "datalake_disk_usage_overage_coeff", + "needs_restart": false, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "datalake_scheduler_block_size_bytes": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 4194304, + "defined_in": "src/v/config/configuration.cc", + "description": "Size, in bytes, of each memory block reserved for record translation, as tracked by the datalake scheduler.", + "is_deprecated": false, + "is_enterprise": false, + "name": "datalake_scheduler_block_size_bytes", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "datalake_scheduler_disk_reservation_block_size": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + 
"cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 52428800, + "defined_in": "src/v/config/configuration.cc", + "description": "The size, in bytes, of the block of disk reservation that the datalake manager will assign to each datalake scheduler when it runs out of local reservation.", + "example": "`10000000`", + "is_deprecated": false, + "is_enterprise": false, + "name": "datalake_scheduler_disk_reservation_block_size", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "datalake_scheduler_max_concurrent_translations": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 4, + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum number of translations that the datalake scheduler will allow to run at a given time. If a translation is requested, but the number of running translations exceeds this value, the request will be put to sleep temporarily, polling until capacity becomes available.", + "is_deprecated": false, + "is_enterprise": false, + "name": "datalake_scheduler_max_concurrent_translations", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "datalake_scheduler_time_slice_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 30000, + "default_human_readable": "30 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Time, in milliseconds, for a datalake translation as scheduled by the datalake scheduler. 
After a translation is scheduled, it will run until either the time specified has elapsed or all pending records on its source partition have been translated.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "datalake_scheduler_time_slice_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "datalake_scratch_space_size_bytes": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 5368709120, + "defined_in": "src/v/config/configuration.cc", + "description": "Size, in bytes, of the amount of scratch space datalake should use.", + "is_deprecated": false, + "is_enterprise": false, + "name": "datalake_scratch_space_size_bytes", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "datalake_scratch_space_soft_limit_size_percent": { + "c_type": "double", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 80.0, + "defined_in": "src/v/config/configuration.cc", + "description": "Size of the scratch space datalake soft limit expressed as a percentage of the `datalake_scratch_space_size_bytes` configuration value.", + "example": "`80.0`", + "is_deprecated": false, + "is_enterprise": false, + "name": "datalake_scratch_space_soft_limit_size_percent", + "needs_restart": false, + "nullable": false, + "type": "number", + "visibility": "user" + }, + "datalake_translator_flush_bytes": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 33554432, + "defined_in": "src/v/config/configuration.cc", + "description": "Size, in bytes, of the amount of per translator data that may 
be flushed to disk before the translator will upload and remove its current on disk data.", + "is_deprecated": false, + "is_enterprise": false, + "name": "datalake_translator_flush_bytes", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "debug_bundle_auto_removal_seconds": { + "c_type": "std::chrono::seconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "If set, how long debug bundles are kept in the debug bundle storage directory after they are created. If not set, debug bundles are kept indefinitely.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17179869183, + "minimum": -17179869184, + "name": "debug_bundle_auto_removal_seconds", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "user" + }, + "debug_bundle_storage_dir": { + "c_type": "std::filesystem::path", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Path to the debug bundle storage directory. Note: Changing this path does not clean up existing debug bundles. 
If not set, the debug bundle is stored in the Redpanda data directory specified in the redpanda.yaml broker configuration file.", + "is_deprecated": false, + "is_enterprise": false, + "name": "debug_bundle_storage_dir", + "needs_restart": false, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "debug_load_slice_warning_depth": { + "c_type": "uint32_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "The recursion depth after which debug logging is enabled automatically for the log reader.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 4294967295, + "minimum": 0, + "name": "debug_load_slice_warning_depth", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "default_leaders_preference": { + "c_type": "config::leaders_preference", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "none", + "defined_in": "src/v/config/configuration.cc", + "description": "Default settings for preferred location of topic partition leaders. It can be either \"none\" (no preference), or \"racks:,,...\" (prefer brokers with rack ID from the list).\n\nThe list can contain one or more rack IDs. 
If you specify multiple IDs, Redpanda tries to distribute the partition leader locations equally across brokers in these racks.\n\nIf config_ref:enable_rack_awareness,true,properties/cluster-properties[] is set to `false`, leader pinning is disabled across the cluster.", + "enterprise_constructor": "simple", + "is_deprecated": false, + "is_enterprise": true, + "name": "default_leaders_preference", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:develop:produce-data/leader-pinning.adoc[Leader pinning]" + ], + "type": "object", + "visibility": "user" + }, + "default_num_windows": { + "c_type": "int16_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 10, + "defined_in": "src/v/config/configuration.cc", + "description": "Default number of quota tracking windows.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 32767, + "minimum": -32768, + "name": "default_num_windows", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "default_redpanda_storage_mode": { + "c_type": "model::redpanda_storage_mode", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "unset", + "defined_in": "src/v/config/configuration.cc", + "description": "Default storage mode for newly-created topics. 
Determines how topic data is stored: `local` for broker-local storage only, `tiered` for both local and object storage, `cloud` for object-only storage using the Cloud Topics architecture, or `unset` to use legacy remote.read/write configs for backwards compatibility.", + "enum": [ + "local", + "tiered", + "cloud", + "unset" + ], + "example": "`tiered`", + "is_deprecated": false, + "is_enterprise": false, + "name": "default_redpanda_storage_mode", + "needs_restart": false, + "nullable": false, + "type": "string", + "version": "v26.1.1-rc2", + "visibility": "user" + }, + "default_topic_partitions": { + "c_type": "int32_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1, + "defined_in": "src/v/config/configuration.cc", + "description": "Default number of partitions per topic.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 2147483647, + "minimum": -2147483648, + "name": "default_topic_partitions", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "default_topic_replication": { + "c_type": "int16_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1, + "defined_in": "src/v/config/configuration.cc", + "description": "Default replication factor for new topics.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 32767, + "minimum": -32768, + "name": "default_topic_replication", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "default_window_sec": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1000 milliseconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Default 
quota tracking window size in milliseconds.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "default_window_sec", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "delete.retention.ms": { + "acceptable_values": "milliseconds (integer)", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "retention-compaction", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "cluster-properties.adoc", + "config_scope": "topic", + "corresponding_cluster_property": "tombstone_retention_ms", + "default": null, + "defined_in": "src/v/kafka/protocol/topic_properties.h", + "description": "The retention time for tombstone records in a compacted topic. Redpanda removes tombstone records after the retention limit is exceeded.\n\nIf you have enabled Tiered Storage and set <> or <> for the topic, you cannot enable tombstone removal.\n\nIf both `delete.retention.ms` and the cluster property config_ref:tombstone_retention_ms,true,properties/cluster-properties[] are set, `delete.retention.ms` overrides the cluster level tombstone retention for an individual topic.\n\nThis property supports three states:\n\n* Positive value: Sets the milliseconds to retain tombstone records before removal.\n* 0: Tombstone records are immediately eligible for removal.\n* Negative value: Disables tombstone removal entirely for this topic.", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "delete.retention.ms", + "needs_restart": false, + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#tombstone_retention_ms[`tombstone_retention_ms`]", + "xref:manage:cluster-maintenance/compaction-settings.adoc#tombstone-record-removal[Tombstone record removal]" + ], + "type": "integer" + }, + 
"delete_topic_enable": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "Enable or disable topic deletion via the Kafka DeleteTopics API. When set to false, all topic deletion requests are rejected with error code 73 (TOPIC_DELETION_DISABLED). This is a cluster-wide safety setting that cannot be overridden by superusers. Topics in kafka_nodelete_topics are always protected regardless of this setting.", + "enterprise_constructor": "restricted_only", + "enterprise_restricted_value": [ + "false" + ], + "enterprise_value": [ + "false" + ], + "is_deprecated": false, + "is_enterprise": true, + "name": "delete_topic_enable", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "version": "v26.1.1-rc2", + "visibility": "user" + }, + "developer_mode": { + "c_type": "bool", + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": false, + "defined_in": "src/v/config/node_config.cc", + "description": "CAUTION: Enabling `developer_mode` isn't recommended for production use.\n\nEnable developer mode, which skips most of the checks performed at startup.", + "is_deprecated": false, + "is_enterprise": false, + "name": "developer_mode", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "disable_batch_cache": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Disable batch cache in log manager.", + "is_deprecated": false, + "is_enterprise": false, + "name": "disable_batch_cache", + "needs_restart": true, + 
"nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "disable_cluster_recovery_loop_for_tests": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "include::reference:partial$internal-use-property.adoc[]\n\nDisables the cluster recovery loop. This property is used to simplify testing and should not be set in production.", + "is_deprecated": false, + "is_enterprise": false, + "name": "disable_cluster_recovery_loop_for_tests", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "disable_metrics": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Disable registering the metrics exposed on the internal `/metrics` endpoint.", + "is_deprecated": false, + "is_enterprise": false, + "name": "disable_metrics", + "needs_restart": true, + "nullable": false, + "type": "boolean" + }, + "disable_public_metrics": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Disable registering the metrics exposed on the `/public_metrics` endpoint.", + "is_deprecated": false, + "is_enterprise": false, + "name": "disable_public_metrics", + "needs_restart": true, + "nullable": false, + "type": "boolean" + }, + "disk_reservation_percent": { + "c_type": "double", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 25.0, + "defined_in": 
"src/v/config/configuration.cc", + "description": "The percentage of total disk capacity that Redpanda will avoid using. This applies both when cloud cache and log data share a disk, as well as when cloud cache uses a dedicated disk.\n\nIt is recommended not to run disks near capacity, to avoid blocking I/O due to low disk space as well as performance issues associated with SSD garbage collection.", + "example": "`25.0`", + "is_deprecated": false, + "is_enterprise": false, + "name": "disk_reservation_percent", + "needs_restart": false, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "election_timeout_ms": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "override", + "description": "Raft election timeout expressed in milliseconds.", + "is_deprecated": false, + "is_topic_property": false, + "name": "election_timeout_ms", + "type": "string", + "visibility": "user" + }, + "emergency_disable_data_transforms": { + "c_type": "bool", + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": false, + "defined_in": "src/v/config/node_config.cc", + "description": "Override the cluster property xref:reference:properties/cluster-properties.adoc#data_transforms_enabled[`data_transforms_enabled`] and disable Wasm-powered data transforms. 
This is an emergency shutoff button.", + "is_deprecated": false, + "is_enterprise": false, + "name": "emergency_disable_data_transforms", + "needs_restart": true, + "nullable": false, + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#data_transforms_enabled[`data_transforms_enabled`]" + ], + "type": "boolean", + "visibility": "user" + }, + "empty_seed_starts_cluster": { + "c_type": "bool", + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": true, + "defined_in": "src/v/config/node_config.cc", + "description": "Controls how a new cluster is formed. All brokers in a cluster must have the same value.\n\n<> to form a cluster.\n\nTIP: For backward compatibility, `true` is the default. Redpanda recommends using `false` in production environments to prevent accidental cluster formation.", + "is_deprecated": false, + "is_enterprise": false, + "name": "empty_seed_starts_cluster", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "enable_auto_rebalance_on_node_add": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Enable automatic partition rebalancing when new nodes are added", + "is_deprecated": true, + "is_enterprise": false, + "name": "enable_auto_rebalance_on_node_add", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "deprecated" + }, + "enable_central_config": { + "c_type": "deprecated_property", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "defined_in": "src/v/config/node_config.cc", + "description": null, + "is_deprecated": true, + "is_enterprise": 
false, + "name": "enable_central_config", + "needs_restart": true, + "nullable": false, + "type": "object" + }, + "enable_cluster_metadata_upload_loop": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "Enables cluster metadata uploads. Required for xref:manage:whole-cluster-restore.adoc[whole cluster restore].", + "is_deprecated": false, + "is_enterprise": false, + "name": "enable_cluster_metadata_upload_loop", + "needs_restart": true, + "nullable": false, + "related_topics": [ + "xref:manage:whole-cluster-restore.adoc[whole cluster restore]" + ], + "type": "boolean", + "visibility": "tunable" + }, + "enable_consumer_group_metrics": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": [ + "group", + "partition" + ], + "defined_in": "src/v/config/configuration.cc", + "description": "List of enabled consumer group metrics. 
Accepted values include:\n\n- `group`: Enables the xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_consumers[`redpanda_kafka_consumer_group_consumers`] and xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_topics[`redpanda_kafka_consumer_group_topics`] metrics.\n- `partition`: Enables the xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_committed_offset[`redpanda_kafka_consumer_group_committed_offset`] metric.\n- `consumer_lag`: Enables the xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_lag_max[`redpanda_kafka_consumer_group_lag_max`] and xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_lag_sum[`redpanda_kafka_consumer_group_lag_sum`] metrics\n+\nEnabling `consumer_lag` may add a small amount of additional processing overhead to the brokers, especially in environments with a high number of consumer groups or partitions.\n+", + "is_deprecated": false, + "is_enterprise": false, + "items": { + "type": "string" + }, + "name": "enable_consumer_group_metrics", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_consumers[`redpanda_kafka_consumer_group_consumers`]", + "xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_topics[`redpanda_kafka_consumer_group_topics`]", + "xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_committed_offset[`redpanda_kafka_consumer_group_committed_offset`]", + "xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_lag_max[`redpanda_kafka_consumer_group_lag_max`]", + "xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_lag_sum[`redpanda_kafka_consumer_group_lag_sum`]", + "xref:reference:properties/cluster-properties.adoc#consumer_group_lag_collection_interval_sec[`consumer_group_lag_collection_interval_sec`]", + 
"self-managed-only: xref:manage:monitoring.adoc#consumers[Monitor consumer group lag]", + "cloud-only: xref:manage:monitor-cloud.adoc#consumers[Monitor consumer group lag]" + ], + "type": "array" + }, + "enable_controller_log_rate_limiting": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Limits the write rate for the controller log.", + "is_deprecated": false, + "is_enterprise": false, + "name": "enable_controller_log_rate_limiting", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "enable_developmental_unrecoverable_data_corrupting_features": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "override", + "description": "Configuration property: enable_developmental_unrecoverable_data_corrupting_features", + "exclude_from_docs": true, + "is_deprecated": false, + "is_topic_property": false, + "name": "enable_developmental_unrecoverable_data_corrupting_features", + "type": "string", + "visibility": "user" + }, + "enable_host_metrics": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "Enable exporting of some host metrics like `/proc/diskstats`, `/proc/snmp` and `/proc/net/netstat`.\n\nHost metrics are prefixed with xref:reference:internal-metrics-reference.adoc#vectorized_host_diskstats_discards[`vectorized_host`] and are available on the `/metrics` endpoint.", + "is_deprecated": false, + "is_enterprise": false, + "name": "enable_host_metrics", + "needs_restart": true, + "nullable": false, + 
"related_topics": [ + "xref:reference:internal-metrics-reference.adoc#vectorized_host_diskstats_discards[`vectorized_host`]" + ], + "type": "boolean", + "visibility": "tunable" + }, + "enable_idempotence": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "Enable idempotent producers.", + "is_deprecated": false, + "is_enterprise": false, + "name": "enable_idempotence", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "enable_leader_balancer": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "Enable automatic leadership rebalancing.", + "is_deprecated": false, + "is_enterprise": false, + "name": "enable_leader_balancer", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "enable_metrics_reporter": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "Enable the cluster metrics reporter. 
If `true`, the metrics reporter collects and exports to Redpanda Data a set of customer usage metrics at the interval set by <>.\n\n[NOTE]\n====\nThe cluster metrics of the metrics reporter are different from xref:manage:monitoring.adoc[monitoring metrics].\n\n* The metrics reporter exports customer usage metrics for consumption by Redpanda Data.\n* Monitoring metrics are exported for consumption by Redpanda users.\n====", + "is_deprecated": false, + "is_enterprise": false, + "name": "enable_metrics_reporter", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:manage:monitoring.adoc[monitoring metrics]" + ], + "type": "boolean", + "visibility": "user" + }, + "enable_mpx_extensions": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Enable Redpanda extensions for MPX.", + "is_deprecated": false, + "is_enterprise": false, + "name": "enable_mpx_extensions", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "enable_pid_file": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "Enable PID file. 
You should not need to change.", + "is_deprecated": false, + "is_enterprise": false, + "name": "enable_pid_file", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "enable_rack_awareness": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Enable rack-aware replica assignment.", + "is_deprecated": false, + "is_enterprise": false, + "name": "enable_rack_awareness", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "enable_sasl": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Enable SASL authentication for Kafka connections. Authorization is required to modify this property. See also <>.", + "is_deprecated": false, + "is_enterprise": false, + "name": "enable_sasl", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "enable_schema_id_validation": { + "c_type": "pandaproxy::schema_registry::schema_id_validation_mode", + "cloud_byoc_only": false, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": "none", + "defined_in": "src/v/config/configuration.cc", + "description": "Controls whether Redpanda validates schema IDs in records and which topic properties are enforced.\n\nValues:\n\n* `none`: Schema validation is disabled (no schema ID checks are done). Associated topic properties cannot be modified.\n* `redpanda`: Schema validation is enabled. Only Redpanda topic properties are accepted.\n* `compat`: Schema validation is enabled. 
Both Redpanda and compatible topic properties are accepted.", + "enterprise_constructor": "restricted_only", + "enterprise_restricted_value": [ + "compat", + "redpanda" + ], + "enterprise_value": [ + "compat", + "redpanda" + ], + "is_deprecated": false, + "is_enterprise": true, + "name": "enable_schema_id_validation", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:manage:schema-reg/schema-id-validation.adoc[Server-Side Schema ID Validation]" + ], + "type": "string", + "visibility": "user" + }, + "enable_shadow_linking": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Enable creating shadow links from this cluster to a remote source cluster for data replication.", + "enterprise_constructor": "restricted_only", + "enterprise_restricted_value": [ + "true" + ], + "enterprise_value": [ + "true" + ], + "is_deprecated": false, + "is_enterprise": true, + "name": "enable_shadow_linking", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "enable_transactions": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "Enable transactions (atomic writes).", + "is_deprecated": false, + "is_enterprise": false, + "name": "enable_transactions", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "enable_usage": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Enables the usage tracking 
mechanism, storing windowed history of kafka/cloud_storage metrics over time.", + "is_deprecated": false, + "is_enterprise": false, + "name": "enable_usage", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "features_auto_enable": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "Whether new feature flags auto-activate after upgrades (true) or must wait for manual activation via the Admin API (false).", + "is_deprecated": false, + "is_enterprise": false, + "name": "features_auto_enable", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "fetch_max_bytes": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 57671680, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of bytes returned in a fetch request.", + "is_deprecated": false, + "is_enterprise": false, + "name": "fetch_max_bytes", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "fetch_max_read_concurrency": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1, + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum number of concurrent partition reads per fetch request on each shard. 
Setting this higher than the default can lead to partition starvation and unneeded memory usage.", + "example": "`1`", + "is_deprecated": false, + "is_enterprise": false, + "name": "fetch_max_read_concurrency", + "needs_restart": false, + "nullable": false, + "type": "integer", + "version": "v25.3.3", + "visibility": "tunable" + }, + "fetch_pid_d_coeff": { + "c_type": "double", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 0.0, + "defined_in": "src/v/config/configuration.cc", + "description": "Derivative coefficient for fetch PID controller.", + "is_deprecated": false, + "is_enterprise": false, + "name": "fetch_pid_d_coeff", + "needs_restart": false, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "fetch_pid_i_coeff": { + "c_type": "double", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 0.01, + "defined_in": "src/v/config/configuration.cc", + "description": "Integral coefficient for fetch PID controller.", + "is_deprecated": false, + "is_enterprise": false, + "name": "fetch_pid_i_coeff", + "needs_restart": false, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "fetch_pid_max_debounce_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 100, + "default_human_readable": "100 milliseconds", + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum debounce time the fetch PID controller will apply, in milliseconds.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "fetch_pid_max_debounce_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + 
"visibility": "tunable" + }, + "fetch_pid_p_coeff": { + "c_type": "double", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 100.0, + "defined_in": "src/v/config/configuration.cc", + "description": "Proportional coefficient for fetch PID controller.", + "is_deprecated": false, + "is_enterprise": false, + "name": "fetch_pid_p_coeff", + "needs_restart": false, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "fetch_pid_target_utilization_fraction": { + "c_type": "double", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 0.2, + "defined_in": "src/v/config/configuration.cc", + "description": "A fraction, between 0 and 1, for the target reactor utilization of the fetch scheduling group.", + "is_deprecated": false, + "is_enterprise": false, + "name": "fetch_pid_target_utilization_fraction", + "needs_restart": false, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "fetch_read_strategy": { + "c_type": "model::fetch_read_strategy", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "non_polling", + "defined_in": "src/v/config/configuration.cc", + "description": "The strategy used to fulfill fetch requests.\n\n* `polling`: If `fetch_reads_debounce_timeout` is set to its default value, then this acts exactly like `non_polling`; otherwise, it acts like `non_polling_with_debounce` (deprecated).\n* `non_polling`: The backend is signaled when a partition has new data, so Redpanda does not need to repeatedly read from every partition in the fetch. 
Redpanda Data recommends using this value for most workloads, because it can improve fetch latency and CPU utilization.\n* `non_polling_with_debounce`: This option behaves like `non_polling`, but it includes a debounce mechanism with a fixed delay specified by `fetch_reads_debounce_timeout` at the start of each fetch. By introducing this delay, Redpanda can accumulate more data before processing, leading to fewer fetch operations and returning larger amounts of data. Enabling this option reduces reactor utilization, but it may also increase end-to-end latency.", + "enum": [ + "polling", + "non_polling", + "non_polling_with_debounce", + "non_polling_with_pid" + ], + "example": "`model::fetch_read_strategy_to_string( model::fetch_read_strategy::non_polling)`", + "is_deprecated": false, + "is_enterprise": false, + "name": "fetch_read_strategy", + "needs_restart": false, + "nullable": false, + "type": "string", + "visibility": "tunable" + }, + "fetch_reads_debounce_timeout": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1, + "default_human_readable": "1 millisecond", + "defined_in": "src/v/config/configuration.cc", + "description": "Time to wait for the next read in fetch requests when the requested minimum bytes was not reached.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "fetch_reads_debounce_timeout", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "fetch_session_eviction_timeout_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 60000, + "default_human_readable": "1 minute", + "defined_in": "src/v/config/configuration.cc", + "description": 
"Time duration after which the inactive fetch session is removed from the fetch session cache. Fetch sessions are used to implement the incremental fetch requests where a consumer does not send all requested partitions to the server but the server tracks them for the consumer.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "fetch_session_eviction_timeout_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "fips_mode": { + "c_type": "fips_mode_flag", + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "disabled", + "defined_in": "src/v/config/node_config.cc", + "description": "Controls whether Redpanda starts in FIPS mode. This property allows for three values: \n\n* Disabled - Redpanda does not start in FIPS mode.\n\n* Permissive - Redpanda performs the same check as enabled, but a warning is logged, and Redpanda continues to run. Redpanda loads the OpenSSL FIPS provider into the OpenSSL library. After this completes, Redpanda is operating in FIPS mode, which means that the TLS cipher suites available to users are limited to the TLSv1.2 and TLSv1.3 NIST-approved cryptographic methods.\n\n* Enabled - Redpanda verifies that the operating system is enabled for FIPS by checking `/proc/sys/crypto/fips_enabled`. 
If the file does not exist or does not return `1`, Redpanda immediately exits.", + "enum": [ + "disabled", + "permissive", + "enabled" + ], + "is_deprecated": false, + "is_enterprise": false, + "name": "fips_mode", + "needs_restart": true, + "nullable": false, + "type": "string", + "visibility": "user" + }, + "flush.bytes": { + "acceptable_values": "bytes (integer)", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "performance-cluster", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "cluster-properties.adoc", + "config_scope": "topic", + "corresponding_cluster_property": "raft_replica_max_pending_flush_bytes", + "default": 262144, + "defined_in": "src/v/kafka/server/handlers/topics/types.h", + "description": "The maximum bytes not fsynced per partition. If this configured threshold is reached, the log is automatically fsynced, even though it wasn't explicitly requested.", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "flush.bytes", + "needs_restart": false, + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#flush_bytes[`flush_bytes`]" + ], + "type": "integer" + }, + "flush.ms": { + "acceptable_values": "milliseconds (integer)", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "performance-cluster", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "cluster-properties.adoc", + "config_scope": "topic", + "corresponding_cluster_property": "raft_replica_max_flush_delay_ms", + "default": 100, + "default_human_readable": "100 milliseconds", + "defined_in": "src/v/kafka/server/handlers/topics/types.h", + "description": "The maximum delay (in ms) between two subsequent fsyncs. 
After this delay, the log is automatically fsynced.", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "flush.ms", + "needs_restart": false, + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#flush_ms[`flush_ms`]" + ], + "type": "integer" + }, + "group_initial_rebalance_delay": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 3000, + "default_human_readable": "3 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Delay added to the rebalance phase to wait for new members.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "group_initial_rebalance_delay", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "group_max_session_timeout_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 300000, + "default_human_readable": "5 minutes", + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum allowed session timeout for registered consumers. 
Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "group_max_session_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer" + }, + "group_min_session_timeout_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 6000, + "default_human_readable": "6 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "The minimum allowed session timeout for registered consumers. Shorter timeouts result in quicker failure detection at the cost of more frequent consumer heartbeating, which can overwhelm broker resources.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "group_min_session_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer" + }, + "group_new_member_join_timeout": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 30000, + "default_human_readable": "30 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Timeout for new member joins.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "group_new_member_join_timeout", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "group_offset_retention_check_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 
600000, + "default_human_readable": "10 minutes", + "defined_in": "src/v/config/configuration.cc", + "description": "Frequency rate at which the system should check for expired group offsets.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "group_offset_retention_check_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "group_offset_retention_sec": { + "c_type": "std::chrono::seconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": true, + "cloud_supported": true, + "config_scope": "cluster", + "default": 604800, + "default_human_readable": "1 week", + "defined_in": "src/v/config/configuration.cc", + "description": "Consumer group offset retention seconds. To disable offset retention, set this to null.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17179869183, + "minimum": -17179869184, + "name": "group_offset_retention_sec", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "group_topic_partitions": { + "c_type": "int32_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 16, + "defined_in": "src/v/config/configuration.cc", + "description": "Number of partitions in the internal group membership topic.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 2147483647, + "minimum": -2147483648, + "name": "group_topic_partitions", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "health_manager_tick_interval": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 180000, + "default_human_readable": "3 minutes", + "defined_in": 
"src/v/config/configuration.cc", + "description": "How often the health manager runs.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "health_manager_tick_interval", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "health_monitor_max_metadata_age": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 10000, + "default_human_readable": "10 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum age of the metadata cached in the health monitor of a non-controller broker.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "health_monitor_max_metadata_age", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "health_monitor_tick_interval": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 10000, + "default_human_readable": "10 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "How often health monitor refresh cluster state", + "is_deprecated": true, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "health_monitor_tick_interval", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "deprecated" + }, + "http_authentication": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": true, + "cloud_supported": true, + "config_scope": "cluster", + "default": [ + "BASIC" + ], + "defined_in": "src/v/config/configuration.cc", + "description": "A list of supported HTTP 
authentication mechanisms. Accepted Values: `BASIC`, `OIDC`.", + "enterprise_constructor": "restricted_only", + "enterprise_restricted_value": [ + "OIDC" + ], + "enterprise_value": [ + "OIDC" + ], + "is_deprecated": false, + "is_enterprise": true, + "items": { + "type": "string" + }, + "name": "http_authentication", + "needs_restart": false, + "nullable": false, + "related_topics": [], + "type": "array", + "visibility": "user" + }, + "iceberg_backlog_controller_i_coeff": { + "c_type": "double", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 0.005, + "defined_in": "src/v/config/configuration.cc", + "description": "Controls how much past backlog (unprocessed work) affects the priority of processing new data in the Iceberg system. The system accumulates backlog errors over time, and this coefficient determines how much that accumulated backlog influences the urgency of data translation.", + "is_deprecated": false, + "is_enterprise": false, + "name": "iceberg_backlog_controller_i_coeff", + "needs_restart": false, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "iceberg_backlog_controller_p_coeff": { + "c_type": "double", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1e-05, + "defined_in": "src/v/config/configuration.cc", + "description": "Proportional coefficient for the Iceberg backlog controller. Number of shares assigned to the datalake scheduling group will be proportional to the backlog size error. 
A negative value means larger and faster changes in the number of shares in the datalake scheduling group.", + "is_deprecated": false, + "is_enterprise": false, + "name": "iceberg_backlog_controller_p_coeff", + "needs_restart": false, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "iceberg_catalog_base_location": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": true, + "cloud_supported": true, + "config_scope": "cluster", + "default": "redpanda-iceberg-catalog", + "defined_in": "src/v/config/configuration.cc", + "description": "Base path for the object-storage-backed Iceberg catalog. After Iceberg is enabled, do not change this value.", + "is_deprecated": false, + "is_enterprise": false, + "name": "iceberg_catalog_base_location", + "needs_restart": true, + "nullable": false, + "type": "string", + "visibility": "user" + }, + "iceberg_catalog_commit_interval_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1 minute", + "defined_in": "src/v/config/configuration.cc", + "description": "The frequency at which the Iceberg coordinator commits topic files to the catalog. 
This is the interval between commit transactions across all topics monitored by the coordinator, not the interval between individual commits.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "iceberg_catalog_commit_interval_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "iceberg_catalog_type": { + "c_type": "datalake_catalog_type", + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": "object_storage", + "defined_in": "src/v/config/configuration.cc", + "description": "Iceberg catalog type that Redpanda will use to commit table metadata updates. Supported types: `rest`, `object_storage`.\nNOTE: You must set <> at the same time that you set `iceberg_catalog_type` to `rest`.", + "enum": [ + "object_storage", + "rest" + ], + "is_deprecated": false, + "is_enterprise": false, + "name": "iceberg_catalog_type", + "needs_restart": true, + "nullable": false, + "type": "string", + "visibility": "user" + }, + "iceberg_default_catalog_namespace": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": [ + "redpanda" + ], + "defined_in": "src/v/config/configuration.cc", + "description": "The default namespace (database name) for Iceberg tables. All tables created by Redpanda will be placed in this namespace within the Iceberg catalog. Supports nested namespaces as an array of strings.\n\nIMPORTANT: This value must be configured before enabling Iceberg and must not be changed afterward. 
Changing it will cause Redpanda to lose track of existing tables.", + "is_deprecated": false, + "is_enterprise": false, + "items": { + "type": "string" + }, + "name": "iceberg_default_catalog_namespace", + "needs_restart": true, + "nullable": false, + "type": "array", + "version": "v25.3.5", + "visibility": "user" + }, + "iceberg_default_partition_spec": { + "c_type": "ss::sstring", + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": "(hour(redpanda.timestamp))", + "defined_in": "src/v/config/configuration.cc", + "description": "Default value for the `redpanda.iceberg.partition.spec` topic property that determines the partition spec for the Iceberg table corresponding to the topic.", + "is_deprecated": false, + "is_enterprise": false, + "name": "iceberg_default_partition_spec", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "self-managed-only: xref:reference:properties/topic-properties.adoc#redpanda-iceberg-partition-spec[`redpanda.iceberg.partition.spec`]", + "xref:manage:iceberg/about-iceberg-topics.adoc#enable-iceberg-integration[Enable Iceberg integration]" + ], + "type": "string", + "visibility": "user" + }, + "iceberg_delete": { + "c_type": "bool", + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "Default value for the `redpanda.iceberg.delete` topic property that determines if the corresponding Iceberg table is deleted upon deleting the topic.", + "is_deprecated": false, + "is_enterprise": false, + "name": "iceberg_delete", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "iceberg_disable_automatic_snapshot_expiry": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + 
"cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Whether to disable automatic Iceberg snapshot expiry. This property may be useful if the Iceberg catalog expects to perform snapshot expiry on its own.", + "is_deprecated": false, + "is_enterprise": false, + "name": "iceberg_disable_automatic_snapshot_expiry", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "iceberg_disable_snapshot_tagging": { + "c_type": "bool", + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Whether to disable tagging of Iceberg snapshots. These tags are used to ensure that the snapshots that Redpanda writes are retained during snapshot removal, which in turn, helps Redpanda ensure exactly-once delivery of records. Disabling tags is therefore not recommended, but it may be useful if the Iceberg catalog does not support tags.", + "is_deprecated": false, + "is_enterprise": false, + "name": "iceberg_disable_snapshot_tagging", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "iceberg_dlq_table_suffix": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "~dlq", + "defined_in": "src/v/config/configuration.cc", + "description": "The suffix added to Iceberg table names when creating dead-letter queue (DLQ) tables for invalid records. Choose a suffix that won't conflict with existing table names. This is especially important for catalogs that don't support the tilde (~) character in table names. 
Don't change this value after creating DLQ tables.", + "is_deprecated": false, + "is_enterprise": false, + "name": "iceberg_dlq_table_suffix", + "needs_restart": false, + "nullable": false, + "type": "string", + "visibility": "user" + }, + "iceberg_enabled": { + "c_type": "bool", + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Enables the translation of topic data into Iceberg tables. Setting `iceberg_enabled` to `true` activates the feature at the cluster level, but each topic must also set the `redpanda.iceberg.enabled` topic-level property to `true` to use it. If `iceberg_enabled` is set to `false`, then the feature is disabled for all topics in the cluster, overriding any topic-level settings.", + "enterprise_constructor": "restricted_only", + "enterprise_restricted_value": [ + "true" + ], + "enterprise_value": [ + "true" + ], + "is_deprecated": false, + "is_enterprise": true, + "name": "iceberg_enabled", + "needs_restart": true, + "nullable": false, + "related_topics": [ + "self-managed-only: xref:reference:properties/topic-properties.adoc#redpanda-iceberg-enabled[`redpanda.iceberg.enabled`]" + ], + "type": "boolean", + "visibility": "user" + }, + "iceberg_invalid_record_action": { + "c_type": "model::iceberg_invalid_record_action", + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": "dlq_table", + "defined_in": "src/v/config/configuration.cc", + "description": "Default value for the `redpanda.iceberg.invalid.record.action` topic property.", + "enum": [ + "drop", + "dlq_table" + ], + "is_deprecated": false, + "is_enterprise": false, + "name": "iceberg_invalid_record_action", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "self-managed-only: 
xref:reference:properties/topic-properties.adoc#redpanda-iceberg-invalid-record-action[`redpanda.iceberg.invalid.record.action`]", + "self-managed-only: xref:manage:iceberg/about-iceberg-topics.adoc#troubleshoot-errors[Troubleshoot errors]" + ], + "type": "string", + "visibility": "user" + }, + "iceberg_latest_schema_cache_ttl_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "5 minutes", + "defined_in": "src/v/config/configuration.cc", + "description": "The TTL for caching the latest schema during translation when using the xref:manage:iceberg/specify-iceberg-schema.adoc#value_schema_latest[`value_schema_latest`] iceberg mode. This setting controls how long the latest schema remains cached during translation, which affects schema refresh behavior and performance.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "iceberg_latest_schema_cache_ttl_ms", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:manage:iceberg/specify-iceberg-schema.adoc#value_schema_latest[`value_schema_latest`]" + ], + "type": "integer", + "visibility": "tunable" + }, + "iceberg_rest_catalog_authentication_mode": { + "c_type": "datalake_catalog_auth_mode", + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": "none", + "defined_in": "src/v/config/configuration.cc", + "description": "The authentication mode for client requests made to the Iceberg catalog. Choose from: `none`, `bearer`, `oauth2`, and `aws_sigv4`. In `bearer` mode, the token specified in `iceberg_rest_catalog_token` is used unconditionally, and no attempts are made to refresh the token. 
In `oauth2` mode, the credentials specified in `iceberg_rest_catalog_client_id` and `iceberg_rest_catalog_client_secret` are used to obtain a bearer token from the URI defined by `iceberg_rest_catalog_oauth2_server_uri`. In `aws_sigv4` mode, the same AWS credentials used for cloud storage (see `cloud_storage_region`, `cloud_storage_access_key`, `cloud_storage_secret_key`, and `cloud_storage_credentials_source`) are used to sign requests to AWS Glue catalog with SigV4.", + "enum": [ + "none", + "bearer", + "oauth2", + "aws_sigv4", + "gcp" + ], + "example": "`none`", + "is_deprecated": false, + "is_enterprise": false, + "name": "iceberg_rest_catalog_authentication_mode", + "needs_restart": true, + "nullable": false, + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_aws_access_key": { + "c_type": "ss::sstring", + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "AWS access key for Iceberg REST catalog SigV4 authentication. 
If not set, falls back to xref:reference:properties/object-storage-properties.adoc#cloud_storage_access_key[`cloud_storage_access_key`] when using aws_sigv4 authentication mode.", + "is_deprecated": false, + "is_enterprise": false, + "name": "iceberg_rest_catalog_aws_access_key", + "needs_restart": true, + "nullable": true, + "related_topics": [ + "xref:reference:properties/object-storage-properties.adoc#cloud_storage_access_key[`cloud_storage_access_key`]" + ], + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_aws_credentials_source": { + "aliases": [ + "iceberg_rest_catalog_aws_credentials_source" + ], + "c_type": "model::cloud_credentials_source", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "*Accepted values*: `aws_instance_metadata`, `azure_aks_oidc_federation`, `azure_vm_instance_metadata`, `config_file`, `gcp_instance_metadata`, `sts`.", + "enum": [ + "config_file", + "aws_instance_metadata", + "sts", + "gcp_instance_metadata", + "azure_aks_oidc_federation", + "azure_vm_instance_metadata" + ], + "example": "`config_file`", + "is_deprecated": false, + "is_enterprise": false, + "name": "iceberg_rest_catalog_aws_credentials_source", + "needs_restart": true, + "nullable": true, + "related_topics": [ + "xref:reference:properties/object-storage-properties.adoc#cloud_storage_credentials_source[`cloud_storage_credentials_source`]" + ], + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_aws_region": { + "c_type": "ss::sstring", + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "AWS region for Iceberg REST catalog SigV4 authentication. 
If not set, falls back to xref:reference:properties/object-storage-properties.adoc#cloud_storage_region[`cloud_storage_region`] when using aws_sigv4 authentication mode.", + "is_deprecated": false, + "is_enterprise": false, + "name": "iceberg_rest_catalog_aws_region", + "needs_restart": true, + "nullable": true, + "related_topics": [ + "xref:reference:properties/object-storage-properties.adoc#cloud_storage_region[`cloud_storage_region`]" + ], + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_aws_secret_key": { + "c_type": "ss::sstring", + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "AWS secret key for Iceberg REST catalog SigV4 authentication. If not set, falls back to xref:reference:properties/object-storage-properties.adoc#cloud_storage_secret_key[`cloud_storage_secret_key`] when using aws_sigv4 authentication mode.", + "is_deprecated": false, + "is_enterprise": false, + "is_secret": true, + "name": "iceberg_rest_catalog_aws_secret_key", + "needs_restart": true, + "nullable": true, + "related_topics": [ + "xref:reference:properties/object-storage-properties.adoc#cloud_storage_secret_key[`cloud_storage_secret_key`]" + ], + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_aws_service_name": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "glue", + "defined_in": "src/v/config/configuration.cc", + "description": "AWS service name for SigV4 signing when using aws_sigv4 authentication mode. Defaults to 'glue' for AWS Glue Data Catalog. 
Can be changed to support other AWS services that provide Iceberg REST catalog APIs.", + "is_deprecated": false, + "is_enterprise": false, + "name": "iceberg_rest_catalog_aws_service_name", + "needs_restart": true, + "nullable": false, + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_base_location": { + "c_type": "ss::sstring", + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": "Base URI for the Iceberg REST catalog. If unset, the REST catalog server determines the location. Some REST catalogs, like AWS Glue, require the client to set this. After Iceberg is enabled, do not change this value.", + "is_deprecated": false, + "is_enterprise": false, + "name": "iceberg_rest_catalog_base_location", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_client_id": { + "c_type": "ss::sstring", + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Iceberg REST catalog user ID. This ID is used to query the catalog API for the OAuth token. 
Required if catalog type is set to `rest` and `iceberg_rest_catalog_authentication_mode` is set to `oauth2`.", + "is_deprecated": false, + "is_enterprise": false, + "name": "iceberg_rest_catalog_client_id", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_client_secret": { + "c_type": "ss::sstring", + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Secret used with the client ID to query the OAuth token endpoint for Iceberg REST catalog authentication. Required if catalog type is set to `rest` and `iceberg_rest_catalog_authentication_mode` is set to `oauth2`.", + "is_deprecated": false, + "is_enterprise": false, + "is_secret": true, + "name": "iceberg_rest_catalog_client_secret", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_credentials_source": { + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "topic", + "default": null, + "defined_in": "override", + "description": "Configuration property: iceberg_rest_catalog_credentials_source", + "is_deprecated": false, + "is_topic_property": true, + "name": "iceberg_rest_catalog_credentials_source", + "related_topics": [ + "xref:reference:properties/object-storage-properties.adoc#cloud_storage_credentials_source[`cloud_storage_credentials_source`]" + ], + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_crl": { + "c_type": "ss::sstring", + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "The contents of a certificate revocation list for 
`iceberg_rest_catalog_trust`. Takes precedence over `iceberg_rest_catalog_crl_file`.", + "is_deprecated": false, + "is_enterprise": false, + "is_secret": true, + "name": "iceberg_rest_catalog_crl", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_crl_file": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Path to certificate revocation list for `iceberg_rest_catalog_trust_file`.", + "is_deprecated": false, + "is_enterprise": false, + "name": "iceberg_rest_catalog_crl_file", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_endpoint": { + "c_type": "ss::sstring", + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "URL of Iceberg REST catalog endpoint.\nNOTE: If you set <> to `rest`, you must also set this property at the same time.", + "example": "`http://hostname:8181`", + "is_deprecated": false, + "is_enterprise": false, + "name": "iceberg_rest_catalog_endpoint", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_gcp_user_project": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "The GCP project that is billed for charges associated with Iceberg REST Catalog requests.", + "is_deprecated": false, + "is_enterprise": false, + "name": "iceberg_rest_catalog_gcp_user_project", + "needs_restart": true, + 
"nullable": true, + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_oauth2_scope": { + "c_type": "ss::sstring", + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": "PRINCIPAL_ROLE:ALL", + "defined_in": "src/v/config/configuration.cc", + "description": "The OAuth scope used to retrieve access tokens for Iceberg catalog authentication. Only meaningful when `iceberg_rest_catalog_authentication_mode` is set to `oauth2`", + "is_deprecated": false, + "is_enterprise": false, + "name": "iceberg_rest_catalog_oauth2_scope", + "needs_restart": true, + "nullable": false, + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_oauth2_server_uri": { + "c_type": "ss::sstring", + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "The OAuth URI used to retrieve access tokens for Iceberg catalog authentication. 
If left undefined, the deprecated Iceberg catalog endpoint `/v1/oauth/tokens` is used instead.", + "is_deprecated": false, + "is_enterprise": false, + "name": "iceberg_rest_catalog_oauth2_server_uri", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_request_timeout_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": 10000, + "default_human_readable": "10 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum length of time that Redpanda waits for a response from the REST catalog before aborting the request", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "iceberg_rest_catalog_request_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "iceberg_rest_catalog_token": { + "c_type": "ss::sstring", + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Token used to access the REST Iceberg catalog. 
If the token is present, Redpanda ignores credentials stored in the properties <> and <>.\n\nRequired if <> is set to `bearer`.", + "is_deprecated": false, + "is_enterprise": false, + "is_secret": true, + "name": "iceberg_rest_catalog_token", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_trust": { + "c_type": "ss::sstring", + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "The contents of a certificate chain to trust for the REST Iceberg catalog.", + "is_deprecated": false, + "is_enterprise": false, + "is_secret": true, + "name": "iceberg_rest_catalog_trust", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_trust_file": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Path to a file containing a certificate chain to trust for the REST Iceberg catalog.", + "is_deprecated": false, + "is_enterprise": false, + "name": "iceberg_rest_catalog_trust_file", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "iceberg_rest_catalog_warehouse": { + "aliases": [ + "iceberg_rest_catalog_prefix" + ], + "c_type": "ss::sstring", + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Warehouse to use for the Iceberg REST catalog. Redpanda queries the catalog to retrieve warehouse-specific configurations and automatically configures settings like the appropriate prefix. 
The prefix is appended to the catalog path (for example, `/v1/\\{prefix}/namespaces`).", + "is_deprecated": false, + "is_enterprise": false, + "name": "iceberg_rest_catalog_warehouse", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "iceberg_target_backlog_size": { + "c_type": "uint32_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 104857600, + "defined_in": "src/v/config/configuration.cc", + "description": "Average size per partition of the datalake translation backlog that the backlog controller tries to maintain. When the backlog size is larger than the set point, the backlog controller will increase the translation scheduling group priority.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 4294967295, + "minimum": 0, + "name": "iceberg_target_backlog_size", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "iceberg_target_lag_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": "1 minute", + "defined_in": "src/v/config/configuration.cc", + "description": "Default value for the `redpanda.iceberg.target.lag.ms` topic property, which controls how often the data in an Iceberg table is refreshed with new data from the corresponding Redpanda topic. 
Redpanda attempts to commit all data produced to the topic within the lag target, subject to resource availability.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "iceberg_target_lag_ms", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "self-managed-only: xref:reference:properties/topic-properties.adoc#redpanda-iceberg-target-lag-ms[`redpanda.iceberg.target.lag.ms`]" + ], + "type": "integer", + "visibility": "user" + }, + "iceberg_throttle_backlog_size_ratio": { + "c_type": "double", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Ratio of the total backlog size to the disk space at which the throttle to Iceberg producers is applied.", + "is_deprecated": false, + "is_enterprise": false, + "name": "iceberg_throttle_backlog_size_ratio", + "needs_restart": false, + "nullable": true, + "type": "number", + "visibility": "tunable" + }, + "iceberg_topic_name_dot_replacement": { + "c_type": "ss::sstring", + "cloud_byoc_only": true, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "A replacement string for dots in topic names when creating Iceberg table names. Use this when your downstream systems don't allow dots in table names. The replacement string cannot contain dots. Be careful to avoid table name collisions. 
Don't change this value after creating any Iceberg topics with dots in their names.", + "is_deprecated": false, + "is_enterprise": false, + "name": "iceberg_topic_name_dot_replacement", + "needs_restart": false, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "id_allocator_batch_size": { + "c_type": "int16_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1000, + "defined_in": "src/v/config/configuration.cc", + "description": "The ID allocator allocates messages in batches (each batch is one log record) and then serves requests from memory without touching the log until the batch is exhausted.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 32767, + "minimum": -32768, + "name": "id_allocator_batch_size", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "id_allocator_log_capacity": { + "c_type": "int16_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 100, + "defined_in": "src/v/config/configuration.cc", + "description": "Capacity of the `id_allocator` log in number of batches. 
After it reaches `id_allocator_stm`, it truncates the log's prefix.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 32767, + "minimum": -32768, + "name": "id_allocator_log_capacity", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "initial.retention.local.target.bytes": { + "acceptable_values": "bytes (integer)", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "tiered-storage", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "cluster-properties.adoc", + "config_scope": "topic", + "corresponding_cluster_property": "initial_retention_local_target_bytes_default", + "default": null, + "defined_in": "src/v/kafka/protocol/topic_properties.h", + "description": "A size-based initial retention limit for Tiered Storage that determines how much data in local storage is transferred to a partition replica when a cluster is resized. 
If `null` (default), all locally retained data is transferred.\n\nThis property supports three states:\n\n* Positive value: Sets the maximum bytes of local data to transfer during cluster resize.\n* 0: No local data is transferred during cluster resize.\n* Negative value: All locally retained data is transferred (default behavior).", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "initial.retention.local.target.bytes", + "needs_restart": false, + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#initial_retention_local_target_bytes[`initial_retention_local_target_bytes`]", + "xref:manage:tiered-storage.adoc#fast-commission-and-decommission[Fast commission and decommission through Tiered Storage]" + ], + "type": "integer" + }, + "initial.retention.local.target.ms": { + "acceptable_values": "milliseconds (integer)", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "tiered-storage", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "cluster-properties.adoc", + "config_scope": "topic", + "corresponding_cluster_property": "initial_retention_local_target_ms_default", + "default": null, + "defined_in": "src/v/kafka/protocol/topic_properties.h", + "description": "A time-based initial retention limit for Tiered Storage that determines how much data in local storage is transferred to a partition replica when a cluster is resized. 
If `null` (default), all locally retained data is transferred.\n\nThis property supports three states:\n\n* Positive value: Sets the maximum age (milliseconds) of local data to transfer during cluster resize.\n* 0: No local data is transferred during cluster resize.\n* Negative value: All locally retained data is transferred (default behavior).", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "initial.retention.local.target.ms", + "needs_restart": false, + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#initial_retention_local_target_ms[`initial_retention_local_target_ms`]", + "xref:manage:tiered-storage.adoc#fast-commission-and-decommission[Fast commission and decommission through Tiered Storage]" + ], + "type": "integer" + }, + "initial_retention_local_target_bytes_default": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Initial local retention size target for partitions of topics with xref:manage:tiered-storage.adoc[Tiered Storage] enabled. 
If no initial local target retention is configured, then all locally-retained data will be delivered to learner when joining the partition replica set.", + "is_deprecated": false, + "is_enterprise": false, + "name": "initial_retention_local_target_bytes_default", + "needs_restart": false, + "nullable": true, + "related_topics": [ + "xref:manage:tiered-storage.adoc[Tiered Storage]" + ], + "type": "integer", + "visibility": "user" + }, + "initial_retention_local_target_ms_default": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Initial local retention time target for partitions of topics with xref:manage:tiered-storage.adoc[Tiered Storage] enabled. If no initial local target retention is configured, then all locally-retained data will be delivered to learner when joining the partition replica set.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "initial_retention_local_target_ms_default", + "needs_restart": false, + "nullable": true, + "related_topics": [ + "xref:manage:tiered-storage.adoc[Tiered Storage]" + ], + "type": "integer", + "visibility": "user" + }, + "internal_rpc_request_timeout_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 10000, + "default_human_readable": "10 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Default timeout for RPC requests between Redpanda nodes.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "internal_rpc_request_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": 
"integer", + "version": "v26.1.1-rc2", + "visibility": "tunable" + }, + "internal_topic_replication_factor": { + "c_type": "int", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 3, + "defined_in": "src/v/config/configuration.cc", + "description": "Target replication factor for internal topics.\n\n*Unit*: number of replicas per topic.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 2147483647, + "minimum": -2147483648, + "name": "internal_topic_replication_factor", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "join_retry_timeout_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 5000, + "default_human_readable": "5 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Time between cluster join retries in milliseconds.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "join_retry_timeout_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "kafka_api": { + "c_type": "config::broker_authn_endpoint", + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": [ + "127.0.0.1:9092", + null + ], + "defined_in": "src/v/config/node_config.cc", + "description": "IP address and port of the Kafka API endpoint that handles requests. 
Supports multiple listeners with different configurations.", + "example": ".Basic example\n[,yaml]\n----\nredpanda:\n kafka_api:\n - address: \n port: \n authentication_method: sasl\n----\n\n.Multiple listeners example (for different networks or authentication methods)\n[,yaml]\n----\nredpanda:\n kafka_api:\n - name: \n address: \n port: \n authentication_method: none\n - name: \n address: \n port: \n authentication_method: sasl\n - name: \n address: \n port: \n authentication_method: mtls_identity\n----\n\nReplace the following placeholders with your values:\n\n* ``: The IP address to bind the listener to (typically `0.0.0.0` for all interfaces)\n* ``: The port number for the Kafka API endpoint\n* ``: Name for internal network connections (for example, `internal`)\n* ``: Name for external network connections (for example, `external`)\n* ``: Name for mTLS connections (for example, `mtls`)\n* ``: The IP address for internal connections\n* ``: The port number for internal Kafka API connections\n* ``: The IP address for external connections\n* ``: The port number for external Kafka API connections\n* ``: The IP address for mTLS connections\n* ``: The port number for mTLS Kafka API connections", + "is_deprecated": false, + "is_enterprise": false, + "items": { + "type": "object" + }, + "name": "kafka_api", + "needs_restart": true, + "nullable": false, + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`]" + ], + "type": "array", + "visibility": "user" + }, + "kafka_api_tls": { + "c_type": "endpoint_tls_config", + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": [], + "defined_in": "src/v/config/node_config.cc", + "description": "Transport Layer Security (TLS) configuration for the Kafka API endpoint.", + "example": "[,yaml]\n----\nredpanda:\n kafka_api_tls:\n - name: \n enabled: true\n 
cert_file: \n key_file: \n truststore_file: \n require_client_auth: false\n----\n\nReplace the following placeholders with your values:\n\n* ``: Name that matches your Kafka API listener (defined in the <> broker property)\n* ``: Full path to the TLS certificate file\n* ``: Full path to the TLS private key file\n* ``: Full path to the Certificate Authority file\n\nNOTE: Set `require_client_auth: true` for mutual TLS (mTLS) authentication, or `false` for server-side TLS only.", + "is_deprecated": false, + "is_enterprise": false, + "items": { + "type": "endpoint_tls_config" + }, + "name": "kafka_api_tls", + "needs_restart": true, + "nullable": false, + "type": "array", + "visibility": "user" + }, + "kafka_batch_max_bytes": { + "c_type": "uint32_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1048576, + "defined_in": "src/v/config/configuration.cc", + "description": "The default maximum batch size for topics if the topic property xref:reference:properties/topic-properties.adoc[`message.max.bytes`] is not set. If the batch is compressed, the limit applies to the compressed batch size.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 4294967295, + "minimum": 0, + "name": "kafka_batch_max_bytes", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:reference:properties/topic-properties.adoc[`message.max.bytes`]" + ], + "type": "integer", + "visibility": "tunable" + }, + "kafka_connection_rate_limit": { + "c_type": "int64_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum connections per second for one core. 
If `null` (the default), then the number of connections per second is unlimited.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 9223372036854775807, + "minimum": -9223372036854775808, + "name": "kafka_connection_rate_limit", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "user" + }, + "kafka_connection_rate_limit_overrides": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": [], + "defined_in": "src/v/config/configuration.cc", + "description": "Overrides the maximum connections per second for one core for the specified IP addresses (for example, `['127.0.0.1:90', '50.20.1.1:40']`)", + "example": "`['127.0.0.1:90', '50.20.1.1:40']`", + "is_deprecated": false, + "is_enterprise": false, + "items": { + "type": "string" + }, + "name": "kafka_connection_rate_limit_overrides", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:manage:cluster-maintenance/configure-availability.adoc#limit-client-connections[Limit client connections]", + "self-managed-only:xref:manage:cluster-maintenance/configure-client-connections.adoc#limit-client-connections[Limit client connections]" + ], + "type": "array", + "visibility": "user" + }, + "kafka_connections_max": { + "c_type": "uint32_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of Kafka client connections per broker. 
If `null`, the property is disabled.\n\n*Unit*: number of Kafka client connections per broker\n\n*Default*: null", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 4294967295, + "minimum": 0, + "name": "kafka_connections_max", + "needs_restart": false, + "nullable": true, + "related_topics": [ + "xref:manage:cluster-maintenance/configure-availability.adoc#limit-client-connections[Limit client connections]", + "self-managed-only:xref:manage:cluster-maintenance/configure-client-connections.adoc#limit-client-connections[Limit client connections]" + ], + "type": "integer", + "visibility": "user" + }, + "kafka_connections_max_overrides": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": [], + "defined_in": "src/v/config/configuration.cc", + "description": "A list of IP addresses for which Kafka client connection limits are overridden and don't apply. For example, `['127.0.0.1:90', '50.20.1.1:40']`.", + "example": "`['127.0.0.1:90', '50.20.1.1:40']`", + "is_deprecated": false, + "is_enterprise": false, + "items": { + "type": "string" + }, + "name": "kafka_connections_max_overrides", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:manage:cluster-maintenance/configure-availability.adoc#limit-client-connections[Limit client connections]", + "self-managed-only:xref:manage:cluster-maintenance/configure-client-connections.adoc#limit-client-connections[Limit client connections]" + ], + "type": "array", + "visibility": "user" + }, + "kafka_connections_max_per_ip": { + "c_type": "uint32_t", + "cloud_byoc_only": false, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of Kafka client connections per IP address, per broker. 
If `null`, the property is disabled.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 4294967295, + "minimum": 0, + "name": "kafka_connections_max_per_ip", + "needs_restart": false, + "nullable": true, + "related_topics": [ + "xref:manage:cluster-maintenance/configure-availability.adoc#limit-client-connections[Limit client connections]", + "self-managed-only:xref:manage:cluster-maintenance/configure-client-connections.adoc#limit-client-connections[Limit client connections]" + ], + "type": "integer", + "visibility": "user" + }, + "kafka_enable_authorization": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Flag to require authorization for Kafka connections. If `null`, the property is disabled, and authorization is instead enabled by <>.\n\n* `null`: Ignored. Authorization is enabled with `enable_sasl`: `true`\n* `true`: authorization is required.\n* `false`: authorization is disabled.", + "is_deprecated": false, + "is_enterprise": false, + "name": "kafka_enable_authorization", + "needs_restart": false, + "nullable": true, + "type": "boolean", + "visibility": "user" + }, + "kafka_enable_describe_log_dirs_remote_storage": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "Whether to include Tiered Storage as a special remote:// directory in `DescribeLogDirs` Kafka API requests.", + "example": "`false`", + "is_deprecated": false, + "is_enterprise": false, + "name": "kafka_enable_describe_log_dirs_remote_storage", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "kafka_enable_partition_reassignment": { + "c_type": 
"bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "Enable the Kafka partition reassignment API.", + "is_deprecated": false, + "is_enterprise": false, + "name": "kafka_enable_partition_reassignment", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "kafka_fetch_request_timeout_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 5000, + "default_human_readable": "5 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Broker-side target for the duration of a single fetch request. The broker will try to complete fetches within the specified duration, even if it means returning less bytes in the fetch than are available.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "kafka_fetch_request_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "version": "v25.3.7", + "visibility": "tunable" + }, + "kafka_group_recovery_timeout_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 30000, + "default_human_readable": "30 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Kafka group recovery timeout.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "kafka_group_recovery_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "kafka_max_bytes_per_fetch": { + "c_type": "size_t", + 
"cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 67108864, + "defined_in": "src/v/config/configuration.cc", + "description": "Limit fetch responses to this many bytes, even if the total of partition bytes limits is higher.", + "is_deprecated": false, + "is_enterprise": false, + "name": "kafka_max_bytes_per_fetch", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "kafka_max_message_size_upper_limit_bytes": { + "c_type": "int32_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 104857600, + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum value you can set for the xref:./topic-properties.adoc#maxmessagebytes[`max.message.bytes`] topic property. When set to `null`, no limit is enforced.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 2147483647, + "minimum": -2147483648, + "name": "kafka_max_message_size_upper_limit_bytes", + "needs_restart": false, + "nullable": true, + "related_topics": [ + "xref:reference:properties/topic-properties.adoc#maxmessagebytes[`max.message.bytes`]" + ], + "type": "integer", + "visibility": "tunable" + }, + "kafka_memory_share_for_fetch": { + "c_type": "double", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 0.5, + "defined_in": "src/v/config/configuration.cc", + "description": "The share of Kafka subsystem memory that can be used for fetch read buffers, as a fraction of the Kafka subsystem memory amount.", + "is_deprecated": false, + "is_enterprise": false, + "name": "kafka_memory_share_for_fetch", + "needs_restart": true, + "nullable": false, + "type": "number", + "visibility": "user" + }, + "kafka_mtls_principal_mapping_rules": { + 
"c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Principal mapping rules for mTLS authentication on the Kafka API. If `null`, the property is disabled.", + "is_deprecated": false, + "is_enterprise": false, + "items": { + "type": "string" + }, + "name": "kafka_mtls_principal_mapping_rules", + "needs_restart": false, + "nullable": true, + "type": "array", + "visibility": "user" + }, + "kafka_nodelete_topics": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": [ + "_redpanda.audit_log", + "__consumer_offsets", + "_schemas" + ], + "defined_in": "src/v/config/configuration.cc", + "description": "A list of topics that are protected from deletion and configuration changes by Kafka clients. Set by default to a list of Redpanda internal topics.", + "is_deprecated": false, + "is_enterprise": false, + "items": { + "type": "string" + }, + "name": "kafka_nodelete_topics", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:develop:consume-data/consumer-offsets.adoc[Consumer Offsets]", + "xref:manage:schema-registry.adoc[Schema Registry]" + ], + "type": "array", + "visibility": "user" + }, + "kafka_noproduce_topics": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": [], + "defined_in": "src/v/config/configuration.cc", + "description": "A list of topics that are protected from being produced to by Kafka clients. 
Set by default to a list of Redpanda internal topics.", + "is_deprecated": false, + "is_enterprise": false, + "items": { + "type": "string" + }, + "name": "kafka_noproduce_topics", + "needs_restart": false, + "nullable": false, + "type": "array", + "visibility": "user" + }, + "kafka_produce_batch_validation": { + "c_type": "model::kafka_batch_validation_mode", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "relaxed", + "defined_in": "src/v/config/configuration.cc", + "description": "Controls the level of validation performed on batches produced to Redpanda. When set to `legacy`, there is minimal validation performed on the produce path. When set to `relaxed`, full validation is performed on uncompressed batches and on compressed batches with the `max_timestamp` value left unset. When set to `strict`, full validation of uncompressed and compressed batches is performed. This should be the default in environments where producing clients are not trusted.", + "enum": [ + "legacy", + "relaxed", + "strict" + ], + "is_deprecated": false, + "is_enterprise": false, + "name": "kafka_produce_batch_validation", + "needs_restart": false, + "nullable": false, + "type": "string", + "visibility": "tunable" + }, + "kafka_qdc_depth_alpha": { + "c_type": "double", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 0.8, + "defined_in": "src/v/config/configuration.cc", + "description": "Smoothing factor for Kafka queue depth control depth tracking.", + "is_deprecated": false, + "is_enterprise": false, + "name": "kafka_qdc_depth_alpha", + "needs_restart": true, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "kafka_qdc_depth_update_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, 
+ "cloud_supported": false, + "config_scope": "cluster", + "default": 7000, + "default_human_readable": "7 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Update frequency for Kafka queue depth control.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "kafka_qdc_depth_update_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "kafka_qdc_enable": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Enable Kafka queue depth control.", + "is_deprecated": false, + "is_enterprise": false, + "name": "kafka_qdc_enable", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "kafka_qdc_idle_depth": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 10, + "defined_in": "src/v/config/configuration.cc", + "description": "Queue depth when idleness is detected in Kafka queue depth control.", + "is_deprecated": false, + "is_enterprise": false, + "name": "kafka_qdc_idle_depth", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "kafka_qdc_latency_alpha": { + "c_type": "double", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 0.002, + "defined_in": "src/v/config/configuration.cc", + "description": "Smoothing parameter for Kafka queue depth control latency tracking.", + "is_deprecated": false, + "is_enterprise": false, + "name": "kafka_qdc_latency_alpha", + "needs_restart": true, + "nullable": false, + "type": "number", + 
"visibility": "tunable" + }, + "kafka_qdc_max_depth": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 100, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum queue depth used in Kafka queue depth control.", + "is_deprecated": false, + "is_enterprise": false, + "name": "kafka_qdc_max_depth", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "kafka_qdc_max_latency_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 80, + "default_human_readable": "80 milliseconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum latency threshold for Kafka queue depth control depth tracking.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "kafka_qdc_max_latency_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "kafka_qdc_min_depth": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1, + "defined_in": "src/v/config/configuration.cc", + "description": "Minimum queue depth used in Kafka queue depth control.", + "is_deprecated": false, + "is_enterprise": false, + "name": "kafka_qdc_min_depth", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "kafka_qdc_window_count": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 12, + "defined_in": "src/v/config/configuration.cc", + "description": "Number of 
windows used in Kafka queue depth control latency tracking.", + "is_deprecated": false, + "is_enterprise": false, + "name": "kafka_qdc_window_count", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "kafka_qdc_window_size_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1500, + "default_human_readable": "1500 milliseconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Window size for Kafka queue depth control latency tracking.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "kafka_qdc_window_size_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "kafka_request_max_bytes": { + "c_type": "uint32_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 104857600, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum size of a single request processed using the Kafka API.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 4294967295, + "minimum": 0, + "name": "kafka_request_max_bytes", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "kafka_rpc_server_stream_recv_buf": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum size of the user-space receive buffer. 
If `null`, this limit is not applied.", + "example": "`65536`", + "is_deprecated": false, + "is_enterprise": false, + "name": "kafka_rpc_server_stream_recv_buf", + "needs_restart": true, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "kafka_rpc_server_tcp_recv_buf": { + "c_type": "int", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Size of the Kafka server TCP receive buffer. If `null`, the property is disabled.", + "example": "`65536`", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 2147483647, + "minimum": -2147483648, + "name": "kafka_rpc_server_tcp_recv_buf", + "needs_restart": true, + "nullable": true, + "type": "integer" + }, + "kafka_rpc_server_tcp_send_buf": { + "c_type": "int", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Size of the Kafka server TCP transmit buffer. If `null`, the property is disabled.", + "example": "`65536`", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 2147483647, + "minimum": -2147483648, + "name": "kafka_rpc_server_tcp_send_buf", + "needs_restart": true, + "nullable": true, + "type": "integer" + }, + "kafka_sasl_max_reauth_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum time between Kafka client reauthentications. 
If a client has not reauthenticated a connection within this time frame, that connection is torn down.\n\nIMPORTANT: If this property is not set (or set to `null`), session expiry is disabled, and a connection could live long after the client's credentials are expired or revoked.", + "example": "`1000`", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "kafka_sasl_max_reauth_ms", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "user" + }, + "kafka_schema_id_validation_cache_capacity": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 128, + "defined_in": "src/v/config/configuration.cc", + "description": "Per-shard capacity of the cache for validating schema IDs.", + "is_deprecated": false, + "is_enterprise": false, + "name": "kafka_schema_id_validation_cache_capacity", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "kafka_tcp_keepalive_idle_timeout_seconds": { + "c_type": "std::chrono::seconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 120, + "default_human_readable": "2 minutes", + "defined_in": "src/v/config/configuration.cc", + "description": "TCP keepalive idle timeout in seconds for Kafka connections. This describes the timeout between TCP keepalive probes that the remote site successfully acknowledged. Refers to the TCP_KEEPIDLE socket option. 
When changed, applies to new connections only.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17179869183, + "minimum": -17179869184, + "name": "kafka_tcp_keepalive_idle_timeout_seconds", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "kafka_tcp_keepalive_probe_interval_seconds": { + "c_type": "std::chrono::seconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 60, + "default_human_readable": "1 minute", + "defined_in": "src/v/config/configuration.cc", + "description": "TCP keepalive probe interval in seconds for Kafka connections. This describes the timeout between unacknowledged TCP keepalives. Refers to the TCP_KEEPINTVL socket option. When changed, applies to new connections only.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17179869183, + "minimum": -17179869184, + "name": "kafka_tcp_keepalive_probe_interval_seconds", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "kafka_tcp_keepalive_probes": { + "c_type": "uint32_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 3, + "defined_in": "src/v/config/configuration.cc", + "description": "TCP keepalive unacknowledged probes until the connection is considered dead for Kafka connections. Refers to the TCP_KEEPCNT socket option. 
When changed, applies to new connections only.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 4294967295, + "minimum": 0, + "name": "kafka_tcp_keepalive_probes", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "kafka_throughput_control": { + "c_type": "throughput_control_group", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": [], + "defined_in": "src/v/config/configuration.cc", + "description": "List of throughput control groups that define exclusions from broker-wide throughput limits. Clients excluded from broker-wide throughput limits are still potentially subject to client-specific throughput limits.\n\nEach throughput control group consists of:\n\n* `name` (optional) - any unique group name\n* `client_id` - regex to match client_id\n\nExample values:\n\n* `[{'name': 'first_group','client_id': 'client1'}, {'client_id': 'consumer-\\d+'}]`\n* `[{'name': 'catch all'}]`\n* `[{'name': 'missing_id', 'client_id': '+empty'}]`\n\nA connection is assigned the first matching group and is then excluded from throughput control. A `name` is not required, but can help you categorize the exclusions. Specifying `+empty` for the `client_id` will match on clients that opt not to send a `client_id`. You can also optionally omit the `client_id` and specify only a `name`, as shown. 
In this situation, all clients will match the rule and Redpanda will exclude them all from broker-wide throughput control.", +    "example": "`[{'name': 'first_group','client_id': 'client1'}, {'client_id': 'consumer-\\d+'}, {'name': 'catch all'}]`", +    "is_deprecated": false, +    "is_enterprise": false, +    "items": { +      "type": "throughput_control_group" +    }, +    "name": "kafka_throughput_control", +    "needs_restart": false, +    "nullable": false, +    "related_topics": [ +      "xref:manage:cluster-maintenance/manage-throughput.adoc[Manage throughput]" +    ], +    "type": "array", +    "visibility": "user" +  }, +  "kafka_throughput_controlled_api_keys": { +    "c_type": "ss::sstring", +    "cloud_byoc_only": false, +    "cloud_editable": false, +    "cloud_readonly": false, +    "cloud_supported": false, +    "config_scope": "cluster", +    "default": [ +      "produce", +      "fetch" +    ], +    "defined_in": "src/v/config/configuration.cc", +    "description": "List of Kafka API keys that are subject to cluster-wide and node-wide throughput limit control.", +    "is_deprecated": false, +    "is_enterprise": false, +    "items": { +      "type": "string" +    }, +    "name": "kafka_throughput_controlled_api_keys", +    "needs_restart": false, +    "nullable": false, +    "type": "array", +    "visibility": "user" +  }, +  "kafka_throughput_limit_node_in_bps": { +    "c_type": "int64_t", +    "cloud_byoc_only": false, +    "cloud_editable": false, +    "cloud_readonly": false, +    "cloud_supported": false, +    "config_scope": "cluster", +    "default": null, +    "defined_in": "src/v/config/configuration.cc", +    "description": "The maximum rate of all ingress Kafka API traffic for a node. Includes all Kafka API traffic (requests, responses, headers, fetched data, produced data, etc.). 
If `null`, the property is disabled, and traffic is not limited.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 9223372036854775807, + "minimum": -9223372036854775808, + "name": "kafka_throughput_limit_node_in_bps", + "needs_restart": false, + "nullable": true, + "related_topics": [ + "xref:manage:cluster-maintenance/manage-throughput.adoc#node-wide-throughput-limits[Node-wide throughput limits]" + ], + "type": "integer", + "visibility": "user" + }, + "kafka_throughput_limit_node_out_bps": { + "c_type": "int64_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum rate of all egress Kafka traffic for a node. Includes all Kafka API traffic (requests, responses, headers, fetched data, produced data, etc.). If `null`, the property is disabled, and traffic is not limited.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 9223372036854775807, + "minimum": -9223372036854775808, + "name": "kafka_throughput_limit_node_out_bps", + "needs_restart": false, + "nullable": true, + "related_topics": [ + "xref:manage:cluster-maintenance/manage-throughput.adoc#node-wide-throughput-limits[Node-wide throughput limits]" + ], + "type": "integer", + "visibility": "user" + }, + "kafka_throughput_replenish_threshold": { + "c_type": "int64_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Threshold for refilling the token bucket as part of enforcing throughput limits.\n\nThis threshold is evaluated with each request for data. When the number of tokens to replenish exceeds this threshold, then tokens are added to the token bucket. 
This ensures that the atomic is not being updated for the token count with each request. The range for this threshold is automatically clamped to the corresponding throughput limit for ingress and egress.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 9223372036854775807, + "minimum": -9223372036854775808, + "name": "kafka_throughput_replenish_threshold", + "needs_restart": false, + "nullable": true, + "related_topics": [ + "xref:reference:cluster-properties.adoc#kafka_throughput_limit_node_in_bps[`kafka_throughput_limit_node_in_bps`]", + "xref:reference:cluster-properties.adoc#kafka_throughput_limit_node_out_bps[`kafka_throughput_limit_node_out_bps`]", + "xref:manage:cluster-maintenance/manage-throughput.adoc[Manage Throughput]" + ], + "type": "integer", + "visibility": "tunable" + }, + "kafka_topics_max": { + "c_type": "uint32_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of Kafka user topics that can be created. 
If `null`, then no limit is enforced.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 4294967295, + "minimum": 0, + "name": "kafka_topics_max", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "user" + }, + "kvstore_flush_interval": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "10 milliseconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Key-value store flush interval (in milliseconds).", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "kvstore_flush_interval", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "kvstore_max_segment_size": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 16777216, + "defined_in": "src/v/config/configuration.cc", + "description": "Key-value maximum segment size (in bytes).", + "is_deprecated": false, + "is_enterprise": false, + "name": "kvstore_max_segment_size", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "leader_balancer_idle_timeout": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 120000, + "default_human_readable": "2 minutes", + "defined_in": "src/v/config/configuration.cc", + "description": "Leadership rebalancing idle timeout.\n\n*Unit*: milliseconds", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "leader_balancer_idle_timeout", + "needs_restart": false, + "nullable": false, + "type": 
"integer", + "visibility": "tunable" + }, + "leader_balancer_mode": { + "c_type": "model::leader_balancer_mode", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "calibrated", + "defined_in": "src/v/config/configuration.cc", + "description": "Mode of the leader balancer optimization strategy. `calibrated` uses a heuristic that balances leaders based on replica counts per shard. `random` randomly moves leaders to reduce load on heavily-loaded shards. Legacy values `greedy_balanced_shards` and `random_hill_climbing` are treated as `calibrated`.", + "enum": [ + "calibrated", + "random" + ], + "example": "`model::leader_balancer_mode_to_string( model::leader_balancer_mode::calibrated)`", + "is_deprecated": false, + "is_enterprise": false, + "name": "leader_balancer_mode", + "needs_restart": false, + "nullable": false, + "type": "string", + "visibility": "user" + }, + "leader_balancer_mute_timeout": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 300000, + "default_human_readable": "5 minutes", + "defined_in": "src/v/config/configuration.cc", + "description": "The length of time that a glossterm:Raft[] group is muted after a leadership rebalance operation. Any group that has been moved, regardless of whether the move succeeded or failed, undergoes a cooling-off period. This prevents Raft groups from repeatedly experiencing leadership rebalance operations in a short time frame, which can lead to instability in the cluster.\n\nThe leader balancer maintains a list of muted groups and reevaluates muted status at the start of each balancing iteration. 
Muted groups still contribute to overall cluster balance calculations although they can't themselves be moved until the mute period is over.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "leader_balancer_mute_timeout", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "leader_balancer_node_mute_timeout": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 20000, + "default_human_readable": "20 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "The duration after which a broker that hasn't sent a heartbeat is considered muted. This timeout sets a threshold for identifying brokers that shouldn't be targeted for leadership transfers when the cluster rebalances, for example, because of unreliable network connectivity.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "leader_balancer_node_mute_timeout", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "leader_balancer_transfer_limit_per_shard": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 512, + "defined_in": "src/v/config/configuration.cc", + "description": "Per shard limit for in-progress leadership transfers.", + "is_deprecated": false, + "is_enterprise": false, + "name": "leader_balancer_transfer_limit_per_shard", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "legacy_group_offset_retention_enabled": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + 
"cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Group offset retention is enabled by default starting in Redpanda version 23.1. To enable offset retention after upgrading from an older version, set this option to true.", + "is_deprecated": false, + "is_enterprise": false, + "name": "legacy_group_offset_retention_enabled", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "legacy_permit_unsafe_log_operation": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "Flag to enable a Redpanda cluster operator to use unsafe control characters within strings, such as consumer group names or user names. This flag applies only for Redpanda clusters that were originally on version 23.1 or earlier and have been upgraded to version 23.2 or later. Starting in version 23.2, newly-created Redpanda clusters ignore this property.", + "is_deprecated": false, + "is_enterprise": false, + "name": "legacy_permit_unsafe_log_operation", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "legacy_unsafe_log_warning_interval_sec": { + "c_type": "std::chrono::seconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 300, + "default_human_readable": "5 minutes", + "defined_in": "src/v/config/configuration.cc", + "description": "Period at which to log a warning about using unsafe strings containing control characters. 
If unsafe strings are permitted by `legacy_permit_unsafe_log_operation`, a warning will be logged at an interval specified by this property.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17179869183, + "minimum": -17179869184, + "name": "legacy_unsafe_log_warning_interval_sec", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "log_cleanup_policy": { + "c_type": "model::cleanup_policy_bitflags", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "delete", + "defined_in": "src/v/config/configuration.cc", + "description": "Default cleanup policy for topic logs.\n\nThe topic property xref:./topic-properties.adoc#cleanuppolicy[`cleanup.policy`] overrides the value of `log_cleanup_policy` at the topic level.", + "enum": [ + "none", + "delete", + "compact" + ], + "example": "`compact,delete`", + "is_deprecated": false, + "is_enterprise": false, + "name": "log_cleanup_policy", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:reference:properties/topic-properties.adoc#cleanuppolicy[`cleanup.policy`]" + ], + "type": "string", + "visibility": "user" + }, + "log_compaction_disable_tx_batch_removal": { + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "override", + "description": "Prevents log compaction from removing transaction metadata. 
Only set this to `true` if you experience stability issues related to transaction cleanup during compaction.", + "is_deprecated": false, + "is_topic_property": false, + "name": "log_compaction_disable_tx_batch_removal", + "type": "string", + "visibility": "user" + }, + "log_compaction_interval_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 10000, + "default_human_readable": "10 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "How often to trigger background compaction.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "log_compaction_interval_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "log_compaction_max_priority_wait_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 3600000, + "default_human_readable": "1 hour", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum time a priority partition (for example, __consumer_offsets) can wait for compaction before preempting regular compaction.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "log_compaction_max_priority_wait_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "version": "v26.1.1-rc2", + "visibility": "tunable" + }, + "log_compaction_merge_max_ranges": { + "c_type": "uint32_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum range of segments that can 
be processed in a single round of adjacent segment compaction. If `null` (the default value), no maximum is imposed on the number of ranges that can be processed at once. A value below 1 effectively disables adjacent merge compaction.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 4294967295, + "minimum": 0, + "name": "log_compaction_merge_max_ranges", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "log_compaction_merge_max_segments_per_range": { + "c_type": "uint32_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum number of segments that can be combined into a single segment during an adjacent merge operation. If `null` (the default value), no maximum is imposed on the number of segments that can be combined at once. A value below 2 effectively disables adjacent merge compaction.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 4294967295, + "minimum": 0, + "name": "log_compaction_merge_max_segments_per_range", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "log_compaction_pause_use_sliding_window": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Pause use of sliding window compaction. Toggle to `true` _only_ when you want to force adjacent segment compaction. 
The memory reserved by `storage_compaction_key_map_memory` is not freed when this is set to `true`.", + "is_deprecated": false, + "is_enterprise": false, + "name": "log_compaction_pause_use_sliding_window", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "log_compaction_tx_batch_removal_enabled": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "Enables removal of transactional control batches during compaction. These batches are removed according to a topic's configured delete.retention.ms, and only if the topic's cleanup.policy allows compaction.", + "is_deprecated": false, + "is_enterprise": false, + "name": "log_compaction_tx_batch_removal_enabled", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "log_compaction_use_sliding_window": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "Use sliding window compaction.", + "is_deprecated": false, + "is_enterprise": false, + "name": "log_compaction_use_sliding_window", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "log_compression_type": { + "c_type": "model::compression", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "producer", + "defined_in": "src/v/config/configuration.cc", + "description": "IMPORTANT: This property is ignored regardless of the value specified. The behavior is always the same as the `producer` value. 
Redpanda brokers do not compress or recompress data based on this property. If producers send compressed data, Redpanda stores it as-is; if producers send uncompressed data, Redpanda stores it uncompressed. Other listed values are accepted for Apache Kafka compatibility but are ignored by the broker. This property may appear in Admin API and `rpk topic describe` outputs for compatibility.\n\nDefault for the Kafka-compatible compression.type property. Redpanda does not recompress data.\n\nThe topic property xref:./topic-properties.adoc#compressiontype[`compression.type`] overrides the value of `log_compression_type` at the topic level.", + "enum": [ + "none", + "gzip", + "snappy", + "lz4", + "zstd", + "count", + "producer" + ], + "example": "`snappy`", + "is_deprecated": false, + "is_enterprise": false, + "name": "log_compression_type", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:reference:properties/topic-properties.adoc#compressiontype[`compression.type`]" + ], + "type": "string", + "visibility": "user" + }, + "log_disable_housekeeping_for_tests": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Disables the housekeeping loop for local storage. 
This property is used to simplify testing, and should not be set in production.", +    "is_deprecated": false, +    "is_enterprise": false, +    "name": "log_disable_housekeeping_for_tests", +    "needs_restart": true, +    "nullable": false, +    "type": "boolean", +    "visibility": "tunable" +  }, +  "log_message_timestamp_after_max_ms": { +    "c_type": "std::chrono::milliseconds", +    "cloud_byoc_only": false, +    "cloud_editable": false, +    "cloud_readonly": false, +    "cloud_supported": false, +    "config_scope": "cluster", +    "default": 3600000, +    "default_human_readable": "1 hour", +    "defined_in": "src/v/config/configuration.cc", +    "description": "The maximum allowed time difference when a record's timestamp is in the future compared to the broker's clock. For topics using `CreateTime` timestamps, Redpanda rejects records with timestamps that are too far in the future. This property has no effect on topics using `LogAppendTime` timestamps. The topic property xref:./topic-properties.adoc#messagetimestampaftermaxms[`message.timestamp.after.max.ms`] overrides this cluster-level setting.", +    "is_deprecated": false, +    "is_enterprise": false, +    "maximum": 17592186044415, +    "minimum": -17592186044416, +    "name": "log_message_timestamp_after_max_ms", +    "needs_restart": false, +    "nullable": false, +    "related_topics": [ +      "xref:reference:properties/topic-properties.adoc#messagetimestampaftermaxms[`message.timestamp.after.max.ms`]" +    ], +    "type": "integer", +    "visibility": "user" +  }, +  "log_message_timestamp_before_max_ms": { +    "c_type": "std::chrono::milliseconds", +    "cloud_byoc_only": false, +    "cloud_editable": false, +    "cloud_readonly": false, +    "cloud_supported": false, +    "config_scope": "cluster", +    "default": 9223372036854, +    "defined_in": "src/v/config/configuration.cc", +    "description": "The maximum allowed time difference when a record's timestamp is in the past compared to the broker's clock. For topics using `CreateTime` timestamps, Redpanda rejects records with timestamps that are too far in the past. 
This property has no effect on topics using `LogAppendTime` timestamps. The topic property xref:./topic-properties.adoc#messagetimestampbeforemaxms[`message.timestamp.before.max.ms`] overrides this cluster-level setting.", +    "is_deprecated": false, +    "is_enterprise": false, +    "maximum": 17592186044415, +    "minimum": -17592186044416, +    "name": "log_message_timestamp_before_max_ms", +    "needs_restart": false, +    "nullable": false, +    "related_topics": [ +      "xref:reference:properties/topic-properties.adoc#messagetimestampbeforemaxms[`message.timestamp.before.max.ms`]" +    ], +    "type": "integer", +    "visibility": "user" +  }, +  "log_message_timestamp_type": { +    "c_type": "model::timestamp_type", +    "cloud_byoc_only": false, +    "cloud_editable": false, +    "cloud_readonly": false, +    "cloud_supported": false, +    "config_scope": "cluster", +    "default": "CreateTime", +    "defined_in": "src/v/config/configuration.cc", +    "description": "Default timestamp type for topic messages (CreateTime or LogAppendTime).\n\nThe topic property xref:./topic-properties.adoc#messagetimestamptype[`message.timestamp.type`] overrides the value of `log_message_timestamp_type` at the topic level.", +    "enum": [ +      "CreateTime", +      "LogAppendTime" +    ], +    "example": "`LogAppendTime`", +    "is_deprecated": false, +    "is_enterprise": false, +    "name": "log_message_timestamp_type", +    "needs_restart": false, +    "nullable": false, +    "related_topics": [ +      "xref:reference:properties/topic-properties.adoc#messagetimestamptype[`message.timestamp.type`]" +    ], +    "type": "string", +    "visibility": "user" +  }, +  "log_retention_ms": { +    "aliases": [ +      "delete_retention_ms" +    ], +    "c_type": "retention_duration_property", +    "cloud_byoc_only": false, +    "cloud_editable": false, +    "cloud_readonly": false, +    "cloud_supported": false, +    "config_scope": "cluster", +    "default": 604800000, +    "default_human_readable": "1 week", +    "defined_in": "src/v/config/configuration.cc", +    "description": "The amount of time to keep a log file before deleting it (in 
milliseconds). If set to `-1`, no time limit is applied. This is a cluster-wide default when a topic does not set or disable xref:./topic-properties.adoc#retentionms[`retention.ms`].", + "is_deprecated": false, + "is_enterprise": false, + "name": "log_retention_ms", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:reference:properties/topic-properties.adoc#retentionms[`retention.ms`]" + ], + "type": "retention_duration_property", + "visibility": "user" + }, + "log_segment_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": true, + "cloud_supported": true, + "config_scope": "cluster", + "default": "2 weeks", + "defined_in": "src/v/config/configuration.cc", + "description": "Default lifetime of log segments. If `null`, the property is disabled, and no default lifetime is set. Any value under 60 seconds (60000 ms) is rejected. This property can also be set in the Kafka API using the Kafka-compatible alias, `log.roll.ms`.", + "example": "`3600000`", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "log_segment_ms", + "needs_restart": false, + "nullable": true, + "related_topics": [ + "self-managed-only:xref:reference:properties/topic-properties.adoc#segmentms[`segment.ms`]" + ], + "type": "integer", + "visibility": "user" + }, + "log_segment_ms_max": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 31536000000, + "default_human_readable": "1 year", + "defined_in": "src/v/config/configuration.cc", + "description": "Upper bound on topic `segment.ms`: higher values will be clamped to this value.\n\n*Unit*: milliseconds", + "example": "`31536000000`", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + 
"name": "log_segment_ms_max", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "log_segment_ms_min": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 600000, + "default_human_readable": "10 minutes", + "defined_in": "src/v/config/configuration.cc", + "description": "Lower bound on topic `segment.ms`: lower values will be clamped to this value.\n\n*Unit*: milliseconds", + "example": "`600000`", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "log_segment_ms_min", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "log_segment_size": { + "c_type": "uint64_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 134217728, + "defined_in": "src/v/config/configuration.cc", + "description": "Default log segment size in bytes for topics which do not set `segment.bytes`.", + "example": "`2147483648`", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 18446744073709551615, + "minimum": 0, + "name": "log_segment_size", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "log_segment_size_jitter_percent": { + "c_type": "uint16_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 5, + "defined_in": "src/v/config/configuration.cc", + "description": "Random variation to the segment size limit used for each partition.", + "example": "`2`", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 65535, + "minimum": 0, + "name": "log_segment_size_jitter_percent", + "needs_restart": true, + 
"nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "log_segment_size_max": { + "c_type": "uint64_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Upper bound on topic `segment.bytes`: higher values will be clamped to this limit.", + "example": "`268435456`", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 18446744073709551615, + "minimum": 0, + "name": "log_segment_size_max", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "log_segment_size_min": { + "c_type": "uint64_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1048576, + "defined_in": "src/v/config/configuration.cc", + "description": "Lower bound on topic `segment.bytes`: lower values will be clamped to this limit.", + "example": "`16777216`", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 18446744073709551615, + "minimum": 0, + "name": "log_segment_size_min", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "lz4_decompress_reusable_buffers_disabled": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Disable reusable preallocated buffers for LZ4 decompression.", + "is_deprecated": false, + "is_enterprise": false, + "name": "lz4_decompress_reusable_buffers_disabled", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "max.compaction.lag.ms": { + "acceptable_values": "milliseconds (integer)", + "alternate_cluster_property": "", + 
"alternate_cluster_property_doc_file": "", + "category": "retention-compaction", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "cluster-properties.adoc", + "config_scope": "topic", + "corresponding_cluster_property": "max_compaction_lag_ms", + "default": 9223372036854, + "defined_in": "src/v/kafka/protocol/topic_properties.h", + "description": "The maximum amount of time (in ms) that a log segment can remain unaltered before it is eligible for compaction in a compact topic. Overrides the cluster property xref:cluster-properties.adoc#max_compaction_lag_ms[`max_compaction_lag_ms`] for the topic.", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "max.compaction.lag.ms", + "needs_restart": false, + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#max_compaction_lag_ms[`max_compaction_lag_ms`]", + "xref:manage:cluster-maintenance/compaction-settings.adoc#configuration-options[Configure maximum compaction lag]" + ], + "type": "integer" + }, + "max.message.bytes": { + "acceptable_values": "bytes (integer)", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "segment-message", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "cluster-properties.adoc", + "config_scope": "topic", + "corresponding_cluster_property": "kafka_batch_max_bytes", + "default": 1048576, + "defined_in": "src/v/kafka/protocol/topic_properties.h", + "description": "The maximum size of a message or batch of a topic. 
If a compression type is enabled, `max.message.bytes` sets the maximum size of the compressed message or batch.\n\nIf `max.message.bytes` is set to a positive value, it overrides the cluster property xref:./cluster-properties.adoc#kafka_batch_max_bytes[`kafka_batch_max_bytes`] for the topic.\n\nSet an upper limit for `max.message.bytes` using the cluster property config_ref:kafka_max_message_size_upper_limit_bytes,true,properties/cluster-properties[`kafka_max_message_size_upper_limit_bytes`].", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "max.message.bytes", + "needs_restart": false, + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#kafka_batch_max_bytes[`kafka_batch_max_bytes`]", + "xref:develop:produce-data/configure-producers.adoc#message-batching[Message batching]", + "xref:reference:properties/cluster-properties.adoc#kafka_max_message_size_upper_limit_bytes[`kafka_max_message_size_upper_limit_bytes`]" + ], + "type": "integer" + }, + "max_compacted_log_segment_size": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 536870912, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum compacted segment size after consolidation.", + "example": "`10737418240`", + "is_deprecated": false, + "is_enterprise": false, + "name": "max_compacted_log_segment_size", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "max_compaction_lag_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 9223372036854, + "defined_in": "src/v/config/configuration.cc", + "description": "For a compacted topic, the maximum time a message remains ineligible for compaction. 
The topic property `max.compaction.lag.ms` overrides this property.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "max_compaction_lag_ms", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:reference:properties/topic-properties.adoc#max.compaction.lag.ms[`max.compaction.lag.ms`]" + ], + "type": "integer", + "visibility": "user" + }, + "max_concurrent_producer_ids": { + "c_type": "uint64_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "Maximum value", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of active producer sessions per shard. Each shard tracks producer IDs using an LRU (Least Recently Used) eviction policy. When the configured limit is exceeded, the least recently used producer IDs are evicted from the cache.\n\nIf you upgrade from 23.2.x to 23.3.x and encounter `OUT_OF_SEQUENCE` errors, consider increasing this value. In 23.3.x, the configuration changed from a per-partition basis to a per-shard basis.\n\nIMPORTANT: The default value is unlimited, which can lead to unbounded memory growth and out-of-memory (OOM) crashes in production environments with heavy producer usage, especially when using transactions or idempotent producers. 
Set a reasonable limit in production deployments.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 18446744073709551615, + "minimum": 0, + "name": "max_concurrent_producer_ids", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:develop:transactions.adoc#tune-producer-id-limits[Tune producer ID limits]", + "xref:reference:properties/cluster-properties.adoc#transactional_id_expiration_ms[transactional_id_expiration_ms]", + "xref:manage:monitoring.adoc[Monitor Redpanda]" + ], + "type": "integer", + "visibility": "tunable" + }, + "max_in_flight_pandaproxy_requests_per_shard": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 500, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of in-flight HTTP requests to HTTP Proxy permitted per shard. Any additional requests above this limit will be rejected with a 429 error.", + "is_deprecated": false, + "is_enterprise": false, + "name": "max_in_flight_pandaproxy_requests_per_shard", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "max_in_flight_schema_registry_requests_per_shard": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 500, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of in-flight HTTP requests to Schema Registry permitted per shard. 
Any additional requests above this limit will be rejected with a 429 error.", + "is_deprecated": false, + "is_enterprise": false, + "name": "max_in_flight_schema_registry_requests_per_shard", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "max_kafka_throttle_delay_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 30000, + "default_human_readable": "30 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Fail-safe maximum throttle delay on Kafka requests.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "max_kafka_throttle_delay_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "max_transactions_per_coordinator": { + "c_type": "uint64_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "Maximum value", + "defined_in": "src/v/config/configuration.cc", + "description": "Specifies the maximum number of active transaction sessions per coordinator. When the threshold is passed Redpanda terminates old sessions. 
When an idle producer corresponding to the terminated session wakes up and produces, it leads to its batches being rejected with invalid producer epoch or invalid_producer_id_mapping error (depends on the transaction execution phase).\n\nFor details, see xref:develop:transactions#transaction-usage-tips[Transaction usage tips].", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 18446744073709551615, + "minimum": 0, + "name": "max_transactions_per_coordinator", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:develop:transactions#transaction-usage-tips[Transaction usage tips]" + ], + "type": "integer", + "visibility": "tunable" + }, + "members_backend_retry_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 5000, + "default_human_readable": "5 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Time between members backend reconciliation loop retries.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "members_backend_retry_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "memory_abort_on_alloc_failure": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "If `true`, the Redpanda process will terminate immediately when an allocation cannot be satisfied due to memory exhaustion. 
If false, an exception is thrown.", + "is_deprecated": false, + "is_enterprise": false, + "name": "memory_abort_on_alloc_failure", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "memory_allocation_warning_threshold": { + "c_type": "size_t", + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": 131072, + "defined_in": "src/v/config/node_config.cc", + "description": "Threshold for log messages that contain a larger memory allocation than specified.", + "is_deprecated": false, + "is_enterprise": false, + "name": "memory_allocation_warning_threshold", + "needs_restart": true, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "message.timestamp.after.max.ms": { + "acceptable_values": "milliseconds (integer)", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "other", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "cluster-properties.adoc", + "config_scope": "topic", + "corresponding_cluster_property": "log_message_timestamp_after_max_ms", + "default": 3600000, + "default_human_readable": "1 hour", + "defined_in": "src/v/kafka/server/handlers/topics/types.h", + "description": "The maximum allowable timestamp difference between the broker's timestamp and a record's timestamp. For topics with `message.timestamp.type` set to `CreateTime`, Redpanda rejects records that have timestamps later than the broker timestamp and exceed this difference. 
Redpanda ignores this property for topics with `message.timestamp.type` set to `AppendTime`.", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "message.timestamp.after.max.ms", + "needs_restart": false, + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#log_message_timestamp_after_max_ms[`log_message_timestamp_after_max_ms`]" + ], + "type": "integer" + }, + "message.timestamp.before.max.ms": { + "acceptable_values": "milliseconds (integer)", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "other", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "cluster-properties.adoc", + "config_scope": "topic", + "corresponding_cluster_property": "log_message_timestamp_before_max_ms", + "default": 9223372036854, + "defined_in": "src/v/kafka/server/handlers/topics/types.h", + "description": "The maximum allowable timestamp difference between the broker's timestamp and a record's timestamp. For topics with `message.timestamp.type` set to `CreateTime`, Redpanda rejects records that have timestamps earlier than the broker timestamp and exceed this difference. 
Redpanda ignores this property for topics with `message.timestamp.type` set to `AppendTime`.", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "message.timestamp.before.max.ms", + "needs_restart": false, + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#log_message_timestamp_before_max_ms[`log_message_timestamp_before_max_ms`]" + ], + "type": "integer" + }, + "message.timestamp.type": { + "acceptable_values": "[`CreateTime`, `LogAppendTime`]", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "segment-message", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "cluster-properties.adoc", + "config_scope": "topic", + "corresponding_cluster_property": "log_message_timestamp_type", + "default": "CreateTime", + "defined_in": "src/v/kafka/protocol/topic_properties.h", + "description": "The source of a message's timestamp: either the message's creation time or its log append time.\n\nWhen `message.timestamp.type` is set, it overrides the cluster property xref:./cluster-properties.adoc#log_message_timestamp_type[`log_message_timestamp_type`] for the topic.", + "enum": [ + "CreateTime", + "LogAppendTime" + ], + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "message.timestamp.type", + "needs_restart": false, + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#log_message_timestamp_type[`log_message_timestamp_type`]" + ], + "type": "string" + }, + "metadata_dissemination_interval_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 3000, + "default_human_readable": "3 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Interval for metadata 
dissemination batching.", + "example": "`5000`", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "metadata_dissemination_interval_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "metadata_dissemination_retries": { + "c_type": "int16_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 30, + "defined_in": "src/v/config/configuration.cc", + "description": "Number of attempts to look up a topic's metadata-like shard before a request fails. This configuration controls the number of retries that request handlers perform when internal topic metadata (for topics like tx, consumer offsets, etc) is missing. These topics are usually created on demand when users try to use the cluster for the first time and it may take some time for the creation to happen and the metadata to propagate to all the brokers (particularly the broker handling the request). In the meantime Redpanda waits and retries. 
This configuration controls the number of retries.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 32767, + "minimum": -32768, + "name": "metadata_dissemination_retries", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "metadata_dissemination_retry_delay_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 500, + "default_human_readable": "500 milliseconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Delay before retrying a topic lookup in a shard or other meta tables.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "metadata_dissemination_retry_delay_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "metrics_reporter_report_interval": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 86400000, + "default_human_readable": "1 day", + "defined_in": "src/v/config/configuration.cc", + "description": "Cluster metrics reporter report interval.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "metrics_reporter_report_interval", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "metrics_reporter_tick_interval": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 60000, + "default_human_readable": "1 minute", + "defined_in": "src/v/config/configuration.cc", + "description": "Cluster metrics reporter tick 
interval.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "metrics_reporter_tick_interval", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "metrics_reporter_url": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "https://m.rp.vectorized.io/v2", + "defined_in": "src/v/config/configuration.cc", + "description": "URL of the cluster metrics reporter.", + "is_deprecated": false, + "is_enterprise": false, + "name": "metrics_reporter_url", + "needs_restart": false, + "nullable": false, + "type": "string", + "visibility": "tunable" + }, + "min.cleanable.dirty.ratio": { + "acceptable_values": "[`0`, `1.0`]", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "retention-compaction", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "cluster-properties.adoc", + "config_scope": "topic", + "corresponding_cluster_property": "min_cleanable_dirty_ratio", + "default": 0.2, + "defined_in": "src/v/kafka/server/handlers/topics/types.h", + "description": "The minimum ratio between dirty and total bytes in closed segments before a partition's log is eligible for compaction in a compact topic.\n\nThis property supports three states:\n\n* Positive value: Sets the minimum dirty ratio (0.0 to 1.0) required before compaction.\n* 0: Compaction is always eligible regardless of dirty ratio.\n* Negative value: This property is not considered when deciding if a log is eligible for compaction.", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "min.cleanable.dirty.ratio", + "needs_restart": false, + "related_topics": [ + 
"xref:reference:properties/cluster-properties.adoc#min_cleanable_dirty_ratio[`min_cleanable_dirty_ratio`]" + ], + "type": "number" + }, + "min.compaction.lag.ms": { + "acceptable_values": "milliseconds (integer)", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "retention-compaction", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "cluster-properties.adoc", + "config_scope": "topic", + "corresponding_cluster_property": "min_compaction_lag_ms", + "default": 0, + "default_human_readable": "0 milliseconds", + "defined_in": "src/v/kafka/protocol/topic_properties.h", + "description": "The minimum amount of time (in ms) that a log segment must remain unaltered before it can be compacted in a compact topic. Overrides the cluster property xref:cluster-properties.adoc#min_compaction_lag_ms[`min_compaction_lag_ms`] for the topic.", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "min.compaction.lag.ms", + "needs_restart": false, + "related_topics": [ + "xref:cluster-properties.adoc#min_compaction_lag_ms[`min_compaction_lag_ms`]", + "xref:reference:properties/cluster-properties.adoc#min_compaction_lag_ms[`min_compaction_lag_ms`]", + "xref:manage:cluster-maintenance/compaction-settings.adoc#configure-min-compaction-lag[Configure minimum compaction lag]" + ], + "type": "integer" + }, + "min_cleanable_dirty_ratio": { + "c_type": "double", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 0.2, + "defined_in": "src/v/config/configuration.cc", + "description": "The minimum ratio between the number of bytes in dirty segments and the total number of bytes in closed segments that must be reached before a partition's log is eligible for compaction in a compact topic. 
The topic property `min.cleanable.dirty.ratio` overrides this value at the topic level.", + "example": "`0.2`", + "is_deprecated": false, + "is_enterprise": false, + "name": "min_cleanable_dirty_ratio", + "needs_restart": false, + "nullable": true, + "type": "number", + "visibility": "user" + }, + "min_compaction_lag_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 0, + "default_human_readable": "0 milliseconds", + "defined_in": "src/v/config/configuration.cc", + "description": "The minimum amount of time (in ms) that a log segment must remain unaltered before it can be compacted in a compact topic.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "min_compaction_lag_ms", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:reference:properties/topic-properties.adoc#min.compaction.lag.ms[`min.compaction.lag.ms`]" + ], + "type": "integer", + "visibility": "user" + }, + "minimum_topic_replication": { + "c_type": "int16_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1, + "defined_in": "src/v/config/configuration.cc", + "description": "Minimum allowable replication factor for topics in this cluster. The set value must be positive, odd, and equal to or less than the number of available brokers. Changing this parameter only restricts newly-created topics. Redpanda returns an `INVALID_REPLICATION_FACTOR` error on any attempt to create a topic with a replication factor less than this property. If you change the `minimum_topic_replication` setting, the replication factor of existing topics remains unchanged. 
However, Redpanda will log a warning on start-up with a list of any topics that have fewer replicas than this minimum. For example, you might see a message such as `Topic X has a replication factor less than specified minimum: 1 < 3`.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 32767, + "minimum": -32768, + "name": "minimum_topic_replication", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "mode_mutability": { + "c_type": "bool", + "category": "schema-registry", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": true, + "defined_in": "src/v/pandaproxy/schema_registry/configuration.cc", + "description": "Enable modifications to the read-only `mode` of the Schema Registry. When set to `true`, the entire Schema Registry or its subjects can be switched to `READONLY` or `READWRITE`. This property is useful for preventing unwanted changes to the entire Schema Registry or specific subjects.", + "is_deprecated": false, + "is_enterprise": false, + "name": "mode_mutability", + "needs_restart": true, + "nullable": false, + "type": "boolean" + }, + "nested_group_behavior": { + "c_type": "security::oidc::nested_group_behavior", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "none", + "defined_in": "src/v/config/configuration.cc", + "description": "Behavior for handling nested groups when extracting groups from authentication tokens. Two options are available - none and suffix. With none, the group is left alone (e.g. '/group/child/grandchild'). Suffix will extract the final component from the nested group (e.g. 
'/group' -> 'group' and '/group/child/grandchild' -> 'grandchild').", + "is_deprecated": false, + "is_enterprise": false, + "name": "nested_group_behavior", + "needs_restart": false, + "nullable": false, + "type": "string", + "version": "v26.1.1-rc2", + "visibility": "user" + }, + "node_id": { + "c_type": "model::node_id", + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "src/v/config/node_config.cc", + "description": "A number that uniquely identifies the broker within the cluster. If `null` (the default value), Redpanda automatically assigns an ID. If set, it must be non-negative value.\n\n.Do not set `node_id` manually.\n[WARNING]\n====\nRedpanda assigns unique IDs automatically to prevent issues such as:\n\n- Brokers with empty disks rejoining the cluster.\n- Conflicts during recovery or scaling.\n\nManually setting or reusing `node_id` values, even for decommissioned brokers, can cause cluster inconsistencies and operational failures.\n====\n\nBroker IDs are immutable. After a broker joins the cluster, its `node_id` *cannot* be changed.", + "is_deprecated": false, + "is_enterprise": false, + "name": "node_id", + "needs_restart": true, + "nullable": true, + "type": "integer", + "visibility": "user" + }, + "node_id_overrides": { + "c_type": "config::node_id_override", + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": [], + "defined_in": "src/v/config/node_config.cc", + "description": "List of node ID and UUID overrides applied at broker startup. Each entry includes the current UUID, the desired new ID and UUID, and an ignore flag. An entry applies only if `current_uuid` matches the broker's actual UUID.\n\nRemove this property after the cluster restarts successfully and operates normally. 
This prevents reapplication and maintains consistent configuration across brokers.", + "example": "[,yaml]\n----\nredpanda:\n node_id_overrides:\n - current_uuid: \"<current-uuid>\"\n new_id: <new-node-id>\n new_uuid: \"<new-uuid>\"\n ignore_existing_node_id: <ignore-existing-node-id>\n - current_uuid: \"<current-uuid-2>\"\n new_id: <new-node-id-2>\n new_uuid: \"<new-uuid-2>\"\n ignore_existing_node_id: <ignore-existing-node-id-2>\n----\n\nReplace the following placeholders with your values:\n\n* `<current-uuid>`: The current UUID of the broker to override\n* `<new-node-id>`: The new broker ID to assign\n* `<new-uuid>`: The new UUID to assign to the broker\n* `<ignore-existing-node-id>`: Set to `true` to force override on brokers that already have a node ID, or `false` to apply override only to brokers without existing node IDs\n* `<current-uuid-2>`: Additional broker UUID for multiple overrides\n* `<new-node-id-2>`: Additional new broker ID\n* `<new-uuid-2>`: Additional new UUID\n* `<ignore-existing-node-id-2>`: Additional ignore existing node ID flag", + "is_deprecated": false, + "is_enterprise": false, + "items": { + "type": "object" + }, + "name": "node_id_overrides", + "needs_restart": true, + "nullable": false, + "type": "array", + "visibility": "user" + }, + "node_isolation_heartbeat_timeout": { + "c_type": "int64_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 3000, + "defined_in": "src/v/config/configuration.cc", + "description": "How long after the last heartbeat request a node will wait before considering itself to be isolated.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 9223372036854775807, + "minimum": -9223372036854775808, + "name": "node_isolation_heartbeat_timeout", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "node_status_interval": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 100, + "default_human_readable": "100 milliseconds", + "defined_in": "src/v/config/configuration.cc", 
+ "description": "Time interval between two node status messages. Node status messages establish liveness status outside of the Raft protocol.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "node_status_interval", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "node_status_reconnect_max_backoff_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 15000, + "default_human_readable": "15 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum backoff (in milliseconds) to reconnect to an unresponsive peer during node status liveness checks.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "node_status_reconnect_max_backoff_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "oidc_clock_skew_tolerance": { + "c_type": "std::chrono::seconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "", + "defined_in": "src/v/config/configuration.cc", + "description": "The amount of time (in seconds) to allow for when validating the expiry claim in the token.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17179869183, + "minimum": -17179869184, + "name": "oidc_clock_skew_tolerance", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "oidc_discovery_url": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": true, + "cloud_supported": true, + "config_scope": "cluster", + "default": 
"https://auth.prd.cloud.redpanda.com/.well-known/openid-configuration", + "defined_in": "src/v/config/configuration.cc", + "description": "The URL pointing to the well-known discovery endpoint for the OIDC provider.", + "is_deprecated": false, + "is_enterprise": false, + "name": "oidc_discovery_url", + "needs_restart": false, + "nullable": false, + "type": "string", + "visibility": "user" + }, + "oidc_group_claim_path": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "$.groups", + "defined_in": "src/v/config/configuration.cc", + "description": "JSON path to extract groups from the JWT payload.", + "is_deprecated": false, + "is_enterprise": false, + "name": "oidc_group_claim_path", + "needs_restart": false, + "nullable": false, + "type": "string", + "version": "v26.1.1-rc2", + "visibility": "user" + }, + "oidc_keys_refresh_interval": { + "c_type": "std::chrono::seconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 3600, + "default_human_readable": "1 hour", + "defined_in": "src/v/config/configuration.cc", + "description": "The frequency of refreshing the JSON Web Keys (JWKS) used to validate access tokens.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17179869183, + "minimum": -17179869184, + "name": "oidc_keys_refresh_interval", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "oidc_principal_mapping": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": true, + "cloud_supported": true, + "config_scope": "cluster", + "default": "$.sub", + "defined_in": "src/v/config/configuration.cc", + "description": "Rule for mapping JWT payload claim to a Redpanda user principal.", + "is_deprecated": false, + 
"is_enterprise": false, + "name": "oidc_principal_mapping", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:manage:security/authentication.adoc#oidc[OpenID Connect authentication]", + "self-managed-only: xref:manage:kubernetes/security/authentication/k-authentication.adoc[OpenID Connect authentication in Kubernetes]" + ], + "type": "string", + "visibility": "user" + }, + "oidc_token_audience": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": true, + "cloud_supported": true, + "config_scope": "cluster", + "default": "redpanda", + "defined_in": "src/v/config/configuration.cc", + "description": "A string representing the intended recipient of the token.", + "is_deprecated": false, + "is_enterprise": false, + "name": "oidc_token_audience", + "needs_restart": false, + "nullable": false, + "type": "string", + "visibility": "user" + }, + "openssl_config_file": { + "c_type": "std::filesystem::path", + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "src/v/config/node_config.cc", + "description": "Path to the configuration file used by OpenSSL to properly load the FIPS-compliant module.", + "is_deprecated": false, + "is_enterprise": false, + "name": "openssl_config_file", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "openssl_module_directory": { + "c_type": "std::filesystem::path", + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "src/v/config/node_config.cc", + "description": "Path to the directory that contains the OpenSSL FIPS-compliant module. 
The filename that Redpanda looks for is `fips.so`.", + "is_deprecated": false, + "is_enterprise": false, + "name": "openssl_module_directory", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "user" + }, + "pandaproxy_api": { + "c_type": "config::rest_authn_endpoint", + "category": "pandaproxy", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": [ + "0.0.0.0:8082", + null + ], + "defined_in": "src/v/pandaproxy/rest/configuration.cc", + "description": "Rest API listener address and port.", + "example": "[,yaml]\n----\npandaproxy:\n pandaproxy_api:\n address: 0.0.0.0\n port: 8082\n authentication_method: http_basic\n----", + "is_deprecated": false, + "is_enterprise": false, + "items": { + "type": "object" + }, + "name": "pandaproxy_api", + "needs_restart": true, + "nullable": false, + "type": "array" + }, + "pandaproxy_api_tls": { + "c_type": "config::endpoint_tls_config", + "category": "pandaproxy", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": [], + "defined_in": "src/v/pandaproxy/rest/configuration.cc", + "description": "TLS configuration for Pandaproxy API.", + "example": "[,yaml]\n----\npandaproxy_client:\n brokers:\n - address: \n port: \n - address: \n port: \n sasl_mechanism: \n scram_username: \n scram_password: \n produce_ack_level: -1\n retries: 5\n----", + "is_deprecated": false, + "is_enterprise": false, + "items": { + "type": "object" + }, + "name": "pandaproxy_api_tls", + "needs_restart": true, + "nullable": false, + "type": "array" + }, + "partition_autobalancing_concurrent_moves": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 50, + "defined_in": "src/v/config/configuration.cc", + 
"description": "Number of partitions that can be reassigned at once.", + "is_deprecated": false, + "is_enterprise": false, + "name": "partition_autobalancing_concurrent_moves", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "partition_autobalancing_max_disk_usage_percent": { + "c_type": "unsigned", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 80, + "defined_in": "src/v/config/configuration.cc", + "description": "NOTE: This property applies only when <> is set to `continuous`.\n\nWhen the disk usage of a node exceeds this threshold, it triggers Redpanda to move partitions off of the node.\n\n*Unit*: percent of disk used", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 4294967295, + "minimum": 0, + "name": "partition_autobalancing_max_disk_usage_percent", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:manage:cluster-maintenance/continuous-data-balancing.adoc[Configure Continuous Data Balancing]" + ], + "type": "integer", + "visibility": "user" + }, + "partition_autobalancing_min_size_threshold": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Minimum size of partition that is going to be prioritized when rebalancing a cluster due to the disk size threshold being breached. 
This value is calculated automatically by default.", + "is_deprecated": false, + "is_enterprise": false, + "name": "partition_autobalancing_min_size_threshold", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "partition_autobalancing_mode": { + "c_type": "model::partition_autobalancing_mode", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "continuous", + "defined_in": "src/v/config/configuration.cc", + "description": "Controls when and how Redpanda automatically rebalances partition replicas across brokers. For more information, see xref:manage:cluster-maintenance/cluster-balancing.adoc[partition balancing].\n\nValues:\n\n* `continuous`: Partition balancing happens automatically to maintain optimal performance and availability, based on continuous monitoring for node changes (same as `node_add`) and also high disk usage. This option requires an enterprise license, and it is customized by xref:reference:properties/cluster-properties.adoc#partition_autobalancing_node_availability_timeout_sec[`partition_autobalancing_node_availability_timeout_sec`] and xref:reference:properties/cluster-properties.adoc#partition_autobalancing_max_disk_usage_percent[`partition_autobalancing_max_disk_usage_percent`] properties.\n* `node_add`: Partition balancing happens when a node is added.\n* `off`: Partition balancing is disabled. 
This option is not recommended for production clusters.", + "enterprise_constructor": "restricted_with_sanctioned", + "enterprise_restricted_value": [ + "continuous" + ], + "enterprise_sanctioned_value": [ + "node_add" + ], + "enterprise_value": [ + "continuous" + ], + "enum": [ + "off", + "node_add", + "continuous" + ], + "example": "`node_add`", + "is_deprecated": false, + "is_enterprise": true, + "name": "partition_autobalancing_mode", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:manage:cluster-maintenance/cluster-balancing.adoc[partition balancing]", + "xref:manage:cluster-maintenance/continuous-data-balancing.adoc[Configure Continuous Data Balancing]", + "xref:get-started:licensing/index.adoc[enterprise license]" + ], + "type": "string", + "visibility": "user" + }, + "partition_autobalancing_movement_batch_size_bytes": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 5368709120, + "defined_in": "src/v/config/configuration.cc", + "description": "Total size of partitions that autobalancer is going to move in one batch (deprecated, use partition_autobalancing_concurrent_moves to limit the autobalancer concurrency)", + "is_deprecated": true, + "is_enterprise": false, + "name": "partition_autobalancing_movement_batch_size_bytes", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "deprecated" + }, + "partition_autobalancing_node_autodecommission_timeout_sec": { + "c_type": "std::chrono::seconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "When a node is unavailable for at least this timeout duration, it triggers Redpanda to decommission the node. 
This property applies only when `partition_autobalancing_mode` is set to `continuous`.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17179869183, + "minimum": -17179869184, + "name": "partition_autobalancing_node_autodecommission_timeout_sec", + "needs_restart": false, + "nullable": true, + "type": "integer", + "version": "v26.1.1-rc2", + "visibility": "user" + }, + "partition_autobalancing_node_availability_timeout_sec": { + "c_type": "std::chrono::seconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 900, + "default_human_readable": "15 minutes", + "defined_in": "src/v/config/configuration.cc", + "description": "When a node is unavailable for at least this timeout duration, it triggers Redpanda to move partitions off of the node. This property applies only when `partition_autobalancing_mode` is set to `continuous`.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17179869183, + "minimum": -17179869184, + "name": "partition_autobalancing_node_availability_timeout_sec", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:manage:cluster-maintenance/continuous-data-balancing.adoc[Configure Continuous Data Balancing]" + ], + "type": "integer", + "visibility": "user" + }, + "partition_autobalancing_tick_interval_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 30000, + "default_human_readable": "30 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Partition autobalancer tick interval.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "partition_autobalancing_tick_interval_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + 
"visibility": "tunable" + }, + "partition_autobalancing_tick_moves_drop_threshold": { + "c_type": "double", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 0.2, + "defined_in": "src/v/config/configuration.cc", + "description": "If the number of scheduled tick moves drops by this ratio, a new tick is scheduled immediately. Valid values are (0, 1]. For example, with a value of 0.2 and 100 scheduled moves in a tick, a new tick is scheduled when the in-progress moves are fewer than 80.", + "is_deprecated": false, + "is_enterprise": false, + "name": "partition_autobalancing_tick_moves_drop_threshold", + "needs_restart": false, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "partition_autobalancing_topic_aware": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "If `true`, Redpanda prioritizes balancing a topic\u2019s partition replica count evenly across all brokers while it\u2019s balancing the cluster\u2019s overall partition count. 
Because different topics in a cluster can have vastly different load profiles, this better distributes the workload of the most heavily-used topics evenly across brokers.", + "is_deprecated": false, + "is_enterprise": false, + "name": "partition_autobalancing_topic_aware", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "partition_manager_shutdown_watchdog_timeout": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 30000, + "default_human_readable": "30 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "A threshold value to detect partitions which might have been stuck while shutting down. After this threshold, a watchdog in partition manager will log information about partition shutdown not making progress.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "partition_manager_shutdown_watchdog_timeout", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "pp_sr_smp_max_non_local_requests": { + "c_type": "uint32_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of Cross-core(Inter-shard communication) requests pending in HTTP Proxy and Schema Registry seastar::smp group. 
(For more details, see the `seastar::smp_service_group` documentation).\n\nSee https://docs.seastar.io/master/[Seastar documentation^]", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 4294967295, + "minimum": 0, + "name": "pp_sr_smp_max_non_local_requests", + "needs_restart": true, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "produce_ack_level": { + "c_type": "int16_t", + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": -1, + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Number of acknowledgments the producer requires the leader to have received before considering a request complete.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 32767, + "minimum": -32768, + "name": "produce_ack_level", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "produce_batch_delay": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": 100, + "default_human_readable": "100 milliseconds", + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Delay (in milliseconds) to wait before sending batch.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "produce_batch_delay", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "produce_batch_delay_ms": { + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "override", + "description": "Delay (in milliseconds) to wait before sending batch.", + "is_deprecated": false, + "is_topic_property": false, + 
"name": "produce_batch_delay_ms", + "type": "string", + "visibility": "user" + }, + "produce_batch_record_count": { + "c_type": "int32_t", + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": 1000, + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Number of records to batch before sending to broker.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 2147483647, + "minimum": -2147483648, + "name": "produce_batch_record_count", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "produce_batch_size_bytes": { + "c_type": "int32_t", + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": 1048576, + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Number of bytes to batch before sending to broker.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 2147483647, + "minimum": -2147483648, + "name": "produce_batch_size_bytes", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "produce_compression_type": { + "c_type": "ss::sstring", + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "none", + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Enable or disable compression by the Kafka client. 
Specify `none` to disable compression or one of the supported types [gzip, snappy, lz4, zstd].", + "is_deprecated": false, + "is_enterprise": false, + "name": "produce_compression_type", + "needs_restart": true, + "nullable": false, + "type": "string" + }, + "produce_shutdown_delay": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": 0, + "default_human_readable": "0 milliseconds", + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Delay (in milliseconds) to allow for final flush of buffers before shutting down.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "produce_shutdown_delay", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "produce_shutdown_delay_ms": { + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "override", + "description": "Delay (in milliseconds) to allow for final flush of buffers before shutting down.", + "is_deprecated": false, + "is_topic_property": false, + "name": "produce_shutdown_delay_ms", + "type": "string", + "visibility": "user" + }, + "quota_manager_gc_sec": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "30000 milliseconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Quota manager GC frequency in milliseconds.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "quota_manager_gc_sec", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + 
}, + "rack": { + "c_type": "model::rack_id", + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "src/v/config/node_config.cc", + "description": "A label that identifies a failure zone. Apply the same label to all brokers in the same failure zone. When xref:./cluster-properties.adoc#enable_rack_awareness[enable_rack_awareness] is set to `true` at the cluster level, the system uses the rack labels to spread partition replicas across different failure zones.", + "is_deprecated": false, + "is_enterprise": false, + "name": "rack", + "needs_restart": true, + "nullable": true, + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#enable_rack_awareness[`enable_rack_awareness`]" + ], + "type": "string", + "visibility": "user" + }, + "raft_election_timeout_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1500, + "default_human_readable": "1500 milliseconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Election timeout expressed in milliseconds.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "raft_election_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "raft_enable_longest_log_detection": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "Enables an additional step in leader election where a candidate is allowed to wait for all the replies from the broker it requested votes from. 
This may introduce a small delay when recovering from failure, but it prevents truncation if any of the replicas have more data than the majority.", + "is_deprecated": false, + "is_enterprise": false, + "name": "raft_enable_longest_log_detection", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "raft_enable_lw_heartbeat": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "Enables Raft optimization of heartbeats.", + "is_deprecated": false, + "is_enterprise": false, + "name": "raft_enable_lw_heartbeat", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "raft_flush_timer_interval_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 100, + "default_human_readable": "100 milliseconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Interval of checking partition against the `raft_replica_max_pending_flush_bytes`. Deprecated since 24.1; use `raft_replica_max_flush_delay_ms` instead.", + "is_deprecated": true, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "raft_flush_timer_interval_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "deprecated" + }, + "raft_heartbeat_disconnect_failures": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 3, + "defined_in": "src/v/config/configuration.cc", + "description": "The number of failed heartbeats after which an unresponsive TCP connection is forcibly closed. 
To disable forced disconnection, set to 0.", + "is_deprecated": false, + "is_enterprise": false, + "name": "raft_heartbeat_disconnect_failures", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "raft_heartbeat_interval_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "150 milliseconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Number of milliseconds for Raft leader heartbeats.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "raft_heartbeat_interval_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "raft_heartbeat_timeout_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 3000, + "default_human_readable": "3 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Raft heartbeat RPC (remote procedure call) timeout. Raft uses a heartbeat mechanism to maintain leadership authority and to trigger leader elections. The `raft_heartbeat_interval_ms` is a periodic heartbeat sent by the partition leader to all followers to declare its leadership. 
If a follower does not receive a heartbeat within the `raft_heartbeat_timeout_ms`, then it triggers an election to choose a new partition leader.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "raft_heartbeat_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "raft_io_timeout_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 10000, + "default_human_readable": "10 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Raft I/O timeout.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "raft_io_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "raft_learner_recovery_rate": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 104857600, + "defined_in": "src/v/config/configuration.cc", + "description": "Raft learner recovery rate limit. Throttles the rate of data communicated to nodes (learners) that need to catch up to leaders. This rate limit is placed on a node sending data to a recovering node. Each sending node is limited to this rate. The recovering node accepts data as fast as possible according to the combined limits of all healthy nodes in the cluster. 
For example, if two nodes are sending data to the recovering node, and `raft_learner_recovery_rate` is 100 MB/sec, then the recovering node will recover at a rate of 200 MB/sec.", + "is_deprecated": false, + "is_enterprise": false, + "name": "raft_learner_recovery_rate", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "raft_max_buffered_follower_append_entries_bytes_per_shard": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 0, + "defined_in": "src/v/config/configuration.cc", + "description": "The total size of append entry requests that may be cached per shard, using the Raft-buffered protocol. When an entry is cached, the leader can continue serving requests because the ordering of the cached requests cannot change. When the total size of cached requests reaches the set limit, back pressure is applied to throttle producers.", + "is_deprecated": false, + "is_enterprise": false, + "name": "raft_max_buffered_follower_append_entries_bytes_per_shard", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "raft_max_inflight_follower_append_entries_requests_per_shard": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1024, + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum number of append entry requests that may be sent from Raft groups on a Seastar shard to the current node, and are awaiting a reply. 
This property replaces `raft_max_concurrent_append_requests_per_follower`.", + "is_deprecated": false, + "is_enterprise": false, + "name": "raft_max_inflight_follower_append_entries_requests_per_shard", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "raft_max_recovery_memory": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum memory that can be used for reads in Raft recovery process by default 15% of total memory.", + "example": "`41943040`", + "is_deprecated": false, + "is_enterprise": false, + "name": "raft_max_recovery_memory", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "raft_recovery_concurrency_per_shard": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 64, + "defined_in": "src/v/config/configuration.cc", + "description": "Number of partitions that may simultaneously recover data to a particular shard. This number is limited to avoid overwhelming nodes when they come back online after an outage.", + "is_deprecated": false, + "is_enterprise": false, + "name": "raft_recovery_concurrency_per_shard", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "raft_recovery_throttle_disable_dynamic_mode": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "include::reference:partial$internal-use-property.adoc[]\n\nDisables cross shard sharing used to throttle recovery traffic. 
Should only be used to debug unexpected problems.", + "is_deprecated": false, + "is_enterprise": false, + "name": "raft_recovery_throttle_disable_dynamic_mode", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "raft_replica_max_flush_delay_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 100, + "default_human_readable": "100 milliseconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum delay between two subsequent flushes. After this delay, the log is automatically force flushed.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "raft_replica_max_flush_delay_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "raft_replica_max_pending_flush_bytes": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 262144, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of bytes that are not flushed per partition. 
If the configured threshold is reached, the log is automatically flushed even if it has not been explicitly requested.", + "is_deprecated": false, + "is_enterprise": false, + "name": "raft_replica_max_pending_flush_bytes", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "raft_replicate_batch_window_size": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1048576, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum size of requests cached for replication.", + "is_deprecated": false, + "is_enterprise": false, + "name": "raft_replicate_batch_window_size", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "raft_smp_max_non_local_requests": { + "c_type": "uint32_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of cross-core (inter-shard communication) requests pending in the Raft seastar::smp group. For details, refer to the `seastar::smp_service_group` documentation.\n\nSee https://docs.seastar.io/master/[Seastar documentation^]", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 4294967295, + "minimum": 0, + "name": "raft_smp_max_non_local_requests", + "needs_restart": true, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "raft_timeout_now_timeout_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1000, + "default_human_readable": "1 second", + "defined_in": "src/v/config/configuration.cc", + "description": "Timeout for Raft's timeout_now RPC. 
This RPC is used to force a follower to dispatch a round of votes immediately.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "raft_timeout_now_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "raft_transfer_leader_recovery_timeout_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 10000, + "default_human_readable": "10 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Follower recovery timeout waiting period when transferring leadership.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "raft_transfer_leader_recovery_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "readers_cache_eviction_timeout_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 30000, + "default_human_readable": "30 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Duration after which inactive readers are evicted from cache.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "readers_cache_eviction_timeout_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "readers_cache_target_max_size": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 200, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum desired 
number of readers cached per NTP. This is a soft limit, meaning that the number of readers in cache may temporarily increase as cleanup is performed in the background.", + "is_deprecated": false, + "is_enterprise": false, + "name": "readers_cache_target_max_size", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "reclaim_batch_cache_min_free": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 67108864, + "defined_in": "src/v/config/configuration.cc", + "description": "Minimum amount of free memory maintained by the batch cache background reclaimer.", + "is_deprecated": false, + "is_enterprise": false, + "name": "reclaim_batch_cache_min_free", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "reclaim_growth_window": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 3000, + "default_human_readable": "3 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Starting from the last point in time when memory was reclaimed from the batch cache, this is the duration during which the amount of memory to reclaim grows at a significant rate, based on heuristics about the amount of available memory.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "reclaim_growth_window", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "reclaim_max_size": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 4194304, + "defined_in": "src/v/config/configuration.cc", + 
"description": "Maximum batch cache reclaim size.", + "is_deprecated": false, + "is_enterprise": false, + "name": "reclaim_max_size", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "reclaim_min_size": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 131072, + "defined_in": "src/v/config/configuration.cc", + "description": "Minimum batch cache reclaim size.", + "is_deprecated": false, + "is_enterprise": false, + "name": "reclaim_min_size", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "reclaim_stable_window": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 10000, + "default_human_readable": "10 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "If the duration since the last time memory was reclaimed is longer than the amount of time specified in this property, the memory usage of the batch cache is considered stable, so only the minimum size (`reclaim_min_size`) is set to be reclaimed.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "reclaim_stable_window", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "recovery_mode_enabled": { + "c_type": "bool", + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": false, + "defined_in": "src/v/config/node_config.cc", + "description": "If `true`, start Redpanda in xref:manage:recovery-mode.adoc[recovery mode], where user partitions are not loaded and only administrative operations are allowed.", + 
"is_deprecated": false, + "is_enterprise": false, + "name": "recovery_mode_enabled", + "needs_restart": true, + "nullable": false, + "related_topics": [ + "xref:manage:recovery-mode.adoc[recovery mode]" + ], + "type": "boolean", + "visibility": "user" + }, + "redpanda.cloud_topic.enabled": { + "category": "tiered-storage", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "topic", + "default": null, + "defined_in": "override", + "description": "Configuration property: redpanda.cloud_topic.enabled", + "exclude_from_docs": true, + "is_deprecated": false, + "is_topic_property": true, + "name": "redpanda.cloud_topic.enabled", + "type": "string", + "visibility": "user" + }, + "redpanda.iceberg.delete": { + "acceptable_values": "", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "iceberg-integration", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "cluster-properties.adoc", + "config_scope": "topic", + "corresponding_cluster_property": "iceberg_delete", + "default": true, + "defined_in": "src/v/kafka/server/handlers/topics/types.h", + "description": "Whether the corresponding Iceberg table is deleted upon deleting the topic.", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "redpanda.iceberg.delete", + "needs_restart": false, + "type": "boolean" + }, + "redpanda.iceberg.invalid.record.action": { + "acceptable_values": "", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "iceberg-integration", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "cluster-properties.adoc", + "config_scope": "topic", + "corresponding_cluster_property": "iceberg_invalid_record_action", + "default": 
"dlq_table", + "defined_in": "src/v/kafka/server/handlers/topics/types.h", + "description": "Whether to write invalid records to a dead-letter queue (DLQ).", + "enum": [ + "drop", + "dlq_table" + ], + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "redpanda.iceberg.invalid.record.action", + "needs_restart": false, + "related_topics": [ + "xref:manage:iceberg/about-iceberg-topics.adoc#troubleshoot-errors[Troubleshoot errors]" + ], + "type": "string" + }, + "redpanda.iceberg.mode": { + "acceptable_values": "", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "iceberg-integration", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "", + "config_scope": "topic", + "corresponding_cluster_property": null, + "default": null, + "defined_in": "src/v/kafka/server/handlers/topics/types.h", + "description": "Enable the Iceberg integration for the topic. 
You can choose one of four modes.", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "redpanda.iceberg.mode", + "needs_restart": false, + "related_topics": [ + "xref:manage:iceberg/choose-iceberg-mode.adoc#override-value-schema-latest-default[Choose an Iceberg Mode]" + ], + "type": "string" + }, + "redpanda.iceberg.partition.spec": { + "acceptable_values": "", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "iceberg-integration", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "cluster-properties.adoc", + "config_scope": "topic", + "corresponding_cluster_property": "iceberg_default_partition_spec", + "default": "(hour(redpanda.timestamp))", + "defined_in": "src/v/kafka/server/handlers/topics/types.h", + "description": "The link:https://iceberg.apache.org/docs/nightly/partitioning/[partitioning^] specification for the Iceberg table.", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "redpanda.iceberg.partition.spec", + "needs_restart": false, + "related_topics": [ + "xref:manage:iceberg/about-iceberg-topics.adoc#use-custom-partitioning[Use custom partitioning]" + ], + "type": "string" + }, + "redpanda.iceberg.target.lag.ms": { + "acceptable_values": "milliseconds (integer)", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "iceberg-integration", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "", + "config_scope": "topic", + "corresponding_cluster_property": null, + "default": null, + "defined_in": "src/v/kafka/server/handlers/topics/types.h", + "description": "Controls how often the data in the Iceberg table is refreshed with new data from the topic. 
Redpanda attempts to commit all data produced to the topic within the lag target, subject to resource availability.", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "redpanda.iceberg.target.lag.ms", + "needs_restart": false, + "type": "integer" + }, + "redpanda.key.schema.id.validation": { + "acceptable_values": "", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "schema-registry", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "", + "config_scope": "topic", + "corresponding_cluster_property": null, + "default": "false", + "defined_in": "src/v/kafka/server/handlers/topics/types.h", + "description": "Enable validation of the schema ID for keys on a record. When enabled, Redpanda validates that the schema ID encoded in the record's key is registered in the Schema Registry according to the configured subject name strategy.", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "redpanda.key.schema.id.validation", + "needs_restart": false, + "related_topics": [ + "xref:manage:schema-reg/schema-id-validation.adoc[Server-Side Schema ID Validation]" + ], + "type": "boolean" + }, + "redpanda.key.subject.name.strategy": { + "acceptable_values": "", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "schema-registry", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "", + "config_scope": "topic", + "corresponding_cluster_property": null, + "default": "TopicNameStrategy", + "defined_in": "src/v/kafka/server/handlers/topics/types.h", + "description": "The subject name strategy for keys when `redpanda.key.schema.id.validation` is enabled. 
This determines how the topic and schema are mapped to a subject name in the Schema Registry.", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "redpanda.key.subject.name.strategy", + "needs_restart": false, + "related_topics": [ + "xref:manage:schema-reg/schema-id-validation.adoc[Server-Side Schema ID Validation]" + ], + "type": "string" + }, + "redpanda.leaders.preference": { + "acceptable_values": "", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "performance-cluster", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "cluster-properties.adoc", + "config_scope": "topic", + "corresponding_cluster_property": "default_leaders_preference", + "default": "none", + "defined_in": "src/v/kafka/protocol/topic_properties.h", + "description": "The preferred location (rack) for partition leaders of a topic.\n\nThis property inherits the value from the config_ref:default_leaders_preference,true,properties/cluster-properties[] cluster configuration property. 
You may override the cluster-wide setting by specifying the value for individual topics.\n\nIf the cluster configuration property config_ref:enable_rack_awareness,true,properties/cluster-properties[] is set to `false`, Leader Pinning is disabled across the cluster.", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "redpanda.leaders.preference", + "needs_restart": false, + "related_topics": [ + "xref:develop:produce-data/leader-pinning.adoc[Leader pinning]" + ], + "type": "object" + }, + "redpanda.remote.allowgaps": { + "acceptable_values": "", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "other", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "object-storage-properties.adoc", + "config_scope": "topic", + "corresponding_cluster_property": "cloud_storage_enable_remote_allow_gaps", + "default": false, + "defined_in": "src/v/kafka/protocol/topic_properties.h", + "description": "", + "exclude_from_docs": true, + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "redpanda.remote.allowgaps", + "needs_restart": false, + "type": "boolean" + }, + "redpanda.remote.delete": { + "acceptable_values": "", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "tiered-storage", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "", + "config_scope": "topic", + "corresponding_cluster_property": null, + "default": null, + "defined_in": "src/v/kafka/server/handlers/topics/types.h", + "description": "A flag that enables deletion of data from object storage for Tiered Storage when it's deleted from local storage for a topic.\n\nNOTE: `redpanda.remote.delete` doesn't apply to Remote Read Replica topics: a Remote Read Replica topic isn't 
deleted from object storage when this flag is `true`.", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "redpanda.remote.delete", + "needs_restart": false, + "related_topics": [ + "xref:manage:tiered-storage.adoc[Tiered Storage]" + ], + "type": "boolean" + }, + "redpanda.remote.read": { + "acceptable_values": "", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "tiered-storage", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "object-storage-properties.adoc", + "config_scope": "topic", + "corresponding_cluster_property": "cloud_storage_enable_remote_read", + "default": false, + "defined_in": "src/v/kafka/server/handlers/topics/types.h", + "description": "A flag for enabling Redpanda to fetch data for a topic from object storage to local storage. When set to `true` together with `redpanda.remote.write`, it enables the xref:manage:tiered-storage.adoc[Tiered Storage] feature.", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "redpanda.remote.read", + "needs_restart": false, + "related_topics": [ + "xref:manage:tiered-storage.adoc[Tiered Storage]" + ], + "type": "boolean" + }, + "redpanda.remote.readreplica": { + "acceptable_values": "", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "remote-read-replica", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "", + "config_scope": "topic", + "corresponding_cluster_property": null, + "default": null, + "defined_in": "src/v/kafka/protocol/topic_properties.h", + "description": "The name of the object storage bucket for a Remote Read Replica topic.\n\nCAUTION: Setting `redpanda.remote.readreplica` together with either `redpanda.remote.read` or `redpanda.remote.write` results in an error.", 
+ "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "redpanda.remote.readreplica", + "needs_restart": false, + "related_topics": [ + "xref:manage:remote-read-replicas.adoc[Remote Read Replicas]" + ], + "type": "string" + }, + "redpanda.remote.recovery": { + "acceptable_values": "", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "tiered-storage", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "", + "config_scope": "topic", + "corresponding_cluster_property": null, + "default": null, + "defined_in": "src/v/kafka/protocol/topic_properties.h", + "description": "A flag that enables the recovery or reproduction of a topic from object storage for Tiered Storage. The recovered data is saved in local storage, and the maximum amount of recovered data is determined by the local storage retention limits of the topic.\n\nTIP: You can only configure `redpanda.remote.recovery` when you create a topic. 
You cannot apply this setting to existing topics.", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "redpanda.remote.recovery", + "needs_restart": false, + "related_topics": [ + "xref:manage:tiered-storage.adoc[Tiered Storage]" + ], + "type": "boolean" + }, + "redpanda.remote.write": { + "acceptable_values": "", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "tiered-storage", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "object-storage-properties.adoc", + "config_scope": "topic", + "corresponding_cluster_property": "cloud_storage_enable_remote_write", + "default": false, + "defined_in": "src/v/kafka/server/handlers/topics/types.h", + "description": "A flag for enabling Redpanda to upload data for a topic from local storage to object storage. When set to `true` together with `redpanda.remote.read`, it enables the xref:manage:tiered-storage.adoc[Tiered Storage] feature.", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "redpanda.remote.write", + "needs_restart": false, + "related_topics": [ + "xref:manage:tiered-storage.adoc[Tiered Storage]" + ], + "type": "boolean" + }, + "redpanda.storage.mode": { + "acceptable_values": "", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "tiered-storage", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "cluster-properties.adoc", + "config_scope": "topic", + "corresponding_cluster_property": "default_redpanda_storage_mode", + "default": "unset", + "defined_in": "src/v/kafka/protocol/topic_properties.h", + "description": "The storage mode for a topic. 
Determines how topic data is stored and whether it is eligible for upload to object storage.\n\nAccepted values:\n\n* `local`: Topic data is stored only on the broker's local disk. Object storage upload is disabled for the topic, regardless of cluster-level Tiered Storage settings.\n* `tiered`: Topic data is stored on local disk and also uploaded to object storage. Enables xref:manage:tiered-storage.adoc[Tiered Storage] for the topic.\n* `cloud`: Topic data is stored in object storage using the Cloud Topics architecture. Local storage is used only as a write buffer.\n* `unset`: Uses the cluster-level config_ref:default_redpanda_storage_mode,true,properties/cluster-properties[] setting, or falls back to legacy `redpanda.remote.read` and `redpanda.remote.write` topic property behavior for backwards compatibility.\n\nThis property overrides the cluster-wide config_ref:default_redpanda_storage_mode,true,properties/cluster-properties[] setting for individual topics.", + "enum": [ + "local", + "tiered", + "cloud", + "unset" + ], + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "redpanda.storage.mode", + "needs_restart": false, + "related_topics": [ + "xref:manage:tiered-storage.adoc[Tiered Storage]" + ], + "type": "string" + }, + "redpanda.value.schema.id.validation": { + "acceptable_values": "", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "schema-registry", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "", + "config_scope": "topic", + "corresponding_cluster_property": null, + "default": "false", + "defined_in": "src/v/kafka/server/handlers/topics/types.h", + "description": "Enable validation of the schema ID for values on a record. 
When enabled, Redpanda validates that the schema ID encoded in the record's value is registered in the Schema Registry according to the configured subject name strategy.", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "redpanda.value.schema.id.validation", + "needs_restart": false, + "related_topics": [ + "xref:manage:schema-reg/schema-id-validation.adoc[Server-Side Schema ID Validation]" + ], + "type": "boolean" + }, + "redpanda.value.subject.name.strategy": { + "acceptable_values": "", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "schema-registry", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "", + "config_scope": "topic", + "corresponding_cluster_property": null, + "default": "TopicNameStrategy", + "defined_in": "src/v/kafka/server/handlers/topics/types.h", + "description": "The subject name strategy for values when `redpanda.value.schema.id.validation` is enabled. 
This determines how the topic and schema are mapped to a subject name in the Schema Registry.", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "redpanda.value.subject.name.strategy", + "needs_restart": false, + "related_topics": [ + "xref:manage:schema-reg/schema-id-validation.adoc[Server-Side Schema ID Validation]" + ], + "type": "string" + }, + "redpanda.virtual.cluster.id": { + "acceptable_values": "", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "other", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "", + "config_scope": "topic", + "corresponding_cluster_property": null, + "default": null, + "defined_in": "src/v/kafka/protocol/topic_properties.h", + "description": "", + "exclude_from_docs": true, + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "redpanda.virtual.cluster.id", + "needs_restart": false, + "type": "string" + }, + "release_cache_on_segment_roll": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Flag for specifying whether or not to release cache when a full segment is rolled.", + "is_deprecated": false, + "is_enterprise": false, + "name": "release_cache_on_segment_roll", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "replicate_append_timeout_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 3000, + "default_human_readable": "3 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "Timeout for append entry 
requests issued while replicating entries.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "replicate_append_timeout_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "replication.factor": { + "acceptable_values": "integer (1 or greater)", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "performance-cluster", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "", + "config_scope": "topic", + "corresponding_cluster_property": null, + "default": null, + "defined_in": "src/v/kafka/protocol/topic_properties.h", + "description": "The number of replicas of a topic to save in different nodes (brokers) of a cluster.\n\nIf `replication.factor` is set to a positive value, it overrides the cluster property xref:./cluster-properties.adoc#default_topic_replication[default_topic_replication] for the topic.\n\nNOTE: Although `replication.factor` isn't returned or displayed by xref:reference:rpk/rpk-topic/rpk-topic-describe.adoc[`rpk topic describe`] as a valid Kafka property, you can set it using xref:reference:rpk/rpk-topic/rpk-topic-alter-config.adoc[`rpk topic alter-config`]. 
When the `replication.factor` of a topic is altered, it isn't simply a property value that's updated, but rather the actual replica sets of topic partitions that are changed.", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "replication.factor", + "needs_restart": false, + "related_topics": [ + "xref:reference:rpk/rpk-topic/rpk-topic-describe.adoc[`rpk topic describe`]", + "xref:reference:rpk/rpk-topic/rpk-topic-alter-config.adoc[`rpk topic alter-config`]", + "xref:reference:properties/cluster-properties.adoc#default_topic_replication[`default_topic_replication`]", + "xref:develop:manage-topics/config-topics.adoc#choose-the-replication-factor[Choose the replication factor]", + "xref:develop:manage-topics/config-topics.adoc#change-the-replication-factor[Change the replication factor]", + "xref:reference:properties/cluster-properties.adoc#default_topic_replication[default_topic_replication]" + ], + "type": "integer" + }, + "retention.bytes": { + "acceptable_values": "bytes (integer)", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "retention-compaction", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "cluster-properties.adoc", + "config_scope": "topic", + "corresponding_cluster_property": "retention_bytes", + "default": null, + "defined_in": "src/v/kafka/protocol/topic_properties.h", + "description": "A size-based retention limit that configures the maximum size that a topic partition can grow before becoming eligible for cleanup.\n\nIf `retention.bytes` is set to a positive value, it overrides the cluster property xref:cluster-properties.adoc#retention_bytes[`retention_bytes`] for the topic, and the total retained size for the topic is `retention.bytes` multiplied by the number of partitions for the topic.\n\nWhen both size-based (`retention.bytes`) and time-based (`retention.ms`) 
retention limits are set, cleanup occurs when either limit is reached.\n\nThis property supports three states:\n\n* Positive value: Sets the maximum bytes per partition. When exceeded, oldest data becomes eligible for cleanup.\n* 0: Partitions are immediately eligible for cleanup.\n* Negative value: Disables size-based retention for this topic.", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "retention.bytes", + "needs_restart": false, + "related_topics": [ + "xref:cluster-properties.adoc#retention_bytes[`retention_bytes`]", + "xref:reference:properties/cluster-properties.adoc#retention_bytes[`retention_bytes`]", + "xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention]" + ], + "type": "integer" + }, + "retention.local.target.bytes": { + "acceptable_values": "bytes (integer)", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "tiered-storage", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "cluster-properties.adoc", + "config_scope": "topic", + "corresponding_cluster_property": "retention_local_target_bytes_default", + "default": null, + "defined_in": "src/v/kafka/protocol/topic_properties.h", + "description": "A size-based retention limit for Tiered Storage that configures the maximum size that a topic partition in local storage can grow before becoming eligible for cleanup. 
It applies per partition and is equivalent to <> without Tiered Storage.\n\nThis property supports three states:\n\n* Positive value: Sets the maximum bytes per partition in local storage before cleanup.\n* 0: Data in local storage is immediately eligible for cleanup.\n* Negative value: Disables size-based local retention override for this topic.", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "retention.local.target.bytes", + "needs_restart": false, + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#retention_local_target_bytes[`retention_local_target_bytes`]", + "xref:manage:tiered-storage.adoc[Tiered Storage]" + ], + "type": "integer" + }, + "retention.local.target.ms": { + "acceptable_values": "milliseconds (integer)", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "tiered-storage", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "cluster-properties.adoc", + "config_scope": "topic", + "corresponding_cluster_property": "retention_local_target_ms_default", + "default": 86400000, + "default_human_readable": "1 day", + "defined_in": "src/v/kafka/protocol/topic_properties.h", + "description": "A time-based retention limit for Tiered Storage that sets the maximum duration that a log's segment file for a topic is retained in local storage before cleanup. 
It applies per partition and is equivalent to <> without Tiered Storage.\n\nThis property supports three states:\n\n* Positive value: Sets the maximum milliseconds to retain data in local storage.\n* 0: Data in local storage is immediately eligible for cleanup.\n* Negative value: Disables time-based local retention override for this topic.", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "retention.local.target.ms", + "needs_restart": false, + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#retention_local_target_ms[`retention_local_target_ms`]", + "xref:manage:tiered-storage.adoc[Tiered Storage]", + "xref:manage:remote-read-replicas.adoc[Remote Read Replicas]" + ], + "type": "integer" + }, + "retention.ms": { + "acceptable_values": "", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "retention-compaction", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "cluster-properties.adoc", + "config_scope": "topic", + "corresponding_cluster_property": "log_retention_ms", + "default": 604800000, + "default_human_readable": "1 week", + "defined_in": "src/v/kafka/protocol/topic_properties.h", + "description": "A time-based retention limit that configures the maximum duration that a log's segment file for a topic is retained before it becomes eligible to be cleaned up. 
To consume all data, a consumer of the topic must read from a segment before its `retention.ms` elapses, otherwise the segment may be compacted and/or deleted.\n\nIf `retention.ms` is set to a positive value, it overrides the cluster property xref:./cluster-properties.adoc#log_retention_ms[`log_retention_ms`] for the topic.\n\nWhen both size-based (`retention.bytes`) and time-based (`retention.ms`) retention limits are set, the earliest occurring limit applies.\n\nThis property supports three states:\n\n* Positive value: Sets the maximum milliseconds to retain data. After this duration, segments become eligible for cleanup.\n* 0: Data is immediately eligible for cleanup.\n* Negative value: Disables time-based retention for this topic.", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "retention.ms", + "needs_restart": false, + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#log_retention_ms[`log_retention_ms`]", + "xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention]" + ], + "type": "retention_duration_property" + }, + "retention_bytes": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Default maximum number of bytes per partition on disk before triggering deletion of the oldest messages. 
If `null` (the default value), no limit is applied.\n\nThe topic property xref:./topic-properties.adoc#retentionbytes[`retention.bytes`] overrides the value of `retention_bytes` at the topic level.", + "is_deprecated": false, + "is_enterprise": false, + "name": "retention_bytes", + "needs_restart": false, + "nullable": true, + "related_topics": [ + "xref:reference:properties/topic-properties.adoc#retentionbytes[`retention.bytes`]" + ], + "type": "integer", + "visibility": "user" + }, + "retention_local_strict": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Flag to allow Tiered Storage topics to expand to consumable retention policy limits. When this flag is enabled, non-local retention settings are used, and local retention settings are used to inform data removal policies in low-disk space scenarios.", + "is_deprecated": false, + "is_enterprise": false, + "name": "retention_local_strict", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "retention_local_strict_override": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "Trim log data when a cloud topic reaches its local retention limit. 
When this option is disabled, Redpanda allows partitions to grow past the local retention limit, and data is trimmed automatically as storage reaches the configured target size.", + "is_deprecated": false, + "is_enterprise": false, + "name": "retention_local_strict_override", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "retention_local_target_bytes_default": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Local retention size target for partitions of topics with object storage write enabled. If `null`, the property is disabled.\n\nThis property can be overridden on a per-topic basis by setting `retention.local.target.bytes` in each topic enabled for Tiered Storage. See xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention].\n\nNOTE: Both `retention_local_target_bytes_default` and `retention_local_target_ms_default` can be set. The limit that is reached earlier is applied.", + "is_deprecated": false, + "is_enterprise": false, + "name": "retention_local_target_bytes_default", + "needs_restart": false, + "nullable": true, + "related_topics": [ + "xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention]" + ], + "type": "integer", + "visibility": "user" + }, + "retention_local_target_capacity_bytes": { + "c_type": "uint64_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "The target capacity (in bytes) that log storage will try to use before additional retention rules take over to trim data to meet the target. 
When no target is specified, storage usage is unbounded.\n\nNOTE: Redpanda Data recommends setting only one of <> or <>. If both are set, the minimum of the two is used as the effective target capacity.", + "example": "`2147483648000`", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 18446744073709551615, + "minimum": 0, + "name": "retention_local_target_capacity_bytes", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "user" + }, + "retention_local_target_capacity_percent": { + "c_type": "double", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 80.0, + "defined_in": "src/v/config/configuration.cc", + "description": "The target capacity in percent of unreserved space (<>) that log storage will try to use before additional retention rules will take over to trim data in order to meet the target. When no target is specified, storage usage is unbounded.\n\nNOTE: Redpanda Data recommends setting only one of <> or <>. If both are set, the minimum of the two is used as the effective target capacity.", + "example": "`80.0`", + "is_deprecated": false, + "is_enterprise": false, + "name": "retention_local_target_capacity_percent", + "needs_restart": false, + "nullable": true, + "type": "number", + "visibility": "user" + }, + "retention_local_target_ms_default": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 86400000, + "default_human_readable": "1 day", + "defined_in": "src/v/config/configuration.cc", + "description": "Local retention time target for partitions of topics with object storage write enabled.\n\nThis property can be overridden on a per-topic basis by setting `retention.local.target.ms` in each topic enabled for Tiered Storage. 
See xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention].\n\nNOTE: Both <> and <> can be set. The limit that is reached first is applied.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "retention_local_target_ms_default", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention]" + ], + "type": "integer", + "visibility": "user" + }, + "retention_local_trim_interval": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 30000, + "default_human_readable": "30 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "The period during which disk usage is checked for disk pressure, and data is optionally trimmed to meet the target.", + "example": "`31536000000`", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "retention_local_trim_interval", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "retention_local_trim_overage_coeff": { + "c_type": "double", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 2.0, + "defined_in": "src/v/config/configuration.cc", + "description": "The space management control loop reclaims the overage multiplied by this coefficient to compensate for data that is written during the idle period between control loop invocations.", + "example": "`1.8`", + "is_deprecated": false, + "is_enterprise": false, + "name": "retention_local_trim_overage_coeff", + "needs_restart": false, + "nullable": false, + "type": 
"number", + "visibility": "tunable" + }, + "retries": { + "c_type": "size_t", + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": 5, + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Number of times to retry a request to a broker.", + "is_deprecated": false, + "is_enterprise": false, + "name": "retries", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "retry_base_backoff": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": 100, + "default_human_readable": "100 milliseconds", + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Delay (in milliseconds) for initial retry backoff.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "retry_base_backoff", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "retry_base_backoff_ms": { + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "override", + "description": "Delay (in milliseconds) for initial retry backoff.", + "is_deprecated": false, + "is_topic_property": false, + "name": "retry_base_backoff_ms", + "type": "string", + "visibility": "user" + }, + "rpc_client_connections_per_peer": { + "c_type": "int", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 128, + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum number of connections a broker will open to each of its peers.", + "example": "`8`", + 
"is_deprecated": false, + "is_enterprise": false, + "maximum": 2147483647, + "minimum": -2147483648, + "name": "rpc_client_connections_per_peer", + "needs_restart": true, + "nullable": false, + "type": "integer" + }, + "rpc_server": { + "c_type": "net::unresolved_address", + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": { + "address": "127.0.0.1", + "port": 33145 + }, + "defined_in": "src/v/config/node_config.cc", + "description": "IP address and port for the Remote Procedure Call (RPC) server.", + "is_deprecated": false, + "is_enterprise": false, + "name": "rpc_server", + "needs_restart": true, + "nullable": false, + "type": "object", + "visibility": "user" + }, + "rpc_server_compress_replies": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Enable compression for internal RPC (remote procedure call) server replies.", + "is_deprecated": false, + "is_enterprise": false, + "name": "rpc_server_compress_replies", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "rpc_server_listen_backlog": { + "c_type": "int", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum TCP connection queue length for Kafka server and internal RPC server. 
If `null` (the default value), no queue length is set.\n\n*Unit*: number of queue entries", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 2147483647, + "minimum": -2147483648, + "name": "rpc_server_listen_backlog", + "needs_restart": true, + "nullable": true, + "type": "integer", + "visibility": "user" + }, + "rpc_server_tcp_recv_buf": { + "c_type": "int", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Internal RPC TCP receive buffer size. If `null` (the default value), no buffer size is set by Redpanda.", + "example": "`65536`", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 2147483647, + "minimum": -2147483648, + "name": "rpc_server_tcp_recv_buf", + "needs_restart": true, + "nullable": true, + "type": "integer" + }, + "rpc_server_tcp_send_buf": { + "c_type": "int", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "Internal RPC TCP send buffer size. 
If `null` (the default value), then no buffer size is set by Redpanda.", + "example": "`65536`", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 2147483647, + "minimum": -2147483648, + "name": "rpc_server_tcp_send_buf", + "needs_restart": true, + "nullable": true, + "type": "integer" + }, + "rpc_server_tls": { + "c_type": "tls_config", + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": { + "crl_file": null, + "enable_renegotiation": null, + "enabled": null, + "key_cert": null, + "min_tls_version": null, + "require_client_auth": null, + "tls_v1_2_cipher_suites": null, + "tls_v1_3_cipher_suites": null, + "truststore_file": null + }, + "defined_in": "src/v/config/node_config.cc", + "description": "TLS configuration for the RPC server.", + "example": "[,yaml]\n----\nredpanda:\n rpc_server_tls:\n enabled: true\n cert_file: \"\"\n key_file: \"\"\n truststore_file: \"\"\n require_client_auth: true\n----\n\nReplace the following placeholders with your values:\n\n* ``: Full path to the RPC TLS certificate file\n* ``: Full path to the RPC TLS private key file\n* ``: Full path to the certificate authority file", + "is_deprecated": false, + "is_enterprise": false, + "name": "rpc_server_tls", + "needs_restart": true, + "nullable": false, + "type": "object", + "visibility": "user" + }, + "rpk_path": { + "c_type": "std::filesystem::path", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "/usr/bin/rpk", + "defined_in": "src/v/config/configuration.cc", + "description": "Path to RPK binary.", + "is_deprecated": false, + "is_enterprise": false, + "name": "rpk_path", + "needs_restart": false, + "nullable": false, + "type": "string", + "visibility": "tunable" + }, + "rps_limit_acls_and_users_operations": { + "c_type": "size_t", + 
"cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1000, + "defined_in": "src/v/config/configuration.cc", + "description": "Rate limit for controller ACLs and user's operations.", + "is_deprecated": false, + "is_enterprise": false, + "name": "rps_limit_acls_and_users_operations", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "rps_limit_configuration_operations": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1000, + "defined_in": "src/v/config/configuration.cc", + "description": "Rate limit for controller configuration operations.", + "is_deprecated": false, + "is_enterprise": false, + "name": "rps_limit_configuration_operations", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "rps_limit_move_operations": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1000, + "defined_in": "src/v/config/configuration.cc", + "description": "Rate limit for controller move operations.", + "is_deprecated": false, + "is_enterprise": false, + "name": "rps_limit_move_operations", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "rps_limit_node_management_operations": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1000, + "defined_in": "src/v/config/configuration.cc", + "description": "Rate limit for controller node management operations.", + "is_deprecated": false, + "is_enterprise": false, + "name": "rps_limit_node_management_operations", + 
"needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "rps_limit_topic_operations": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1000, + "defined_in": "src/v/config/configuration.cc", + "description": "Rate limit for controller topic operations.", + "is_deprecated": false, + "is_enterprise": false, + "name": "rps_limit_topic_operations", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "sampled_memory_profile": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "When `true`, memory allocations are sampled and tracked. A sampled live set of allocations can then be retrieved from the Admin API. 
Additionally, Redpanda will periodically log the top-n allocation sites.", + "is_deprecated": false, + "is_enterprise": false, + "name": "sampled_memory_profile", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "sasl_kerberos_config": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "/etc/krb5.conf", + "defined_in": "src/v/config/configuration.cc", + "description": "The location of the Kerberos `krb5.conf` file for Redpanda.", + "is_deprecated": false, + "is_enterprise": false, + "name": "sasl_kerberos_config", + "needs_restart": false, + "nullable": false, + "type": "string", + "visibility": "user" + }, + "sasl_kerberos_keytab": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "/var/lib/redpanda/redpanda.keytab", + "defined_in": "src/v/config/configuration.cc", + "description": "The location of the Kerberos keytab file for Redpanda.", + "is_deprecated": false, + "is_enterprise": false, + "name": "sasl_kerberos_keytab", + "needs_restart": false, + "nullable": false, + "type": "string", + "visibility": "user" + }, + "sasl_kerberos_principal": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "redpanda", + "defined_in": "src/v/config/configuration.cc", + "description": "The primary of the Kerberos Service Principal Name (SPN) for Redpanda.", + "is_deprecated": false, + "is_enterprise": false, + "name": "sasl_kerberos_principal", + "needs_restart": false, + "nullable": false, + "type": "string", + "visibility": "user" + }, + "sasl_kerberos_principal_mapping": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + 
"cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": [ + "DEFAULT" + ], + "defined_in": "src/v/config/configuration.cc", + "description": "Rules for mapping Kerberos principal names to Redpanda user principals.", + "is_deprecated": false, + "is_enterprise": false, + "items": { + "type": "string" + }, + "name": "sasl_kerberos_principal_mapping", + "needs_restart": false, + "nullable": false, + "type": "array", + "visibility": "user" + }, + "sasl_mechanism": { + "c_type": "ss::sstring", + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "", + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "The SASL mechanism to use when the HTTP Proxy client connects to the Kafka API. These credentials are used when the HTTP Proxy API listener has <>: `none` but the cluster requires authenticated access to the Kafka API.\n\nThis property specifies which individual SASL mechanism the HTTP Proxy client should use, while the cluster-wide available mechanisms are configured using the xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`] cluster property.\n\ninclude::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[]\n\nNOTE: While the cluster-wide xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`] property may support additional mechanisms (PLAIN, GSSAPI, OAUTHBEARER), HTTP Proxy client connections only support SCRAM mechanisms.", + "enum": [ + "SCRAM-SHA-256", + "SCRAM-SHA-512", + "OAUTHBEARER" + ], + "is_deprecated": false, + "is_enterprise": false, + "name": "sasl_mechanism", + "needs_restart": true, + "nullable": false, + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#sasl_mechanisms[`sasl_mechanisms`]" + ], + "type": "string", + "x-enum-metadata": { + 
"OAUTHBEARER": { + "is_enterprise": true + }, + "SCRAM-SHA-256": { + "is_enterprise": false + }, + "SCRAM-SHA-512": { + "is_enterprise": false + } + } + }, + "sasl_mechanisms": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": true, + "cloud_supported": true, + "config_scope": "cluster", + "default": "SCRAM", + "defined_in": "src/v/config/configuration.cc", + "description": "A list of supported SASL mechanisms. Accepted values: `SCRAM`, `GSSAPI`, `OAUTHBEARER`, `PLAIN`. Note that in order to enable PLAIN, you must also enable SCRAM.", + "enterprise_constructor": "simple", + "is_deprecated": false, + "is_enterprise": true, + "items": { + "enum": [ + "GSSAPI", + "SCRAM", + "OAUTHBEARER", + "PLAIN" + ], + "type": "string", + "x-enum-metadata": { + "GSSAPI": { + "is_enterprise": true + }, + "OAUTHBEARER": { + "is_enterprise": true + }, + "PLAIN": { + "is_enterprise": false + }, + "SCRAM": { + "is_enterprise": false + } + } + }, + "name": "sasl_mechanisms", + "needs_restart": false, + "nullable": false, + "related_topics": [], + "type": "array", + "visibility": "user" + }, + "sasl_mechanisms_overrides": { + "c_type": "config::sasl_mechanisms_override", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": [], + "defined_in": "src/v/config/configuration.cc", + "description": "Configure different SASL authentication mechanisms for specific listeners. This overrides the cluster-wide <> setting for the specified listener. Use this when you need different authentication methods on different listeners, such as SCRAM for internal traffic and OAUTHBEARER for external clients. 
The same requirements from `sasl_mechanisms` apply.", + "enterprise_constructor": "simple", + "example": "`[{'listener':'kafka_listener', 'sasl_mechanisms':['SCRAM']}]`", + "is_deprecated": false, + "is_enterprise": true, + "items": { + "type": "object" + }, + "name": "sasl_mechanisms_overrides", + "needs_restart": false, + "nullable": false, + "related_topics": [], + "type": "array", + "visibility": "user" + }, + "schema_registry_always_normalize": { + "aliases": [ + "schema_registry_normalize_on_startup" + ], + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Always normalize schemas. If set, this overrides the `normalize` parameter in requests to the Schema Registry API.", + "is_deprecated": false, + "is_enterprise": false, + "name": "schema_registry_always_normalize", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "schema_registry_api": { + "c_type": "config::rest_authn_endpoint", + "category": "schema-registry", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": [ + "0.0.0.0:8081", + null + ], + "defined_in": "src/v/pandaproxy/schema_registry/configuration.cc", + "description": "Schema Registry API listener address and port", + "example": "[,yaml]\n----\nschema_registry:\n schema_registry_api:\n address: 0.0.0.0\n port: 8081\n authentication_method: http_basic\n----", + "is_deprecated": false, + "is_enterprise": false, + "items": { + "type": "object" + }, + "name": "schema_registry_api", + "needs_restart": true, + "nullable": false, + "type": "array" + }, + "schema_registry_api_tls": { + "c_type": "config::endpoint_tls_config", + "category": "schema-registry", + "cloud_byoc_only": false, + "cloud_editable": false, + 
"cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": [], + "defined_in": "src/v/pandaproxy/schema_registry/configuration.cc", + "description": "TLS configuration for Schema Registry API.", + "is_deprecated": false, + "is_enterprise": false, + "items": { + "type": "object" + }, + "name": "schema_registry_api_tls", + "needs_restart": true, + "nullable": false, + "type": "array" + }, + "schema_registry_avro_use_named_references": { + "c_type": "deprecated_property", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "defined_in": "src/v/config/configuration.cc", + "description": null, + "is_deprecated": true, + "is_enterprise": false, + "name": "schema_registry_avro_use_named_references", + "needs_restart": true, + "nullable": false, + "type": "object", + "version": "v26.1.1-rc2" + }, + "schema_registry_enable_authorization": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": true, + "cloud_readonly": false, + "cloud_supported": true, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Enables ACL-based authorization for Schema Registry requests. 
When `true`, Schema Registry\nuses ACL-based authorization instead of the default `public/user/superuser` authorization model.", + "enterprise_constructor": "restricted_only", + "enterprise_restricted_value": [ + "true" + ], + "enterprise_value": [ + "true" + ], + "is_deprecated": false, + "is_enterprise": true, + "name": "schema_registry_enable_authorization", + "needs_restart": false, + "nullable": false, + "related_topics": [], + "type": "boolean", + "visibility": "user" + }, + "schema_registry_enable_qualified_subjects": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Enable parsing of qualified subject syntax (:.context:subject). When false, subjects are treated literally, as subjects in the default context. When true, qualified syntax is parsed to extract context and subject.", + "is_deprecated": false, + "is_enterprise": false, + "name": "schema_registry_enable_qualified_subjects", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "version": "v26.1.1-rc2", + "visibility": "tunable" + }, + "schema_registry_replication_factor": { + "c_type": "int16_t", + "category": "schema-registry", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "src/v/pandaproxy/schema_registry/configuration.cc", + "description": "Replication factor for internal `_schemas` topic. 
If unset, defaults to the xref:../cluster-properties.adoc#default_topic_replication[`default_topic_replication`] cluster property.", + "example": "[,yaml]\n----\npandaproxy:\n pandaproxy_api:\n address: 0.0.0.0\n port: 8082\n authentication_method: http_basic\n client_cache_max_size: 10\n client_keep_alive: 300000\n consumer_instance_timeout_ms: 300000\n----", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 32767, + "minimum": -32768, + "name": "schema_registry_replication_factor", + "needs_restart": true, + "nullable": true, + "related_topics": [ + "xref:../cluster-properties.adoc#default_topic_replication[`default_topic_replication`]", + "xref:../topic-properties.adoc#default_topic_replication[`default_topic_replication`]" + ], + "type": "integer" + }, + "scram_password": { + "c_type": "ss::sstring", + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "", + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Password to use for SCRAM authentication mechanisms when the HTTP Proxy client connects to the Kafka API. 
This property is required when the HTTP Proxy API listener has <>: `none` but the cluster requires authenticated access to the Kafka API.\n\ninclude::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[]", + "is_deprecated": false, + "is_enterprise": false, + "is_secret": true, + "name": "scram_password", + "needs_restart": true, + "nullable": false, + "type": "string" + }, + "scram_username": { + "c_type": "ss::sstring", + "category": "pandaproxy-client", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": "", + "defined_in": "src/v/kafka/client/configuration.cc", + "description": "Username to use for SCRAM authentication mechanisms when the HTTP Proxy client connects to the Kafka API. This property is required when the HTTP Proxy API listener has <>: `none` but the cluster requires authenticated access to the Kafka API.\n\ninclude::shared:partial$http-proxy-ephemeral-credentials-breaking-change.adoc[]", + "example": "[,yaml]\n----\nschema_registry_client:\n brokers:\n - address: \n port: \n - address: \n port: \n sasl_mechanism: \n scram_username: \n scram_password: \n produce_batch_delay_ms: 0\n produce_batch_record_count: 0\n client_identifier: schema_registry_client\n----\n[,yaml]\n----\naudit_log_client:\n brokers:\n - address: \n port: \n - address: \n port: \n produce_batch_delay_ms: 0\n produce_batch_record_count: 0\n produce_batch_size_bytes: 0\n produce_compression_type: zstd\n produce_ack_level: 1\n produce_shutdown_delay_ms: 3000\n client_identifier: audit_log_client\n----", + "is_deprecated": false, + "is_enterprise": false, + "name": "scram_username", + "needs_restart": true, + "nullable": false, + "type": "string" + }, + "seed_servers": { + "c_type": "seed_server", + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + 
"default": [], + "defined_in": "src/v/config/node_config.cc", + "description": "List of the seed servers used to join current cluster. If the `seed_servers` list is empty the broker will be a cluster root and it will form a new cluster.\n\n* When `empty_seed_starts_cluster` is `true`, Redpanda enables one broker with an empty `seed_servers` list to initiate a new cluster. The broker with an empty `seed_servers` becomes the cluster root, to which other brokers must connect to join the cluster. Brokers looking to join the cluster should have their `seed_servers` populated with the cluster root's address, facilitating their connection to the cluster.\n+\n[IMPORTANT]\n====\nOnly one broker, the designated cluster root, should have an empty `seed_servers` list during the initial cluster bootstrapping. This ensures a single initiation point for cluster formation.\n====\n\n* When `empty_seed_starts_cluster` is `false`, Redpanda requires all brokers to start with a known set of brokers listed in `seed_servers`. The `seed_servers` list must not be empty and should be identical across these initial seed brokers, containing the addresses of all seed brokers. 
Brokers not included in the `seed_servers` list use it to discover and join the cluster, allowing for expansion beyond the foundational members.\n+\n[NOTE]\n====\nThe `seed_servers` list must be consistent across all seed brokers to prevent cluster fragmentation and ensure stable cluster formation.\n====", + "example": ".Example with `empty_seed_starts_cluster: true`\n[,yaml]\n----\n# Cluster root broker (seed starter)\nredpanda:\n empty_seed_starts_cluster: true\n seed_servers: []\n----\n\n[,yaml]\n----\n# Additional brokers joining the cluster\nredpanda:\n empty_seed_starts_cluster: true\n seed_servers:\n - host:\n address: \n port: \n----\n\n.Example with `empty_seed_starts_cluster: false`\n[,yaml]\n----\n# All initial seed brokers use the same configuration\nredpanda:\n empty_seed_starts_cluster: false\n seed_servers:\n - host:\n address: \n port: \n - host:\n address: \n port: \n - host:\n address: \n port: \n----\n\nReplace the following placeholders with your values:\n\n* ``: IP address of the cluster root broker\n* ``: IP addresses of each seed broker in the cluster\n* ``: RPC port for brokers (default: `33145`)", + "is_deprecated": false, + "is_enterprise": false, + "items": { + "type": "seed_server" + }, + "name": "seed_servers", + "needs_restart": true, + "nullable": false, + "type": "array", + "visibility": "user" + }, + "segment.bytes": { + "acceptable_values": "bytes (integer)", + "alternate_cluster_property": "compacted_log_segment_size", + "alternate_cluster_property_doc_file": "cluster-properties.adoc", + "category": "segment-message", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "cluster-properties.adoc", + "config_scope": "topic", + "corresponding_cluster_property": "log_segment_size", + "default": 134217728, + "defined_in": "src/v/kafka/server/handlers/topics/types.h", + "description": "The maximum size of an active log segment for a topic. 
When the size of an active segment exceeds `segment.bytes`, the segment is closed and a new active segment is created. The closed, inactive segment is then eligible to be cleaned up according to retention properties.\n\nWhen `segment.bytes` is set to a positive value, it overrides the cluster property:\n\n* xref:./cluster-properties.adoc#log_segment_size[`log_segment_size`] for non-compacted topics\n* xref:./cluster-properties.adoc#compacted_log_segment_size[`compacted_log_segment_size`] for compacted topics (when `cleanup.policy=compact`)", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "segment.bytes", + "needs_restart": false, + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#log_segment_size[`log_segment_size`]", + "xref:manage:cluster-maintenance/disk-utilization.adoc#configure-segment-size[Configure segment size]", + "xref:manage:cluster-maintenance/disk-utilization.adoc#configure-message-retention[Configure message retention]", + "xref:manage:remote-read-replicas.adoc[Remote Read Replicas]", + "xref:reference:properties/cluster-properties.adoc#compacted_log_segment_size[`compacted_log_segment_size`]" + ], + "type": "integer" + }, + "segment.ms": { + "acceptable_values": "milliseconds (integer)", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "segment-message", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "cluster-properties.adoc", + "config_scope": "topic", + "corresponding_cluster_property": "log_segment_ms", + "default": "2 weeks", + "defined_in": "src/v/kafka/server/handlers/topics/types.h", + "description": "The maximum duration that a log segment of a topic is active (open for writes and not deletable). A periodic event, with `segment.ms` as its period, forcibly closes the active segment and transitions, or rolls, to a new active segment. 
The closed (inactive) segment is then eligible to be cleaned up according to cleanup and retention properties.\n\nIf set to a positive duration, `segment.ms` overrides the cluster property xref:./cluster-properties.adoc#log_segment_ms[`log_segment_ms`]. Values are automatically clamped between the cluster bounds set by xref:./cluster-properties.adoc#log_segment_ms_min[`log_segment_ms_min`] (default: 10 minutes) and xref:./cluster-properties.adoc#log_segment_ms_max[`log_segment_ms_max`] (default: 1 year). If your configured value exceeds these bounds, Redpanda uses the bound value and logs a warning. Check current cluster bounds with `rpk cluster config get log_segment_ms_min log_segment_ms_max`.\n\nFor topics with compaction enabled, `max.compaction.lag.ms` also acts as a limit to `segment.ms`.\n\nThis property supports three states:\n\n* Positive value: Sets the maximum milliseconds a segment remains active before rolling to a new segment.\n* 0: Segments are immediately eligible for closure.\n* Negative value: Disables time-based segment rolling for this topic.", + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "segment.ms", + "needs_restart": false, + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#log_segment_ms[`log_segment_ms`]", + "xref:reference:properties/cluster-properties.adoc#log_segment_ms_min[`log_segment_ms_min`]", + "xref:reference:properties/cluster-properties.adoc#log_segment_ms_max[`log_segment_ms_max`]", + "xref:manage:cluster-maintenance/disk-utilization.adoc#log-rolling[Log rolling]" + ], + "type": "integer" + }, + "segment_appender_flush_timeout_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "1 second", + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum delay until buffered data is written.", + 
"is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "segment_appender_flush_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "segment_fallocation_step": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 33554432, + "defined_in": "src/v/config/configuration.cc", + "description": "Size for segments fallocation.", + "example": "`32768`", + "is_deprecated": false, + "is_enterprise": false, + "name": "segment_fallocation_step", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "space_management_enable": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "Option to explicitly disable automatic disk space management. If this property was explicitly disabled while using v23.2, it will remain disabled following an upgrade.", + "is_deprecated": false, + "is_enterprise": false, + "name": "space_management_enable", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "space_management_enable_override": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Enable automatic space management. 
This option is ignored and deprecated in versions >= v23.3.", + "is_deprecated": false, + "is_enterprise": false, + "name": "space_management_enable_override", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "space_management_max_log_concurrency": { + "c_type": "uint16_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 20, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum parallel logs inspected during space management process.", + "example": "`20`", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 65535, + "minimum": 0, + "name": "space_management_max_log_concurrency", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "space_management_max_segment_concurrency": { + "c_type": "uint16_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 10, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum parallel segments inspected during space management process.", + "example": "`10`", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 65535, + "minimum": 0, + "name": "space_management_max_segment_concurrency", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "storage_compaction_index_memory": { + "c_type": "uint64_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 134217728, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of bytes that may be used on each shard by compaction index writers.", + "example": "`1073741824`", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 18446744073709551615, + 
"minimum": 0, + "name": "storage_compaction_index_memory", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "storage_compaction_key_map_memory": { + "c_type": "uint64_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 134217728, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of bytes that may be used on each shard by compaction key-offset maps. Only applies when <> is set to `true`.", + "example": "`1073741824`", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 18446744073709551615, + "minimum": 0, + "name": "storage_compaction_key_map_memory", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "storage_compaction_key_map_memory_limit_percent": { + "c_type": "double", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 12.0, + "defined_in": "src/v/config/configuration.cc", + "description": "Limit on <>, expressed as a percentage of memory per shard, that bounds the amount of memory used by compaction key-offset maps. 
\n\nNOTE: Memory per shard is computed after <>, and only applies when <> is set to `true`.", + "example": "`12.0`", + "is_deprecated": false, + "is_enterprise": false, + "name": "storage_compaction_key_map_memory_limit_percent", + "needs_restart": true, + "nullable": false, + "type": "number", + "visibility": "tunable" + }, + "storage_failure_injection_config_path": { + "c_type": "std::filesystem::path", + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "src/v/config/node_config.cc", + "description": "Path to the configuration file used for low level storage failure injection.", + "is_deprecated": false, + "is_enterprise": false, + "name": "storage_failure_injection_config_path", + "needs_restart": true, + "nullable": true, + "type": "string", + "visibility": "tunable" + }, + "storage_failure_injection_enabled": { + "c_type": "bool", + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": false, + "defined_in": "src/v/config/node_config.cc", + "description": "If `true`, inject low level storage failures on the write path. 
Do _not_ use for production instances.", + "is_deprecated": false, + "is_enterprise": false, + "name": "storage_failure_injection_enabled", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "storage_ignore_cstore_hints": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "When set, cstore hints are ignored and not used for data access (but are otherwise generated).", + "is_deprecated": false, + "is_enterprise": false, + "name": "storage_ignore_cstore_hints", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "storage_ignore_timestamps_in_future_sec": { + "c_type": "std::chrono::seconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum number of seconds that a record's timestamp can be ahead of a Redpanda broker's clock and still be used when deciding whether to clean up the record for data retention. 
This property makes possible the timely cleanup of records from clients with clocks that are drastically unsynchronized relative to Redpanda.\n\nWhen determining whether to clean up a record with timestamp more than `storage_ignore_timestamps_in_future_sec` seconds ahead of the broker, Redpanda ignores the record's timestamp and instead uses a valid timestamp of another record in the same segment, or (if another record's valid timestamp is unavailable) the timestamp of when the segment file was last modified (mtime).\n\nBy default, `storage_ignore_timestamps_in_future_sec` is disabled (null).\n\n[TIP]\n====\nTo figure out whether to set `storage_ignore_timestamps_in_future_sec` for your system:\n\n. Look for logs with segments that are unexpectedly large and not being cleaned up.\n. In the logs, search for records with unsynchronized timestamps that are further into the future than tolerable by your data retention and storage settings. For example, timestamps 60 seconds or more into the future can be considered to be too unsynchronized.\n. If you find unsynchronized timestamps throughout your logs, determine the number of seconds that the timestamps are ahead of their actual time, and set `storage_ignore_timestamps_in_future_sec` to that value so data retention can proceed.\n. 
If you only find unsynchronized timestamps that are the result of transient behavior, you can disable `storage_ignore_timestamps_in_future_sec`.\n====", + "example": "`3600`", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17179869183, + "minimum": -17179869184, + "name": "storage_ignore_timestamps_in_future_sec", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "storage_max_concurrent_replay": { + "c_type": "uint64_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1024, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of partitions' logs that will be replayed concurrently at startup, or flushed concurrently on shutdown.", + "example": "`2048`", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 18446744073709551615, + "minimum": 0, + "name": "storage_max_concurrent_replay", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "storage_min_free_bytes": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 5368709120, + "defined_in": "src/v/config/configuration.cc", + "description": "Threshold of minimum bytes free space before rejecting producers.", + "is_deprecated": false, + "is_enterprise": false, + "name": "storage_min_free_bytes", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "storage_read_buffer_size": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 131072, + "defined_in": "src/v/config/configuration.cc", + "description": "Size of each read buffer (one per in-flight read, per log segment).", + 
"example": "`31768`", + "is_deprecated": false, + "is_enterprise": false, + "name": "storage_read_buffer_size", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "storage_read_readahead_count": { + "c_type": "int16_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1, + "defined_in": "src/v/config/configuration.cc", + "description": "How many additional reads to issue ahead of current read location.", + "example": "`1`", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 32767, + "minimum": -32768, + "name": "storage_read_readahead_count", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "storage_reserve_min_segments": { + "c_type": "int16_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 2, + "defined_in": "src/v/config/configuration.cc", + "description": "The number of segments per partition that the system will attempt to reserve disk capacity for. 
For example, if the maximum segment size is configured to be 100 MB, and the value of this option is 2, then in a system with 10 partitions Redpanda will attempt to reserve at least 2 GB of disk space.", + "example": "`4`", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 32767, + "minimum": -32768, + "name": "storage_reserve_min_segments", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "storage_space_alert_free_threshold_bytes": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 0, + "defined_in": "src/v/config/configuration.cc", + "description": "Threshold of minimum bytes free space before setting storage space alert.", + "is_deprecated": false, + "is_enterprise": false, + "name": "storage_space_alert_free_threshold_bytes", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "storage_space_alert_free_threshold_percent": { + "c_type": "unsigned", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 5, + "defined_in": "src/v/config/configuration.cc", + "description": "Threshold of minimum percent free space before setting storage space alert.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 4294967295, + "minimum": 0, + "name": "storage_space_alert_free_threshold_percent", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "storage_strict_data_init": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Requires that an empty file named `.redpanda_data_dir` be present in 
the xref:reference:properties/broker-properties.adoc#data_directory[`data_directory`]. If set to `true`, Redpanda will refuse to start if the file is not found in the data directory.", + "is_deprecated": false, + "is_enterprise": false, + "name": "storage_strict_data_init", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:reference:properties/broker-properties.adoc#data_directory[`data_directory`]" + ], + "type": "boolean", + "visibility": "user" + }, + "storage_target_replay_bytes": { + "c_type": "uint64_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 10737418240, + "defined_in": "src/v/config/configuration.cc", + "description": "Target bytes to replay from disk on startup after clean shutdown: controls frequency of snapshots and checkpoints.", + "example": "`2147483648`", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 18446744073709551615, + "minimum": 0, + "name": "storage_target_replay_bytes", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "superusers": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": [], + "defined_in": "src/v/config/configuration.cc", + "description": "List of superuser usernames.", + "is_deprecated": false, + "is_enterprise": false, + "items": { + "type": "string" + }, + "name": "superusers", + "needs_restart": false, + "nullable": false, + "type": "array", + "visibility": "user" + }, + "tls_certificate_name_format": { + "c_type": "tls_name_format", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "legacy", + "defined_in": "src/v/config/configuration.cc", + "description": "The format of the certificate's 
distinguished name to use for mTLS principal mapping. The `legacy` format would appear as 'C=US,ST=California,L=San Francisco,O=Redpanda,CN=redpanda', while the `rfc2253` format would appear as 'CN=redpanda,O=Redpanda,L=San Francisco,ST=California,C=US'.", + "enum": [ + "legacy", + "rfc2253" + ], + "is_deprecated": false, + "is_enterprise": false, + "name": "tls_certificate_name_format", + "needs_restart": false, + "nullable": false, + "type": "string", + "visibility": "user" + }, + "tls_enable_renegotiation": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "TLS client-initiated renegotiation is considered unsafe and is disabled by default. Only re-enable it if you are experiencing issues with your TLS-enabled client. This option has no effect on TLSv1.3 connections as client-initiated renegotiation was removed.", + "is_deprecated": false, + "is_enterprise": false, + "name": "tls_enable_renegotiation", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "tls_min_version": { + "c_type": "tls_version", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": true, + "cloud_supported": true, + "config_scope": "cluster", + "default": "v1.2", + "defined_in": "src/v/config/configuration.cc", + "description": "The minimum TLS version that Redpanda clusters support. 
This property prevents client applications from negotiating a downgrade to the TLS version when they make a connection to a Redpanda cluster.", + "enum": [ + "v1.0", + "v1.1", + "v1.2", + "v1.3" + ], + "is_deprecated": false, + "is_enterprise": false, + "name": "tls_min_version", + "needs_restart": true, + "nullable": false, + "type": "string", + "visibility": "user" + }, + "tls_v1_2_cipher_suites": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256:TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256:TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", + "defined_in": "src/v/config/configuration.cc", + "description": "The encryption algorithms available for TLS 1.2 client connections, specified as a colon-separated list in OpenSSL format. Use this to support older clients that require specific encryption methods.", + "is_deprecated": false, + "is_enterprise": false, + "name": "tls_v1_2_cipher_suites", + "needs_restart": true, + "nullable": false, + "type": "string", + "visibility": "user" + }, + "tls_v1_3_cipher_suites": { + "c_type": "ss::sstring", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:TLS_AES_128_CCM_SHA256", + "defined_in": "src/v/config/configuration.cc", + "description": "The encryption algorithms available for TLS 1.3 client connections, specified as a colon-separated list in OpenSSL format. Most deployments don't need to change this. 
Only modify it to meet specific organizational security requirements.", + "is_deprecated": false, + "is_enterprise": false, + "name": "tls_v1_3_cipher_suites", + "needs_restart": true, + "nullable": false, + "type": "string", + "visibility": "user" + }, + "tombstone_retention_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "The retention time for tombstone records in a compacted topic. Cannot be enabled at the same time as any of `cloud_storage_enabled`, `cloud_storage_enable_remote_read`, or `cloud_storage_enable_remote_write`. A typical default setting is `86400000`, or 24 hours.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "tombstone_retention_ms", + "needs_restart": false, + "nullable": true, + "related_topics": [ + "xref:manage:cluster-maintenance/compaction-settings.adoc#tombstone-record-removal[Tombstone record removal]" + ], + "type": "integer", + "visibility": "user" + }, + "topic_fds_per_partition": { + "c_type": "int32_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 5, + "defined_in": "src/v/config/configuration.cc", + "description": "File descriptors required per partition replica. 
If topic creation results in the ratio of file descriptor limit to partition replicas being lower than this value, creation of new topics fails.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 2147483647, + "minimum": -2147483648, + "name": "topic_fds_per_partition", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "topic_label_aggregation_limit": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": null, + "defined_in": "src/v/config/configuration.cc", + "description": "When the number of topics exceeds this limit, the topic label in generated metrics will be aggregated. If `null`, then there is no limit.", + "is_deprecated": false, + "is_enterprise": false, + "name": "topic_label_aggregation_limit", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "topic_memory_per_partition": { + "c_type": "std::size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "DEFAULT_TOPIC_MEMORY_PER_PARTITION", + "defined_in": "src/v/config/configuration.cc", + "description": "Required memory in bytes per partition replica when creating or altering topics. The total size of the memory pool for partitions is the total memory available to Redpanda times `topic_partitions_memory_allocation_percent`. Each partition created requires `topic_memory_per_partition` bytes from that pool. 
If insufficient memory is available, creating or altering topics fails.", + "is_deprecated": false, + "is_enterprise": false, + "name": "topic_memory_per_partition", + "needs_restart": false, + "nullable": true, + "type": "integer", + "visibility": "tunable" + }, + "topic_partitions_memory_allocation_percent": { + "c_type": "uint32_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 10, + "defined_in": "src/v/config/configuration.cc", + "description": "Percentage of total memory to reserve for topic partitions. See <> for details.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 4294967295, + "minimum": 0, + "name": "topic_partitions_memory_allocation_percent", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "topic_partitions_per_shard": { + "c_type": "uint32_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 5000, + "defined_in": "src/v/config/configuration.cc", + "description": "Maximum number of partition replicas per shard. If topic creation results in the ratio of partition replicas to shards being higher than this value, creation of new topics fails.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 4294967295, + "minimum": 0, + "name": "topic_partitions_per_shard", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "topic_partitions_reserve_shard0": { + "c_type": "uint32_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 0, + "defined_in": "src/v/config/configuration.cc", + "description": "Reserved partition slots on shard (CPU core) 0 on each node. 
If this is greater than or equal to `topic_partitions_per_shard`, no data partitions will be scheduled on shard 0.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 4294967295, + "minimum": 0, + "name": "topic_partitions_reserve_shard0", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "transaction_coordinator_cleanup_policy": { + "c_type": "model::cleanup_policy_bitflags", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "delete", + "defined_in": "src/v/config/configuration.cc", + "description": "Cleanup policy for a transaction coordinator topic.\n\n*Accepted values:*\n\n* `compact`\n* `delete`\n* `[\"compact\",\"delete\"]`\n* `none`", + "enum": [ + "none", + "delete", + "compact" + ], + "example": "`compact,delete`", + "is_deprecated": false, + "is_enterprise": false, + "name": "transaction_coordinator_cleanup_policy", + "needs_restart": false, + "nullable": false, + "type": "string", + "visibility": "user" + }, + "transaction_coordinator_delete_retention_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 604800000, + "default_human_readable": "1 week", + "defined_in": "src/v/config/configuration.cc", + "description": "Delete segments older than this age. 
To ensure transaction state is retained for as long as the longest-running transaction, make sure this is greater than or equal to `transactional_id_expiration_ms`.\n\nFor example, if your typical transactions run for one hour, consider setting both `transaction_coordinator_delete_retention_ms` and `transactional_id_expiration_ms` to at least 3600000 (one hour), or a little over.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "transaction_coordinator_delete_retention_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "transaction_coordinator_log_segment_size": { + "c_type": "uint64_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1073741824, + "defined_in": "src/v/config/configuration.cc", + "description": "The size (in bytes) each log segment should be.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 18446744073709551615, + "minimum": 0, + "name": "transaction_coordinator_log_segment_size", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "transaction_coordinator_partitions": { + "c_type": "int32_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 50, + "defined_in": "src/v/config/configuration.cc", + "description": "Number of partitions for transactions coordinator.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 2147483647, + "minimum": -2147483648, + "name": "transaction_coordinator_partitions", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "transaction_max_timeout_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": 
false, + "config_scope": "cluster", + "default": 900000, + "default_human_readable": "15 minutes", + "defined_in": "src/v/config/configuration.cc", + "description": "The maximum allowed timeout for transactions. If a client-requested transaction timeout exceeds this configuration, the broker returns an error during transactional producer initialization. This guardrail prevents hanging transactions from blocking consumer progress.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "transaction_max_timeout_ms", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "transactional_id_expiration_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 604800000, + "default_human_readable": "1 week", + "defined_in": "src/v/config/configuration.cc", + "description": "Expiration time of producer IDs for both transactional and idempotent producers. Despite the name, this setting applies to all producer types. Measured starting from the time of the last write until now for a given ID.\n\nProducer IDs are automatically removed from memory when they expire, which helps manage memory usage. However, this natural cleanup may not be sufficient for workloads with high producer churn rates. Tune this value based on your application's producer session and transaction lifetimes. 
Consider your longest-running transaction duration plus a buffer to avoid premature expiration.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "transactional_id_expiration_ms", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:develop:transactions.adoc#tune-producer-id-limits[Tune producer ID limits]", + "xref:reference:properties/cluster-properties.adoc#max_concurrent_producer_ids[`max_concurrent_producer_ids`]" + ], + "type": "integer", + "visibility": "user" + }, + "tx_log_stats_interval_s": { + "c_type": "std::chrono::seconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 10, + "default_human_readable": "10 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "How often to log per partition tx stats, works only with debug logging enabled.", + "is_deprecated": true, + "is_enterprise": false, + "maximum": 17179869183, + "minimum": -17179869184, + "name": "tx_log_stats_interval_s", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "deprecated" + }, + "tx_timeout_delay_ms": { + "c_type": "std::chrono::milliseconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 1000, + "default_human_readable": "1 second", + "defined_in": "src/v/config/configuration.cc", + "description": "Delay before scheduling the next check for timed out transactions.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17592186044415, + "minimum": -17592186044416, + "name": "tx_timeout_delay_ms", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "user" + }, + "unsafe_enable_consumer_offsets_delete_retention": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": 
false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": false, + "defined_in": "src/v/config/configuration.cc", + "description": "Enables delete retention of consumer offsets topic. This is an internal-only configuration and should be enabled only after consulting with Redpanda support.", + "is_deprecated": false, + "is_enterprise": false, + "name": "unsafe_enable_consumer_offsets_delete_retention", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "user" + }, + "upgrade_override_checks": { + "c_type": "bool", + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": false, + "defined_in": "src/v/config/node_config.cc", + "description": "Whether to violate safety checks when starting a Redpanda version newer than the cluster's consensus version.", + "is_deprecated": false, + "is_enterprise": false, + "name": "upgrade_override_checks", + "needs_restart": true, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "usage_disk_persistance_interval_sec": { + "c_type": "std::chrono::seconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "300 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "The interval in which all usage stats are written to disk.", + "example": "`300`", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17179869183, + "minimum": -17179869184, + "name": "usage_disk_persistance_interval_sec", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "usage_num_windows": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 24, + 
"defined_in": "src/v/config/configuration.cc", + "description": "The number of windows to persist in memory and disk.", + "example": "`24`", + "is_deprecated": false, + "is_enterprise": false, + "name": "usage_num_windows", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "usage_window_width_interval_sec": { + "c_type": "std::chrono::seconds", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "3600 seconds", + "defined_in": "src/v/config/configuration.cc", + "description": "The width of a usage window, tracking cloud and kafka ingress/egress traffic each interval.", + "example": "`3600`", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17179869183, + "minimum": -17179869184, + "name": "usage_window_width_interval_sec", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "use_fetch_scheduler_group": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "Use a separate scheduler group for fetch processing.", + "is_deprecated": false, + "is_enterprise": false, + "name": "use_fetch_scheduler_group", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "use_kafka_handler_scheduler_group": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "Use a separate scheduler group to handle parsing Kafka protocol requests.", + "is_deprecated": false, + "is_enterprise": false, + "name": "use_kafka_handler_scheduler_group", + 
"needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "use_produce_scheduler_group": { + "c_type": "bool", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": true, + "defined_in": "src/v/config/configuration.cc", + "description": "Use a separate scheduler group to process Kafka produce requests.", + "is_deprecated": false, + "is_enterprise": false, + "name": "use_produce_scheduler_group", + "needs_restart": false, + "nullable": false, + "type": "boolean", + "visibility": "tunable" + }, + "verbose_logging_timeout_sec_max": { + "c_type": "std::chrono::seconds", + "category": "redpanda", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "broker", + "default": null, + "defined_in": "src/v/config/node_config.cc", + "description": "Maximum duration in seconds for verbose (`TRACE` or `DEBUG`) logging. Values configured above this will be clamped. If null (the default) there is no limit. 
Can be overridden in the Admin API on a per-request basis.", + "example": "[,yaml]\n----\nschema_registry:\n schema_registry_api:\n address: 0.0.0.0\n port: 8081\n authentication_method: http_basic\n schema_registry_replication_factor: 3\n mode_mutability: true\n----", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 17179869183, + "minimum": -17179869184, + "name": "verbose_logging_timeout_sec_max", + "needs_restart": true, + "nullable": true, + "related_topics": [ + "xref:reference:properties/cluster-properties.adoc#http_authentication[`http_authentication`]" + ], + "type": "integer", + "visibility": "tunable" + }, + "virtual_cluster_min_producer_ids": { + "c_type": "uint64_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "Maximum value", + "defined_in": "src/v/config/configuration.cc", + "description": "Minimum number of active producers per virtual cluster.", + "is_deprecated": false, + "is_enterprise": false, + "maximum": 18446744073709551615, + "minimum": 0, + "name": "virtual_cluster_min_producer_ids", + "needs_restart": false, + "nullable": false, + "type": "integer", + "visibility": "tunable" + }, + "write.caching": { + "acceptable_values": "", + "alternate_cluster_property": "", + "alternate_cluster_property_doc_file": "", + "category": "performance-cluster", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "cluster_property_doc_file": "cluster-properties.adoc", + "config_scope": "topic", + "corresponding_cluster_property": "write_caching_default", + "default": "false", + "defined_in": "src/v/kafka/server/handlers/topics/types.h", + "description": "The write caching mode to apply to a topic.\n\nWhen `write.caching` is set, it overrides the cluster property xref:cluster-properties.adoc#write_caching_default[`write_caching_default`]. 
Write caching acknowledges a message as soon as it is received and acknowledged on a majority of brokers, without waiting for it to be written to disk. With `acks=all`, this provides lower latency while still ensuring that a majority of brokers acknowledge the write. Fsyncs follow `flush.ms` and `flush.bytes`, whichever is reached first.", + "enum": [ + "true", + "false", + "disabled" + ], + "is_deprecated": false, + "is_enterprise": false, + "is_topic_property": true, + "name": "write.caching", + "needs_restart": false, + "related_topics": [ + "xref:develop:manage-topics/config-topics.adoc#configure-write-caching[Write caching]", + "xref:manage:tiered-storage.adoc[Tiered Storage]", + "xref:reference:properties/cluster-properties.adoc#write_caching_default[`write_caching_default`]", + "xref:cluster-properties.adoc#write_caching_default[`write_caching_default`]" + ], + "type": "string" + }, + "write_caching_default": { + "c_type": "model::write_caching_mode", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": "false", + "defined_in": "src/v/config/configuration.cc", + "description": "The default write caching mode to apply to user topics. Write caching acknowledges a message as soon as it is received and acknowledged on a majority of brokers, without waiting for it to be written to disk. With `acks=all`, this provides lower latency while still ensuring that a majority of brokers acknowledge the write. 
\n\nFsyncs follow <> and <>, whichever is reached first.\n\nThe `write_caching_default` cluster property can be overridden with the xref:reference:properties/topic-properties.adoc#writecaching[`write.caching`] topic property.", + "enum": [ + "true", + "false", + "disabled" + ], + "example": "`true`", + "is_deprecated": false, + "is_enterprise": false, + "name": "write_caching_default", + "needs_restart": false, + "nullable": false, + "related_topics": [ + "xref:reference:properties/topic-properties.adoc#writecaching[`write.caching`]", + "xref:develop:manage-topics/config-topics.adoc#configure-write-caching[Write caching]" + ], + "type": "string", + "visibility": "user" + }, + "zstd_decompress_workspace_bytes": { + "c_type": "size_t", + "cloud_byoc_only": false, + "cloud_editable": false, + "cloud_readonly": false, + "cloud_supported": false, + "config_scope": "cluster", + "default": 8388608, + "defined_in": "src/v/config/configuration.cc", + "description": "Size of the zstd decompression workspace.", + "is_deprecated": false, + "is_enterprise": false, + "name": "zstd_decompress_workspace_bytes", + "needs_restart": true, + "nullable": false, + "type": "integer", + "visibility": "tunable" + } + } +} \ No newline at end of file diff --git a/modules/reference/pages/properties/cluster-properties.adoc b/modules/reference/pages/properties/cluster-properties.adoc index f3460c1adc..053173d11d 100644 --- a/modules/reference/pages/properties/cluster-properties.adoc +++ b/modules/reference/pages/properties/cluster-properties.adoc @@ -10,4 +10,4 @@ NOTE: Some cluster properties require that you restart the cluster for any updat == Cluster configuration -include::reference:partial$properties/cluster-properties.adoc[tags=!deprecated;!exclude-from-docs] \ No newline at end of file +include::reference:partial$properties/cluster-properties.adoc[tags=!deprecated;!exclude-from-docs] diff --git a/modules/reference/pages/properties/topic-properties.adoc 
b/modules/reference/pages/properties/topic-properties.adoc index ce669ba8e9..e99bff7283 100644 --- a/modules/reference/pages/properties/topic-properties.adoc +++ b/modules/reference/pages/properties/topic-properties.adoc @@ -33,6 +33,112 @@ These properties control disk flushing, replication, and how topics interact wit include::reference:partial$properties/topic-properties.adoc[tags=category-performance-cluster;!deprecated;!exclude-from-docs] +The maximum bytes not fsynced per partition. If this configured threshold is reached, the log is automatically fsynced, even though it wasn't explicitly requested. + +*Type:* integer + +*Unit:* bytes + +*Accepted values:* [`1`, `9223372036854775807`] + +*Default:* `262144` + +*Related cluster property:* xref:./cluster-properties.adoc#flush_bytes[`flush_bytes`] + +**Related topics**: + +- xref:develop:produce-data/configure-producers.adoc[] + +--- +[[flushms]] +=== flush.ms + +The maximum delay (in ms) between two subsequent fsyncs. After this delay, the log is automatically fsynced. + +*Type:* integer + +*Unit:* milliseconds + +*Accepted values:* [`1`, `9223372036854775`] + +*Default:* `100` + +*Related cluster property:* xref:./cluster-properties.adoc#flush_ms[`flush_ms`] + +**Related topics**: + +- xref:develop:produce-data/configure-producers.adoc[] + +--- +[[redpandaleaderspreference]] +=== redpanda.leaders.preference + +The preferred location (rack) for partition leaders of a topic. + +This property inherits the value from the config_ref:default_leaders_preference,true,properties/cluster-properties[] cluster configuration property. You may override the cluster-wide setting by specifying the value for individual topics. + +If the cluster configuration property config_ref:enable_rack_awareness,true,properties/cluster-properties[] is set to `false`, Leader Pinning is disabled across the cluster. + +*Type:* string + +*Default:* `none` + +**Values**: + +- `none`: Opt out the topic from Leader Pinning. 
+- `racks:[<rack1>,<rack2>,...]`: Specify the preferred location (rack) of all topic partition leaders. The list can contain one or more rack IDs. If you specify multiple IDs, Redpanda tries to distribute the partition leader locations equally across brokers in these racks. + +**Related topics**: + +- xref:develop:produce-data/leader-pinning.adoc[Leader pinning] + +--- +[[replicationfactor]] +=== replication.factor + +The number of replicas of a topic to save in different nodes (brokers) of a cluster. + +If `replication.factor` is set to a positive value, it overrides the cluster property xref:./cluster-properties.adoc#default_topic_replication[default_topic_replication] for the topic. + +NOTE: Although `replication.factor` isn't returned or displayed by xref:reference:rpk/rpk-topic/rpk-topic-describe.adoc[`rpk topic describe`] as a valid Kafka property, you can set it using xref:reference:rpk/rpk-topic/rpk-topic-alter-config.adoc[`rpk topic alter-config`]. When the `replication.factor` of a topic is altered, it isn't simply a property value that's updated, but rather the actual replica sets of topic partitions that are changed. + +*Type:* integer + +*Accepted values:* [`1`, `512`] + +*Default:* null + +*Related cluster property:* xref:./cluster-properties.adoc#default_topic_replication[`default_topic_replication`] + +**Related topics**: + +- xref:develop:manage-topics/config-topics.adoc#choose-the-replication-factor[Choose the replication factor] +- xref:develop:manage-topics/config-topics.adoc#change-the-replication-factor[Change the replication factor] + +--- +[[writecaching]] +=== write.caching + +The write caching mode to apply to a topic. + +When `write.caching` is set, it overrides the cluster property xref:cluster-properties.adoc#write_caching_default[`write_caching_default`]. Write caching acknowledges a message as soon as it is received and acknowledged on a majority of brokers, without waiting for it to be written to disk. 
With `acks=all`, this provides lower latency while still ensuring that a majority of brokers acknowledge the write. Fsyncs follow <<flushms,`flush.ms`>> and <<flushbytes,`flush.bytes`>>, whichever is reached first. + +*Type:* boolean + +*Default:* `false` + +**Values**: + +- `true` - Enables write caching for a topic, according to <<flushms,`flush.ms`>> and <<flushbytes,`flush.bytes`>>. +- `false` - Disables write caching for a topic, according to <<flushms,`flush.ms`>> and <<flushbytes,`flush.bytes`>>. + +*Related cluster property:* xref:./cluster-properties.adoc#write_caching_default[`write_caching_default`] + +**Related topics**: + +- xref:develop:manage-topics/config-topics.adoc#configure-write-caching[Write caching] + +--- == Tiered Storage properties Configure properties to manage topics for xref:manage:tiered-storage.adoc[Tiered Storage]. diff --git a/modules/reference/pages/rpk/rpk-redpanda/rpk-redpanda-mode.adoc b/modules/reference/pages/rpk/rpk-redpanda/rpk-redpanda-mode.adoc index 9cabe78320..efd6b2a553 100644 --- a/modules/reference/pages/rpk/rpk-redpanda/rpk-redpanda-mode.adoc +++ b/modules/reference/pages/rpk/rpk-redpanda/rpk-redpanda-mode.adoc @@ -10,7 +10,7 @@ Development mode (`development` or `dev`) includes the following development-onl * Sets `developer_mode` to `true`. This starts Redpanda with dev-mode only settings, including: ** No minimal memory limits are enforced. ** No core assignment rules for Redpanda nodes are enforced. - ** Enables write caching, which is a relaxed mode of `acks=all` that acknowledges a message as soon as it is received and acknowledged on a majority of brokers, without waiting for it to fsync to disk. This provides lower latency while still ensuring that a majority of brokers acknowledge the write. For more information, or to disable this, see xref:develop:config-topics.adoc#configure-write-caching[write caching]. + ** Enables write caching, which is a relaxed mode of `acks=all` that acknowledges a message as soon as it is received and acknowledged on a majority of brokers, without waiting for it to fsync to disk. 
This provides lower latency while still ensuring that a majority of brokers acknowledge the write. For more information, or to disable this, see xref:develop:manage-topics/config-topics.adoc#configure-write-caching[write caching]. ** Bypasses `fsync` (from https://docs.seastar.io/master/structseastar_1_1reactor%5F%5Foptions.html#ad66cb23f59ed5dfa8be8189313988692[Seastar option `unsafe_bypass_fsync`^]), which results in unrealistically fast clusters and may result in data loss. * Sets `overprovisioned` to `true`. Redpanda expects a dev system to be an overprovisioned environment. Based on a https://docs.seastar.io/master/structseastar_1_1reactor%5F%5Foptions.html#a0caf6c2ad579b8c22e1352d796ec3c1d[Seastar option^], setting `overprovisioned` disables thread affinity, zeros idle polling time, and disables busy-poll for disk I/O. * Sets all autotuner xref:./rpk-redpanda-tune-list.adoc#tuners[tuners] to `false`. The tuners are intended to run only for production mode. diff --git a/modules/reference/pages/rpk/rpk-security/rpk-security-role-assign.adoc b/modules/reference/pages/rpk/rpk-security/rpk-security-role-assign.adoc index 5670fc9dd7..41aaf549c2 100644 --- a/modules/reference/pages/rpk/rpk-security/rpk-security-role-assign.adoc +++ b/modules/reference/pages/rpk/rpk-security/rpk-security-role-assign.adoc @@ -7,18 +7,30 @@ The `--principal` flag accepts principals with the format `:> and <> to control when uploads occur. An upload is triggered when any of these three thresholds is reached. 
+ +[cols="1s,2a"] +|=== +| Property | Value + +| Type +| `integer` + + + +| Range +| [`-17592186044416`, `17592186044415`] + +| Default +| +ifdef::env-cloud[] +Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +`250` (250 milliseconds) +endif::[] + +| Nullable +| No + +| Requires restart +| No + +ifndef::env-cloud[] +| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] +| Yes +endif::[] + +ifndef::env-cloud[] +| Visibility +| User +endif::[] + +| Related topics +| +* xref:develop:manage-topics/cloud-topics.adoc[Cloud Topics] + +* xref:reference:properties/cluster-properties.adoc#cloud_topics_produce_batching_size_threshold[`cloud_topics_produce_batching_size_threshold`] + +* xref:reference:properties/cluster-properties.adoc#cloud_topics_produce_cardinality_threshold[`cloud_topics_produce_cardinality_threshold`] + +|=== + + +=== cloud_topics_reconciliation_interval + +Time interval at which Redpanda reconciles data between short-term local storage and long-term object storage for Cloud Topics. During this reconciliation process, Redpanda optimizes the storage layout of data in short-term storage to improve the cost and performance associated with accessing data. After the reconciliation process has moved data into long-term storage, the data in short-term storage is subject to removal by a garbage collection process. 
+ +[cols="1s,2a"] +|=== +| Property | Value + +| Type +| `string` + + + +| Default +| +ifdef::env-cloud[] +Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +`null` +endif::[] + +| Nullable +| No + +ifndef::env-cloud[] +| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] +| Yes +endif::[] + +ifndef::env-cloud[] +| Visibility +| User +endif::[] + +| Related topics +| +* xref:develop:manage-topics/cloud-topics.adoc[Cloud Topics] + +* xref:reference:properties/cluster-properties.adoc#cloud_topics_long_term_garbage_collection_interval[`cloud_topics_long_term_garbage_collection_interval`] + +|=== + + +=== cloud_topics_reconciliation_max_interval + +ifndef::env-cloud[] +*Introduced in v26.1.1-rc2* +endif::[] + +Maximum reconciliation interval for adaptive scheduling. + +[cols="1s,2a"] +|=== +| Property | Value + +| Type +| `integer` + + + +| Range +| [`-17592186044416`, `17592186044415`] + +| Default +| +ifdef::env-cloud[] +Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +`10000` (10 seconds) +endif::[] + +| Nullable +| No + +| Requires restart +| No + +ifndef::env-cloud[] +| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] +| Yes +endif::[] + +ifndef::env-cloud[] +| Visibility +| Tunable +endif::[] + +|=== + + +=== cloud_topics_reconciliation_max_object_size + +ifndef::env-cloud[] +*Introduced in v26.1.1-rc2* +endif::[] + +Maximum size in bytes for L1 objects produced by the reconciler. With the default target fill ratio of 0.8, this gives an effective target object size of 64 MiB. 
+ +[cols="1s,2a"] +|=== +| Property | Value + +| Type +| `integer` + + + +| Default +| +ifdef::env-cloud[] +Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +`83886080` +endif::[] + +| Nullable +| No + +| Requires restart +| No + +ifndef::env-cloud[] +| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] +| Yes +endif::[] + +ifndef::env-cloud[] +| Visibility +| Tunable +endif::[] + +|=== + + +=== cloud_topics_reconciliation_min_interval + +ifndef::env-cloud[] +*Introduced in v26.1.1-rc2* +endif::[] + +Minimum reconciliation interval for adaptive scheduling. The reconciler will not run more frequently than this. + +[cols="1s,2a"] +|=== +| Property | Value + +| Type +| `integer` + + + +| Range +| [`-17592186044416`, `17592186044415`] + +| Default +| +ifdef::env-cloud[] +Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +`250` (250 milliseconds) +endif::[] + +| Nullable +| No + +| Requires restart +| No + +ifndef::env-cloud[] +| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] +| Yes +endif::[] + +ifndef::env-cloud[] +| Visibility +| Tunable +endif::[] + +|=== + + +=== cloud_topics_reconciliation_parallelism + +ifndef::env-cloud[] +*Introduced in v26.1.1-rc2* +endif::[] + +Maximum number, per shard, of concurrent objects built by reconciliation + +[cols="1s,2a"] +|=== +| Property | Value + +| Type +| `integer` + + + +| Default +| +ifdef::env-cloud[] +Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +`8` +endif::[] + +| Nullable +| No + +| Requires restart +| Yes + +ifndef::env-cloud[] +| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] +| Yes +endif::[] + +ifndef::env-cloud[] +| Visibility +| Tunable +endif::[] + +|=== + + +=== cloud_topics_reconciliation_slowdown_blend + +ifndef::env-cloud[] +*Introduced in v26.1.1-rc2* +endif::[] + +Blend factor for slowing down reconciliation (0.0 to 1.0). 
Higher values mean reconciliation lowers its frequency faster when trying to find a frequency that produces well-sized objects. Generally this should be lower than the speedup blend, because reconciliation has fewer opportunities to adapt its frequency when it runs less frequently. + +[cols="1s,2a"] +|=== +| Property | Value + +| Type +| `number` + + + +| Default +| +ifdef::env-cloud[] +Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +`0.4` +endif::[] + +| Nullable +| No + +| Requires restart +| No + +ifndef::env-cloud[] +| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] +| Yes +endif::[] + +ifndef::env-cloud[] +| Visibility +| Tunable +endif::[] + +|=== + + +=== cloud_topics_reconciliation_speedup_blend + +ifndef::env-cloud[] +*Introduced in v26.1.1-rc2* +endif::[] + +Blend factor for speeding up reconciliation (0.0 to 1.0). Higher values mean reconciliation increases its frequency faster when trying to find a frequency that produces well-sized objects. + +[cols="1s,2a"] +|=== +| Property | Value + +| Type +| `number` + + + +| Default +| +ifdef::env-cloud[] +Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +`0.9` +endif::[] + +| Nullable +| No + +| Requires restart +| No + +ifndef::env-cloud[] +| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] +| Yes +endif::[] + +ifndef::env-cloud[] +| Visibility +| Tunable +endif::[] + +|=== + + +=== cloud_topics_reconciliation_target_fill_ratio + +ifndef::env-cloud[] +*Introduced in v26.1.1-rc2* +endif::[] + +Target fill ratio for L1 objects. The reconciler adapts its interval to produce objects at approximately this fill level (0.0 to 1.0). 
+ +[cols="1s,2a"] +|=== +| Property | Value + +| Type +| `number` + + + +| Default +| +ifdef::env-cloud[] +Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +`0.8` +endif::[] + +| Nullable +| No + +| Requires restart +| No + +ifndef::env-cloud[] +| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] +| Yes +endif::[] + +ifndef::env-cloud[] +| Visibility +| Tunable +endif::[] + +|=== + + +=== cloud_topics_short_term_gc_backoff_interval + +ifndef::env-cloud[] +*Introduced in v25.3.3* +endif::[] + +The interval, in milliseconds, between invocations of the L0 garbage collection work loop when no progress is being made or errors are occurring. + +L0 (level-zero) objects are short-term data objects in Tiered Storage that are periodically garbage collected. When GC encounters errors or cannot make progress (for example, if there are no objects eligible for deletion), this backoff interval prevents excessive retries. + +Increase this value to reduce system load when GC cannot make progress. Decrease it if you need faster retry attempts after transient errors. + +[cols="1s,2a"] +|=== +| Property | Value + +| Type +| `integer` + + + +| Range +| [`-17592186044416`, `17592186044415`] + +| Default +| +ifdef::env-cloud[] +Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +`60000` (1 minute) +endif::[] + +| Nullable +| No + +| Requires restart +| No + +ifndef::env-cloud[] +| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] +| Yes +endif::[] + +ifndef::env-cloud[] +| Visibility +| Tunable +endif::[] + +|=== + + +=== cloud_topics_short_term_gc_interval + +ifndef::env-cloud[] +*Introduced in v25.3.3* +endif::[] + +The interval, in milliseconds, between invocations of the L0 (level-zero) garbage collection work loop when progress is being made. + +L0 objects are short-term data objects in Tiered Storage associated with global epochs. 
This property controls how frequently GC runs when it successfully deletes objects. Lower values increase GC frequency, which can help maintain lower object counts but may increase S3 API usage. + +Decrease this value if L0 object counts are growing too quickly and you need more aggressive garbage collection. Increase it to reduce S3 API costs in clusters with lower ingestion rates. + +[cols="1s,2a"] +|=== +| Property | Value + +| Type +| `integer` + + + +| Range +| [`-17592186044416`, `17592186044415`] + +| Default +| +ifdef::env-cloud[] +Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +`10000` (10 seconds) +endif::[] + +| Nullable +| No + +| Requires restart +| No + +ifndef::env-cloud[] +| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] +| Yes +endif::[] + +ifndef::env-cloud[] +| Visibility +| Tunable +endif::[] + +|=== + + +=== cloud_topics_short_term_gc_minimum_object_age + +ifndef::env-cloud[] +*Introduced in v25.3.3* +endif::[] + +The minimum age, in milliseconds, of an L0 (level-zero) object before it becomes eligible for garbage collection. + +This grace period delays deletion of L0 objects even after they become eligible based on epoch. The delay provides a safety buffer that can support recovery in cases involving accidental deletion or other operational issues. + +Increase this value to extend the retention window for L0 objects, providing more time for recovery from operational errors. Decrease it to free up object storage space more quickly, but with less protection against accidental deletion. 
+ +[cols="1s,2a"] +|=== +| Property | Value + +| Type +| `integer` + + + +| Range +| [`-17592186044416`, `17592186044415`] + +| Default +| +ifdef::env-cloud[] +Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +`43200000` (12 hours) +endif::[] + +| Nullable +| No + +| Requires restart +| No + +ifndef::env-cloud[] +| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] +| Yes +endif::[] + +ifndef::env-cloud[] +| Visibility +| Tunable +endif::[] + +|=== + + +=== cloud_topics_upload_part_size + +ifndef::env-cloud[] +*Introduced in v26.1.1-rc2* +endif::[] + +The part size in bytes used for multipart uploads. The minimum of 5 MiB is the smallest non-terminal part size allowed by cloud object storage providers. + +[cols="1s,2a"] +|=== +| Property | Value + +| Type +| `integer` + + + +| Default +| +ifdef::env-cloud[] +Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +`16777216` +endif::[] + +| Nullable +| No + +| Requires restart +| Yes + +ifndef::env-cloud[] +| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] +| Yes +endif::[] + +ifndef::env-cloud[] +| Visibility +| Tunable +endif::[] + +|=== + + +=== cluster_id + +Cluster identifier. 
+ +[cols="1s,2a"] +|=== +| Property | Value + +| Type +| `string` + + + +| Default +| +ifdef::env-cloud[] +Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +`null` +endif::[] + +| Nullable +| Yes + +| Requires restart +| No + +ifndef::env-cloud[] +| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] +| No +endif::[] + +|=== + === compacted_log_segment_size @@ -2231,208 +3018,39 @@ ifndef::env-cloud[] endif::[] | Nullable -| Yes - -| Requires restart -| No - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -ifndef::env-cloud[] -| Visibility -| Tunable -endif::[] - -|=== - - -=== controller_snapshot_max_age_sec - -Maximum amount of time before Redpanda attempts to create a controller snapshot after a new controller command appears. - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `integer` - - - -| Range -| [`-17179869184`, `17179869183`] - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`60` (1 minute) -endif::[] - -| Nullable -| No - -| Unit -| Seconds - -| Requires restart -| No - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -ifndef::env-cloud[] -| Visibility -| Tunable -endif::[] - -|=== - - -// tag::deprecated[] -=== coproc_max_batch_size - -No description available. - - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `deprecated_property` - - - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`null` -endif::[] - -| Nullable -| No - -| Requires restart -| Yes - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -|=== - -// end::deprecated[] - -// tag::deprecated[] -=== coproc_max_inflight_bytes - -No description available. 
- - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `deprecated_property` - - - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`null` -endif::[] - -| Nullable -| No - -| Unit -| Bytes - -| Requires restart -| Yes - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -|=== - -// end::deprecated[] - -// tag::deprecated[] -=== coproc_max_ingest_bytes - -No description available. - - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `deprecated_property` - - - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`null` -endif::[] - -| Nullable -| No - -| Unit -| Bytes +| Yes | Requires restart -| Yes +| No ifndef::env-cloud[] | Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] | Yes endif::[] -|=== +ifndef::env-cloud[] +| Visibility +| Tunable +endif::[] -// end::deprecated[] +|=== -// tag::deprecated[] -=== coproc_offset_flush_interval_ms -No description available. +=== controller_snapshot_max_age_sec +Maximum amount of time before Redpanda attempts to create a controller snapshot after a new controller command appears. [cols="1s,2a"] |=== | Property | Value | Type -| `deprecated_property` +| `integer` + +| Range +| [`-17179869184`, `17179869183`] | Default | @@ -2440,26 +3058,30 @@ ifdef::env-cloud[] Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -`null` +`60` (1 minute) endif::[] | Nullable | No | Unit -| Milliseconds +| Seconds | Requires restart -| Yes +| No ifndef::env-cloud[] | Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] | Yes endif::[] +ifndef::env-cloud[] +| Visibility +| Tunable +endif::[] + |=== -// end::deprecated[] === core_balancing_continuous @@ -2690,53 +3312,6 @@ endif::[] |=== -=== create_topic_timeout_ms - -Timeout, in milliseconds, to wait for new topic creation. 
- -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `integer` - - - -| Range -| [`-17592186044416`, `17592186044415`] - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`2000` (2 seconds) -endif::[] - -| Nullable -| No - -| Unit -| Milliseconds - -| Requires restart -| No - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -ifndef::env-cloud[] -| Visibility -| Tunable -endif::[] - -|=== - - // tag::redpanda-cloud[] === data_transforms_binary_max_size @@ -3337,45 +3912,6 @@ endif::[] |=== -// tag::deprecated[] -=== datalake_disk_space_monitor_interval - -No description available. - - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `deprecated_property` - - - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`null` -endif::[] - -| Nullable -| No - -| Requires restart -| Yes - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -|=== - -// end::deprecated[] - === datalake_disk_usage_overage_coeff The datalake disk usage monitor reclaims the overage multiplied by this this coefficient to compensate for data that is written during the idle period between control loop invocations. @@ -3968,6 +4504,63 @@ endif::[] |=== +=== default_redpanda_storage_mode + +ifndef::env-cloud[] +*Introduced in v26.1.1-rc2* +endif::[] + +Default storage mode for newly-created topics. Determines how topic data is stored: `local` for broker-local storage only, `tiered` for both local and object storage, `cloud` for object-only storage using the Cloud Topics architecture, or `unset` to use legacy remote.read/write configs for backwards compatibility. 
+ +[cols="1s,2a"] +|=== +| Property | Value + +| Type +| `string` (enum) + +| Accepted values +| +ifndef::env-cloud[] +`local`, `tiered`, `cloud`, `unset` +endif::[] +ifdef::env-cloud[] +`local`, `tiered`, `cloud`, `unset` +endif::[] + + +| Default +| +ifdef::env-cloud[] +Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +`unset` +endif::[] + +| Nullable +| No + +| Requires restart +| No + +ifndef::env-cloud[] +| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] +| Yes +endif::[] + +ifndef::env-cloud[] +| Visibility +| User +endif::[] + +| Example +| +`tiered` + +|=== + + === default_topic_partitions Default number of partitions per topic. @@ -4106,6 +4699,61 @@ endif::[] |=== +=== delete_topic_enable + +ifndef::env-cloud[] +*Introduced in v26.1.1-rc2* +endif::[] + +Enable or disable topic deletion via the Kafka DeleteTopics API. When set to `false`, all topic deletion requests are rejected with error code 73 (TOPIC_DELETION_DISABLED). This is a cluster-wide safety setting that cannot be overridden by superusers. Topics in `kafka_nodelete_topics` are always protected regardless of this setting. + +ifndef::env-cloud[] +.Enterprise license required +[NOTE] +==== +The following values require an Enterprise license: `false` + +For license details, see xref:get-started:licensing/index.adoc[Redpanda Licensing]. +==== +endif::[] + +[cols="1s,2a"] +|=== +| Property | Value + +| Type +| `boolean` + + + +| Default +| +ifdef::env-cloud[] +Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +`true` +endif::[] + +| Nullable +| No + +| Requires restart +| No + +ifndef::env-cloud[] +| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] +| Yes +endif::[] + +ifndef::env-cloud[] +| Visibility +| User +endif::[] + +|=== + + === disable_batch_cache Disable batch cache in log manager. 
@@ -4338,60 +4986,21 @@ endif::[] | Nullable | No -| Unit -| Milliseconds - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -ifndef::env-cloud[] -| Visibility -| User -endif::[] - -|=== - - -// tag::deprecated[] -=== enable_admin_api - -No description available. - - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `deprecated_property` - - - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`null` -endif::[] - -| Nullable -| No - -| Requires restart -| Yes - +| Unit +| Milliseconds + ifndef::env-cloud[] | Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] | Yes endif::[] +ifndef::env-cloud[] +| Visibility +| User +endif::[] + |=== -// end::deprecated[] // tag::deprecated[] === enable_auto_rebalance_on_node_add @@ -4588,45 +5197,6 @@ endif::[] |=== -// tag::deprecated[] -=== enable_coproc - -No description available. - - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `deprecated_property` - - - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`null` -endif::[] - -| Nullable -| No - -| Requires restart -| Yes - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -|=== - -// end::deprecated[] - // tag::exclude-from-docs[] === enable_developmental_unrecoverable_data_corrupting_features @@ -5706,87 +6276,6 @@ endif::[] |=== -// tag::deprecated[] -=== find_coordinator_timeout_ms - -No description available. 
- - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `deprecated_property` - - - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`null` -endif::[] - -| Nullable -| No - -| Unit -| Milliseconds - -| Requires restart -| Yes - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -|=== - -// end::deprecated[] - -// tag::deprecated[] -=== full_raft_configuration_recovery_pattern - -No description available. - - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `deprecated_property` - - - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`null` -endif::[] - -| Nullable -| No - -| Requires restart -| Yes - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -|=== - -// end::deprecated[] - === group_initial_rebalance_delay Delay added to the rebalance phase to wait for new members. @@ -6524,7 +7013,7 @@ ifndef::env-cloud[] *Introduced in v25.3.5* endif::[] -The default Iceberg catalog namespace where Redpanda creates tables. Supports nested namespaces as an array of strings. +The default namespace (database name) for Iceberg tables. All tables created by Redpanda will be placed in this namespace within the Iceberg catalog. Supports nested namespaces as an array of strings. IMPORTANT: This value must be configured before enabling Iceberg and must not be changed afterward. Changing it will cause Redpanda to lose track of existing tables. @@ -8240,45 +8729,6 @@ endif::[] |=== -// tag::deprecated[] -=== id_allocator_replication - -No description available. 
- - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `deprecated_property` - - - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`null` -endif::[] - -| Nullable -| No - -| Requires restart -| Yes - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -|=== - -// end::deprecated[] - === initial_retention_local_target_bytes_default Initial local retention size target for partitions of topics with xref:manage:tiered-storage.adoc[Tiered Storage] enabled. If no initial local target retention is configured, then all locally-retained data will be delivered to learner when joining the partition replica set. @@ -8370,55 +8820,13 @@ endif::[] |=== -=== internal_topic_replication_factor - -Target replication factor for internal topics. - -*Unit*: number of replicas per topic. - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `integer` - - - -| Range -| [`-2147483648`, `2147483647`] - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`3` -endif::[] - -| Nullable -| No - -| Requires restart -| Yes - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] +=== internal_rpc_request_timeout_ms ifndef::env-cloud[] -| Visibility -| User +*Introduced in v26.1.1-rc2* endif::[] -|=== - - -=== join_retry_timeout_ms - -Time between cluster join retries in milliseconds. +Default timeout for RPC requests between Redpanda nodes. 
[cols="1s,2a"] |=== @@ -8438,7 +8846,7 @@ ifdef::env-cloud[] Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -`5000` (5 seconds) +`10000` (10 seconds) endif::[] | Nullable @@ -8448,7 +8856,7 @@ endif::[] | Milliseconds | Requires restart -| Yes +| No ifndef::env-cloud[] | Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] @@ -8463,48 +8871,11 @@ endif::[] |=== -// tag::deprecated[] -=== kafka_admin_topic_api_rate - -No description available. - - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `deprecated_property` - - - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`null` -endif::[] - -| Nullable -| No - -| Requires restart -| Yes - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -|=== - -// end::deprecated[] +=== internal_topic_replication_factor -=== kafka_batch_max_bytes +Target replication factor for internal topics. -The default maximum batch size for topics if the topic property xref:reference:properties/topic-properties.adoc[`message.max.bytes`] is not set. If the batch is compressed, the limit applies to the compressed batch size. +*Unit*: number of replicas per topic. 
[cols="1s,2a"] |=== @@ -8515,8 +8886,8 @@ The default maximum batch size for topics if the topic property xref:reference:p -| Maximum -| `4294967295` +| Range +| [`-2147483648`, `2147483647`] | Default | @@ -8524,17 +8895,14 @@ ifdef::env-cloud[] Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -`1048576` +`3` endif::[] | Nullable | No -| Unit -| Bytes - | Requires restart -| No +| Yes ifndef::env-cloud[] | Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] @@ -8543,42 +8911,43 @@ endif::[] ifndef::env-cloud[] | Visibility -| Tunable +| User endif::[] -| Related topics -|xref:reference:properties/topic-properties.adoc[`message.max.bytes`] - |=== -// tag::deprecated[] -=== kafka_client_group_byte_rate_quota - -No description available. +=== join_retry_timeout_ms +Time between cluster join retries in milliseconds. [cols="1s,2a"] |=== | Property | Value | Type -| `deprecated_property` +| `integer` +| Range +| [`-17592186044416`, `17592186044415`] + | Default | ifdef::env-cloud[] Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -`null` +`5000` (5 seconds) endif::[] | Nullable | No +| Unit +| Milliseconds + | Requires restart | Yes @@ -8587,48 +8956,63 @@ ifndef::env-cloud[] | Yes endif::[] -|=== +ifndef::env-cloud[] +| Visibility +| Tunable +endif::[] -// end::deprecated[] +|=== -// tag::deprecated[] -=== kafka_client_group_fetch_byte_rate_quota -No description available. +=== kafka_batch_max_bytes +The default maximum batch size for topics if the topic property xref:reference:properties/topic-properties.adoc[`message.max.bytes`] is not set. If the batch is compressed, the limit applies to the compressed batch size. 
[cols="1s,2a"] |=== | Property | Value | Type -| `deprecated_property` +| `integer` +| Maximum +| `4294967295` + | Default | ifdef::env-cloud[] Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -`null` +`1048576` endif::[] | Nullable | No +| Unit +| Bytes + | Requires restart -| Yes +| No ifndef::env-cloud[] | Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] | Yes endif::[] +ifndef::env-cloud[] +| Visibility +| Tunable +endif::[] + +| Related topics +|xref:reference:properties/topic-properties.adoc[`message.max.bytes`] + |=== -// end::deprecated[] === kafka_connection_rate_limit @@ -8984,7 +9368,7 @@ ifndef::env-cloud[] *Introduced in v25.3.7* endif::[] -Target duration for a single fetch request. The broker tries to complete each fetch within this duration, even if fewer bytes are available than requested. +Broker-side target for the duration of a single fetch request. The broker will try to complete fetches within the specified duration, even if it means returning less bytes in the fetch than are available. [cols="1s,2a"] |=== @@ -9167,45 +9551,6 @@ endif::[] |=== -// tag::deprecated[] -=== kafka_memory_batch_size_estimate_for_fetch - -No description available. - - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `deprecated_property` - - - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`null` -endif::[] - -| Nullable -| No - -| Requires restart -| Yes - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -|=== - -// end::deprecated[] - === kafka_memory_share_for_fetch The share of Kafka subsystem memory that can be used for fetch read buffers, as a fraction of the Kafka subsystem memory amount. @@ -9806,187 +10151,21 @@ endif::[] |=== -=== kafka_qdc_window_size_ms - -Window size for Kafka queue depth control latency tracking. 
- -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `integer` - - - -| Range -| [`-17592186044416`, `17592186044415`] - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`1500` (1500 milliseconds) -endif::[] - -| Nullable -| No - -| Unit -| Milliseconds - -| Requires restart -| Yes - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -ifndef::env-cloud[] -| Visibility -| Tunable -endif::[] - -|=== - - -// tag::deprecated[] -=== kafka_quota_balancer_min_shard_throughput_bps - -No description available. - - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `deprecated_property` - - - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`null` -endif::[] - -| Nullable -| No - -| Unit -| Bytes per second - -| Requires restart -| Yes - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -|=== - -// end::deprecated[] - -// tag::deprecated[] -=== kafka_quota_balancer_min_shard_throughput_ratio - -No description available. - - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `deprecated_property` - - - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`null` -endif::[] - -| Nullable -| No - -| Requires restart -| Yes - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -|=== - -// end::deprecated[] - -// tag::deprecated[] -=== kafka_quota_balancer_node_period - -No description available. 
- - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `deprecated_property` - - - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`null` -endif::[] - -| Nullable -| No - -| Requires restart -| Yes - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -|=== - -// end::deprecated[] - -// tag::deprecated[] -=== kafka_quota_balancer_window - -No description available. +=== kafka_qdc_window_size_ms +Window size for Kafka queue depth control latency tracking. [cols="1s,2a"] |=== | Property | Value | Type -| `deprecated_property` +| `integer` + +| Range +| [`-17592186044416`, `17592186044415`] | Default | @@ -9994,12 +10173,15 @@ ifdef::env-cloud[] Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -`null` +`1500` (1500 milliseconds) endif::[] | Nullable | No +| Unit +| Milliseconds + | Requires restart | Yes @@ -10008,9 +10190,13 @@ ifndef::env-cloud[] | Yes endif::[] +ifndef::env-cloud[] +| Visibility +| Tunable +endif::[] + |=== -// end::deprecated[] === kafka_request_max_bytes @@ -10687,45 +10873,6 @@ endif::[] |=== -// tag::deprecated[] -=== kafka_throughput_throttling_v2 - -No description available. - - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `deprecated_property` - - - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`null` -endif::[] - -| Nullable -| No - -| Requires restart -| Yes - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -|=== - -// end::deprecated[] - === kafka_topics_max Maximum number of Kafka user topics that can be created. If `null`, then no limit is enforced. @@ -10901,19 +11048,25 @@ endif::[] |=== -// tag::deprecated[] === leader_balancer_mode -No description available. - +Mode of the leader balancer optimization strategy. 
`calibrated` uses a heuristic that balances leaders based on replica counts per shard. `random` randomly moves leaders to reduce load on heavily-loaded shards. Legacy values `greedy_balanced_shards` and `random_hill_climbing` are treated as `calibrated`. [cols="1s,2a"] |=== | Property | Value | Type -| `deprecated_property` +| `string` (enum) +| Accepted values +| +ifndef::env-cloud[] +`calibrated`, `random` +endif::[] +ifdef::env-cloud[] +`calibrated`, `random` +endif::[] | Default | @@ -10922,23 +11075,31 @@ ifdef::env-cloud[] Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -`null` +`calibrated` endif::[] | Nullable | No | Requires restart -| Yes +| No ifndef::env-cloud[] | Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] | Yes endif::[] +ifndef::env-cloud[] +| Visibility +| User +endif::[] + +| Example +| +`calibrated` + |=== -// end::deprecated[] === leader_balancer_mute_timeout @@ -11258,18 +11419,16 @@ endif::[] |=== -// tag::deprecated[] -=== log_compaction_adjacent_merge_self_compaction_count - -No description available. +=== log_compaction_disable_tx_batch_removal +Prevents log compaction from removing transaction metadata. 
Only set this to `true` if you experience stability issues related to transaction cleanup during compaction. +How often to trigger background compaction. [cols="1s,2a"] |=== | Property | Value | Type -| `deprecated_property` +| `integer` + +| Range +| [`-17592186044416`, `17592186044415`] | Default | @@ -11317,27 +11479,38 @@ ifdef::env-cloud[] Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -`null` +`10000` (10 seconds) endif::[] | Nullable | No +| Unit +| Milliseconds + | Requires restart -| Yes +| No ifndef::env-cloud[] | Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] | Yes endif::[] +ifndef::env-cloud[] +| Visibility +| User +endif::[] + |=== -// end::deprecated[] -=== log_compaction_interval_ms +=== log_compaction_max_priority_wait_ms -How often to trigger background compaction. +ifndef::env-cloud[] +*Introduced in v26.1.1-rc2* +endif::[] + +Maximum time a priority partition (for example, __consumer_offsets) can wait for compaction before preempting regular compaction. [cols="1s,2a"] |=== @@ -11357,7 +11530,7 @@ ifdef::env-cloud[] Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -`10000` (10 seconds) +`3600000` (1 hour) endif::[] | Nullable @@ -11376,7 +11549,7 @@ endif::[] ifndef::env-cloud[] | Visibility -| User +| Tunable endif::[] |=== @@ -11581,7 +11754,7 @@ ifdef::env-cloud[] Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -`false` +`true` endif::[] | Nullable @@ -11795,90 +11968,6 @@ endif::[] |=== -// tag::deprecated[] -=== log_message_timestamp_alert_after_ms - -No description available. 
- - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `deprecated_property` - - - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`null` -endif::[] - -| Nullable -| No - -| Unit -| Milliseconds - -| Requires restart -| Yes - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -|=== - -// end::deprecated[] - -// tag::deprecated[] -=== log_message_timestamp_alert_before_ms - -No description available. - - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `deprecated_property` - - - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`null` -endif::[] - -| Nullable -| No - -| Unit -| Milliseconds - -| Requires restart -| Yes - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -|=== - -// end::deprecated[] - === log_message_timestamp_before_max_ms The maximum allowed time difference when a record's timestamp is in the past compared to the broker's clock. For topics using <> timestamps, Redpanda rejects records with timestamps that are too far in the past. This property has no effect on topics using <> timestamps. The topic property xref:./topic-properties.adoc#messagetimestampbeforemaxms[`message.timestamp.before.max.ms`] overrides this cluster-level setting. @@ -12707,99 +12796,60 @@ endif::[] ifndef::env-cloud[] | Visibility -| Tunable -endif::[] - -|=== - - -=== max_transactions_per_coordinator - -Specifies the maximum number of active transaction sessions per coordinator. When the threshold is passed Redpanda terminates old sessions. When an idle producer corresponding to the terminated session wakes up and produces, it leads to its batches being rejected with invalid producer epoch or invalid_producer_id_mapping error (depends on the transaction execution phase). 
- -For details, see xref:develop:transactions#transaction-usage-tips[Transaction usage tips]. - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `integer` - - - -| Maximum -| `18446744073709552000` - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`Maximum value` -endif::[] - -| Nullable -| No - -| Requires restart -| No - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -ifndef::env-cloud[] -| Visibility -| Tunable -endif::[] - -| Related topics -|xref:develop:transactions#transaction-usage-tips[Transaction usage tips] +| Tunable +endif::[] |=== -// tag::deprecated[] -=== max_version +=== max_transactions_per_coordinator -No description available. +Specifies the maximum number of active transaction sessions per coordinator. When the threshold is passed Redpanda terminates old sessions. When an idle producer corresponding to the terminated session wakes up and produces, it leads to its batches being rejected with invalid producer epoch or invalid_producer_id_mapping error (depends on the transaction execution phase). +For details, see xref:develop:transactions#transaction-usage-tips[Transaction usage tips]. 
[cols="1s,2a"] |=== | Property | Value | Type -| `deprecated_property` +| `integer` +| Maximum +| `18446744073709552000` + | Default | ifdef::env-cloud[] Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -`null` +`Maximum value` endif::[] | Nullable | No | Requires restart -| Yes +| No ifndef::env-cloud[] | Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] | Yes endif::[] +ifndef::env-cloud[] +| Visibility +| Tunable +endif::[] + +| Related topics +|xref:develop:transactions#transaction-usage-tips[Transaction usage tips] + |=== -// end::deprecated[] === members_backend_retry_ms @@ -13031,53 +13081,6 @@ endif::[] |=== -=== metadata_status_wait_timeout_ms - -Maximum time to wait in metadata request for cluster health to be refreshed. - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `integer` - - - -| Range -| [`-17592186044416`, `17592186044415`] - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`2000` (2 seconds) -endif::[] - -| Nullable -| No - -| Unit -| Milliseconds - -| Requires restart -| Yes - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -ifndef::env-cloud[] -| Visibility -| Tunable -endif::[] - -|=== - - === metrics_reporter_report_interval Cluster metrics reporter report interval. @@ -13302,60 +13305,66 @@ endif::[] |=== -// tag::deprecated[] -=== min_version - -No description available. +=== minimum_topic_replication +Minimum allowable replication factor for topics in this cluster. The set value must be positive, odd, and equal to or less than the number of available brokers. Changing this parameter only restricts newly-created topics. Redpanda returns an `INVALID_REPLICATION_FACTOR` error on any attempt to create a topic with a replication factor less than this property. 
If you change the `minimum_topic_replications` setting, the replication factor of existing topics remains unchanged. However, Redpanda will log a warning on start-up with a list of any topics that have fewer replicas than this minimum. For example, you might see a message such as `Topic X has a replication factor less than specified minimum: 1 < 3`. [cols="1s,2a"] |=== | Property | Value | Type -| `deprecated_property` +| `integer` +| Range +| [`-32768`, `32767`] + | Default | ifdef::env-cloud[] Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -`null` +`1` endif::[] | Nullable | No | Requires restart -| Yes +| No ifndef::env-cloud[] | Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] | Yes endif::[] +ifndef::env-cloud[] +| Visibility +| User +endif::[] + |=== -// end::deprecated[] -=== minimum_topic_replication +=== nested_group_behavior -Minimum allowable replication factor for topics in this cluster. The set value must be positive, odd, and equal to or less than the number of available brokers. Changing this parameter only restricts newly-created topics. Redpanda returns an `INVALID_REPLICATION_FACTOR` error on any attempt to create a topic with a replication factor less than this property. If you change the `minimum_topic_replications` setting, the replication factor of existing topics remains unchanged. However, Redpanda will log a warning on start-up with a list of any topics that have fewer replicas than this minimum. For example, you might see a message such as `Topic X has a replication factor less than specified minimum: 1 < 3`. +ifndef::env-cloud[] +*Introduced in v26.1.1-rc2* +endif::[] + +Behavior for handling nested groups when extracting groups from authentication tokens. Two options are available: `none` and `suffix`. With `none`, the group is left alone (e.g. '/group/child/grandchild'). With `suffix`, Redpanda extracts the final component from the nested group (e.g.
'/group' -> 'group' and '/group/child/grandchild' -> 'grandchild'). [cols="1s,2a"] |=== | Property | Value | Type -| `integer` - +| `string` -| Range -| [`-32768`, `32767`] | Default | @@ -13363,7 +13372,7 @@ ifdef::env-cloud[] Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -`1` +`none` endif::[] | Nullable @@ -13385,21 +13394,26 @@ endif::[] |=== -=== node_isolation_heartbeat_timeout +=== nested_group_behavior -How long after the last heartbeat request a node will wait before considering itself to be isolated. +Behavior for handling nested groups when extracting groups from authentication tokens. With `none`, the group is left alone (for example, `/group/child/grandchild`). With `suffix`, Redpanda extracts the final component from the nested group (for example, `/group/child/grandchild` becomes `grandchild`). [cols="1s,2a"] |=== | Property | Value | Type -| `integer` - +| `string` (enum) +| Accepted values +| +ifndef::env-cloud[] +`none`, `suffix` +endif::[] +ifdef::env-cloud[] +`none`, `suffix` +endif::[] -| Range -| [`-9223372036854776000`, `9223372036854776000`] | Default | @@ -13407,7 +13421,7 @@ ifdef::env-cloud[] Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -`3000` +`none` endif::[] | Nullable @@ -13423,15 +13437,18 @@ endif::[] ifndef::env-cloud[] | Visibility -| Tunable +| User endif::[] +| Related topics +| +* xref:manage:security/authorization/gbac.adoc#customize-token-claim-extraction[Group-Based Access Control (GBAC) - Customize token claim extraction] |=== -=== node_management_operation_timeout_ms +=== node_isolation_heartbeat_timeout -Timeout for executing node management operations. +How long after the last heartbeat request a node will wait before considering itself to be isolated. [cols="1s,2a"] |=== @@ -13443,7 +13460,7 @@ Timeout for executing node management operations. 
| Range -| [`-17592186044416`, `17592186044415`] +| [`-9223372036854776000`, `9223372036854776000`] | Default | @@ -13451,17 +13468,14 @@ ifdef::env-cloud[] Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -`5000` (5 seconds) +`3000` endif::[] | Nullable | No -| Unit -| Milliseconds - | Requires restart -| Yes +| No ifndef::env-cloud[] | Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] @@ -13654,6 +13668,96 @@ endif::[] // end::redpanda-cloud[] +=== oidc_group_claim_path + +ifndef::env-cloud[] +*Introduced in v26.1.1-rc2* +endif::[] + +JSON path to extract groups from the JWT payload. + +[cols="1s,2a"] +|=== +| Property | Value + +| Type +| `string` + + + +| Default +| +ifdef::env-cloud[] +Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +`$.groups` +endif::[] + +| Nullable +| No + +| Requires restart +| No + +ifndef::env-cloud[] +| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] +| Yes +endif::[] + +ifndef::env-cloud[] +| Visibility +| User +endif::[] + +|=== + + +=== oidc_group_claim_path + +A https://goessner.net/articles/JsonPath/[JSON path^] expression that tells Redpanda where to find group information in the OIDC token payload. The path must point to a claim containing group names as either a JSON array or a comma-separated string. 
+ +[cols="1s,2a"] +|=== +| Property | Value + +| Type +| `string` + + + +| Default +| +ifdef::env-cloud[] +Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +`$.groups` +endif::[] + +| Nullable +| No + +| Requires restart +| No + +ifndef::env-cloud[] +| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] +| Yes +endif::[] + +ifndef::env-cloud[] +| Visibility +| User +endif::[] + +| Related topics +| +* xref:manage:security/authorization/gbac.adoc#customize-token-claim-extraction[Group-Based Access Control (GBAC) - Customize token claim extraction] + +|=== + + === oidc_keys_refresh_interval The frequency of refreshing the JSON Web Keys (JWKS) used to validate access tokens. @@ -14011,7 +14115,56 @@ endif::[] // tag::deprecated[] === partition_autobalancing_movement_batch_size_bytes -Total size of partitions that autobalancer is going to move in one batch (deprecated, use partition_autobalancing_concurrent_moves to limit the autobalancer concurrency) +Total size of partitions that autobalancer is going to move in one batch (deprecated, use partition_autobalancing_concurrent_moves to limit the autobalancer concurrency) + +[cols="1s,2a"] +|=== +| Property | Value + +| Type +| `integer` + + + +| Default +| +ifdef::env-cloud[] +Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +`5368709120` +endif::[] + +| Nullable +| No + +| Unit +| Bytes + +| Requires restart +| No + +ifndef::env-cloud[] +| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] +| Yes +endif::[] + +ifndef::env-cloud[] +| Visibility +| Deprecated +endif::[] + +|=== + +// end::deprecated[] + +=== partition_autobalancing_node_autodecommission_timeout_sec + +ifndef::env-cloud[] +*Introduced in v26.1.1-rc2* +endif::[] + +When a node is unavailable for at least this timeout duration, it triggers Redpanda to decommission the node. This property applies only when `partition_autobalancing_mode` is set to `continuous`. 
[cols="1s,2a"] |=== @@ -14022,20 +14175,23 @@ Total size of partitions that autobalancer is going to move in one batch (deprec +| Range +| [`-17179869184`, `17179869183`] + | Default | ifdef::env-cloud[] Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -`5368709120` +`null` endif::[] | Nullable -| No +| Yes | Unit -| Bytes +| Seconds | Requires restart | No @@ -14047,16 +14203,15 @@ endif::[] ifndef::env-cloud[] | Visibility -| Deprecated +| User endif::[] |=== -// end::deprecated[] === partition_autobalancing_node_availability_timeout_sec -When a node is unavailable for at least this timeout duration, it triggers Redpanda to move partitions off of the node. This property applies only when `partition_autobalancing_mode` is set to `continuous`. +When a node is unavailable for at least this timeout duration, it triggers Redpanda to move partitions off of the node. This property applies only when `partition_autobalancing_mode` is set to `continuous`. [cols="1s,2a"] |=== @@ -14812,45 +14967,6 @@ endif::[] |=== -// tag::deprecated[] -=== raft_max_concurrent_append_requests_per_follower - -No description available. - - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `deprecated_property` - - - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`null` -endif::[] - -| Nullable -| No - -| Requires restart -| Yes - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -|=== - -// end::deprecated[] - === raft_max_inflight_follower_append_entries_requests_per_shard The maximum number of append entry requests that may be sent from Raft groups on a Seastar shard to the current node, and are awaiting a reply. This property replaces `raft_max_concurrent_append_requests_per_follower`. @@ -14978,45 +15094,6 @@ endif::[] |=== -// tag::deprecated[] -=== raft_recovery_default_read_size - -No description available. 
- - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `deprecated_property` - - - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`null` -endif::[] - -| Nullable -| No - -| Requires restart -| Yes - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -|=== - -// end::deprecated[] - === raft_recovery_throttle_disable_dynamic_mode include::reference:partial$internal-use-property.adoc[] @@ -15631,53 +15708,6 @@ endif::[] |=== -=== recovery_append_timeout_ms - -Timeout for append entry requests issued while updating a stale follower. - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `integer` - - - -| Range -| [`-17592186044416`, `17592186044415`] - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`5000` (5 seconds) -endif::[] - -| Nullable -| No - -| Unit -| Milliseconds - -| Requires restart -| Yes - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -ifndef::env-cloud[] -| Visibility -| Tunable -endif::[] - -|=== - - === release_cache_on_segment_roll Flag for specifying whether or not to release cache when a full segment is rolled. @@ -16192,92 +16222,6 @@ endif::[] |=== -=== rm_sync_timeout_ms - -Resource manager's synchronization timeout. Specifies the maximum time for this node to wait for the internal state machine to catch up with all events written by previous leaders before rejecting a request. 
- -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `integer` - - - -| Range -| [`-17592186044416`, `17592186044415`] - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`10000` (10 seconds) -endif::[] - -| Nullable -| No - -| Unit -| Milliseconds - -| Requires restart -| No - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -ifndef::env-cloud[] -| Visibility -| User -endif::[] - -|=== - - -// tag::deprecated[] -=== rm_violation_recovery_policy - -No description available. - - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `deprecated_property` - - - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`null` -endif::[] - -| Nullable -| No - -| Requires restart -| Yes - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -|=== - -// end::deprecated[] - === rpc_client_connections_per_peer The maximum number of connections a broker will open to each of its peers. @@ -17107,12 +17051,55 @@ ifndef::env-cloud[] endif::[] ifndef::env-cloud[] -| Aliases -| `schema_registry_normalize_on_startup` +| Aliases +| `schema_registry_normalize_on_startup` +endif::[] + +|=== + + +// tag::deprecated[] +=== schema_registry_avro_use_named_references + +ifndef::env-cloud[] +*Introduced in v26.1.1-rc2* +endif::[] + +No description available. 
+ + +[cols="1s,2a"] +|=== +| Property | Value + +| Type +| `object` + + + +| Default +| +ifdef::env-cloud[] +Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +`null` +endif::[] + +| Nullable +| No + +| Requires restart +| Yes + +ifndef::env-cloud[] +| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] +| Yes endif::[] |=== +// end::deprecated[] === schema_registry_avro_use_named_references @@ -17213,18 +17200,20 @@ endif::[] // end::redpanda-cloud[] -// tag::deprecated[] -=== schema_registry_protobuf_renderer_v2 +=== schema_registry_enable_qualified_subjects -No description available. +ifndef::env-cloud[] +*Introduced in v26.1.1-rc2* +endif::[] +Enable parsing of qualified subject syntax (:.context:subject). When false, subjects are treated literally, as subjects in the default context. When true, qualified syntax is parsed to extract context and subject. [cols="1s,2a"] |=== | Property | Value | Type -| `deprecated_property` +| `boolean` @@ -17234,7 +17223,7 @@ ifdef::env-cloud[] Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -`null` +`false` endif::[] | Nullable @@ -17248,51 +17237,13 @@ ifndef::env-cloud[] | Yes endif::[] -|=== - -// end::deprecated[] - -// tag::deprecated[] -=== seed_server_meta_topic_partitions - -No description available. - - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `deprecated_property` - - - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`null` -endif::[] - -| Nullable -| No - -| Unit -| Number of partitions per topic - -| Requires restart -| Yes - ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes +| Visibility +| Tunable endif::[] |=== -// end::deprecated[] === segment_appender_flush_timeout_ms @@ -17386,45 +17337,6 @@ endif::[] |=== -// tag::deprecated[] -=== seq_table_min_size - -No description available. 
- - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `deprecated_property` - - - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`null` -endif::[] - -| Nullable -| No - -| Requires restart -| Yes - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -|=== - -// end::deprecated[] - === space_management_enable Option to explicitly disable automatic disk space management. If this property was explicitly disabled while using v23.2, it will remain disabled following an upgrade. @@ -18315,84 +18227,6 @@ endif::[] |=== -// tag::deprecated[] -=== target_fetch_quota_byte_rate - -No description available. - - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `deprecated_property` - - - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`null` -endif::[] - -| Nullable -| No - -| Requires restart -| Yes - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -|=== - -// end::deprecated[] - -// tag::deprecated[] -=== target_quota_byte_rate - -No description available. - - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `deprecated_property` - - - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`null` -endif::[] - -| Nullable -| No - -| Requires restart -| Yes - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -|=== - -// end::deprecated[] - === tls_certificate_name_format The format of the certificates's distinguished name to use for mTLS principal mapping. The `legacy` format would appear as 'C=US,ST=California,L=San Francisco,O=Redpanda,CN=redpanda', while the `rfc2253` format would appear as 'CN=redpanda,O=Redpanda,L=San Francisco,ST=California,C=US'. 
@@ -18417,108 +18251,17 @@ endif::[] | Default | ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`legacy` -endif::[] - -| Nullable -| No - -| Requires restart -| No - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -ifndef::env-cloud[] -| Visibility -| User -endif::[] - -|=== - - -=== tls_enable_renegotiation - -TLS client-initiated renegotiation is considered unsafe and is disabled by default . Only re-enable it if you are experiencing issues with your TLS-enabled client. This option has no effect on TLSv1.3 connections as client-initiated renegotiation was removed. - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `boolean` - - - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`false` -endif::[] - -| Nullable -| No - -| Requires restart -| Yes - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -ifndef::env-cloud[] -| Visibility -| Tunable -endif::[] - -|=== - - -// tag::redpanda-cloud[] -=== tls_min_version - -The minimum TLS version that Redpanda clusters support. This property prevents client applications from negotiating a downgrade to the TLS version when they make a connection to a Redpanda cluster. 
 - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `string` (enum) - -| Accepted values -| -ifndef::env-cloud[] -`v1.0`, `v1.1`, `v1.2`, `v1.3` -endif::[] -ifdef::env-cloud[] -`v1.0`, `v1.1`, `v1.2`, `v1.3` -endif::[] - - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console (read-only) +Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -`v1.2` +`legacy` endif::[] | Nullable | No | Requires restart -| Yes +| No ifndef::env-cloud[] | Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] @@ -18532,18 +18275,17 @@ endif::[] |=== -// end::redpanda-cloud[] -=== tls_v1_2_cipher_suites +=== tls_enable_renegotiation -The encryption algorithms available for TLS 1.2 client connections, specified as a colon-separated list in OpenSSL format. Use this to support older clients that require specific encryption methods. +TLS client-initiated renegotiation is considered unsafe and is disabled by default. Only re-enable it if you are experiencing issues with your TLS-enabled client. This option has no effect on TLSv1.3 connections as client-initiated renegotiation was removed.
[cols="1s,2a"] |=== | Property | Value | Type -| `string` +| `boolean` @@ -18553,7 +18295,7 @@ ifdef::env-cloud[] Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -`ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:AES256-GCM-SHA384:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:AES128-SHA:AES128-CCM:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:AES256-SHA:AES256-CCM` +`false` endif::[] | Nullable @@ -18569,32 +18311,41 @@ endif::[] ifndef::env-cloud[] | Visibility -| User +| Tunable endif::[] |=== -=== tls_v1_3_cipher_suites +// tag::redpanda-cloud[] +=== tls_min_version -The encryption algorithms available for TLS 1.3 client connections, specified as a colon-separated list in OpenSSL format. Most deployments don't need to change this. Only modify it to meet specific organizational security requirements. +The minimum TLS version that Redpanda clusters support. This property prevents client applications from negotiating a downgrade to the TLS version when they make a connection to a Redpanda cluster. [cols="1s,2a"] |=== | Property | Value | Type -| `string` +| `string` (enum) +| Accepted values +| +ifndef::env-cloud[] +`v1.0`, `v1.1`, `v1.2`, `v1.3` +endif::[] +ifdef::env-cloud[] +`v1.0`, `v1.1`, `v1.2`, `v1.3` +endif::[] | Default | ifdef::env-cloud[] -Available in the Redpanda Cloud Console +Available in the Redpanda Cloud Console (read-only) endif::[] ifndef::env-cloud[] -`TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:TLS_AES_128_CCM_SHA256` +`v1.2` endif::[] | Nullable @@ -18615,22 +18366,20 @@ endif::[] |=== +// end::redpanda-cloud[] -=== tm_sync_timeout_ms +=== tls_v1_2_cipher_suites -Transaction manager's synchronization timeout. Maximum time to wait for internal state machine to catch up before rejecting a request. 
+The encryption algorithms available for TLS 1.2 client connections, specified as a colon-separated list in OpenSSL format. Use this to support older clients that require specific encryption methods. [cols="1s,2a"] |=== | Property | Value | Type -| `integer` - +| `string` -| Range -| [`-17592186044416`, `17592186044415`] | Default | @@ -18638,15 +18387,12 @@ ifdef::env-cloud[] Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -`10000` (10 seconds) +`TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256:TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256:TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256` endif::[] | Nullable | No -| Unit -| Milliseconds - | Requires restart | Yes @@ -18663,18 +18409,16 @@ endif::[] |=== -// tag::deprecated[] -=== tm_violation_recovery_policy - -No description available. +=== tls_v1_3_cipher_suites +The encryption algorithms available for TLS 1.3 client connections, specified as a colon-separated list in OpenSSL format. Most deployments don't need to change this. Only modify it to meet specific organizational security requirements. [cols="1s,2a"] |=== | Property | Value | Type -| `deprecated_property` +| `string` @@ -18684,7 +18428,7 @@ ifdef::env-cloud[] Available in the Redpanda Cloud Console endif::[] ifndef::env-cloud[] -`null` +`TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:TLS_AES_128_CCM_SHA256` endif::[] | Nullable @@ -18698,9 +18442,13 @@ ifndef::env-cloud[] | Yes endif::[] +ifndef::env-cloud[] +| Visibility +| User +endif::[] + |=== -// end::deprecated[] === tombstone_retention_ms @@ -19213,45 +18961,6 @@ endif::[] |=== -// tag::deprecated[] -=== transaction_coordinator_replication - -No description available. 
- - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `deprecated_property` - - - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`null` -endif::[] - -| Nullable -| No - -| Requires restart -| Yes - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -|=== - -// end::deprecated[] - === transaction_max_timeout_ms The maximum allowed timeout for transactions. If a client-requested transaction timeout exceeds this configuration, the broker returns an error during transactional producer initialization. This guardrail prevents hanging transactions from blocking consumer progress. @@ -19400,87 +19109,6 @@ endif::[] // end::deprecated[] -// tag::deprecated[] -=== tx_registry_log_capacity - -No description available. - - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `deprecated_property` - - - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`null` -endif::[] - -| Nullable -| No - -| Requires restart -| Yes - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -|=== - -// end::deprecated[] - -// tag::deprecated[] -=== tx_registry_sync_timeout_ms - -No description available. - - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `deprecated_property` - - - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`null` -endif::[] - -| Nullable -| No - -| Unit -| Milliseconds - -| Requires restart -| Yes - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -|=== - -// end::deprecated[] - === tx_timeout_delay_ms Delay before scheduling the next check for timed out transactions. @@ -19839,45 +19467,6 @@ endif::[] |=== -// tag::deprecated[] -=== use_scheduling_groups - -No description available. 
- - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `deprecated_property` - - - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`null` -endif::[] - -| Nullable -| No - -| Requires restart -| Yes - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -|=== - -// end::deprecated[] - === virtual_cluster_min_producer_ids Minimum number of active producers per virtual cluster. @@ -19922,53 +19511,6 @@ endif::[] |=== -=== wait_for_leader_timeout_ms - -Timeout to wait for leadership in metadata cache. - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `integer` - - - -| Range -| [`-17592186044416`, `17592186044415`] - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`5000` (5 seconds) -endif::[] - -| Nullable -| No - -| Unit -| Milliseconds - -| Requires restart -| No - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -ifndef::env-cloud[] -| Visibility -| Tunable -endif::[] - -|=== - - === write_caching_default The default write caching mode to apply to user topics. Write caching acknowledges a message as soon as it is received and acknowledged on a majority of brokers, without waiting for it to be written to disk. With `acks=all`, this provides lower latency while still ensuring that a majority of brokers acknowledge the write. 
@@ -20027,7 +19569,7 @@ endif::[] | * xref:reference:properties/topic-properties.adoc#writecaching[`write.caching`] -* xref:develop:config-topics.adoc#configure-write-caching[Write caching] +* xref:develop:manage-topics/config-topics.adoc#configure-write-caching[Write caching] |=== diff --git a/modules/reference/partials/properties/object-storage-properties.adoc b/modules/reference/partials/properties/object-storage-properties.adoc index a37097649d..449be4b6da 100644 --- a/modules/reference/partials/properties/object-storage-properties.adoc +++ b/modules/reference/partials/properties/object-storage-properties.adoc @@ -1739,45 +1739,6 @@ endif::[] |=== -// tag::deprecated[] -=== cloud_storage_disable_metadata_consistency_checks - -No description available. - - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `deprecated_property` - - - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`null` -endif::[] - -| Nullable -| No - -| Requires restart -| Yes - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -|=== - -// end::deprecated[] - === cloud_storage_disable_read_replica_loop_for_tests Begins the read replica sync loop in topic partitions with Tiered Storage enabled. The property exists to simplify testing and shouldn't be set in production. @@ -3820,48 +3781,6 @@ endif::[] |=== -// tag::deprecated[] -=== cloud_storage_reconciliation_ms - -No description available. 
- - -[cols="1s,2a"] -|=== -| Property | Value - -| Type -| `deprecated_property` - - - -| Default -| -ifdef::env-cloud[] -Available in the Redpanda Cloud Console -endif::[] -ifndef::env-cloud[] -`null` -endif::[] - -| Nullable -| No - -| Unit -| Milliseconds - -| Requires restart -| Yes - -ifndef::env-cloud[] -| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] -| Yes -endif::[] - -|=== - -// end::deprecated[] - === cloud_storage_recovery_temporary_retention_bytes_default Retention in bytes for topics created during automated recovery. diff --git a/modules/reference/partials/properties/topic-properties.adoc b/modules/reference/partials/properties/topic-properties.adoc index 1463d30bfd..ad35e26c9b 100644 --- a/modules/reference/partials/properties/topic-properties.adoc +++ b/modules/reference/partials/properties/topic-properties.adoc @@ -948,8 +948,7 @@ endif::[] // tag::exclude-from-docs[] === redpanda.cloud_topic.enabled -No description available. - +Configuration property: redpanda.cloud_topic.enabled [cols="1s,2a"] |=== @@ -1270,6 +1269,11 @@ The preferred location (rack) for partition leaders of a topic. This property inherits the value from the config_ref:default_leaders_preference,true,properties/cluster-properties[] cluster configuration property. You may override the cluster-wide setting by specifying the value for individual topics. If the cluster configuration property config_ref:enable_rack_awareness,true,properties/cluster-properties[] is set to `false`, Leader Pinning is disabled across the cluster. +Accepted values: + +* `none`: Disable Leader Pinning for the topic. +* `racks:[,,...]`: Pin leaders to the specified racks with equal priority. Redpanda distributes partition leaders across brokers in the listed racks, with no preference ordering. +* `ordered_racks:[,,...]`: Pin leaders to the specified racks in priority order. 
Redpanda places leaders in the first listed rack when available, failing over to subsequent racks when higher-priority racks are unavailable. Requires Redpanda version 26.1 or later. [cols="1s,2a"] |=== @@ -1278,7 +1282,8 @@ If the cluster configuration property config_ref:enable_rack_awareness,true,prop | Type | `object` - +| Accepted Values +| `none`, `racks:[,,...]`, `ordered_racks:[,,...]` | Corresponding cluster property | xref:reference:cluster-properties.adoc#default_leaders_preference[default_leaders_preference] @@ -1550,6 +1555,66 @@ endif::[] // end::category-tiered-storage[] +// tag::category-tiered-storage[] +=== redpanda.storage.mode + +The storage mode for a topic. Determines how topic data is stored and whether it is eligible for upload to object storage. + +Accepted values: + +* `local`: Topic data is stored only on the broker's local disk. Object storage upload is disabled for the topic, regardless of cluster-level Tiered Storage settings. +* `tiered`: Topic data is stored on local disk and also uploaded to object storage. Enables xref:manage:tiered-storage.adoc[Tiered Storage] for the topic. +* `cloud`: Topic data is stored in object storage using the glossterm:Cloud Topic[,Cloud Topics] architecture. Local storage is used only as a write buffer. +* `unset`: Specifies that the topic's storage mode is unset, regardless of the cluster default. The topic may still have Tiered Storage enabled through the legacy properties `redpanda.remote.read` and `redpanda.remote.write`. + +This property overrides the cluster-wide config_ref:default_redpanda_storage_mode,true,properties/cluster-properties[] setting for individual topics. 
+ +[cols="1s,2a"] +|=== +| Property | Value + +| Type +| `string` (enum) + +| Accepted Values +| +ifndef::env-cloud[] +`local`, `tiered`, `cloud`, `unset` +endif::[] +ifdef::env-cloud[] +`local`, `tiered`, `cloud`, `unset` +endif::[] + + +| Corresponding cluster property +| xref:reference:cluster-properties.adoc#default_redpanda_storage_mode[default_redpanda_storage_mode] + +| Default +| +ifdef::env-cloud[] +Available in the Redpanda Cloud Console +endif::[] +ifndef::env-cloud[] +`unset` +endif::[] + +| Nullable +| No + +ifndef::env-cloud[] +| Restored on xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore] +| Yes +endif::[] + +| Related topics +| +* xref:manage:tiered-storage.adoc[Tiered Storage] +* xref:develop:manage-topics/cloud-topics.adoc[Manage Cloud Topics] + +|=== + +// end::category-tiered-storage[] + // tag::category-schema-registry[] === redpanda.value.schema.id.validation @@ -1710,9 +1775,9 @@ endif::[] * xref:reference:properties/cluster-properties.adoc#default_topic_replication[`default_topic_replication`] -* xref:develop:config-topics.adoc#choose-the-replication-factor[Choose the replication factor] +* xref:develop:manage-topics/config-topics.adoc#choose-the-replication-factor[Choose the replication factor] -* xref:develop:config-topics.adoc#change-the-replication-factor[Change the replication factor] +* xref:develop:manage-topics/config-topics.adoc#change-the-replication-factor[Change the replication factor] * xref:reference:properties/cluster-properties.adoc#default_topic_replication[default_topic_replication] @@ -2106,7 +2171,7 @@ endif::[] | Related topics | -* xref:develop:config-topics.adoc#configure-write-caching[Write caching] +* xref:develop:manage-topics/config-topics.adoc#configure-write-caching[Write caching] * xref:manage:tiered-storage.adoc[Tiered Storage] diff --git a/modules/reference/partials/properties/topic-property-mappings.adoc b/modules/reference/partials/properties/topic-property-mappings.adoc index 
238d37a826..6843535cc9 100644 --- a/modules/reference/partials/properties/topic-property-mappings.adoc +++ b/modules/reference/partials/properties/topic-property-mappings.adoc @@ -63,6 +63,9 @@ | <> | xref:./object-storage-properties.adoc#cloud_storage_enable_remote_write[`cloud_storage_enable_remote_write`] +| <> +| xref:./cluster-properties.adoc#default_redpanda_storage_mode[`default_redpanda_storage_mode`] + | <> | xref:./cluster-properties.adoc#retention_bytes[`retention_bytes`] diff --git a/modules/shared/partials/tristate-behavior-change-25-3.adoc b/modules/shared/partials/tristate-behavior-change-25-3.adoc index e79d95b59a..cce6a25c22 100644 --- a/modules/shared/partials/tristate-behavior-change-25-3.adoc +++ b/modules/shared/partials/tristate-behavior-change-25-3.adoc @@ -1,5 +1,5 @@ Starting in Redpanda v25.3, several topic properties support enhanced tristate behavior. Properties like `retention.ms`, `retention.bytes`, `segment.ms`, and others now distinguish between zero values (immediate eligibility for cleanup/compaction) and negative values (disable the feature entirely). Previously, zero and negative values were treated the same way. ifndef::env-cloud[] -For the complete list of affected properties and detailed information, see xref:25.3@get-started:release-notes/redpanda.adoc#behavior-changes[Redpanda v25.3 behavior changes]. +For the complete list of affected properties and detailed information, see xref:get-started:release-notes/redpanda.adoc#behavior-changes[Redpanda v26.1 behavior changes]. endif::[] Review your topic configurations if you currently use zero values for these properties. 
\ No newline at end of file diff --git a/modules/upgrade/pages/k-compatibility.adoc b/modules/upgrade/pages/k-compatibility.adoc index b5c000c31c..4f755cfd26 100644 --- a/modules/upgrade/pages/k-compatibility.adoc +++ b/modules/upgrade/pages/k-compatibility.adoc @@ -19,7 +19,7 @@ Starting from version 25.1.1, the Redpanda Operator and Redpanda Helm chart foll NOTE: If a version includes `-beta`, it is a pre-release version of the Redpanda Operator and Helm chart. These versions are not supported and should not be used in production environments. Beta versions are available only for testing and feedback. To give feedback on beta releases, reach out to the Redpanda team in https://redpanda.com/slack[Redpanda Community Slack^]. -Each Redpanda Operator and Helm chart version supports the corresponding Redpanda core version plus one minor version above and one below. This approach ensures flexibility during upgrades. For example, Redpanda Operator version 25.1.1 supports Redpanda core versions 25.2.x, 25.1.x, and 24.3.x. +Each Redpanda Operator and Helm chart version supports the corresponding Redpanda core version plus one minor version above and one below. This approach ensures flexibility during upgrades. For example, Redpanda Operator version 25.3.1 supports Redpanda core versions 26.1.x, 25.3.x, and 25.2.x. Redpanda Operator and Helm chart versions are supported only while their associated Redpanda core version remains supported. If the core version reaches end of life (EoL), the corresponding versions of the Redpanda Operator and Helm chart also reach EoL. @@ -37,7 +37,30 @@ Redpanda Core has no direct dependency on Kubernetes. Compatibility is influence |=== |Redpanda Core / `rpk` |Helm Chart |Operator Helm Chart |Operator |Helm CLI |Kubernetes -.2+|25.3.x +.2+|26.1.x + +|26.1.x +|26.1.x +|26.1.x +|3.12+ +// d (default) on Kubernetes cells is required to render footnotes at the +// bottom of the page rather than inside the table cell. 
+// See https://github.com/asciidoctor/asciidoctor/issues/2350#issuecomment-546841684 +d|1.32.x - 1.35.x{fn-k8s-compatibility} + +|25.3.x +|25.3.x +|25.3.x +|3.12+ +d|1.30.x - 1.33.x{fn-k8s-compatibility} + +.3+|25.3.x + +|26.1.x +|26.1.x +|26.1.x +|3.12+ +d|1.30.x - 1.33.x{fn-k8s-compatibility} |25.3.x |25.3.x @@ -70,28 +93,6 @@ d|1.30.x - 1.33.x{fn-k8s-compatibility} |25.1.x |3.12+ d|1.28.x - 1.33.x{fn-k8s-compatibility} - -.3+|25.1.x -|25.1.x -|25.1.x -|25.1.x -|3.12+ -// d (default) here is required to get footnotes to appear at the bottom of the page -// instead of inside the table cell. -// See https://github.com/asciidoctor/asciidoctor/issues/2350#issuecomment-546841684 -d|1.28.x - 1.32.x{fn-k8s-compatibility} - -|5.10.x -|2.4.x -|2.4.x -|3.12+ -d|1.28.x - 1.32.x{fn-k8s-compatibility} - -|5.9.x -|0.4.36 -|2.3.x -|3.12+ -d|1.28.x - 1.32.x{fn-k8s-compatibility} |=== By default, the Redpanda Helm chart depends on cert-manager for enabling TLS. @@ -111,12 +112,8 @@ Upgrading the Helm chart may also upgrade Redpanda Console. Because of this buil |Redpanda Console |Helm Chart |Operator |v3.x.x -|25.3.x, 25.2.x, 25.1.x -|25.3.x, 25.2.x - -|v2.x.x -|5.10.1, 5.9.x, 5.8.x -|25.3.x, 25.2.x, 25.1.x, 2.4.x, 2.3.x, 2.2.x +|26.1.x, 25.3.x, 25.2.x +|26.1.x, 25.3.x, 25.2.x |=== diff --git a/modules/upgrade/partials/incompat-changes.adoc b/modules/upgrade/partials/incompat-changes.adoc index 9610dfe846..ba16ec587a 100644 --- a/modules/upgrade/partials/incompat-changes.adoc +++ b/modules/upgrade/partials/incompat-changes.adoc @@ -1,5 +1,8 @@ === Review incompatible changes +* *Breaking changes in Redpanda 26.1*: +** If FIPS mode is enabled, change any SASL/SCRAM user passwords shorter than 14 characters to at least 14 characters before upgrading. FIPS 140-3 enforces stricter HMAC key size requirements than FIPS 140-2. Because Redpanda stores passwords in encrypted form, it cannot check the length of existing passwords. 
Clients with passwords shorter than 14 characters will fail to authenticate after the upgrade. See xref:manage:security/fips-compliance.adoc[Configure Redpanda for FIPS]. + * *Breaking changes in Redpanda 25.3*: ** {empty} +