Skip to content

Commit

Permalink
tests: add AdminAPI constraints pass/fail checks
Browse files Browse the repository at this point in the history
Signed-off-by: NyaliaLui <nyalia@redpanda.com>
  • Loading branch information
NyaliaLui committed Oct 16, 2023
1 parent e58409d commit e4430e0
Showing 1 changed file with 151 additions and 2 deletions.
153 changes: 151 additions & 2 deletions tests/rptest/tests/cluster_config_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -554,7 +554,7 @@ def test_valid_settings(self):
# using the cluster
exclude_settings = {
'enable_sasl', 'kafka_enable_authorization',
'kafka_mtls_principal_mapping_rules', 'constraints'
'kafka_mtls_principal_mapping_rules'
}

# Don't enable coproc: it generates log errors if its companion service isn't running
Expand Down Expand Up @@ -1869,7 +1869,10 @@ class ConfigConstraintsTest(RedpandaTest):
RETENTION_MS = 86400000 # 1 day
RETENTION_MS_MIN = RETENTION_MS // 2
RETENTION_MS_MAX = RETENTION_MS * 2
topics = [TopicSpec(retention_ms=RETENTION_MS)]
topics = [
TopicSpec(compression_type=TopicSpec.COMPRESSION_PRODUCER,
retention_ms=RETENTION_MS)
]

LOG_LIFETIME_CONSTRAINT = {
'name': 'log_retention_ms',
Expand All @@ -1882,6 +1885,11 @@ class ConfigConstraintsTest(RedpandaTest):
'type': 'restrict',
'enabled': True
}
LOG_COMPRESSION_CONSTRAINT = {
'name': 'log_compression_type',
'type': 'clamp',
'enabled': True
}

def __init__(self, *args, **kwargs):
super(ConfigConstraintsTest, self).__init__(extra_rp_conf={
Expand All @@ -1891,6 +1899,147 @@ def __init__(self, *args, **kwargs):
*args,
**kwargs)

@cluster(num_nodes=3)
def test_config_constraints_admin_api_success(self):
    """Exercise the happy path of the constraints Admin API: read the
    constraints the brokers started with, replace them with explicitly
    unset ones, extend the list, and finally clear it."""
    admin = Admin(self.redpanda)
    target_broker = self.redpanda.nodes[0]

    def check_reported_constraints(expected):
        # Fetch the cluster config from the target broker and assert the
        # reported constraints match `expected` exactly (order-insensitive:
        # both sides are compared sorted by constraint name).
        res = admin.get_cluster_config(node=target_broker)
        assert 'constraints' in res
        # isinstance is the idiomatic type check (vs. type(x) == list)
        assert isinstance(res['constraints'], list)
        assert len(res['constraints']) == len(expected)
        constraints = sorted(res['constraints'],
                             key=lambda con: con['name'])
        self.logger.debug(json.dumps(constraints, indent=2))
        assert constraints == sorted(expected,
                                     key=lambda con: con['name'])

    def patch_constraints(constraints):
        # Upsert `constraints` and wait for every broker to see the new
        # config version before asserting anything about it.
        patch_result = admin.patch_cluster_config(
            upsert={'constraints': constraints}, node=target_broker)
        wait_for_version_sync(admin, self.redpanda,
                              patch_result['config_version'])

    # The brokers start with two constraints (set via extra_rp_conf)
    check_reported_constraints(
        [self.LOG_CLEANUP_CONSTRAINT, self.LOG_LIFETIME_CONSTRAINT])

    # Unset constraints.
    # NOTE(review): this mutates the class-level constraint dicts so the
    # comparisons below stay in sync with what was patched, but the
    # mutation leaks into any later test sharing these class attributes —
    # consider patching/comparing copies instead.
    self.LOG_CLEANUP_CONSTRAINT['enabled'] = False
    self.LOG_LIFETIME_CONSTRAINT['min'] = None

    patch_constraints(
        [self.LOG_LIFETIME_CONSTRAINT, self.LOG_CLEANUP_CONSTRAINT])

    # The broker should report constraints but they are explicitly unset
    check_reported_constraints(
        [self.LOG_CLEANUP_CONSTRAINT, self.LOG_LIFETIME_CONSTRAINT])

    # Add to the current list
    patch_constraints([
        self.LOG_LIFETIME_CONSTRAINT, self.LOG_CLEANUP_CONSTRAINT,
        self.LOG_COMPRESSION_CONSTRAINT
    ])
    check_reported_constraints([
        self.LOG_CLEANUP_CONSTRAINT, self.LOG_COMPRESSION_CONSTRAINT,
        self.LOG_LIFETIME_CONSTRAINT
    ])

    # Could remove all constraints by setting to empty list
    patch_constraints([])

    # Expect empty constraints list from the broker
    check_reported_constraints([])

@cluster(
    num_nodes=3,
    log_allow_list=[r"Constraints failure: .* has out-of-range value"])
def test_config_constraints_admin_api_failures(self):
    """Check that invalid constraint updates are rejected by the Admin
    API with an explanatory error, and that the constraints pre-set at
    broker startup remain untouched afterwards.

    Bug fix vs. the original: each negative case now fails loudly (via
    the try/else clause) if the server ACCEPTS the bad config instead of
    raising — previously the test silently passed in that case.
    """
    admin = Admin(self.redpanda)
    target_broker = self.redpanda.nodes[0]

    # Setting a constraint for an unsupported config, expect 400 HTTP status
    try:
        admin.patch_cluster_config(upsert={
            'constraints': [{
                'name': 'enable_rack_awareness',
                'type': 'restrict',
                'enabled': True
            }]
        },
                                   node=target_broker)
    except requests.exceptions.HTTPError as ex:
        if ex.response.status_code == requests.codes.bad_request:
            res_json = ex.response.json()
            assert res_json[
                'constraints'] == 'Constraints failure: [{"name":"enable_rack_awareness","type":"restrict","enabled":true}] is unsupported', 'Expected unsupported constraint'
        else:
            # Unexpected status: surface it to the test framework
            raise
    else:
        raise AssertionError(
            'Expected HTTP 400 for unsupported constraint config')

    # Setting min > max, expect 400 HTTP status
    try:
        admin.patch_cluster_config(upsert={
            'constraints': [{
                'name': 'default_topic_replications',
                'type': 'restrict',
                'min': 9,
                'max': 3
            }]
        },
                                   node=target_broker)
    except requests.exceptions.HTTPError as ex:
        if ex.response.status_code == requests.codes.bad_request:
            res_json = ex.response.json()
            assert res_json[
                'constraints'] == 'Constraints failure: min > max: config default_topic_replications', 'Expected min > max'
        else:
            raise
    else:
        raise AssertionError('Expected HTTP 400 for min > max constraint')

    # Set a high min for retention_ms: the topic created at startup already
    # has retention_ms of 1 day, so the update must be rejected.
    # NOTE(review): the original comment said "expect 409" but the code
    # checks 400 (bad_request) — confirm which status the API returns.
    # The mutation is done outside the try and undone in finally so the
    # shared class-level dict is restored on every path.
    self.LOG_LIFETIME_CONSTRAINT['min'] = self.RETENTION_MS_MAX
    try:
        admin.patch_cluster_config(
            upsert={'constraints': [self.LOG_LIFETIME_CONSTRAINT]},
            node=target_broker)
    except requests.exceptions.HTTPError as ex:
        if ex.response.status_code == requests.codes.bad_request:
            res_json = ex.response.json()
            m = re.match(
                r'Constraints failure: topic config has out-of-range value: topics topic-[a-zA-Z]+ , config log_retention_ms',
                res_json['constraints'])
            assert m is not None, 'Expected out-of-range'
        else:
            raise
    else:
        raise AssertionError(
            'Expected HTTP error for out-of-range topic config')
    finally:
        # Reset lifetime min
        self.LOG_LIFETIME_CONSTRAINT['min'] = self.RETENTION_MS_MIN

    # Constraints should still be the pre-set ones from broker startup
    res = admin.get_cluster_config(node=target_broker)
    assert 'constraints' in res
    assert isinstance(res['constraints'], list)
    assert len(res['constraints']) == 2
    constraints = sorted(res['constraints'], key=lambda con: con['name'])
    self.logger.debug(json.dumps(constraints, indent=2))
    assert constraints[0] == self.LOG_CLEANUP_CONSTRAINT
    assert constraints[1] == self.LOG_LIFETIME_CONSTRAINT

@cluster(num_nodes=3)
def test_broker_restart(self):
# Check that configuration constraints persist between broker restart.
Expand Down

0 comments on commit e4430e0

Please sign in to comment.