From bb839a6e1042bf36902e32fe568bb276858c1145 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C5=81ukasz=20Gajowy?=
Date: Mon, 19 Aug 2019 16:09:07 +0200
Subject: [PATCH] [BEAM-5980] Remove redundant Combine tests in Flink + Python
 suite

It turned out that the two additional tests deleted here did not test
anything relevant. The Combine test maps values of different sizes to a
Long value, so the size of an individual record does not affect the
combine step. The deleted tests differed only in their initial record
size; in practice, they were testing the same thing as test #1.
---
 .../job_LoadTests_Combine_Flink_Python.groovy | 48 +------------------
 1 file changed, 1 insertion(+), 47 deletions(-)

diff --git a/.test-infra/jenkins/job_LoadTests_Combine_Flink_Python.groovy b/.test-infra/jenkins/job_LoadTests_Combine_Flink_Python.groovy
index e2c3913edcd2d..09e52a0165c0c 100644
--- a/.test-infra/jenkins/job_LoadTests_Combine_Flink_Python.groovy
+++ b/.test-infra/jenkins/job_LoadTests_Combine_Flink_Python.groovy
@@ -55,48 +55,6 @@ def scenarios = { datasetName -> [
       top_count           : 20,
     ]
   ],
-  [
-    title        : 'Combine Python Load test: 2GB 100 byte records',
-    itClass      : 'apache_beam.testing.load_tests.combine_test:CombineTest.testCombineGlobally',
-    runner       : CommonTestProperties.Runner.PORTABLE,
-    jobProperties: [
-      job_name            : 'load-tests-python-flink-batch-combine-2-' + now,
-      project             : 'apache-beam-testing',
-      publish_to_big_query: true,
-      metrics_dataset     : datasetName,
-      metrics_table       : 'python_flink_batch_combine_2',
-      input_options       : '\'{' +
-                            '"num_records": 20000000,' +
-                            '"key_size": 10,' +
-                            '"value_size": 90}\'',
-      parallelism         : 5,
-      job_endpoint        : 'localhost:8099',
-      environment_config  : pythonHarnessImageTag,
-      environment_type    : 'DOCKER',
-      top_count           : 20,
-    ]
-  ],
-  [
-    title        : 'Combine Python Load test: 2GB 100 kilobyte records',
-    itClass      : 'apache_beam.testing.load_tests.combine_test:CombineTest.testCombineGlobally',
-    runner       : CommonTestProperties.Runner.PORTABLE,
-    jobProperties: [
-      job_name            : 'load-tests-python-flink-batch-combine-3-' + now,
-      project             : 'apache-beam-testing',
-      publish_to_big_query: true,
-      metrics_dataset     : datasetName,
-      metrics_table       : 'python_flink_batch_combine_3',
-      input_options       : '\'{' +
-                            '"num_records": 2000,' +
-                            '"key_size": 100000,' +
-                            '"value_size": 90}\'',
-      parallelism         : 5,
-      job_endpoint        : 'localhost:8099',
-      environment_config  : pythonHarnessImageTag,
-      environment_type    : 'DOCKER',
-      top_count           : 20,
-    ]
-  ],
   [
     title        : 'Combine Python Load test: 2GB Fanout 4',
     itClass      : 'apache_beam.testing.load_tests.combine_test:CombineTest.testCombineGlobally',
@@ -164,11 +122,7 @@ def batchLoadTestJob = { scope, triggeringContext ->
   def scaledNumberOfWorkers = 5
   infra.scaleCluster(scope, jenkinsJobName, scaledNumberOfWorkers)
 
-  defineTestSteps(scope, testScenarios, [
-    'Combine Python Load test: 2GB 10 byte records',
-    'Combine Python Load test: 2GB 100 byte records',
-    'Combine Python Load test: 2GB 100 kilobyte records'
-  ])
+  defineTestSteps(scope, testScenarios, ['Combine Python Load test: 2GB 10 byte records'])
 
   infra.teardownDataproc(scope, jenkinsJobName)
 }
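
To illustrate the redundancy described in the commit message: below is a
minimal sketch, using only the public Beam Python API, of how a global
combine collapses records of any size into one fixed-size numeric result
(the "Long value" the commit message refers to). This is an
illustration, not the actual code of
apache_beam.testing.load_tests.combine_test; the byte sizes mirror the
10 byte / 100 byte / 100 kilobyte scenarios.

    # A sketch, not the load-test implementation: per-record size never
    # survives past the Map step, so it cannot affect the combine.
    import apache_beam as beam

    with beam.Pipeline() as pipeline:
        _ = (
            pipeline
            # Values sized like the 10 B, 100 B and 100 KB records.
            | beam.Create([b'x' * 10, b'y' * 100, b'z' * 100_000])
            # Every record is reduced to one integer, whatever its size.
            | beam.Map(len)
            # The global combine then emits a single fixed-size result.
            | beam.CombineGlobally(sum)
            | beam.Map(print))

Since each element is fixed-size by the time it reaches the combiner,
scenarios that vary only key_size and value_size exercise the same
combine code path, which is why a single scenario per record-size family
suffices.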