diff --git a/src/test/unit_tests/jest_tests/test_versioning_concurrency.test.js b/src/test/unit_tests/jest_tests/test_versioning_concurrency.test.js
index 535f2ae38e..6d19a5f0e7 100644
--- a/src/test/unit_tests/jest_tests/test_versioning_concurrency.test.js
+++ b/src/test/unit_tests/jest_tests/test_versioning_concurrency.test.js
@@ -93,7 +93,7 @@ describe('test versioning concurrency', () => {
         expect(failed_operations).toHaveLength(0);
     }, test_timeout);
 
-    // same as s3tests_boto3/functional/test_s3.py::test_versioning_concurrent_multi_object_delete,
+    // same as s3tests_boto3/functional/test_s3.py::test_versioning_concurrent_multi_object_delete,
     // this test has a bug, it tries to create the bucket twice and fails
     // https://github.com/ceph/s3-tests/blob/master/s3tests_boto3/functional/test_s3.py#L1642
     // see - https://github.com/ceph/s3-tests/issues/588
@@ -488,7 +488,73 @@ describe('test versioning concurrency', () => {
         expect(successful_operations2).toHaveLength(num_of_concurrency2);
         expect(failed_operations1).toHaveLength(0);
         const versions = await nsfs.list_object_versions({ bucket: bucket }, DUMMY_OBJECT_SDK);
-        expect(versions.objects.length).toBe(num_of_concurrency1 + 1); // num_of_concurrency1 is the number of versions uploaded when versioning was enabled + 1 null version
+        expect(versions.objects.length).toBe(num_of_concurrency1 + 1); // num_of_concurrency1 is the number of versions uploaded when versioning was enabled + 1 null version
+    }, test_timeout);
+
+    it('multiple delete different keys', async () => {
+        const bucket = 'bucket1';
+        const key_prefix = 'key_deleted';
+        const versions_arr = [];
+        const num_deletes = 5;
+        const num_objects = 7;
+
+        for (let i = 0; i < num_objects; i++) {
+            const random_data = Buffer.from(String(i));
+            const body = buffer_utils.buffer_to_read_stream(random_data);
+            const res = await nsfs.upload_object({ bucket, key: key_prefix + i, source_stream: body }, DUMMY_OBJECT_SDK).catch(err => console.log('put error - ', err));
+            versions_arr.push(res.version_id);
+        }
+        const number_of_successful_operations = [];
+        const failed_operations = [];
+        for (let i = 0; i < num_deletes; i++) {
+            nsfs.delete_object({ bucket, key: key_prefix + i, version_id: versions_arr[i]}, DUMMY_OBJECT_SDK)
+                .then(res => number_of_successful_operations.push(res))
+                .catch(err => failed_operations.push(err));
+        }
+        await P.delay(2000);
+        expect(number_of_successful_operations).toHaveLength(num_deletes);
+        expect(failed_operations).toHaveLength(0);
+        const list_objects = await nsfs.list_objects({bucket}, DUMMY_OBJECT_SDK);
+        let num_objs = 0;
+        list_objects.objects.forEach(obj => {
+            if (obj.key.startsWith(key_prefix)) {
+                num_objs += 1;
+            }
+        });
+        expect(num_objs).toBe(num_objects - num_deletes);
+
+    }, test_timeout);
+
+    it('copy-object to same target', async () => {
+        const num_copies = 5;
+        const bucket = 'bucket1';
+        const copy_source = {bucket, key: 'key1'};
+        const random_data = Buffer.from("test data, it is test data");
+        const body = buffer_utils.buffer_to_read_stream(random_data);
+        const source_res = await nsfs.upload_object({ bucket: copy_source.bucket,
+            key: copy_source.key, source_stream: body }, DUMMY_OBJECT_SDK);
+        copy_source.version_id = source_res.version_id;
+
+        const key = 'key3';
+        const versions_arr = [];
+        const failed_operations = [];
+        // copy key1 5 times to key3
+        for (let i = 0; i < num_copies; i++) {
+            nsfs.upload_object({ bucket, key, source_stream: body, copy_source }, DUMMY_OBJECT_SDK)
+                .then(res => versions_arr.push(res.etag))
+                .catch(err => failed_operations.push(err));
+        }
+        await P.delay(1000);
+        expect(versions_arr).toHaveLength(num_copies);
+        expect(failed_operations).toHaveLength(0);
+        const list_objects = await nsfs.list_object_versions({bucket}, DUMMY_OBJECT_SDK);
+        let num_versions = 0;
+        list_objects.objects.forEach(obj => {
+            if (obj.key === key) {
+                num_versions += 1;
+            }
+        });
+        expect(num_versions).toBe(num_copies);
     }, test_timeout);
 
 });
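Note on the pattern used by both new tests: the concurrent calls are fired without keeping the promises, and completion is assumed after a fixed P.delay(). A minimal sketch of an alternative, not part of the diff, reusing the bucket, key_prefix, versions_arr and num_deletes names from the 'multiple delete different keys' test and settling the deletes explicitly with Promise.allSettled:

        // Sketch only: issue the same concurrent deletes, but await their settlement
        // instead of sleeping for a fixed interval.
        const delete_promises = [];
        for (let i = 0; i < num_deletes; i++) {
            delete_promises.push(
                nsfs.delete_object({ bucket, key: key_prefix + i, version_id: versions_arr[i] }, DUMMY_OBJECT_SDK)
            );
        }
        const settled = await Promise.allSettled(delete_promises);
        const successful_operations = settled.filter(r => r.status === 'fulfilled').map(r => r.value);
        const failed_operations = settled.filter(r => r.status === 'rejected').map(r => r.reason);
        expect(successful_operations).toHaveLength(num_deletes);
        expect(failed_operations).toHaveLength(0);

This keeps the assertions identical while removing the dependence on the 2000 ms delay being long enough.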