test #2604 (Draft)

wants to merge 6 commits into base: bugfix/BB-641

2 changes: 1 addition & 1 deletion .github/actions/ft-test/action.yaml
@@ -27,7 +27,7 @@ runs:
env:
BACKBEAT_CONFIG_FILE: ${{ inputs.config }}

- uses: codecov/codecov-action@v4
- uses: codecov/codecov-action@v5
with:
token: ${{ inputs.token }}
directory: ./coverage/ft_test:${{ inputs.testsuite }}
4 changes: 2 additions & 2 deletions .github/workflows/tests.yaml
@@ -80,7 +80,7 @@ jobs:
-nodes 1 -stream -timeout 5m -slowSpecThreshold 60
working-directory: bucket-scanner

- uses: codecov/codecov-action@v4
- uses: codecov/codecov-action@v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
directory: bucket-scanner
@@ -133,7 +133,7 @@ jobs:
run: yarn run cover:test
env:
BACKBEAT_CONFIG_FILE: tests/config.json
- uses: codecov/codecov-action@v4
- uses: codecov/codecov-action@v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
directory: ./coverage/test
4 changes: 2 additions & 2 deletions Dockerfile
@@ -1,6 +1,6 @@
ARG NODE_VERSION=16.20-bullseye-slim

FROM node:${NODE_VERSION} as builder
FROM node:${NODE_VERSION} AS builder

WORKDIR /usr/src/app

@@ -22,7 +22,7 @@ RUN apt-get update \
libffi-dev \
libzstd-dev

ENV DOCKERIZE_VERSION v0.6.1
ENV DOCKERIZE_VERSION=v0.6.1

RUN wget https://github.com/jwilder/dockerize/releases/download/$DOCKERIZE_VERSION/dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz \
&& tar -C /usr/local/bin -xzvf dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz \
185 changes: 134 additions & 51 deletions extensions/mongoProcessor/MongoQueueProcessor.js
@@ -7,7 +7,8 @@
const { replicationBackends, emptyFileMd5 } = require('arsenal').constants;
const MongoClient = require('arsenal').storage
.metadata.mongoclient.MongoClientInterface;
const ObjectMD = require('arsenal').models.ObjectMD;
const { ObjectMD } = require('arsenal').models;
const { VersionID } = require('arsenal').versioning;
const { extractVersionId } = require('../../lib/util/versioning');

const Config = require('../../lib/Config');
@@ -194,30 +195,29 @@
], done);
}

_getZenkoObjectMetadata(log, entry, bucketInfo, done) {
// NOTE: This is only used for updating replication info. If the Zenko
// bucket does not have repInfo set, then we can ignore fetching
const bucketRepInfo = bucketInfo.getReplicationConfiguration();
if (!bucketRepInfo || !bucketRepInfo.rules ||
!bucketRepInfo.rules[0].enabled) {
return done();
}

/**
* Retrieve Zenko object metadata from MongoDB
* @param {Logger} log The logger object
* @param {ObjectQueueEntry|DeleteOpQueueEntry} entry The entry being processed
* @param {string} versionId The version id of the object
* @param {function} done The callback function
* @returns {undefined}
*/
_getZenkoObjectMetadata(log, entry, versionId, done) {
const bucket = entry.getBucket();
const key = entry.getObjectKey();
const params = {};

// master keys with a 'null' version id coming from
// a versioning-suspended bucket are considered a version;
// we should not specify the version id in this case
if (entry.getVersionId() && !entry.getIsNull()) {
params.versionId = entry.getVersionId();
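// guard: the entry may be a DeleteOpQueueEntry, which may not implement
// getIsNull(), hence the existence check before calling it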
if (versionId && !(entry.getIsNull && entry.getIsNull())) {
params.versionId = versionId;
}

return this._mongoClient.getObject(bucket, key, params, log,
(err, data) => {
if (err && err.NoSuchKey) {
return done();
}
log.debug('getting zenko object metadata', { bucket, key, versionId, params });

return this._mongoClient.getObject(bucket, key, params, log, (err, data) => {
if (err) {
log.error('error getting zenko object metadata', {
method: 'MongoQueueProcessor._getZenkoObjectMetadata',
@@ -226,6 +226,7 @@
});
return done(err);
}

return done(null, data);
});
}
@@ -332,6 +333,8 @@
_updateReplicationInfo(entry, bucketInfo, content, zenkoObjMd) {
const bucketRepInfo = bucketInfo.getReplicationConfiguration();

// TODO: adjust `_updateReplicationInfo` for restored objects

// reset first before attempting any other updates
const objectMDModel = new ObjectMD();
entry.setReplicationInfo(objectMDModel.getReplicationInfo());
@@ -398,35 +401,59 @@
const key = sourceEntry.getObjectKey();
const versionId = extractVersionId(sourceEntry.getObjectVersionedKey());

const options = versionId ? { versionId } : undefined;

// Calling deleteObject with undefined options to use deleteObjectNoVer which is used for
// deleting non versioned objects that only have master keys.
// When deleting a versioned object however we supply the version id in the options, which
// causes the function to call the deleteObjectVer function that is used to handle objects that
// have both a master and version keys. This handles the deletion of both the version and the master
// keys in the case where no other version is available, or deleting the version and updating the
// master key otherwise.
return this._mongoClient.deleteObject(bucket, key, options, log,
err => {
if (err) {
this._normalizePendingMetric(location);
log.end().error('error deleting object metadata ' +
'from mongo', {
bucket,
key,
error: err.message,
this.logger.debug('processing object delete', { bucket, key, versionId });

async.waterfall([
cb => this._getZenkoObjectMetadata(log, sourceEntry, versionId, cb),
(zenkoObjMd, cb) => {
if (zenkoObjMd.dataStoreName !== location) {
log.end().info('ignore delete entry, transitioned to another location', {
entry: sourceEntry.getLogInfo(),
location,
});
return done(err);
return done();
}
this._produceMetricCompletionEntry(location);
log.end().info('object metadata deleted from mongo', {

return cb();
},
cb => {
// Calling deleteObject with undefined options uses deleteObjectNoVer, which deletes
// non-versioned objects that only have master keys.
// When deleting a versioned object, however, we supply the version id in the options,
// which makes the function call deleteObjectVer, used for objects that have both
// master and version keys. That path deletes both the version and the master key
// when no other version is available, or deletes the version and updates the
// master key otherwise.
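// In short, a sketch of the two call shapes described above:
//   deleteObject(bucket, key, undefined, log, cb)      -> deleteObjectNoVer
//   deleteObject(bucket, key, { versionId }, log, cb)  -> deleteObjectVer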
const options = versionId ? { versionId } : undefined;

return this._mongoClient.deleteObject(bucket, key, options, log, cb);
},
], err => {
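// arsenal errors carry type flags on `err.is` (e.g. err.is.NoSuchKey)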
if (err?.is?.NoSuchKey) {
log.end().info('skipping delete entry', {
entry: sourceEntry.getLogInfo(),
location,
});
return done();
}
if (err) {
this._normalizePendingMetric(location);
log.end().error('error deleting object metadata ' +
'from mongo', {
bucket,
key,
error: err.message,
location,
});
return done(err);
}
this._produceMetricCompletionEntry(location);
log.end().info('object metadata deleted from mongo', {
entry: sourceEntry.getLogInfo(),
location,
});
return done();
});
}

/**
@@ -441,10 +468,29 @@
_processObjectQueueEntry(log, sourceEntry, location, bucketInfo, done) {
const bucket = sourceEntry.getBucket();
const key = sourceEntry.getObjectKey();
const scalVersionId = sourceEntry.getValue()['x-amz-meta-scal-version-id'];

this._getZenkoObjectMetadata(log, sourceEntry, bucketInfo,
(err, zenkoObjMd) => {
if (err) {
this.logger.debug('processing object metadata', { bucket, key, scalVersionId });

const maybeGetZenkoObjectMetadata = cb => {
// NOTE: ZenkoObjMD is used for updating replication info, as well as validating the
// `x-amz-meta-scal-version-id` header of restored objects. If the Zenko bucket does
// not have repInfo set and the header is not set, then we can skip fetching.
const bucketRepInfo = bucketInfo.getReplicationConfiguration();
if (!scalVersionId && !bucketRepInfo?.rules?.some(r => r.enabled)) {
return cb();
}

// Use x-amz-meta-scal-version-id if provided, instead of the actual versionId of the object.
// This should happen only for restored objects: in all other situations, both the source
// and ingested objects should have the same version id (and not the x-amz-meta-scal-version-id
// metadata).
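// (note: VersionID.decode converts the encoded version id string stored in the
// metadata back to arsenal's raw internal representation)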
const versionId = scalVersionId ? VersionID.decode(scalVersionId) : sourceEntry.getVersionId();
return this._getZenkoObjectMetadata(log, sourceEntry, versionId, cb);
};

maybeGetZenkoObjectMetadata((err, zenkoObjMd) => {
if (err && !err.NoSuchKey) {
this._normalizePendingMetric(location);
log.end().error('error processing object queue entry', {
method: 'MongoQueueProcessor._processObjectQueueEntry',
Expand All @@ -454,6 +500,35 @@
return done(err);
}

// If the object has `x-amz-meta-scal-version-id`, we need to use it instead of the entry's version id.
// This should only happen for objects restored onto the OOB location, and the location
// should match in that case
if (scalVersionId) {
if (!zenkoObjMd) {
this.logger.warn('missing source entry, ignoring x-amz-meta-scal-version-id', {
method: 'MongoQueueProcessor._processObjectQueueEntry',
location,
});
} else if (zenkoObjMd.location[0]?.dataStoreVersionId !== sourceEntry.getVersionId()) {
this.logger.warn('mismatched source entry, ignoring x-amz-meta-scal-version-id', {
method: 'MongoQueueProcessor._processObjectQueueEntry',
location,
});
} else {
this.logger.info('restored oob object', {
bucket, key, scalVersionId, zenkoObjMd, sourceEntry
});

sourceEntry.setVersionId(scalVersionId);

// TODO: do we need to update the (mongo) metadata in that case?
// - This may happen if the object is re-tagged while restored?
// - Need to clean up the scal version id: delete objVal['x-amz-meta-scal-version-id'];
// - Need to keep the archive & restore fields in the metadata
return done();
}
}

const content = getContentType(sourceEntry, zenkoObjMd);
if (content.length === 0) {
this._normalizePendingMetric(location);
@@ -480,6 +555,7 @@

const objVal = sourceEntry.getValue();
const params = {};

// Versioning-suspended entries will have a version id but also an isNull tag.
// These master keys are considered a version and do not have a duplicate version key,
// so we don't specify the version id or repairMaster in this case
@@ -608,6 +684,9 @@
MongoProcessorMetrics.onProcessKafkaEntry();
const log = this.logger.newRequestLogger();
const sourceEntry = QueueEntry.createFromKafkaEntry(kafkaEntry);

this.logger.trace('processing kafka entry', { sourceEntry });

if (sourceEntry.error) {
log.end().error('error processing source entry',
{ error: sourceEntry.error });
Expand All @@ -623,19 +702,23 @@
const location = bucketInfo.getLocationConstraint();

if (sourceEntry instanceof DeleteOpQueueEntry) {
return this._processDeleteOpQueueEntry(log, sourceEntry,
location, err => {
this._handleMetrics(sourceEntry, !!err);
return done(err);
});
return this._processDeleteOpQueueEntry(log, sourceEntry, location, err => {
this._handleMetrics(sourceEntry, !!err);
return done(err);
});
}

if (sourceEntry instanceof ObjectQueueEntry) {
return this._processObjectQueueEntry(log, sourceEntry, location,
bucketInfo, err => {
this._handleMetrics(sourceEntry, !!err);
return done(err);
});
// TODO: need to handle "replacement" case, e.g. if new object (version) is created on the RING
// while the object is either restored or archived
// - this is versioned: so it will not depend on the "state" of the object
// - probably works fine, may simply not have the same versions on both sides...
return this._processObjectQueueEntry(log, sourceEntry, location, bucketInfo, err => {
this._handleMetrics(sourceEntry, !!err);
return done(err);
});
}

log.end().warn('skipping unknown source entry', {
entry: sourceEntry.getLogInfo(),
entryType: sourceEntry.constructor.name,