Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
65 changes: 48 additions & 17 deletions extensions/lifecycle/conductor/LifecycleConductor.js
Original file line number Diff line number Diff line change
Expand Up @@ -285,26 +285,57 @@ class LifecycleConductor {
return cb(null, lifecycleTaskVersions.v1);
}

LifecycleMetrics.onLegacyTask(log, 'putBucketIndexes');

this.activeIndexingJobs.push({
bucket: task.bucketName,
indexes: indexesForFeature.lifecycle.v2,
});
return async.series({
Comment thread
delthas marked this conversation as resolved.
diskUsage: done => this._mongodbClient.getDiskUsage(done),
collStats: done => this._mongodbClient.getCollectionStats(
task.bucketName, log, done),
}, (err, results) => {
if (err) {
log.warn('unable to check disk space, skipping index creation', {
bucket: task.bucketName,
error: err,
});
LifecycleMetrics.onLegacyTask(log, 'diskSpaceCheckFailed');
return cb(null, lifecycleTaskVersions.v1);
}

return backbeatMetadataProxy.putBucketIndexes(
task.bucketName,
indexesForFeature.lifecycle.v2,
log,
err => {
if (err) {
log.warn('unable to create lifecycle indexes', {
bucket: task.bucketName,
error: err,
});
}
const fsFreeSize = results.diskUsage.free;
Comment thread
delthas marked this conversation as resolved.
// Each lifecycle index is roughly the same size as the
// _id_ index. We create two, so 3x gives a safe margin
// accounting for _id_ index bloat from incremental inserts.
const idIndexSize = results.collStats.indexSizes?._id_ || 0;

if (fsFreeSize < 3 * idIndexSize) {
Comment thread
delthas marked this conversation as resolved.
Comment thread
delthas marked this conversation as resolved.
log.warn('insufficient disk space for index creation', {
bucket: task.bucketName,
fsFreeSize,
threshold: 3 * idIndexSize,
});
LifecycleMetrics.onLegacyTask(log, 'insufficientDiskSpace');
return cb(null, lifecycleTaskVersions.v1);
}

LifecycleMetrics.onLegacyTask(log, 'putBucketIndexes');

this.activeIndexingJobs.push({
bucket: task.bucketName,
indexes: indexesForFeature.lifecycle.v2,
});

return backbeatMetadataProxy.putBucketIndexes(
task.bucketName,
indexesForFeature.lifecycle.v2,
log,
err => {
if (err) {
log.warn('unable to create lifecycle indexes', {
bucket: task.bucketName,
error: err,
});
}
return cb(null, lifecycleTaskVersions.v1);
});
});
});
}

Expand Down
2 changes: 1 addition & 1 deletion package.json
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@
"@scality/cloudserverclient": "^1.0.3",
"@smithy/node-http-handler": "^3.3.3",
"JSONStream": "^1.3.5",
"arsenal": "git+https://github.com/scality/arsenal#8.3.0",
"arsenal": "git+https://github.com/scality/arsenal#8.3.9",
"async": "^2.3.0",
"backo": "^1.1.0",
"breakbeat": "scality/breakbeat#v1.0.3",
Expand Down
54 changes: 54 additions & 0 deletions tests/unit/lifecycle/LifecycleConductor.spec.js
Original file line number Diff line number Diff line change
Expand Up @@ -411,6 +411,12 @@ describe('Lifecycle Conductor', () => {
conductor.activeIndexingJobsRetrieved = getInProgressSucceeded;
conductor.activeIndexingJobs = inJobs;
conductor._bucketSource = bucektSource;
conductor._mongodbClient = {
getDiskUsage: cb => cb(null, { available: 10000000000, free: 10000000000, total: 20000000000 }),
getCollectionStats: (bucketName, _log, cb) => cb(null, {
indexSizes: { _id_: 80000 },
}),
};
Comment thread
delthas marked this conversation as resolved.
client.indexesObj = getIndexes;
client.error = mockError;

Expand All @@ -422,6 +428,54 @@ describe('Lifecycle Conductor', () => {
done();
});
}));

// When the disk-usage probe errors out, the conductor must fall back to
// task version v1 without queueing a job or calling putBucketIndexes.
it('should return v1: missing indexes + disk space check fails', done => {
    const proxy = new BackbeatMetadataProxyMock();
    proxy.indexesObj = [];
    proxy.error = null;

    conductor.clientManager.getBackbeatMetadataProxy = () => proxy;
    conductor.activeIndexingJobsRetrieved = true;
    conductor.activeIndexingJobs = [];
    conductor._bucketSource = 'mongodb';
    // Mock mongo client whose disk-usage call always fails; collection
    // stats succeed but should never matter on this path.
    conductor._mongodbClient = {
        getDiskUsage: cb => cb(new Error('dbStats failed')),
        getCollectionStats: (bucketName, _log, cb) =>
            cb(null, { indexSizes: { _id_: 80000 } }),
    };

    conductor._indexesGetOrCreate(getTask(true), log, (err, taskVersion) => {
        assert.ifError(err);
        // No index creation attempted, no job recorded, v1 returned.
        assert.deepStrictEqual(proxy.receivedIdxObj, null);
        assert.deepStrictEqual(conductor.activeIndexingJobs, []);
        assert.deepStrictEqual(taskVersion, lifecycleTaskVersions.v1);
        done();
    });
});

// When free disk space is below the 3x _id_-index-size threshold, the
// conductor must skip index creation entirely and fall back to v1.
it('should return v1: missing indexes + insufficient disk space', done => {
    const proxy = new BackbeatMetadataProxyMock();
    proxy.indexesObj = [];
    proxy.error = null;

    conductor.clientManager.getBackbeatMetadataProxy = () => proxy;
    conductor.activeIndexingJobsRetrieved = true;
    conductor.activeIndexingJobs = [];
    conductor._bucketSource = 'mongodb';
    // Mock mongo client reporting 100 KB free against an 80 MB _id_
    // index, so free < 3 * idIndexSize and creation must be skipped.
    conductor._mongodbClient = {
        getDiskUsage: cb =>
            cb(null, { available: 100000, free: 100000, total: 20000000000 }),
        getCollectionStats: (bucketName, _log, cb) =>
            cb(null, { indexSizes: { _id_: 80000000 } }),
    };

    conductor._indexesGetOrCreate(getTask(true), log, (err, taskVersion) => {
        assert.ifError(err);
        // No index creation attempted, no job recorded, v1 returned.
        assert.deepStrictEqual(proxy.receivedIdxObj, null);
        assert.deepStrictEqual(conductor.activeIndexingJobs, []);
        assert.deepStrictEqual(taskVersion, lifecycleTaskVersions.v1);
        done();
    });
});
});

describe('listBuckets', () => {
Expand Down
43 changes: 39 additions & 4 deletions yarn.lock
Original file line number Diff line number Diff line change
Expand Up @@ -1698,6 +1698,26 @@
events "^3.0.0"
tslib "^2.8.1"

"@azure/storage-blob@^12.31.0":
version "12.31.0"
resolved "https://registry.yarnpkg.com/@azure/storage-blob/-/storage-blob-12.31.0.tgz#97b09be2bf6ab59739b862edd8124798362ce720"
integrity sha512-DBgNv10aCSxopt92DkTDD0o9xScXeBqPKGmR50FPZQaEcH4JLQ+GEOGEDv19V5BMkB7kxr+m4h6il/cCDPvmHg==
dependencies:
"@azure/abort-controller" "^2.1.2"
"@azure/core-auth" "^1.9.0"
"@azure/core-client" "^1.9.3"
"@azure/core-http-compat" "^2.2.0"
"@azure/core-lro" "^2.2.0"
"@azure/core-paging" "^1.6.2"
"@azure/core-rest-pipeline" "^1.19.1"
"@azure/core-tracing" "^1.2.0"
"@azure/core-util" "^1.11.0"
"@azure/core-xml" "^1.4.5"
"@azure/logger" "^1.1.4"
"@azure/storage-common" "^12.3.0"
events "^3.0.0"
tslib "^2.8.1"

"@azure/storage-common@^12.1.1":
version "12.1.1"
resolved "https://registry.yarnpkg.com/@azure/storage-common/-/storage-common-12.1.1.tgz#cd0768188f7cf8ea7202d584067ad5f3eba89744"
Expand All @@ -1713,6 +1733,21 @@
events "^3.3.0"
tslib "^2.8.1"

"@azure/storage-common@^12.3.0":
version "12.3.0"
resolved "https://registry.yarnpkg.com/@azure/storage-common/-/storage-common-12.3.0.tgz#5bf257383836e67a426c91d7e9678479afe802a9"
integrity sha512-/OFHhy86aG5Pe8dP5tsp+BuJ25JOAl9yaMU3WZbkeoiFMHFtJ7tu5ili7qEdBXNW9G5lDB19trwyI6V49F/8iQ==
dependencies:
"@azure/abort-controller" "^2.1.2"
"@azure/core-auth" "^1.9.0"
"@azure/core-http-compat" "^2.2.0"
"@azure/core-rest-pipeline" "^1.19.1"
"@azure/core-tracing" "^1.2.0"
"@azure/core-util" "^1.11.0"
"@azure/logger" "^1.1.4"
events "^3.3.0"
tslib "^2.8.1"

"@babel/code-frame@^7.27.1":
version "7.27.1"
resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.27.1.tgz#200f715e66d52a23b221a9435534a91cc13ad5be"
Expand Down Expand Up @@ -4001,16 +4036,16 @@ arraybuffer.prototype.slice@^1.0.4:
optionalDependencies:
ioctl "^2.0.2"

"arsenal@git+https://github.com/scality/arsenal#8.3.0":
version "8.3.0"
resolved "git+https://github.com/scality/arsenal#7c6c73a01c37e241e0e054edbdc9e3a379c16a77"
"arsenal@git+https://github.com/scality/arsenal#8.3.9":
version "8.3.9"
resolved "git+https://github.com/scality/arsenal#51e5b761f7f0612a722c828fa3d43b438c50ab7c"
dependencies:
"@aws-sdk/client-kms" "^3.975.0"
"@aws-sdk/client-s3" "^3.975.0"
"@aws-sdk/credential-providers" "^3.975.0"
"@aws-sdk/lib-storage" "^3.975.0"
"@azure/identity" "^4.13.0"
"@azure/storage-blob" "^12.28.0"
"@azure/storage-blob" "^12.31.0"
"@js-sdsl/ordered-set" "^4.4.2"
"@scality/hdclient" "^1.3.1"
"@smithy/node-http-handler" "^4.3.0"
Expand Down
Loading