Skip to content

Commit

Permalink
Implement ObjectID to replace mongodb
Browse files Browse the repository at this point in the history
We need to remove code related to MongoDB as we are using PostgreSQL.
This is to be done in steps; the first step is to implement an
ObjectId class and replace mongodb.ObjectId calls with that class.

Signed-off-by: Ashish Pandey <[email protected]>
  • Loading branch information
aspandey committed Nov 29, 2024
1 parent e5e6a35 commit f2f499b
Show file tree
Hide file tree
Showing 50 changed files with 1,421 additions and 228 deletions.
4 changes: 2 additions & 2 deletions src/agent/block_store_speed.js
Original file line number Diff line number Diff line change
Expand Up @@ -4,14 +4,14 @@
// const _ = require('lodash');
const argv = require('minimist')(process.argv);
const cluster = require('cluster');
const mongodb = require('mongodb');

const api = require('../api');
const config = require('../../config');
const dotenv = require('../util/dotenv');
const Speedometer = require('../util/speedometer');
const { RPC_BUFFERS } = require('../rpc');

const ObjectID = require('../util/objectid.js');
dotenv.load();

argv.email = argv.email || '[email protected]';
Expand Down Expand Up @@ -60,7 +60,7 @@ async function worker(client) {
}

async function write_block(client) {
const block_id = new mongodb.ObjectId();
const block_id = (new ObjectID(null)).toString();
return client.block_store.write_block({
[RPC_BUFFERS]: { data: Buffer.allocUnsafe(argv.size) },
block_md: {
Expand Down
6 changes: 3 additions & 3 deletions src/cmd/manage_nsfs.js
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ const nb_native = require('../util/nb_native');
const { ConfigFS } = require('../sdk/config_fs');
const cloud_utils = require('../util/cloud_utils');
const native_fs_utils = require('../util/native_fs_utils');
const mongo_utils = require('../util/mongo_utils');
const ObjectID = require('../util/objectid.js');
const SensitiveString = require('../util/sensitive_string');
const { account_id_cache } = require('../sdk/accountspace_fs');
const ManageCLIError = require('../manage_nsfs/manage_nsfs_cli_errors').ManageCLIError;
Expand Down Expand Up @@ -169,7 +169,7 @@ async function merge_new_and_existing_config_data(user_input_bucket_data) {
* @returns { Promise<{ code: ManageCLIResponse.BucketCreated, detail: Object, event_arg: Object }>}
*/
async function add_bucket(data) {
data._id = mongo_utils.mongoObjectId();
data._id = (new ObjectID(null)).toString();
const parsed_bucket_data = await config_fs.create_bucket_config_file(data);
await set_bucker_owner(parsed_bucket_data);
return { code: ManageCLIResponse.BucketCreated, detail: parsed_bucket_data, event_arg: { bucket: data.name }};
Expand Down Expand Up @@ -413,7 +413,7 @@ async function fetch_existing_account_data(action, target, decrypt_secret_key) {
* @returns { Promise<{ code: typeof ManageCLIResponse.AccountCreated, detail: Object, event_arg: Object }>}
*/
async function add_account(data) {
data._id = mongo_utils.mongoObjectId();
data._id = (new ObjectID(null)).toString();
await config_fs.create_account_config_file(data);
return { code: ManageCLIResponse.AccountCreated, detail: data, event_arg: { account: data.name } };
}
Expand Down
2 changes: 1 addition & 1 deletion src/manage_nsfs/nc_master_key_manager.js
Original file line number Diff line number Diff line change
Expand Up @@ -109,7 +109,7 @@ class NCMasterKeysManager {
*/
async _create_master_key() {
const master_key = {
id: db_client.new_object_id(),
id: db_client.new_object_id().toString(),
cipher_key: crypto.randomBytes(32),
cipher_iv: crypto.randomBytes(16),
encryption_type: 'aes-256-gcm'
Expand Down
7 changes: 3 additions & 4 deletions src/nc/nc_utils.js
Original file line number Diff line number Diff line change
@@ -1,17 +1,16 @@
/* Copyright (C) 2024 NooBaa */
'use strict';

const mongo_utils = require('../util/mongo_utils');

const objectid = require('../util/objectid.js');
/**
* generate_id will generate an id that we use to identify entities (such as account, bucket, etc.).
*/
// TODO:
// - reuse this function in NC NSFS where we used the mongo_utils module
// - this function implementation should be db_client.new_object_id(),
// - this function implementation should be db_client.new_object_id().toString(),
// but to align with manage nsfs we won't change it now
function generate_id() {
return mongo_utils.mongoObjectId();
return objectid();
}

/**
Expand Down
4 changes: 2 additions & 2 deletions src/sdk/bucketspace_fs.js
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
/* Copyright (C) 2020 NooBaa */
'use strict';

const objectid = require('../util/objectid.js');
const _ = require('lodash');
const util = require('util');
const path = require('path');
Expand All @@ -9,7 +10,6 @@ const config = require('../../config');
const RpcError = require('../rpc/rpc_error');
const js_utils = require('../util/js_utils');
const nb_native = require('../util/nb_native');
const mongo_utils = require('../util/mongo_utils');
const KeysSemaphore = require('../util/keys_semaphore');
const {
get_umasked_mode,
Expand Down Expand Up @@ -314,7 +314,7 @@ class BucketSpaceFS extends BucketSpaceSimpleFS {

new_bucket_defaults(account, { name, tag, lock_enabled, force_md5_etag }, create_uls, bucket_storage_path) {
return {
_id: mongo_utils.mongoObjectId(),
_id: objectid(),
name,
tag: js_utils.default_value(tag, undefined),
owner_account: account._id,
Expand Down
20 changes: 10 additions & 10 deletions src/sdk/map_api_types.js
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ class ChunkAPI {
get is_building_frags() { return this.chunk_info.is_building_frags; }
set is_building_frags(val) { this.chunk_info.is_building_frags = val; }
get dup_chunk_id() { return parse_optional_id(this.chunk_info.dup_chunk); }
set dup_chunk_id(val) { this.chunk_info.dup_chunk = val.toHexString(); }
set dup_chunk_id(val) { this.chunk_info.dup_chunk = val; }

get frags() {
if (!this.__frags) {
Expand All @@ -102,7 +102,7 @@ class ChunkAPI {

set_new_chunk_id() {
if (this._id) throw new Error(`ChunkAPI.set_new_chunk_id: unexpected call for existing chunk ${this._id}`);
this.chunk_info._id = db_client.instance().new_object_id().toHexString();
this.chunk_info._id = db_client.instance().new_object_id().toString();
}

/**
Expand All @@ -112,14 +112,14 @@ class ChunkAPI {
*/
add_block_allocation(frag, pools, mirror) {
const block_md = {
id: db_client.instance().new_object_id().toHexString(),
id: db_client.instance().new_object_id().toString(),
size: this.frag_size,
digest_b64: frag.digest_b64,
digest_type: this.chunk_coder_config.frag_digest_type,
};
if (!frag.allocations) frag.allocations = [];
frag.allocations.push({
mirror_group: mirror._id.toHexString(),
mirror_group: mirror._id,
block_md,
mirror,
pools,
Expand Down Expand Up @@ -238,7 +238,7 @@ class FragAPI {
set allocations(val) { this.frag_info.allocations = val; }

set_new_frag_id() {
this.frag_info._id = db_client.instance().new_object_id().toHexString();
this.frag_info._id = db_client.instance().new_object_id().toString();
}

/**
Expand Down Expand Up @@ -344,8 +344,8 @@ class BlockAPI {
/** @type {nb.Pool} */
const pool = this.system_store.data.systems[0].pools_by_name[node.pool];
this.node = node;
this.block_md.node = node._id.toHexString();
this.block_md.pool = pool._id.toHexString();
this.block_md.node = node._id;
this.block_md.pool = pool._id;
this.block_md.address = node.rpc_address;
this.block_md.node_type = node.node_type;
const adminfo = this.block_info.adminfo;
Expand Down Expand Up @@ -423,18 +423,18 @@ class PartAPI {

set_new_part_id() {
if (this._id) throw new Error(`PartAPI.set_new_part_id: already has id ${this._id}`);
this._id = db_client.instance().new_object_id();
this._id = db_client.instance().new_object_id().toString();
}

/**
* @param {nb.ID} chunk_id
*/
set_chunk(chunk_id) { this.part_info.chunk_id = chunk_id.toHexString(); }
set_chunk(chunk_id) { this.part_info.chunk_id = chunk_id.toString(); }

/**
* @param {nb.ID} obj_id
*/
set_obj_id(obj_id) { this.part_info.obj_id = obj_id.toHexString(); }
set_obj_id(obj_id) { this.part_info.obj_id = obj_id.toString(); }

/** @returns {nb.PartInfo} */
to_api() {
Expand Down
20 changes: 12 additions & 8 deletions src/sdk/nb.d.ts
Original file line number Diff line number Diff line change
@@ -1,11 +1,13 @@
export as namespace nb;

import * as fs from 'fs';
import * as mongodb from 'mongodb';
import { EventEmitter } from 'events';
import { Readable, Writable } from 'stream';
import { IncomingMessage, ServerResponse } from 'http';
import { ObjectPart, Checksum} from '@aws-sdk/client-s3';
import { ObjectPart, Checksum } from '@aws-sdk/client-s3';
import * as mongodb from 'mongodb';

import ObjectID = require("../util/objectid.js");

type Semaphore = import('../util/semaphore');
type KeysSemaphore = import('../util/keys_semaphore');
Expand Down Expand Up @@ -39,15 +41,17 @@ type ReplicationLogCandidates = Record<string, { action: ReplicationLogAction, t

type BucketDiffKeysDiff = { [key: string]: Array<object> };



interface MapByID<T> { [id: string]: T }

interface Base {
toJSON?(): object | string;
toString?(): string;
}

type ID = mongodb.ObjectID;
type DBBuffer = mongodb.Binary | Buffer;
type ID = ObjectID;
type DBBuffer = Buffer;

interface System extends Base {
_id: ID;
Expand Down Expand Up @@ -720,8 +724,8 @@ interface DBClient {
populate(docs: object[] | object, doc_path: string, collection: DBCollection, fields: object): Promise<object[] | object>;
resolve_object_ids_recursive(idmap: object, item: object): object;
resolve_object_ids_paths(idmap: object, item: object, paths: string[], allow_missing: boolean): object;
new_object_id(): mongodb.ObjectId;
parse_object_id(id_str: string): mongodb.ObjectId;
new_object_id(): ObjectID;
parse_object_id(id_str: string): ObjectID;
fix_id_type(doc: object[] | object): object[] | object;
is_object_id(id: object[] | object): boolean;
is_err_duplicate_key(err: object): boolean;
Expand Down Expand Up @@ -824,7 +828,7 @@ interface BucketSpace {

read_account_by_access_key({ access_key: string }): Promise<any>;
read_bucket_sdk_info({ name: string }): Promise<any>;
check_same_stat(bucket_name: string, bucket_stat: nb.NativeFSStats); // only implemented in bucketspace_fs
check_same_stat(bucket_name: string, bucket_stat: nb.NativeFSStats); // only implemented in bucketspace_fs

list_buckets(params: object, object_sdk: ObjectSDK): Promise<any>;
read_bucket(params: object): Promise<any>;
Expand Down Expand Up @@ -1150,4 +1154,4 @@ interface GetObjectAttributesParts {
MaxParts?: number;
IsTruncated?: boolean;
Parts?: ObjectPart[];
}
}
4 changes: 2 additions & 2 deletions src/server/analytic_services/activity_log_store.js
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
/* Copyright (C) 2016 NooBaa */
'use strict';

const mongodb = require('mongodb');
const _ = require('lodash');

const db_client = require('../../util/db_client');
const ObjectID = require('../../util/objectid.js');
const P = require('../../util/promise');
const activity_log_schema = require('./activity_log_schema');
const activity_log_indexes = require('./activity_log_indexes');
Expand All @@ -25,7 +25,7 @@ class ActivityLogStore {
}

make_activity_log_id(id_str) {
return new mongodb.ObjectID(id_str);
return (new ObjectID(id_str)).toString();
}


Expand Down
5 changes: 2 additions & 3 deletions src/server/analytic_services/history_data_store.js
Original file line number Diff line number Diff line change
@@ -1,8 +1,6 @@
/* Copyright (C) 2016 NooBaa */
'use strict';

const mongodb = require('mongodb');

// const dbg = require('../../util/debug_module')(__filename);
const config = require('../../../config.js');
// const pkg = require('../../../package.json');
Expand All @@ -11,6 +9,7 @@ const P = require('../../util/promise');
const db_client = require('../../util/db_client');
const system_history_schema = require('../analytic_services/system_history_schema');

const ObjectID = require('../../util/objectid.js');
class HistoryDataStore {

constructor() {
Expand All @@ -30,7 +29,7 @@ class HistoryDataStore {
const time_stamp = new Date();
const record_expiration_date = new Date(time_stamp.getTime() - config.STATISTICS_COLLECTOR_EXPIRATION);
const record = {
_id: new mongodb.ObjectId(),
_id: (new ObjectID(null)).toString(),
time_stamp,
system_snapshot: item,
history_type: 'SYSTEM'
Expand Down
5 changes: 2 additions & 3 deletions src/server/func_services/func_stats_store.js
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,7 @@
'use strict';

// const _ = require('lodash');
const mongodb = require('mongodb');

const ObjectID = require('../../util/objectid.js');
// const dbg = require('../../util/debug_module')(__filename);
const db_client = require('../../util/db_client');

Expand All @@ -26,7 +25,7 @@ class FuncStatsStore {
}

make_func_stat_id(id_str) {
return new mongodb.ObjectId(id_str);
return (new ObjectID(id_str)).toString();
}

async create_func_stat(stat) {
Expand Down
6 changes: 2 additions & 4 deletions src/server/func_services/func_store.js
Original file line number Diff line number Diff line change
@@ -1,13 +1,11 @@
/* Copyright (C) 2016 NooBaa */
'use strict';

const mongodb = require('mongodb');

const db_client = require('../../util/db_client');

const func_schema = require('./func_schema');
const func_indexes = require('./func_indexes');

const ObjectID = require('../../util/objectid.js');
class FuncStore {

constructor() {
Expand All @@ -24,7 +22,7 @@ class FuncStore {
}

make_func_id(id_str) {
return new mongodb.ObjectId(id_str);
return (new ObjectID(id_str)).toString();
}

async create_func(func) {
Expand Down
12 changes: 6 additions & 6 deletions src/server/node_services/node_allocator.js
Original file line number Diff line number Diff line change
Expand Up @@ -140,7 +140,7 @@ async function refresh_tiering_alloc(tiering, force) {
* @returns {Promise<void>}
*/
async function refresh_pool_alloc(pool, force) {
const pool_id_str = pool._id.toHexString();
const pool_id_str = pool._id;
let group = alloc_group_by_pool[pool_id_str];
if (!group) {
group = {
Expand Down Expand Up @@ -202,7 +202,7 @@ async function refresh_tiers_alloc(tiering_list, force) {
const wait_list = [];

for (const tiering of tiering_list) {
const tiering_id_str = tiering._id.toHexString();
const tiering_id_str = tiering._id;
let group = alloc_group_by_tiering[tiering_id_str];
if (!group) {
group = {
Expand Down Expand Up @@ -266,10 +266,10 @@ function get_tiering_status(tiering) {
/** @type {nb.TieringStatus} */
const tiering_status_by_tier = {};
if (!tiering) return tiering_status_by_tier;
const tiering_id_str = tiering._id.toHexString();
const tiering_id_str = tiering._id;
const alloc_group = alloc_group_by_tiering[tiering_id_str];
_.each(tiering.tiers, ({ tier }) => {
const tier_id_str = tier._id.toHexString();
const tier_id_str = tier._id;
const mirrors_storage = alloc_group && alloc_group.mirrors_storage_by_tier_id[tier_id_str];
let tier_pools = [];
// Inside the Tier, pools are unique and we don't need to filter afterwards
Expand Down Expand Up @@ -310,7 +310,7 @@ function _get_tier_pools_status(pools, required_valid_nodes) {
} else if (num_nodes < required_valid_nodes) {
valid_for_allocation = false;
}
pools_status_by_id[pool._id.toHexString()] = {
pools_status_by_id[pool._id] = {
valid_for_allocation,
num_nodes,
resource_type: pool.resource_type
Expand Down Expand Up @@ -344,7 +344,7 @@ function allocate_node({ avoid_nodes, allocated_hosts, pools = [] }) {
// Since we will merge the two groups we will eventually have two average groups
// This is bad since we will have two groups with each having fast and slow drives
pools.forEach(pool => {
const group = alloc_group_by_pool[pool._id.toHexString()];
const group = alloc_group_by_pool[pool._id];
if (group && group.latency_groups) {
group.latency_groups.forEach((value, index) => {
if (pools_latency_groups[index]) {
Expand Down
Loading

0 comments on commit f2f499b

Please sign in to comment.