Rename redisDb to serverDb #156

Merged: 1 commit, Apr 3, 2024
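Before the per-file hunks: every change in this PR is a substitution at a declaration site; the typedef itself lives in src/server.h, which is not part of this excerpt. As a rough orientation sketch (an assumption, not a hunk from the PR), this is the shape of the renamed type, showing only the fields that actually appear in the hunks below:

typedef struct kvstore kvstore;    /* opaque here; the real definition is in kvstore.h */
typedef struct list list;          /* opaque here; the real definition is in adlist.h */

typedef struct serverDb {          /* was: typedef struct redisDb { ... } redisDb; */
    kvstore *keys;                 /* the keyspace, e.g. kvstoreSize(db->keys) below */
    kvstore *expires;              /* keys that have a TTL set */
    list *defrag_later;            /* large keys deferred by the active defrag cycle */
    /* ... the remaining fields keep their existing names and layout ... */
} serverDb;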
2 changes: 1 addition & 1 deletion src/aof.c
@@ -2260,7 +2260,7 @@ int rewriteAppendOnlyFileRio(rio *aof) {

for (j = 0; j < server.dbnum; j++) {
char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n";
redisDb *db = server.db + j;
serverDb *db = server.db + j;
if (kvstoreSize(db->keys) == 0) continue;

/* SELECT the new DB */
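A note on the selectcmd literal in the hunk above, since it is easy to misread: it is only the RESP prefix (a 2-element array header plus the bulk string "SELECT"); the db index is appended afterwards as the second bulk string. A standalone sketch of the full encoding (hypothetical helper, not code from the PR):

#include <stdio.h>

/* Illustration only: serialize "SELECT <dbid>" the way it appears in a
 * rewritten AOF, e.g. db 5 becomes "*2\r\n$6\r\nSELECT\r\n$1\r\n5\r\n". */
static int respEncodeSelect(char *buf, size_t len, int dbid) {
    char num[21];
    int numlen = snprintf(num, sizeof(num), "%d", dbid);
    return snprintf(buf, len, "*2\r\n$6\r\nSELECT\r\n$%d\r\n%s\r\n", numlen, num);
}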
6 changes: 3 additions & 3 deletions src/blocked.c
@@ -453,7 +453,7 @@ static blocking_type getBlockedTypeByType(int type) {
* made by a script or in the context of MULTI/EXEC.
*
* The list will be finally processed by handleClientsBlockedOnKeys() */
static void signalKeyAsReadyLogic(redisDb *db, robj *key, int type, int deleted) {
static void signalKeyAsReadyLogic(serverDb *db, robj *key, int type, int deleted) {
readyList *rl;

/* Quick returns. */
@@ -548,11 +548,11 @@ static void releaseBlockedEntry(client *c, dictEntry *de, int remove_key) {
dictDelete(c->bstate.keys, key);
}

void signalKeyAsReady(redisDb *db, robj *key, int type) {
void signalKeyAsReady(serverDb *db, robj *key, int type) {
signalKeyAsReadyLogic(db, key, type, 0);
}

void signalDeletedKeyAsReady(redisDb *db, robj *key, int type) {
void signalDeletedKeyAsReady(serverDb *db, robj *key, int type) {
signalKeyAsReadyLogic(db, key, type, 1);
}

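For context on how the two wrappers above are used (a hedged sketch, since the call sites are not part of this diff): a write that makes a blocking-capable key servable flags it as ready, and handleClientsBlockedOnKeys() later drains the ready list. Only the parameter type changes with this PR; call sites read the same.

/* Hypothetical call site, not from the PR: an element was just appended to
 * the list stored at `key`, so clients blocked on it (BLPOP etc.) can be
 * unblocked on the next pass of handleClientsBlockedOnKeys(). */
void examplePushAndSignal(serverDb *db, robj *key) {
    signalKeyAsReady(db, key, OBJ_LIST);
}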
94 changes: 47 additions & 47 deletions src/db.c

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions src/debug.c
@@ -142,7 +142,7 @@ void mixStringObjectDigest(unsigned char *digest, robj *o) {
* Note that this function does not reset the initial 'digest' passed, it
* will continue mixing this object digest to anything that was already
* present. */
void xorObjectDigest(redisDb *db, robj *keyobj, unsigned char *digest, robj *o) {
void xorObjectDigest(serverDb *db, robj *keyobj, unsigned char *digest, robj *o) {
uint32_t aux = htonl(o->type);
mixDigest(digest,&aux,sizeof(aux));
long long expiretime = getExpire(db,keyobj);
@@ -288,7 +288,7 @@ void computeDatasetDigest(unsigned char *final) {
memset(final,0,20); /* Start with a clean result */

for (j = 0; j < server.dbnum; j++) {
redisDb *db = server.db+j;
serverDb *db = server.db+j;
if (kvstoreSize(db->keys) == 0)
continue;
kvstoreIterator *kvs_it = kvstoreIteratorInit(db->keys);
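The comment in the first debug.c hunk is the property that makes computeDatasetDigest() work: each key/value pair is digested on its own and folded into the running digest with XOR, so the result does not depend on the order in which the keyspace is walked. A toy illustration of that property (deliberately using plain XOR folding rather than the server's SHA1-based helpers):

#include <stddef.h>

/* XOR-folding per-item digests is commutative and associative, so any
 * iteration order over the keys produces the same final 20-byte digest. */
static void xorFoldSketch(unsigned char digest[20], const unsigned char item_digest[20]) {
    for (size_t i = 0; i < 20; i++)
        digest[i] ^= item_digest[i];
}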
20 changes: 10 additions & 10 deletions src/defrag.c
@@ -349,7 +349,7 @@ void activeDefragQuickListNodes(quicklist *ql) {
/* when the value has lots of elements, we want to handle it later and not as
* part of the main dictionary scan. this is needed in order to prevent latency
* spikes when handling large items */
void defragLater(redisDb *db, dictEntry *kde) {
void defragLater(serverDb *db, dictEntry *kde) {
sds key = sdsdup(dictGetKey(kde));
listAddNodeTail(db->defrag_later, key);
}
@@ -449,7 +449,7 @@ void scanLaterHash(robj *ob, unsigned long *cursor) {
*cursor = dictScanDefrag(d, *cursor, scanCallbackCountScanned, &defragfns, NULL);
}

void defragQuicklist(redisDb *db, dictEntry *kde) {
void defragQuicklist(serverDb *db, dictEntry *kde) {
robj *ob = dictGetVal(kde);
quicklist *ql = ob->ptr, *newql;
serverAssert(ob->type == OBJ_LIST && ob->encoding == OBJ_ENCODING_QUICKLIST);
@@ -461,7 +461,7 @@ void defragQuicklist(redisDb *db, dictEntry *kde) {
activeDefragQuickListNodes(ql);
}

void defragZsetSkiplist(redisDb *db, dictEntry *kde) {
void defragZsetSkiplist(serverDb *db, dictEntry *kde) {
robj *ob = dictGetVal(kde);
zset *zs = (zset*)ob->ptr;
zset *newzs;
@@ -490,7 +490,7 @@ void defragZsetSkiplist(redisDb *db, dictEntry *kde) {
zs->dict = newdict;
}

void defragHash(redisDb *db, dictEntry *kde) {
void defragHash(serverDb *db, dictEntry *kde) {
robj *ob = dictGetVal(kde);
dict *d, *newd;
serverAssert(ob->type == OBJ_HASH && ob->encoding == OBJ_ENCODING_HT);
@@ -504,7 +504,7 @@ void defragHash(redisDb *db, dictEntry *kde) {
ob->ptr = newd;
}

void defragSet(redisDb *db, dictEntry *kde) {
void defragSet(serverDb *db, dictEntry *kde) {
robj *ob = dictGetVal(kde);
dict *d, *newd;
serverAssert(ob->type == OBJ_SET && ob->encoding == OBJ_ENCODING_HT);
@@ -657,7 +657,7 @@ void* defragStreamConsumerGroup(raxIterator *ri, void *privdata) {
return NULL;
}

void defragStream(redisDb *db, dictEntry *kde) {
void defragStream(serverDb *db, dictEntry *kde) {
robj *ob = dictGetVal(kde);
serverAssert(ob->type == OBJ_STREAM && ob->encoding == OBJ_ENCODING_STREAM);
stream *s = ob->ptr, *news;
@@ -681,7 +681,7 @@ void defragStream(redisDb *db, dictEntry *kde) {
/* Defrag a module key. This is either done immediately or scheduled
* for later. Returns then number of pointers defragged.
*/
void defragModule(redisDb *db, dictEntry *kde) {
void defragModule(serverDb *db, dictEntry *kde) {
robj *obj = dictGetVal(kde);
serverAssert(obj->type == OBJ_MODULE);

@@ -696,7 +696,7 @@ void defragKey(defragCtx *ctx, dictEntry *de) {
robj *newob, *ob;
unsigned char *newzl;
sds newsds;
redisDb *db = ctx->privdata;
serverDb *db = ctx->privdata;
int slot = ctx->slot;
/* Try to defrag the key name. */
newsds = activeDefragSds(keysds);
@@ -884,7 +884,7 @@ static sds defrag_later_current_key = NULL;
static unsigned long defrag_later_cursor = 0;

/* returns 0 if no more work needs to be been done, and 1 if time is up and more work is needed. */
int defragLaterStep(redisDb *db, int slot, long long endtime) {
int defragLaterStep(serverDb *db, int slot, long long endtime) {
unsigned int iterations = 0;
unsigned long long prev_defragged = server.stat_active_defrag_hits;
unsigned long long prev_scanned = server.stat_active_defrag_scanned;
@@ -993,7 +993,7 @@ void activeDefragCycle(void) {
static int defrag_later_item_in_progress = 0;
static int defrag_stage = 0;
static unsigned long defrag_cursor = 0;
static redisDb *db = NULL;
static serverDb *db = NULL;
static long long start_scan, start_stat;
unsigned int iterations = 0;
unsigned long long prev_defragged = server.stat_active_defrag_hits;
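Both defragLaterStep() and activeDefragCycle() in the hunks above follow the same time-budgeted pattern: process a small batch, re-check the clock every few iterations, and report whether work remains so the caller can resume on the next cron beat. A generic sketch of that shape (the names and the check-every-16 interval are illustrative, not the PR's code):

/* Returns 0 when the work is drained, 1 when the deadline was hit first.
 * now_us stands in for the server's microsecond clock (an assumption of
 * this sketch); do_one_unit returns 0 once nothing is left to process. */
static int incrementalStepSketch(long long endtime_us,
                                 long long (*now_us)(void),
                                 int (*do_one_unit)(void)) {
    unsigned int iterations = 0;
    while (do_one_unit()) {
        if (++iterations % 16 == 0 && now_us() >= endtime_us)
            return 1;   /* out of time, more work pending */
    }
    return 0;           /* fully drained */
}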
4 changes: 2 additions & 2 deletions src/evict.c
@@ -143,7 +143,7 @@ void evictionPoolAlloc(void) {
* We insert keys on place in ascending order, so keys with the smaller
* idle time are on the left, and keys with the higher idle time on the
* right. */
int evictionPoolPopulate(redisDb *db, kvstore *samplekvs, struct evictionPoolEntry *pool) {
int evictionPoolPopulate(serverDb *db, kvstore *samplekvs, struct evictionPoolEntry *pool) {
int j, k, count;
dictEntry *samples[server.maxmemory_samples];

@@ -579,7 +579,7 @@ int performEvictions(void) {
static unsigned int next_db = 0;
sds bestkey = NULL;
int bestdbid;
redisDb *db;
serverDb *db;
dictEntry *de;

if (server.maxmemory_policy & (MAXMEMORY_FLAG_LRU|MAXMEMORY_FLAG_LFU) ||
10 changes: 5 additions & 5 deletions src/expire.c
@@ -55,7 +55,7 @@ static double avg_ttl_factor[16] = {0.98, 0.9604, 0.941192, 0.922368, 0.903921,
*
* The parameter 'now' is the current time in milliseconds as is passed
* to the function to avoid too many gettimeofday() syscalls. */
int activeExpireCycleTryExpire(redisDb *db, dictEntry *de, long long now) {
int activeExpireCycleTryExpire(serverDb *db, dictEntry *de, long long now) {
long long t = dictGetSignedIntegerVal(de);
if (now > t) {
enterExecutionUnit(1, 0);
@@ -118,7 +118,7 @@ int activeExpireCycleTryExpire(redisDb *db, dictEntry *de, long long now) {

/* Data used by the expire dict scan callback. */
typedef struct {
redisDb *db;
serverDb *db;
long long now;
unsigned long sampled; /* num keys checked */
unsigned long expired; /* num keys expired */
@@ -242,7 +242,7 @@ void activeExpireCycle(int type) {
data.ttl_sum = 0;
data.ttl_samples = 0;

redisDb *db = server.db+(current_db % server.dbnum);
serverDb *db = server.db+(current_db % server.dbnum);
data.db = db;

int db_done = 0; /* The scan of the current DB is done? */
@@ -429,7 +429,7 @@ void expireSlaveKeys(void) {
int dbid = 0;
while(dbids && dbid < server.dbnum) {
if ((dbids & 1) != 0) {
redisDb *db = server.db+dbid;
serverDb *db = server.db+dbid;
dictEntry *expire = dbFindExpires(db, keyname);
int expired = 0;

@@ -474,7 +474,7 @@

/* Track keys that received an EXPIRE or similar command in the context
* of a writable slave. */
void rememberSlaveKeyWithExpire(redisDb *db, robj *key) {
void rememberSlaveKeyWithExpire(serverDb *db, robj *key) {
if (slaveKeysWithExpire == NULL) {
static dictType dt = {
dictSdsHash, /* hash function */
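The loop in the expireSlaveKeys() hunk above walks a per-key bitmap of database indexes: each set bit marks a db in which a writable replica gave this key an expire. The excerpt cuts off before the bit shift, so here is a standalone sketch of that walk (illustrative names, not the PR's code):

#include <stdint.h>

/* Visit every db index whose bit is set; bit 0 corresponds to db 0. */
static void forEachDbBitSketch(uint64_t dbids, int dbnum, void (*visit)(int dbid)) {
    for (int dbid = 0; dbids != 0 && dbid < dbnum; dbid++, dbids >>= 1) {
        if (dbids & 1)
            visit(dbid);
    }
}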
2 changes: 1 addition & 1 deletion src/lazyfree.c
@@ -185,7 +185,7 @@ void freeObjAsync(robj *key, robj *obj, int dbid) {
/* Empty a Redis DB asynchronously. What the function does actually is to
* create a new empty set of hash tables and scheduling the old ones for
* lazy freeing. */
void emptyDbAsync(redisDb *db) {
void emptyDbAsync(serverDb *db) {
int slot_count_bits = 0;
int flags = KVSTORE_ALLOCATE_DICTS_ON_DEMAND;
if (server.cluster_enabled) {
2 changes: 1 addition & 1 deletion src/module.c
@@ -189,7 +189,7 @@ typedef struct RedisModuleCtx RedisModuleCtx;
/* This represents a Redis key opened with RM_OpenKey(). */
struct RedisModuleKey {
RedisModuleCtx *ctx;
redisDb *db;
serverDb *db;
robj *key; /* Key name object. */
robj *value; /* Value object, or NULL if the key was not found. */
void *iter; /* Iterator. */
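The db field in the struct above is what the module API resolves keys against; from a module's point of view nothing changes with this rename, since RedisModuleKey is opaque. A usage sketch with the existing public API (the calls below are part of the long-standing module API, not something added by this PR):

#include "redismodule.h"

/* Sketch of a module command handler that opens a key in the client's
 * currently selected db, checks whether it exists, and releases the handle. */
int ExampleTypeCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
    if (argc != 2) return RedisModule_WrongArity(ctx);
    RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_READ);
    int type = RedisModule_KeyType(key);   /* REDISMODULE_KEYTYPE_EMPTY if the key is missing */
    RedisModule_CloseKey(key);
    return RedisModule_ReplyWithLongLong(ctx, type);
}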
6 changes: 3 additions & 3 deletions src/multi.c
@@ -274,7 +274,7 @@ void execCommand(client *c) {
typedef struct watchedKey {
listNode node;
robj *key;
redisDb *db;
serverDb *db;
client *client;
unsigned expired:1; /* Flag that we're watching an already expired key. */
} watchedKey;
@@ -377,7 +377,7 @@ int isWatchedKeyExpired(client *c) {

/* "Touch" a key, so that if this key is being WATCHed by some client the
* next EXEC will fail. */
void touchWatchedKey(redisDb *db, robj *key) {
void touchWatchedKey(serverDb *db, robj *key) {
list *clients;
listIter li;
listNode *ln;
@@ -425,7 +425,7 @@ void touchWatchedKey(redisDb *db, robj *key) {
* replaced_with: for SWAPDB, the WATCH should be invalidated if
* the key exists in either of them, and skipped only if it
* doesn't exist in both. */
void touchAllWatchedKeysInDb(redisDb *emptied, redisDb *replaced_with) {
void touchAllWatchedKeysInDb(serverDb *emptied, serverDb *replaced_with) {
listIter li;
listNode *ln;
dictEntry *de;
2 changes: 1 addition & 1 deletion src/object.c
@@ -1245,7 +1245,7 @@ struct redisMemOverhead *getMemoryOverheadData(void) {
mem_total+=mh->functions_caches;

for (j = 0; j < server.dbnum; j++) {
redisDb *db = server.db+j;
serverDb *db = server.db+j;
if (!kvstoreNumAllocatedDicts(db->keys)) continue;

unsigned long long keyscount = kvstoreSize(db->keys);
4 changes: 2 additions & 2 deletions src/rdb.c
@@ -1305,7 +1305,7 @@ ssize_t rdbSaveDb(rio *rdb, int dbid, int rdbflags, long *key_counter) {
static long long info_updated_time = 0;
char *pname = (rdbflags & RDBFLAGS_AOF_PREAMBLE) ? "AOF rewrite" : "RDB";

redisDb *db = server.db + dbid;
serverDb *db = server.db + dbid;
unsigned long long int db_size = kvstoreSize(db->keys);
if (db_size == 0) return 0;

@@ -3033,7 +3033,7 @@ int rdbLoadRioWithLoadingCtx(rio *rdb, int rdbflags, rdbSaveInfo *rsi, rdbLoadin
int type, rdbver;
uint64_t db_size = 0, expires_size = 0;
int should_expand_db = 0;
redisDb *db = rdb_loading_ctx->dbarray+0;
serverDb *db = rdb_loading_ctx->dbarray+0;
char buf[1024];
int error;
long long empty_keys_skipped = 0;
8 changes: 4 additions & 4 deletions src/replication.c
@@ -1840,13 +1840,13 @@ static int useDisklessLoad(void) {
/* Helper function for readSyncBulkPayload() to initialize tempDb
* before socket-loading the new db from master. The tempDb may be populated
* by swapMainDbWithTempDb or freed by disklessLoadDiscardTempDb later. */
redisDb *disklessLoadInitTempDb(void) {
serverDb *disklessLoadInitTempDb(void) {
return initTempDb();
}

/* Helper function for readSyncBulkPayload() to discard our tempDb
* when the loading succeeded or failed. */
void disklessLoadDiscardTempDb(redisDb *tempDb) {
void disklessLoadDiscardTempDb(serverDb *tempDb) {
discardTempDb(tempDb, replicationEmptyDbCallback);
}

@@ -1870,7 +1870,7 @@ void readSyncBulkPayload(connection *conn) {
char buf[PROTO_IOBUF_LEN];
ssize_t nread, readlen, nwritten;
int use_diskless_load = useDisklessLoad();
redisDb *diskless_load_tempDb = NULL;
serverDb *diskless_load_tempDb = NULL;
functionsLibCtx* temp_functions_lib_ctx = NULL;
int empty_db_flags = server.repl_slave_lazy_flush ? EMPTYDB_ASYNC :
EMPTYDB_NO_FLAGS;
@@ -2088,7 +2088,7 @@ void readSyncBulkPayload(connection *conn) {
rdbSaveInfo rsi = RDB_SAVE_INFO_INIT;
if (use_diskless_load) {
rio rdb;
redisDb *dbarray;
serverDb *dbarray;
functionsLibCtx* functions_lib_ctx;
int asyncLoading = 0;

6 changes: 3 additions & 3 deletions src/server.c
@@ -1082,7 +1082,7 @@ void databasesCron(void) {
if (dbs_per_call > server.dbnum) dbs_per_call = server.dbnum;

for (j = 0; j < dbs_per_call; j++) {
redisDb *db = &server.db[resize_db % server.dbnum];
serverDb *db = &server.db[resize_db % server.dbnum];
kvstoreTryResizeDicts(db->keys, CRON_DICTS_PER_DB);
kvstoreTryResizeDicts(db->expires, CRON_DICTS_PER_DB);
resize_db++;
@@ -1092,7 +1092,7 @@
if (server.activerehashing) {
uint64_t elapsed_us = 0;
for (j = 0; j < dbs_per_call; j++) {
redisDb *db = &server.db[rehash_db % server.dbnum];
serverDb *db = &server.db[rehash_db % server.dbnum];
elapsed_us += kvstoreIncrementallyRehash(db->keys, INCREMENTAL_REHASHING_THRESHOLD_US - elapsed_us);
if (elapsed_us >= INCREMENTAL_REHASHING_THRESHOLD_US)
break;
@@ -2655,7 +2655,7 @@ void initServer(void) {
strerror(errno));
exit(1);
}
server.db = zmalloc(sizeof(redisDb)*server.dbnum);
server.db = zmalloc(sizeof(server)*server.dbnum);

/* Create the Redis databases, and initialize other internal state. */
int slot_count_bits = 0;
Expand Down
Loading
Loading