diff --git a/README.md b/README.md index 6f95dc923..13c2de99e 100644 --- a/README.md +++ b/README.md @@ -60,7 +60,7 @@ All other configuration options behave as you'd expect. Your existing configura Building KeyDB -------------- -KeyDB can be compiled and is tested for use on Linux. KeyDB currently relies on SO_REUSEADDR's load balancing behavior which is available only in Linux. When we support marshalling connections across threads we plan to support other operating systems such as FreeBSD. +KeyDB can be compiled and is tested for use on Linux. KeyDB currently relies on SO_REUSEPORT's load balancing behavior which is available only in Linux. When we support marshalling connections across threads we plan to support other operating systems such as FreeBSD. Install dependencies: diff --git a/src/Makefile b/src/Makefile index 345e1c04b..6ba411ecc 100644 --- a/src/Makefile +++ b/src/Makefile @@ -21,7 +21,7 @@ NODEPS:=clean distclean # Default settings STD=-std=c99 -pedantic -DREDIS_STATIC='' -CXX_STD=-std=c++14 -pedantic -fno-rtti -fno-exceptions +CXX_STD=-std=c++14 -pedantic -fno-rtti -fno-exceptions -D__STDC_FORMAT_MACROS ifneq (,$(findstring clang,$(CC))) ifneq (,$(findstring FreeBSD,$(uname_S))) STD+=-Wno-c11-extensions diff --git a/src/acl.cpp b/src/acl.cpp index 5dfcf609a..d7d352d42 100644 --- a/src/acl.cpp +++ b/src/acl.cpp @@ -227,7 +227,7 @@ void ACLFreeUser(user *u) { void ACLFreeUserAndKillClients(user *u) { listIter li; listNode *ln; - listRewind(server.clients,&li); + listRewind(g_pserver->clients,&li); while ((ln = listNext(&li)) != NULL) { client *c = (client*)listNodeValue(ln); if (c->puser == u) { @@ -334,7 +334,7 @@ void ACLSetUserCommandBit(user *u, unsigned long id, int value) { int ACLSetUserCommandBitsForCategory(user *u, const char *category, int value) { uint64_t cflag = ACLGetCommandCategoryFlagByName(category); if (!cflag) return C_ERR; - dictIterator *di = dictGetIterator(server.orig_commands); + dictIterator *di = 
dictGetIterator(g_pserver->orig_commands); dictEntry *de; while ((de = dictNext(di)) != NULL) { struct redisCommand *cmd = (redisCommand*)dictGetVal(de); @@ -359,7 +359,7 @@ int ACLCountCategoryBitsForUser(user *u, unsigned long *on, unsigned long *off, if (!cflag) return C_ERR; *on = *off = 0; - dictIterator *di = dictGetIterator(server.orig_commands); + dictIterator *di = dictGetIterator(g_pserver->orig_commands); dictEntry *de; while ((de = dictNext(di)) != NULL) { struct redisCommand *cmd = (redisCommand*)dictGetVal(de); @@ -428,7 +428,7 @@ sds ACLDescribeUserCommandRules(user *u) { } /* Fix the final ACLs with single commands differences. */ - dictIterator *di = dictGetIterator(server.orig_commands); + dictIterator *di = dictGetIterator(g_pserver->orig_commands); dictEntry *de; while ((de = dictNext(di)) != NULL) { struct redisCommand *cmd = (redisCommand*)dictGetVal(de); @@ -533,7 +533,7 @@ sds ACLDescribeUser(user *u) { struct redisCommand *ACLLookupCommand(const char *name) { struct redisCommand *cmd; sds sdsname = sdsnew(name); - cmd = (redisCommand*)dictFetchValue(server.orig_commands, sdsname); + cmd = (redisCommand*)dictFetchValue(g_pserver->orig_commands, sdsname); sdsfree(sdsname); return cmd; } @@ -1194,7 +1194,7 @@ sds ACLLoadFromFile(const char *filename) { if (argv == NULL) { errors = sdscatprintf(errors, "%s:%d: unbalanced quotes in acl line. ", - server.acl_filename, linenum); + g_pserver->acl_filename, linenum); continue; } @@ -1208,7 +1208,7 @@ sds ACLLoadFromFile(const char *filename) { if (strcmp(argv[0],"user") || argc < 2) { errors = sdscatprintf(errors, "%s:%d should start with user keyword followed " - "by the username. ", server.acl_filename, + "by the username. ", g_pserver->acl_filename, linenum); sdsfreesplitres(argv,argc); continue; @@ -1223,7 +1223,7 @@ sds ACLLoadFromFile(const char *filename) { const char *errmsg = ACLSetUserStringError(); errors = sdscatprintf(errors, "%s:%d: %s. 
", - server.acl_filename, linenum, errmsg); + g_pserver->acl_filename, linenum, errmsg); continue; } } @@ -1349,7 +1349,7 @@ int ACLSaveToFile(const char *filename) { * The function will just exit with an error if the user is trying to mix * both the loading methods. */ void ACLLoadUsersAtStartup(void) { - if (server.acl_filename[0] != '\0' && listLength(UsersToLoad) != 0) { + if (g_pserver->acl_filename[0] != '\0' && listLength(UsersToLoad) != 0) { serverLog(LL_WARNING, "Configuring Redis with users defined in redis.conf and at " "the same setting an ACL file path is invalid. This setup " @@ -1365,8 +1365,8 @@ void ACLLoadUsersAtStartup(void) { exit(1); } - if (server.acl_filename[0] != '\0') { - sds errors = ACLLoadFromFile(server.acl_filename); + if (g_pserver->acl_filename[0] != '\0') { + sds errors = ACLLoadFromFile(g_pserver->acl_filename); if (errors) { serverLog(LL_WARNING, "Aborting Redis startup because of ACL errors: %s", errors); @@ -1526,13 +1526,13 @@ void aclCommand(client *c) { } else { addReplyNull(c); } - } else if (server.acl_filename[0] == '\0' && + } else if (g_pserver->acl_filename[0] == '\0' && (!strcasecmp(sub,"load") || !strcasecmp(sub,"save"))) { addReplyError(c,"This Redis instance is not configured to use an ACL file. 
You may want to specify users via the ACL SETUSER command and then issue a CONFIG REWRITE (assuming you have a Redis configuration file set) in order to store users in the Redis configuration."); return; } else if (!strcasecmp(sub,"load") && c->argc == 2) { - sds errors = ACLLoadFromFile(server.acl_filename); + sds errors = ACLLoadFromFile(g_pserver->acl_filename); if (errors == NULL) { addReply(c,shared.ok); } else { @@ -1540,7 +1540,7 @@ void aclCommand(client *c) { sdsfree(errors); } } else if (!strcasecmp(sub,"save") && c->argc == 2) { - if (ACLSaveToFile(server.acl_filename) == C_OK) { + if (ACLSaveToFile(g_pserver->acl_filename) == C_OK) { addReply(c,shared.ok); } else { addReplyError(c,"There was an error trying to save the ACLs. " @@ -1561,7 +1561,7 @@ void aclCommand(client *c) { } int arraylen = 0; void *dl = addReplyDeferredLen(c); - dictIterator *di = dictGetIterator(server.orig_commands); + dictIterator *di = dictGetIterator(g_pserver->orig_commands); dictEntry *de; while ((de = dictNext(di)) != NULL) { struct redisCommand *cmd = (redisCommand*)dictGetVal(de); diff --git a/src/ae.cpp b/src/ae.cpp index 244300fed..99f09d49f 100644 --- a/src/ae.cpp +++ b/src/ae.cpp @@ -46,10 +46,8 @@ #include "ae.h" #include "fastlock.h" -extern "C" { #include "zmalloc.h" #include "config.h" -} #ifdef USE_MUTEX thread_local int cOwnLock = 0; @@ -209,7 +207,7 @@ int aeCreateRemoteFileEvent(aeEventLoop *eventLoop, int fd, int mask, cmd.clientData = clientData; cmd.pctl = nullptr; if (fSynchronous) - cmd.pctl = new aeCommandControl(); + cmd.pctl = new (MALLOC_LOCAL) aeCommandControl(); std::unique_lock ulock(cmd.pctl->mutexcv, std::defer_lock); if (fSynchronous) @@ -257,10 +255,10 @@ int aePostFunction(aeEventLoop *eventLoop, std::function fn, bool fSynch aeCommand cmd; cmd.op = AE_ASYNC_OP::PostCppFunction; - cmd.pfn = new std::function(fn); + cmd.pfn = new (MALLOC_LOCAL) std::function(fn); cmd.pctl = nullptr; if (fSynchronous) - cmd.pctl = new aeCommandControl(); + 
cmd.pctl = new (MALLOC_LOCAL) aeCommandControl(); std::unique_lock ulock(cmd.pctl->mutexcv, std::defer_lock); if (fSynchronous) cmd.pctl->mutexcv.lock(); diff --git a/src/aof.cpp b/src/aof.cpp index 049e75e93..19c6c4a12 100644 --- a/src/aof.cpp +++ b/src/aof.cpp @@ -65,14 +65,14 @@ typedef struct aofrwblock { } aofrwblock; /* This function free the old AOF rewrite buffer if needed, and initialize - * a fresh new one. It tests for server.aof_rewrite_buf_blocks equal to NULL + * a fresh new one. It tests for g_pserver->aof_rewrite_buf_blocks equal to NULL * so can be used for the first initialization as well. */ void aofRewriteBufferReset(void) { - if (server.aof_rewrite_buf_blocks) - listRelease(server.aof_rewrite_buf_blocks); + if (g_pserver->aof_rewrite_buf_blocks) + listRelease(g_pserver->aof_rewrite_buf_blocks); - server.aof_rewrite_buf_blocks = listCreate(); - listSetFreeMethod(server.aof_rewrite_buf_blocks,zfree); + g_pserver->aof_rewrite_buf_blocks = listCreate(); + listSetFreeMethod(g_pserver->aof_rewrite_buf_blocks,zfree); } /* Return the current size of the AOF rewrite buffer. */ @@ -81,7 +81,7 @@ unsigned long aofRewriteBufferSize(void) { listIter li; unsigned long size = 0; - listRewind(server.aof_rewrite_buf_blocks,&li); + listRewind(g_pserver->aof_rewrite_buf_blocks,&li); while((ln = listNext(&li))) { aofrwblock *block = (aofrwblock*)listNodeValue(ln); size += block->used; @@ -104,28 +104,28 @@ void aofChildWriteDiffData(aeEventLoop *el, int fd, void *privdata, int mask) { UNUSED(mask); while(1) { - ln = listFirst(server.aof_rewrite_buf_blocks); + ln = listFirst(g_pserver->aof_rewrite_buf_blocks); block = (aofrwblock*)(ln ? 
ln->value : NULL); - if (server.aof_stop_sending_diff || !block) { - aeDeleteFileEvent(el,server.aof_pipe_write_data_to_child, + if (g_pserver->aof_stop_sending_diff || !block) { + aeDeleteFileEvent(el,g_pserver->aof_pipe_write_data_to_child, AE_WRITABLE); return; } if (block->used > 0) { - nwritten = write(server.aof_pipe_write_data_to_child, + nwritten = write(g_pserver->aof_pipe_write_data_to_child, block->buf,block->used); if (nwritten <= 0) return; memmove(block->buf,block->buf+nwritten,block->used-nwritten); block->used -= nwritten; block->free += nwritten; } - if (block->used == 0) listDelNode(server.aof_rewrite_buf_blocks,ln); + if (block->used == 0) listDelNode(g_pserver->aof_rewrite_buf_blocks,ln); } } /* Append data to the AOF rewrite buffer, allocating new blocks if needed. */ void aofRewriteBufferAppend(unsigned char *s, unsigned long len) { - listNode *ln = listLast(server.aof_rewrite_buf_blocks); + listNode *ln = listLast(g_pserver->aof_rewrite_buf_blocks); aofrwblock *block = (aofrwblock*)(ln ? ln->value : NULL); while(len) { @@ -148,11 +148,11 @@ void aofRewriteBufferAppend(unsigned char *s, unsigned long len) { block = (aofrwblock*)zmalloc(sizeof(*block), MALLOC_LOCAL); block->free = AOF_RW_BUF_BLOCK_SIZE; block->used = 0; - listAddNodeTail(server.aof_rewrite_buf_blocks,block); + listAddNodeTail(g_pserver->aof_rewrite_buf_blocks,block); /* Log every time we cross more 10 or 100 blocks, respectively * as a notice or warning. */ - numblocks = listLength(server.aof_rewrite_buf_blocks); + numblocks = listLength(g_pserver->aof_rewrite_buf_blocks); if (((numblocks+1) % 10) == 0) { int level = ((numblocks+1) % 100) == 0 ? LL_WARNING : LL_NOTICE; @@ -164,8 +164,8 @@ void aofRewriteBufferAppend(unsigned char *s, unsigned long len) { /* Install a file event to send data to the rewrite child if there is * not one already. 
*/ - if (aeGetFileEvents(serverTL->el,server.aof_pipe_write_data_to_child) == 0) { - aeCreateFileEvent(serverTL->el, server.aof_pipe_write_data_to_child, + if (aeGetFileEvents(serverTL->el,g_pserver->aof_pipe_write_data_to_child) == 0) { + aeCreateFileEvent(serverTL->el, g_pserver->aof_pipe_write_data_to_child, AE_WRITABLE, aofChildWriteDiffData, NULL); } } @@ -178,7 +178,7 @@ ssize_t aofRewriteBufferWrite(int fd) { listIter li; ssize_t count = 0; - listRewind(server.aof_rewrite_buf_blocks,&li); + listRewind(g_pserver->aof_rewrite_buf_blocks,&li); while((ln = listNext(&li))) { aofrwblock *block = (aofrwblock*)listNodeValue(ln); ssize_t nwritten; @@ -209,18 +209,18 @@ void aof_background_fsync(int fd) { void killAppendOnlyChild(void) { int statloc; /* No AOFRW child? return. */ - if (server.aof_child_pid == -1) return; + if (g_pserver->aof_child_pid == -1) return; /* Kill AOFRW child, wait for child exit. */ serverLog(LL_NOTICE,"Killing running AOF rewrite child: %ld", - (long) server.aof_child_pid); - if (kill(server.aof_child_pid,SIGUSR1) != -1) { - while(wait3(&statloc,0,NULL) != server.aof_child_pid); + (long) g_pserver->aof_child_pid); + if (kill(g_pserver->aof_child_pid,SIGUSR1) != -1) { + while(wait3(&statloc,0,NULL) != g_pserver->aof_child_pid); } /* Reset the buffer accumulating changes while the child saves. */ aofRewriteBufferReset(); - aofRemoveTempFile(server.aof_child_pid); - server.aof_child_pid = -1; - server.aof_rewrite_time_start = -1; + aofRemoveTempFile(g_pserver->aof_child_pid); + g_pserver->aof_child_pid = -1; + g_pserver->aof_rewrite_time_start = -1; /* Close pipes used for IPC between the two processes. */ aofClosePipes(); closeChildInfoPipe(); @@ -230,14 +230,14 @@ void killAppendOnlyChild(void) { /* Called when the user switches from "appendonly yes" to "appendonly no" * at runtime using the CONFIG command. 
*/ void stopAppendOnly(void) { - serverAssert(server.aof_state != AOF_OFF); + serverAssert(g_pserver->aof_state != AOF_OFF); flushAppendOnlyFile(1); - redis_fsync(server.aof_fd); - close(server.aof_fd); + redis_fsync(g_pserver->aof_fd); + close(g_pserver->aof_fd); - server.aof_fd = -1; - server.aof_selected_db = -1; - server.aof_state = AOF_OFF; + g_pserver->aof_fd = -1; + g_pserver->aof_selected_db = -1; + g_pserver->aof_state = AOF_OFF; killAppendOnlyChild(); } @@ -247,27 +247,27 @@ int startAppendOnly(void) { char cwd[MAXPATHLEN]; /* Current working dir path for error messages. */ int newfd; - newfd = open(server.aof_filename,O_WRONLY|O_APPEND|O_CREAT,0644); - serverAssert(server.aof_state == AOF_OFF); + newfd = open(g_pserver->aof_filename,O_WRONLY|O_APPEND|O_CREAT,0644); + serverAssert(g_pserver->aof_state == AOF_OFF); if (newfd == -1) { char *cwdp = getcwd(cwd,MAXPATHLEN); serverLog(LL_WARNING, "Redis needs to enable the AOF but can't open the " "append only file %s (in server root dir %s): %s", - server.aof_filename, + g_pserver->aof_filename, cwdp ? cwdp : "unknown", strerror(errno)); return C_ERR; } - if (server.rdb_child_pid != -1) { - server.aof_rewrite_scheduled = 1; + if (g_pserver->rdb_child_pid != -1) { + g_pserver->aof_rewrite_scheduled = 1; serverLog(LL_WARNING,"AOF was enabled but there is already a child process saving an RDB file on disk. An AOF background was scheduled to start when possible."); } else { /* If there is a pending AOF rewrite, we need to switch it off and * start a new one: the old one cannot be reused because it is not * accumulating the AOF buffer. */ - if (server.aof_child_pid != -1) { + if (g_pserver->aof_child_pid != -1) { serverLog(LL_WARNING,"AOF was enabled but there is already an AOF rewriting in background. 
Stopping background AOF and starting a rewrite now."); killAppendOnlyChild(); } @@ -279,9 +279,9 @@ int startAppendOnly(void) { } /* We correctly switched on AOF, now wait for the rewrite to be complete * in order to append data on disk. */ - server.aof_state = AOF_WAIT_REWRITE; - server.aof_last_fsync = server.unixtime; - server.aof_fd = newfd; + g_pserver->aof_state = AOF_WAIT_REWRITE; + g_pserver->aof_last_fsync = g_pserver->unixtime; + g_pserver->aof_fd = newfd; return C_OK; } @@ -337,29 +337,29 @@ void flushAppendOnlyFile(int force) { int sync_in_progress = 0; mstime_t latency; - if (sdslen(server.aof_buf) == 0) return; + if (sdslen(g_pserver->aof_buf) == 0) return; - if (server.aof_fsync == AOF_FSYNC_EVERYSEC) + if (g_pserver->aof_fsync == AOF_FSYNC_EVERYSEC) sync_in_progress = bioPendingJobsOfType(BIO_AOF_FSYNC) != 0; - if (server.aof_fsync == AOF_FSYNC_EVERYSEC && !force) { + if (g_pserver->aof_fsync == AOF_FSYNC_EVERYSEC && !force) { /* With this append fsync policy we do background fsyncing. * If the fsync is still in progress we can try to delay * the write for a couple of seconds. */ if (sync_in_progress) { - if (server.aof_flush_postponed_start == 0) { + if (g_pserver->aof_flush_postponed_start == 0) { /* No previous write postponing, remember that we are * postponing the flush and return. */ - server.aof_flush_postponed_start = server.unixtime; + g_pserver->aof_flush_postponed_start = g_pserver->unixtime; return; - } else if (server.unixtime - server.aof_flush_postponed_start < 2) { + } else if (g_pserver->unixtime - g_pserver->aof_flush_postponed_start < 2) { /* We were already waiting for fsync to finish, but for less * than two seconds this is still ok. Postpone again. */ return; } /* Otherwise fall trough, and go write since we can't wait * over two seconds. */ - server.aof_delayed_fsync++; + g_pserver->aof_delayed_fsync++; serverLog(LL_NOTICE,"Asynchronous AOF fsync is taking too long (disk is busy?). 
Writing the AOF buffer without waiting for fsync to complete, this may slow down Redis."); } } @@ -370,7 +370,7 @@ void flushAppendOnlyFile(int force) { * or alike */ latencyStartMonitor(latency); - nwritten = aofWrite(server.aof_fd,server.aof_buf,sdslen(server.aof_buf)); + nwritten = aofWrite(g_pserver->aof_fd,g_pserver->aof_buf,sdslen(g_pserver->aof_buf)); latencyEndMonitor(latency); /* We want to capture different events for delayed writes: * when the delay happens with a pending fsync, or with a saving child @@ -379,7 +379,7 @@ void flushAppendOnlyFile(int force) { * useful for graphing / monitoring purposes. */ if (sync_in_progress) { latencyAddSampleIfNeeded("aof-write-pending-fsync",latency); - } else if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) { + } else if (g_pserver->aof_child_pid != -1 || g_pserver->rdb_child_pid != -1) { latencyAddSampleIfNeeded("aof-write-active-child",latency); } else { latencyAddSampleIfNeeded("aof-write-alone",latency); @@ -387,16 +387,16 @@ void flushAppendOnlyFile(int force) { latencyAddSampleIfNeeded("aof-write",latency); /* We performed the write so reset the postponed flush sentinel to zero. */ - server.aof_flush_postponed_start = 0; + g_pserver->aof_flush_postponed_start = 0; - if (nwritten != (ssize_t)sdslen(server.aof_buf)) { + if (nwritten != (ssize_t)sdslen(g_pserver->aof_buf)) { static time_t last_write_error_log = 0; int can_log = 0; /* Limit logging rate to 1 line per AOF_WRITE_LOG_ERROR_RATE seconds. */ - if ((server.unixtime - last_write_error_log) > AOF_WRITE_LOG_ERROR_RATE) { + if ((g_pserver->unixtime - last_write_error_log) > AOF_WRITE_LOG_ERROR_RATE) { can_log = 1; - last_write_error_log = server.unixtime; + last_write_error_log = g_pserver->unixtime; } /* Log the AOF write error and record the error code. 
*/ @@ -404,7 +404,7 @@ void flushAppendOnlyFile(int force) { if (can_log) { serverLog(LL_WARNING,"Error writing to the AOF file: %s", strerror(errno)); - server.aof_last_write_errno = errno; + g_pserver->aof_last_write_errno = errno; } } else { if (can_log) { @@ -412,10 +412,10 @@ void flushAppendOnlyFile(int force) { "the AOF file: (nwritten=%lld, " "expected=%lld)", (long long)nwritten, - (long long)sdslen(server.aof_buf)); + (long long)sdslen(g_pserver->aof_buf)); } - if (ftruncate(server.aof_fd, server.aof_current_size) == -1) { + if (ftruncate(g_pserver->aof_fd, g_pserver->aof_current_size) == -1) { if (can_log) { serverLog(LL_WARNING, "Could not remove short write " "from the append-only file. Redis may refuse " @@ -427,11 +427,11 @@ void flushAppendOnlyFile(int force) { * -1 since there is no longer partial data into the AOF. */ nwritten = -1; } - server.aof_last_write_errno = ENOSPC; + g_pserver->aof_last_write_errno = ENOSPC; } /* Handle the AOF write error. */ - if (server.aof_fsync == AOF_FSYNC_ALWAYS) { + if (g_pserver->aof_fsync == AOF_FSYNC_ALWAYS) { /* We can't recover when the fsync policy is ALWAYS since the * reply for the client is already in the output buffers, and we * have the contract with the user that on acknowledged write data @@ -442,55 +442,55 @@ void flushAppendOnlyFile(int force) { /* Recover from failed write leaving data into the buffer. However * set an error to stop accepting writes as long as the error * condition is not cleared. */ - server.aof_last_write_status = C_ERR; + g_pserver->aof_last_write_status = C_ERR; /* Trim the sds buffer if there was a partial write, and there * was no way to undo it with ftruncate(2). */ if (nwritten > 0) { - server.aof_current_size += nwritten; - sdsrange(server.aof_buf,nwritten,-1); + g_pserver->aof_current_size += nwritten; + sdsrange(g_pserver->aof_buf,nwritten,-1); } return; /* We'll try again on the next call... */ } } else { /* Successful write(2). 
If AOF was in error state, restore the * OK state and log the event. */ - if (server.aof_last_write_status == C_ERR) { + if (g_pserver->aof_last_write_status == C_ERR) { serverLog(LL_WARNING, "AOF write error looks solved, Redis can write again."); - server.aof_last_write_status = C_OK; + g_pserver->aof_last_write_status = C_OK; } } - server.aof_current_size += nwritten; + g_pserver->aof_current_size += nwritten; /* Re-use AOF buffer when it is small enough. The maximum comes from the * arena size of 4k minus some overhead (but is otherwise arbitrary). */ - if ((sdslen(server.aof_buf)+sdsavail(server.aof_buf)) < 4000) { - sdsclear(server.aof_buf); + if ((sdslen(g_pserver->aof_buf)+sdsavail(g_pserver->aof_buf)) < 4000) { + sdsclear(g_pserver->aof_buf); } else { - sdsfree(server.aof_buf); - server.aof_buf = sdsempty(); + sdsfree(g_pserver->aof_buf); + g_pserver->aof_buf = sdsempty(); } /* Don't fsync if no-appendfsync-on-rewrite is set to yes and there are * children doing I/O in the background. */ - if (server.aof_no_fsync_on_rewrite && - (server.aof_child_pid != -1 || server.rdb_child_pid != -1)) + if (g_pserver->aof_no_fsync_on_rewrite && + (g_pserver->aof_child_pid != -1 || g_pserver->rdb_child_pid != -1)) return; /* Perform the fsync if needed. */ - if (server.aof_fsync == AOF_FSYNC_ALWAYS) { + if (g_pserver->aof_fsync == AOF_FSYNC_ALWAYS) { /* redis_fsync is defined as fdatasync() for Linux in order to avoid * flushing metadata. 
*/ latencyStartMonitor(latency); - redis_fsync(server.aof_fd); /* Let's try to get this data on the disk */ + redis_fsync(g_pserver->aof_fd); /* Let's try to get this data on the disk */ latencyEndMonitor(latency); latencyAddSampleIfNeeded("aof-fsync-always",latency); - server.aof_last_fsync = server.unixtime; - } else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC && - server.unixtime > server.aof_last_fsync)) { - if (!sync_in_progress) aof_background_fsync(server.aof_fd); - server.aof_last_fsync = server.unixtime; + g_pserver->aof_last_fsync = g_pserver->unixtime; + } else if ((g_pserver->aof_fsync == AOF_FSYNC_EVERYSEC && + g_pserver->unixtime > g_pserver->aof_last_fsync)) { + if (!sync_in_progress) aof_background_fsync(g_pserver->aof_fd); + g_pserver->aof_last_fsync = g_pserver->unixtime; } } @@ -562,13 +562,13 @@ void feedAppendOnlyFile(struct redisCommand *cmd, int dictid, robj **argv, int a /* The DB this command was targeting is not the same as the last command * we appended. To issue a SELECT command is needed. 
*/ - if (dictid != server.aof_selected_db) { + if (dictid != g_pserver->aof_selected_db) { char seldb[64]; snprintf(seldb,sizeof(seldb),"%d",dictid); buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n", (unsigned long)strlen(seldb),seldb); - server.aof_selected_db = dictid; + g_pserver->aof_selected_db = dictid; } if (cmd->proc == expireCommand || cmd->proc == pexpireCommand || @@ -594,10 +594,10 @@ void feedAppendOnlyFile(struct redisCommand *cmd, int dictid, robj **argv, int a } serverAssert(!(exarg && pxarg)); if (exarg) - buf = catAppendOnlyExpireAtCommand(buf,server.expireCommand,argv[1], + buf = catAppendOnlyExpireAtCommand(buf,cserver.expireCommand,argv[1], exarg); if (pxarg) - buf = catAppendOnlyExpireAtCommand(buf,server.pexpireCommand,argv[1], + buf = catAppendOnlyExpireAtCommand(buf,cserver.pexpireCommand,argv[1], pxarg); } else { /* All the other commands don't need translation or need the @@ -609,14 +609,14 @@ void feedAppendOnlyFile(struct redisCommand *cmd, int dictid, robj **argv, int a /* Append to the AOF buffer. This will be flushed on disk just before * of re-entering the event loop, so before the client will get a * positive reply about the operation performed. */ - if (server.aof_state == AOF_ON) - server.aof_buf = sdscatlen(server.aof_buf,buf,sdslen(buf)); + if (g_pserver->aof_state == AOF_ON) + g_pserver->aof_buf = sdscatlen(g_pserver->aof_buf,buf,sdslen(buf)); /* If a background append only file rewriting is in progress we want to * accumulate the differences between the child DB and the current one * in a buffer, so that when the child process will do its work we * can append the differences to the new append only file. 
*/ - if (server.aof_child_pid != -1) + if (g_pserver->aof_child_pid != -1) aofRewriteBufferAppend((unsigned char*)buf,sdslen(buf)); sdsfree(buf); @@ -686,7 +686,7 @@ int loadAppendOnlyFile(char *filename) { struct client *fakeClient; FILE *fp = fopen(filename,"r"); struct redis_stat sb; - int old_aof_state = server.aof_state; + int old_aof_state = g_pserver->aof_state; long loops = 0; off_t valid_up_to = 0; /* Offset of latest well-formed command loaded. */ off_t valid_before_multi = 0; /* Offset before MULTI command loaded. */ @@ -702,14 +702,14 @@ int loadAppendOnlyFile(char *filename) { * a zero length file at startup, that will remain like that if no write * operation is received. */ if (fp && redis_fstat(fileno(fp),&sb) != -1 && sb.st_size == 0) { - server.aof_current_size = 0; + g_pserver->aof_current_size = 0; fclose(fp); return C_ERR; } /* Temporarily disable AOF, to prevent EXEC from feeding a MULTI * to the same file we're about to read. */ - server.aof_state = AOF_OFF; + g_pserver->aof_state = AOF_OFF; fakeClient = createFakeClient(); startLoading(fp); @@ -748,7 +748,7 @@ int loadAppendOnlyFile(char *filename) { /* Serve the clients from time to time */ if (!(loops++ % 1000)) { loadingProgress(ftello(fp)); - processEventsWhileBlocked(serverTL - server.rgthreadvar); + processEventsWhileBlocked(serverTL - g_pserver->rgthreadvar); } if (fgets(buf,sizeof(buf),fp) == NULL) { @@ -798,7 +798,7 @@ int loadAppendOnlyFile(char *filename) { exit(1); } - if (cmd == server.multiCommand) valid_before_multi = valid_up_to; + if (cmd == cserver.multiCommand) valid_before_multi = valid_up_to; /* Run the command in the context of a fake client */ fakeClient->cmd = cmd; @@ -821,7 +821,7 @@ int loadAppendOnlyFile(char *filename) { * argv/argc of the client instead of the local variables. 
*/ freeFakeClientArgv(fakeClient); fakeClient->cmd = NULL; - if (server.aof_load_truncated) valid_up_to = ftello(fp); + if (g_pserver->aof_load_truncated) valid_up_to = ftello(fp); } /* This point can only be reached when EOF is reached without errors. @@ -838,10 +838,10 @@ int loadAppendOnlyFile(char *filename) { loaded_ok: /* DB loaded, cleanup and return C_OK to the caller. */ fclose(fp); freeFakeClient(fakeClient); - server.aof_state = old_aof_state; + g_pserver->aof_state = old_aof_state; stopLoading(); aofUpdateCurrentSize(); - server.aof_rewrite_base_size = server.aof_current_size; + g_pserver->aof_rewrite_base_size = g_pserver->aof_current_size; return C_OK; readerr: /* Read error. If feof(fp) is true, fall through to unexpected EOF. */ @@ -852,7 +852,7 @@ int loadAppendOnlyFile(char *filename) { } uxeof: /* Unexpected AOF end of file. */ - if (server.aof_load_truncated) { + if (g_pserver->aof_load_truncated) { serverLog(LL_WARNING,"!!! Warning: short read while loading the AOF file !!!"); serverLog(LL_WARNING,"!!! Truncating the AOF at offset %llu !!!", (unsigned long long) valid_up_to); @@ -866,7 +866,7 @@ int loadAppendOnlyFile(char *filename) { } else { /* Make sure the AOF file descriptor points to the end of the * file after the truncate call. */ - if (server.aof_fd != -1 && lseek(server.aof_fd,0,SEEK_END) == -1) { + if (g_pserver->aof_fd != -1 && lseek(g_pserver->aof_fd,0,SEEK_END) == -1) { serverLog(LL_WARNING,"Can't seek the end of the AOF file: %s", strerror(errno)); } else { @@ -877,7 +877,7 @@ int loadAppendOnlyFile(char *filename) { } } if (fakeClient) freeFakeClient(fakeClient); /* avoid valgrind warning */ - serverLog(LL_WARNING,"Unexpected end of file reading the append only file. You can: 1) Make a backup of your AOF file, then use ./keydb-check-aof --fix . 2) Alternatively you can set the 'aof-load-truncated' configuration option to yes and restart the server."); + serverLog(LL_WARNING,"Unexpected end of file reading the append only file. 
You can: 1) Make a backup of your AOF file, then use ./keydb-check-aof --fix . 2) Alternatively you can set the 'aof-load-truncated' configuration option to yes and restart the server."); exit(1); fmterr: /* Format error. */ @@ -891,7 +891,7 @@ int loadAppendOnlyFile(char *filename) { * ------------------------------------------------------------------------- */ /* Delegate writing an object to writing a bulk string or bulk long long. - * This is not placed in rio.c since that adds the server.h dependency. */ + * This is not placed in rio.c since that adds the server.h dependency. */ int rioWriteBulkObject(rio *r, robj *obj) { /* Avoid using getDecodedObject to help copy-on-write (we are often * in a child process when this function is called). */ @@ -1266,8 +1266,8 @@ ssize_t aofReadDiffFromParent(void) { ssize_t nread, total = 0; while ((nread = - read(server.aof_pipe_read_data_from_parent,buf,sizeof(buf))) > 0) { - server.aof_child_diff = sdscatlen(server.aof_child_diff,buf,nread); + read(g_pserver->aof_pipe_read_data_from_parent,buf,sizeof(buf))) > 0) { + g_pserver->aof_child_diff = sdscatlen(g_pserver->aof_child_diff,buf,nread); total += nread; } return total; } @@ -1279,9 +1279,9 @@ int rewriteAppendOnlyFileRio(rio *aof) { size_t processed = 0; int j; - for (j = 0; j < server.dbnum; j++) { + for (j = 0; j < cserver.dbnum; j++) { char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n"; - redisDb *db = server.db+j; + redisDb *db = g_pserver->db+j; dict *d = db->pdict; if (dictSize(d) == 0) continue; di = dictGetSafeIterator(d); @@ -1372,13 +1372,13 @@ int rewriteAppendOnlyFile(char *filename) { return C_ERR; } - server.aof_child_diff = sdsempty(); + g_pserver->aof_child_diff = sdsempty(); rioInitWithFile(&aof,fileno(fp)); - if (server.aof_rewrite_incremental_fsync) + if (g_pserver->aof_rewrite_incremental_fsync) rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES); - if (server.aof_use_rdb_preamble) { + if (g_pserver->aof_use_rdb_preamble) { int error; if 
(rdbSaveRio(&aof,&error,RDB_SAVE_AOF_PREAMBLE,NULL) == C_ERR) { errno = error; @@ -1401,7 +1401,7 @@ int rewriteAppendOnlyFile(char *filename) { * happens after 20 ms without new data). */ start = mstime(); while(mstime()-start < 1000 && nodata < 20) { - if (aeWait(server.aof_pipe_read_data_from_parent, AE_READABLE, 1) <= 0) + if (aeWait(g_pserver->aof_pipe_read_data_from_parent, AE_READABLE, 1) <= 0) { nodata++; continue; @@ -1412,13 +1412,13 @@ int rewriteAppendOnlyFile(char *filename) { } /* Ask the master to stop sending diffs. */ - if (write(server.aof_pipe_write_ack_to_parent,"!",1) != 1) goto werr; - if (anetNonBlock(NULL,server.aof_pipe_read_ack_from_parent) != ANET_OK) + if (write(g_pserver->aof_pipe_write_ack_to_parent,"!",1) != 1) goto werr; + if (anetNonBlock(NULL,g_pserver->aof_pipe_read_ack_from_parent) != ANET_OK) goto werr; /* We read the ACK from the server using a 10 seconds timeout. Normally * it should reply ASAP, but just in case we lose its reply, we are sure * the child will eventually get terminated. */ - if (syncRead(server.aof_pipe_read_ack_from_parent,&byte,1,5000) != 1 || + if (syncRead(g_pserver->aof_pipe_read_ack_from_parent,&byte,1,5000) != 1 || byte != '!') goto werr; serverLog(LL_NOTICE,"Parent agreed to stop sending diffs. Finalizing AOF..."); @@ -1428,8 +1428,8 @@ int rewriteAppendOnlyFile(char *filename) { /* Write the received diff to the file. 
*/ serverLog(LL_NOTICE, "Concatenating %.2f MB of AOF diff received from parent.", - (double) sdslen(server.aof_child_diff) / (1024*1024)); - if (rioWrite(&aof,server.aof_child_diff,sdslen(server.aof_child_diff)) == 0) + (double) sdslen(g_pserver->aof_child_diff) / (1024*1024)); + if (rioWrite(&aof,g_pserver->aof_child_diff,sdslen(g_pserver->aof_child_diff)) == 0) goto werr; /* Make sure data will not remain on the OS's output buffers */ @@ -1469,8 +1469,8 @@ void aofChildPipeReadable(aeEventLoop *el, int fd, void *privdata, int mask) { if (read(fd,&byte,1) == 1 && byte == '!') { serverLog(LL_NOTICE,"AOF rewrite child asks to stop sending diffs."); - server.aof_stop_sending_diff = 1; - if (write(server.aof_pipe_write_ack_to_child,"!",1) != 1) { + g_pserver->aof_stop_sending_diff = 1; + if (write(g_pserver->aof_pipe_write_ack_to_child,"!",1) != 1) { /* If we can't send the ack, inform the user, but don't try again * since in the other side the children will use a timeout if the * kernel can't buffer our write, or, the children was @@ -1481,7 +1481,7 @@ void aofChildPipeReadable(aeEventLoop *el, int fd, void *privdata, int mask) { } /* Remove the handler since this can be called only one time during a * rewrite. */ - aeDeleteFileEventAsync(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el,server.aof_pipe_read_ack_from_child,AE_READABLE); + aeDeleteFileEventAsync(g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].el,g_pserver->aof_pipe_read_ack_from_child,AE_READABLE); } /* Create the pipes used for parent - child process IPC during rewrite. 
@@ -1501,14 +1501,14 @@ int aofCreatePipes(void) { if (anetNonBlock(NULL,fds[1]) != ANET_OK) goto error; if (aeCreateFileEvent(serverTL->el, fds[2], AE_READABLE, aofChildPipeReadable, NULL) == AE_ERR) goto error; - server.aof_pipe_write_data_to_child = fds[1]; - server.aof_pipe_read_data_from_parent = fds[0]; - server.aof_pipe_write_ack_to_parent = fds[3]; - server.aof_pipe_read_ack_from_child = fds[2]; - server.el_alf_pip_read_ack_from_child = serverTL->el; - server.aof_pipe_write_ack_to_child = fds[5]; - server.aof_pipe_read_ack_from_parent = fds[4]; - server.aof_stop_sending_diff = 0; + g_pserver->aof_pipe_write_data_to_child = fds[1]; + g_pserver->aof_pipe_read_data_from_parent = fds[0]; + g_pserver->aof_pipe_write_ack_to_parent = fds[3]; + g_pserver->aof_pipe_read_ack_from_child = fds[2]; + g_pserver->el_alf_pip_read_ack_from_child = serverTL->el; + g_pserver->aof_pipe_write_ack_to_child = fds[5]; + g_pserver->aof_pipe_read_ack_from_parent = fds[4]; + g_pserver->aof_stop_sending_diff = 0; return C_OK; error: @@ -1519,14 +1519,14 @@ int aofCreatePipes(void) { } void aofClosePipes(void) { - aeDeleteFileEventAsync(server.el_alf_pip_read_ack_from_child,server.aof_pipe_read_ack_from_child,AE_READABLE); - aeDeleteFileEventAsync(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el,server.aof_pipe_write_data_to_child,AE_WRITABLE); - close(server.aof_pipe_write_data_to_child); - close(server.aof_pipe_read_data_from_parent); - close(server.aof_pipe_write_ack_to_parent); - close(server.aof_pipe_read_ack_from_child); - close(server.aof_pipe_write_ack_to_child); - close(server.aof_pipe_read_ack_from_parent); + aeDeleteFileEventAsync(g_pserver->el_alf_pip_read_ack_from_child,g_pserver->aof_pipe_read_ack_from_child,AE_READABLE); + aeDeleteFileEventAsync(g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].el,g_pserver->aof_pipe_write_data_to_child,AE_WRITABLE); + close(g_pserver->aof_pipe_write_data_to_child); + close(g_pserver->aof_pipe_read_data_from_parent); + 
close(g_pserver->aof_pipe_write_ack_to_parent); + close(g_pserver->aof_pipe_read_ack_from_child); + close(g_pserver->aof_pipe_write_ack_to_child); + close(g_pserver->aof_pipe_read_ack_from_parent); } /* ---------------------------------------------------------------------------- @@ -1538,10 +1538,10 @@ void aofClosePipes(void) { * 1) The user calls BGREWRITEAOF * 2) Redis calls this function, that forks(): * 2a) the child rewrite the append only file in a temp file. - * 2b) the parent accumulates differences in server.aof_rewrite_buf. + * 2b) the parent accumulates differences in g_pserver->aof_rewrite_buf. * 3) When the child finished '2a' exists. * 4) The parent will trap the exit code, if it's OK, will append the - * data accumulated into server.aof_rewrite_buf into the temp file, and + * data accumulated into g_pserver->aof_rewrite_buf into the temp file, and * finally will rename(2) the temp file in the actual file name. * The the new file is reopened as the new append only file. Profit! */ @@ -1549,7 +1549,7 @@ int rewriteAppendOnlyFileBackground(void) { pid_t childpid; long long start; - if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) return C_ERR; + if (g_pserver->aof_child_pid != -1 || g_pserver->rdb_child_pid != -1) return C_ERR; if (aofCreatePipes() != C_OK) return C_ERR; openChildInfoPipe(); start = ustime(); @@ -1569,7 +1569,7 @@ int rewriteAppendOnlyFileBackground(void) { private_dirty/(1024*1024)); } - server.child_info_data.cow_size = private_dirty; + g_pserver->child_info_data.cow_size = private_dirty; sendChildInfo(CHILD_INFO_TYPE_AOF); exitFromChild(0); } else { @@ -1577,9 +1577,9 @@ int rewriteAppendOnlyFileBackground(void) { } } else { /* Parent */ - server.stat_fork_time = ustime()-start; - server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_time / (1024*1024*1024); /* GB per second. 
*/ - latencyAddSampleIfNeeded("fork",server.stat_fork_time/1000); + g_pserver->stat_fork_time = ustime()-start; + g_pserver->stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / g_pserver->stat_fork_time / (1024*1024*1024); /* GB per second. */ + latencyAddSampleIfNeeded("fork",g_pserver->stat_fork_time/1000); if (childpid == -1) { closeChildInfoPipe(); serverLog(LL_WARNING, @@ -1590,15 +1590,15 @@ int rewriteAppendOnlyFileBackground(void) { } serverLog(LL_NOTICE, "Background append only file rewriting started by pid %d",childpid); - server.aof_rewrite_scheduled = 0; - server.aof_rewrite_time_start = time(NULL); - server.aof_child_pid = childpid; + g_pserver->aof_rewrite_scheduled = 0; + g_pserver->aof_rewrite_time_start = time(NULL); + g_pserver->aof_child_pid = childpid; updateDictResizePolicy(); /* We set appendseldb to -1 in order to force the next call to the * feedAppendOnlyFile() to issue a SELECT command, so the differences - * accumulated by the parent into server.aof_rewrite_buf will start + * accumulated by the parent into g_pserver->aof_rewrite_buf will start * with a SELECT statement and it will be safe to merge. 
*/ - server.aof_selected_db = -1; + g_pserver->aof_selected_db = -1; replicationScriptCacheFlush(); return C_OK; } @@ -1606,10 +1606,10 @@ int rewriteAppendOnlyFileBackground(void) { } void bgrewriteaofCommand(client *c) { - if (server.aof_child_pid != -1) { + if (g_pserver->aof_child_pid != -1) { addReplyError(c,"Background append only file rewriting already in progress"); - } else if (server.rdb_child_pid != -1) { - server.aof_rewrite_scheduled = 1; + } else if (g_pserver->rdb_child_pid != -1) { + g_pserver->aof_rewrite_scheduled = 1; addReplyStatus(c,"Background append only file rewriting scheduled"); } else if (rewriteAppendOnlyFileBackground() == C_OK) { addReplyStatus(c,"Background append only file rewriting started"); @@ -1628,7 +1628,7 @@ void aofRemoveTempFile(pid_t childpid) { unlink(tmpfile); } -/* Update the server.aof_current_size field explicitly using stat(2) +/* Update the g_pserver->aof_current_size field explicitly using stat(2) * to check the size of the file. This is useful after a rewrite or after * a restart, normally the size is updated just adding the write length * to the current length, that is much faster. */ @@ -1637,11 +1637,11 @@ void aofUpdateCurrentSize(void) { mstime_t latency; latencyStartMonitor(latency); - if (redis_fstat(server.aof_fd,&sb) == -1) { + if (redis_fstat(g_pserver->aof_fd,&sb) == -1) { serverLog(LL_WARNING,"Unable to obtain the AOF file length. stat: %s", strerror(errno)); } else { - server.aof_current_size = sb.st_size; + g_pserver->aof_current_size = sb.st_size; } latencyEndMonitor(latency); latencyAddSampleIfNeeded("aof-fstat",latency); @@ -1663,7 +1663,7 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) { * rewritten AOF. 
*/ latencyStartMonitor(latency); snprintf(tmpfile,256,"temp-rewriteaof-bg-%d.aof", - (int)server.aof_child_pid); + (int)g_pserver->aof_child_pid); newfd = open(tmpfile,O_WRONLY|O_APPEND); if (newfd == -1) { serverLog(LL_WARNING, @@ -1692,14 +1692,14 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) { * * 1) AOF is DISABLED and this was a one time rewrite. The temporary * file will be renamed to the configured file. When this file already - * exists, it will be unlinked, which may block the server. + * exists, it will be unlinked, which may block the server. * * 2) AOF is ENABLED and the rewritten AOF will immediately start * receiving writes. After the temporary file is renamed to the * configured file, the original AOF file descriptor will be closed. * Since this will be the last reference to that file, closing it * causes the underlying file to be unlinked, which may block the - * server. + * server. * * To mitigate the blocking effect of the unlink operation (either * caused by rename(2) in scenario 1, or by close(2) in scenario 2), we @@ -1710,13 +1710,13 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) { * guarantee atomicity for this switch has already happened by then, so * we don't care what the outcome or duration of that close operation * is, as long as the file descriptor is released again. */ - if (server.aof_fd == -1) { + if (g_pserver->aof_fd == -1) { /* AOF disabled */ /* Don't care if this fails: oldfd will be -1 and we handle that. * One notable case of -1 return is if the old file does * not exist. */ - oldfd = open(server.aof_filename,O_RDONLY|O_NONBLOCK); + oldfd = open(g_pserver->aof_filename,O_RDONLY|O_NONBLOCK); } else { /* AOF enabled */ oldfd = -1; /* We'll set this to the current AOF filedes later. */ @@ -1725,11 +1725,11 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) { /* Rename the temporary file. 
This will not unlink the target file if * it exists, because we reference it with "oldfd". */ latencyStartMonitor(latency); - if (rename(tmpfile,server.aof_filename) == -1) { + if (rename(tmpfile,g_pserver->aof_filename) == -1) { serverLog(LL_WARNING, "Error trying to rename the temporary AOF file %s into %s: %s", tmpfile, - server.aof_filename, + g_pserver->aof_filename, strerror(errno)); close(newfd); if (oldfd != -1) close(oldfd); @@ -1738,34 +1738,34 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) { latencyEndMonitor(latency); latencyAddSampleIfNeeded("aof-rename",latency); - if (server.aof_fd == -1) { + if (g_pserver->aof_fd == -1) { /* AOF disabled, we don't need to set the AOF file descriptor * to this new file, so we can close it. */ close(newfd); } else { /* AOF enabled, replace the old fd with the new one. */ - oldfd = server.aof_fd; - server.aof_fd = newfd; - if (server.aof_fsync == AOF_FSYNC_ALWAYS) + oldfd = g_pserver->aof_fd; + g_pserver->aof_fd = newfd; + if (g_pserver->aof_fsync == AOF_FSYNC_ALWAYS) redis_fsync(newfd); - else if (server.aof_fsync == AOF_FSYNC_EVERYSEC) + else if (g_pserver->aof_fsync == AOF_FSYNC_EVERYSEC) aof_background_fsync(newfd); - server.aof_selected_db = -1; /* Make sure SELECT is re-issued */ + g_pserver->aof_selected_db = -1; /* Make sure SELECT is re-issued */ aofUpdateCurrentSize(); - server.aof_rewrite_base_size = server.aof_current_size; + g_pserver->aof_rewrite_base_size = g_pserver->aof_current_size; /* Clear regular AOF buffer since its contents was just written to * the new AOF from the background rewrite buffer. 
*/ - sdsfree(server.aof_buf); - server.aof_buf = sdsempty(); + sdsfree(g_pserver->aof_buf); + g_pserver->aof_buf = sdsempty(); } - server.aof_lastbgrewrite_status = C_OK; + g_pserver->aof_lastbgrewrite_status = C_OK; serverLog(LL_NOTICE, "Background AOF rewrite finished successfully"); /* Change state from WAIT_REWRITE to ON if needed */ - if (server.aof_state == AOF_WAIT_REWRITE) - server.aof_state = AOF_ON; + if (g_pserver->aof_state == AOF_WAIT_REWRITE) + g_pserver->aof_state = AOF_ON; /* Asynchronously close the overwritten AOF. */ if (oldfd != -1) bioCreateBackgroundJob(BIO_CLOSE_FILE,(void*)(long)oldfd,NULL,NULL); @@ -1776,11 +1776,11 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) { /* SIGUSR1 is whitelisted, so we have a way to kill a child without * tirggering an error condition. */ if (bysignal != SIGUSR1) - server.aof_lastbgrewrite_status = C_ERR; + g_pserver->aof_lastbgrewrite_status = C_ERR; serverLog(LL_WARNING, "Background AOF rewrite terminated with error"); } else { - server.aof_lastbgrewrite_status = C_ERR; + g_pserver->aof_lastbgrewrite_status = C_ERR; serverLog(LL_WARNING, "Background AOF rewrite terminated by signal %d", bysignal); @@ -1789,11 +1789,11 @@ void backgroundRewriteDoneHandler(int exitcode, int bysignal) { cleanup: aofClosePipes(); aofRewriteBufferReset(); - aofRemoveTempFile(server.aof_child_pid); - server.aof_child_pid = -1; - server.aof_rewrite_time_last = time(NULL)-server.aof_rewrite_time_start; - server.aof_rewrite_time_start = -1; + aofRemoveTempFile(g_pserver->aof_child_pid); + g_pserver->aof_child_pid = -1; + g_pserver->aof_rewrite_time_last = time(NULL)-g_pserver->aof_rewrite_time_start; + g_pserver->aof_rewrite_time_start = -1; /* Schedule a new rewrite if we are waiting for it to switch the AOF ON. 
*/ - if (server.aof_state == AOF_WAIT_REWRITE) - server.aof_rewrite_scheduled = 1; + if (g_pserver->aof_state == AOF_WAIT_REWRITE) + g_pserver->aof_rewrite_scheduled = 1; } diff --git a/src/bio.cpp b/src/bio.cpp index effedbebd..62f6615a6 100644 --- a/src/bio.cpp +++ b/src/bio.cpp @@ -4,7 +4,7 @@ * Currently there is only a single operation, that is a background close(2) * system call. This is needed as when the process is the last owner of a * reference to a file closing it means unlinking it, and the deletion of the - * file is slow, blocking the server. + * file is slow, blocking the server. * * In the future we'll either continue implementing new things we need or * we'll switch to libeio. However there are probably long term uses for this diff --git a/src/bitops.cpp b/src/bitops.cpp index ac942b977..98e8b9bc7 100644 --- a/src/bitops.cpp +++ b/src/bitops.cpp @@ -556,7 +556,7 @@ void setbitCommand(client *c) { ((uint8_t*)ptrFromObj(o))[byte] = byteval; signalModifiedKey(c->db,c->argv[1]); notifyKeyspaceEvent(NOTIFY_STRING,"setbit",c->argv[1],c->db->id); - server.dirty++; + g_pserver->dirty++; addReply(c, bitval ? shared.cone : shared.czero); } @@ -762,7 +762,7 @@ void bitopCommand(client *c) { signalModifiedKey(c->db,targetkey); notifyKeyspaceEvent(NOTIFY_GENERIC,"del",targetkey,c->db->id); } - server.dirty++; + g_pserver->dirty++; addReplyLongLong(c,maxlen); /* Return the output string length in bytes. 
*/ } @@ -1120,7 +1120,7 @@ void bitfieldCommand(client *c) { if (changes) { signalModifiedKey(c->db,c->argv[1]); notifyKeyspaceEvent(NOTIFY_STRING,"setbit",c->argv[1],c->db->id); - server.dirty += changes; + g_pserver->dirty += changes; } zfree(ops); } diff --git a/src/blocked.cpp b/src/blocked.cpp index 327726740..1f807dac3 100644 --- a/src/blocked.cpp +++ b/src/blocked.cpp @@ -110,8 +110,8 @@ void blockClient(client *c, int btype) { serverAssert(GlobalLocksAcquired()); c->flags |= CLIENT_BLOCKED; c->btype = btype; - server.blocked_clients++; - server.blocked_clients_by_type[btype]++; + g_pserver->blocked_clients++; + g_pserver->blocked_clients_by_type[btype]++; } /* This function is called in the beforeSleep() function of the event loop @@ -122,8 +122,8 @@ void processUnblockedClients(int iel) { listNode *ln; client *c; - list *unblocked_clients = server.rgthreadvar[iel].unblocked_clients; - serverAssert(iel == (serverTL - server.rgthreadvar)); + list *unblocked_clients = g_pserver->rgthreadvar[iel].unblocked_clients; + serverAssert(iel == (serverTL - g_pserver->rgthreadvar)); while (listLength(unblocked_clients)) { ln = listFirst(unblocked_clients); @@ -171,7 +171,7 @@ void queueClientForReprocessing(client *c) { fastlock_lock(&c->lock); if (!(c->flags & CLIENT_UNBLOCKED)) { c->flags |= CLIENT_UNBLOCKED; - listAddNodeTail(server.rgthreadvar[c->iel].unblocked_clients,c); + listAddNodeTail(g_pserver->rgthreadvar[c->iel].unblocked_clients,c); } fastlock_unlock(&c->lock); } @@ -193,8 +193,8 @@ void unblockClient(client *c) { } /* Clear the flags, and put the client in the unblocked list so that * we'll process new commands in its query buffer ASAP. 
*/ - server.blocked_clients--; - server.blocked_clients_by_type[c->btype]--; + g_pserver->blocked_clients--; + g_pserver->blocked_clients_by_type[c->btype]--; c->flags &= ~CLIENT_BLOCKED; c->btype = BLOCKED_NONE; queueClientForReprocessing(c); @@ -229,7 +229,7 @@ void disconnectAllBlockedClients(void) { listNode *ln; listIter li; - listRewind(server.clients,&li); + listRewind(g_pserver->clients,&li); while((ln = listNext(&li))) { client *c = (client*)listNodeValue(ln); @@ -252,7 +252,7 @@ void disconnectAllBlockedClients(void) { * * All the keys with at least one client blocked that received at least * one new element via some write operation are accumulated into - * the server.ready_keys list. This function will run the list and will + * the g_pserver->ready_keys list. This function will run the list and will * serve clients accordingly. Note that the function will iterate again and * again as a result of serving BRPOPLPUSH we can have new blocking clients * to serve because of the PUSH side of BRPOPLPUSH. @@ -268,15 +268,15 @@ void disconnectAllBlockedClients(void) { * do, the function is already fair. */ void handleClientsBlockedOnKeys(void) { serverAssert(GlobalLocksAcquired()); - while(listLength(server.ready_keys) != 0) { + while(listLength(g_pserver->ready_keys) != 0) { list *l; - /* Point server.ready_keys to a fresh list and save the current one + /* Point g_pserver->ready_keys to a fresh list and save the current one * locally. This way as we run the old list we are free to call - * signalKeyAsReady() that may push new elements in server.ready_keys + * signalKeyAsReady() that may push new elements in g_pserver->ready_keys * when handling clients blocked into BRPOPLPUSH. */ - l = server.ready_keys; - server.ready_keys = listCreate(); + l = g_pserver->ready_keys; + g_pserver->ready_keys = listCreate(); while(listLength(l) != 0) { listNode *ln = listFirst(l); @@ -385,8 +385,8 @@ void handleClientsBlockedOnKeys(void) { /* Replicate the command. 
*/ robj *argv[2]; struct redisCommand *cmd = where == ZSET_MIN ? - server.zpopminCommand : - server.zpopmaxCommand; + cserver.zpopminCommand : + cserver.zpopmaxCommand; argv[0] = createStringObject(cmd->name,strlen(cmd->name)); argv[1] = rl->key; incrRefCount(rl->key); @@ -613,7 +613,7 @@ void unblockClientWaitingData(client *c) { } /* If the specified key has clients blocked waiting for list pushes, this - * function will put the key reference into the server.ready_keys list. + * function will put the key reference into the g_pserver->ready_keys list. * Note that db->ready_keys is a hash table that allows us to avoid putting * the same key again and again in the list in case of multiple pushes * made by a script or in the context of MULTI/EXEC. @@ -628,12 +628,12 @@ void signalKeyAsReady(redisDb *db, robj *key) { /* Key was already signaled? No need to queue it again. */ if (dictFind(db->ready_keys,key) != NULL) return; - /* Ok, we need to queue this key into server.ready_keys. */ + /* Ok, we need to queue this key into g_pserver->ready_keys. */ rl = (readyList*)zmalloc(sizeof(*rl), MALLOC_SHARED); rl->key = key; rl->db = db; incrRefCount(key); - listAddNodeTail(server.ready_keys,rl); + listAddNodeTail(g_pserver->ready_keys,rl); /* We also add the key in the db->ready_keys dictionary in order * to avoid adding it multiple times into a list with a simple O(1) diff --git a/src/childinfo.cpp b/src/childinfo.cpp index 719025e8c..16f6aeb53 100644 --- a/src/childinfo.cpp +++ b/src/childinfo.cpp @@ -34,52 +34,52 @@ * RDB / AOF saving process from the child to the parent (for instance * the amount of copy on write memory used) */ void openChildInfoPipe(void) { - if (pipe(server.child_info_pipe) == -1) { + if (pipe(g_pserver->child_info_pipe) == -1) { /* On error our two file descriptors should be still set to -1, * but we call anyway cloesChildInfoPipe() since can't hurt. 
*/ closeChildInfoPipe(); - } else if (anetNonBlock(NULL,server.child_info_pipe[0]) != ANET_OK) { + } else if (anetNonBlock(NULL,g_pserver->child_info_pipe[0]) != ANET_OK) { closeChildInfoPipe(); } else { - memset(&server.child_info_data,0,sizeof(server.child_info_data)); + memset(&g_pserver->child_info_data,0,sizeof(g_pserver->child_info_data)); } } /* Close the pipes opened with openChildInfoPipe(). */ void closeChildInfoPipe(void) { - if (server.child_info_pipe[0] != -1 || - server.child_info_pipe[1] != -1) + if (g_pserver->child_info_pipe[0] != -1 || + g_pserver->child_info_pipe[1] != -1) { - close(server.child_info_pipe[0]); - close(server.child_info_pipe[1]); - server.child_info_pipe[0] = -1; - server.child_info_pipe[1] = -1; + close(g_pserver->child_info_pipe[0]); + close(g_pserver->child_info_pipe[1]); + g_pserver->child_info_pipe[0] = -1; + g_pserver->child_info_pipe[1] = -1; } } /* Send COW data to parent. The child should call this function after populating * the corresponding fields it want to sent (according to the process type). */ void sendChildInfo(int ptype) { - if (server.child_info_pipe[1] == -1) return; - server.child_info_data.magic = CHILD_INFO_MAGIC; - server.child_info_data.process_type = ptype; - ssize_t wlen = sizeof(server.child_info_data); - if (write(server.child_info_pipe[1],&server.child_info_data,wlen) != wlen) { + if (g_pserver->child_info_pipe[1] == -1) return; + g_pserver->child_info_data.magic = CHILD_INFO_MAGIC; + g_pserver->child_info_data.process_type = ptype; + ssize_t wlen = sizeof(g_pserver->child_info_data); + if (write(g_pserver->child_info_pipe[1],&g_pserver->child_info_data,wlen) != wlen) { /* Nothing to do on error, this will be detected by the other side. */ } } /* Receive COW data from parent. 
*/ void receiveChildInfo(void) { - if (server.child_info_pipe[0] == -1) return; - ssize_t wlen = sizeof(server.child_info_data); - if (read(server.child_info_pipe[0],&server.child_info_data,wlen) == wlen && - server.child_info_data.magic == CHILD_INFO_MAGIC) + if (g_pserver->child_info_pipe[0] == -1) return; + ssize_t wlen = sizeof(g_pserver->child_info_data); + if (read(g_pserver->child_info_pipe[0],&g_pserver->child_info_data,wlen) == wlen && + g_pserver->child_info_data.magic == CHILD_INFO_MAGIC) { - if (server.child_info_data.process_type == CHILD_INFO_TYPE_RDB) { - server.stat_rdb_cow_bytes = server.child_info_data.cow_size; - } else if (server.child_info_data.process_type == CHILD_INFO_TYPE_AOF) { - server.stat_aof_cow_bytes = server.child_info_data.cow_size; + if (g_pserver->child_info_data.process_type == CHILD_INFO_TYPE_RDB) { + g_pserver->stat_rdb_cow_bytes = g_pserver->child_info_data.cow_size; + } else if (g_pserver->child_info_data.process_type == CHILD_INFO_TYPE_AOF) { + g_pserver->stat_aof_cow_bytes = g_pserver->child_info_data.cow_size; } } } diff --git a/src/cluster.cpp b/src/cluster.cpp index 5aca6237d..42ea437de 100644 --- a/src/cluster.cpp +++ b/src/cluster.cpp @@ -42,7 +42,7 @@ #include /* A global reference to myself is handy to make code more clear. - * Myself always points to server.cluster->myself, that is, the clusterNode + * Myself always points to g_pserver->cluster->myself, that is, the clusterNode * that represents this node. 
*/ clusterNode *myself = NULL; @@ -79,10 +79,10 @@ void moduleCallClusterReceivers(const char *sender_id, uint64_t module_id, uint8 struct redisMaster *getFirstMaster() { - serverAssert(listLength(server.masters) <= 1); - if (!listLength(server.masters)) + serverAssert(listLength(g_pserver->masters) <= 1); + if (!listLength(g_pserver->masters)) return NULL; - return (redisMaster*)listFirst(server.masters)->value; + return (redisMaster*)listFirst(g_pserver->masters)->value; } /* ----------------------------------------------------------------------------- @@ -148,10 +148,10 @@ int clusterLoadConfig(char *filename) { if (strcasecmp(argv[0],"vars") == 0) { for (j = 1; j < argc; j += 2) { if (strcasecmp(argv[j],"currentEpoch") == 0) { - server.cluster->currentEpoch = + g_pserver->cluster->currentEpoch = strtoull(argv[j+1],NULL,10); } else if (strcasecmp(argv[j],"lastVoteEpoch") == 0) { - server.cluster->lastVoteEpoch = + g_pserver->cluster->lastVoteEpoch = strtoull(argv[j+1],NULL,10); } else { serverLog(LL_WARNING, @@ -194,8 +194,8 @@ int clusterLoadConfig(char *filename) { p = strchr(s,','); if (p) *p = '\0'; if (!strcasecmp(s,"myself")) { - serverAssert(server.cluster->myself == NULL); - myself = server.cluster->myself = n; + serverAssert(g_pserver->cluster->myself == NULL); + myself = g_pserver->cluster->myself = n; n->flags |= CLUSTER_NODE_MYSELF; } else if (!strcasecmp(s,"master")) { n->flags |= CLUSTER_NODE_MASTER; @@ -262,9 +262,9 @@ int clusterLoadConfig(char *filename) { clusterAddNode(cn); } if (direction == '>') { - server.cluster->migrating_slots_to[slot] = cn; + g_pserver->cluster->migrating_slots_to[slot] = cn; } else { - server.cluster->importing_slots_from[slot] = cn; + g_pserver->cluster->importing_slots_from[slot] = cn; } continue; } else if ((p = strchr(argv[j],'-')) != NULL) { @@ -282,7 +282,7 @@ int clusterLoadConfig(char *filename) { sdsfreesplitres(argv,argc); } /* Config sanity check */ - if (server.cluster->myself == NULL) goto fmterr; + if 
(g_pserver->cluster->myself == NULL) goto fmterr; zfree(line); fclose(fp); @@ -292,8 +292,8 @@ int clusterLoadConfig(char *filename) { /* Something that should never happen: currentEpoch smaller than * the max epoch found in the nodes configuration. However we handle this * as some form of protection against manual editing of critical files. */ - if (clusterGetMaxEpoch() > server.cluster->currentEpoch) { - server.cluster->currentEpoch = clusterGetMaxEpoch(); + if (clusterGetMaxEpoch() > g_pserver->cluster->currentEpoch) { + g_pserver->cluster->currentEpoch = clusterGetMaxEpoch(); } return C_OK; @@ -323,17 +323,17 @@ int clusterSaveConfig(int do_fsync) { struct stat sb; int fd; - server.cluster->todo_before_sleep &= ~CLUSTER_TODO_SAVE_CONFIG; + g_pserver->cluster->todo_before_sleep &= ~CLUSTER_TODO_SAVE_CONFIG; /* Get the nodes description and concatenate our "vars" directive to * save currentEpoch and lastVoteEpoch. */ ci = clusterGenNodesDescription(CLUSTER_NODE_HANDSHAKE); ci = sdscatprintf(ci,"vars currentEpoch %llu lastVoteEpoch %llu\n", - (unsigned long long) server.cluster->currentEpoch, - (unsigned long long) server.cluster->lastVoteEpoch); + (unsigned long long) g_pserver->cluster->currentEpoch, + (unsigned long long) g_pserver->cluster->lastVoteEpoch); content_size = sdslen(ci); - if ((fd = open(server.cluster_configfile,O_WRONLY|O_CREAT,0644)) + if ((fd = open(g_pserver->cluster_configfile,O_WRONLY|O_CREAT,0644)) == -1) goto err; /* Pad the new payload if the existing file length is greater. */ @@ -345,7 +345,7 @@ int clusterSaveConfig(int do_fsync) { } if (write(fd,ci,sdslen(ci)) != (ssize_t)sdslen(ci)) goto err; if (do_fsync) { - server.cluster->todo_before_sleep &= ~CLUSTER_TODO_FSYNC_CONFIG; + g_pserver->cluster->todo_before_sleep &= ~CLUSTER_TODO_FSYNC_CONFIG; fsync(fd); } @@ -424,7 +424,7 @@ int clusterLockConfig(char *filename) { * set of flags in myself->flags accordingly. 
*/ void clusterUpdateMyselfFlags(void) { int oldflags = myself->flags; - int nofailover = server.cluster_slave_no_failover ? + int nofailover = g_pserver->cluster_slave_no_failover ? CLUSTER_NODE_NOFAILOVER : 0; myself->flags &= ~CLUSTER_NODE_NOFAILOVER; myself->flags |= nofailover; @@ -436,45 +436,45 @@ void clusterUpdateMyselfFlags(void) { void clusterInit(void) { int saveconf = 0; - if (server.enable_multimaster) + if (g_pserver->enable_multimaster) { serverLog(LL_WARNING, "Clusters are not compatible with multi-master"); exit(EXIT_FAILURE); } - server.cluster = (clusterState*)zmalloc(sizeof(clusterState), MALLOC_LOCAL); - server.cluster->myself = NULL; - server.cluster->currentEpoch = 0; - server.cluster->state = CLUSTER_FAIL; - server.cluster->size = 1; - server.cluster->todo_before_sleep = 0; - server.cluster->nodes = dictCreate(&clusterNodesDictType,NULL); - server.cluster->nodes_black_list = + g_pserver->cluster = (clusterState*)zmalloc(sizeof(clusterState), MALLOC_LOCAL); + g_pserver->cluster->myself = NULL; + g_pserver->cluster->currentEpoch = 0; + g_pserver->cluster->state = CLUSTER_FAIL; + g_pserver->cluster->size = 1; + g_pserver->cluster->todo_before_sleep = 0; + g_pserver->cluster->nodes = dictCreate(&clusterNodesDictType,NULL); + g_pserver->cluster->nodes_black_list = dictCreate(&clusterNodesBlackListDictType,NULL); - server.cluster->failover_auth_time = 0; - server.cluster->failover_auth_count = 0; - server.cluster->failover_auth_rank = 0; - server.cluster->failover_auth_epoch = 0; - server.cluster->cant_failover_reason = CLUSTER_CANT_FAILOVER_NONE; - server.cluster->lastVoteEpoch = 0; + g_pserver->cluster->failover_auth_time = 0; + g_pserver->cluster->failover_auth_count = 0; + g_pserver->cluster->failover_auth_rank = 0; + g_pserver->cluster->failover_auth_epoch = 0; + g_pserver->cluster->cant_failover_reason = CLUSTER_CANT_FAILOVER_NONE; + g_pserver->cluster->lastVoteEpoch = 0; for (int i = 0; i < CLUSTERMSG_TYPE_COUNT; i++) { - 
server.cluster->stats_bus_messages_sent[i] = 0; - server.cluster->stats_bus_messages_received[i] = 0; + g_pserver->cluster->stats_bus_messages_sent[i] = 0; + g_pserver->cluster->stats_bus_messages_received[i] = 0; } - server.cluster->stats_pfail_nodes = 0; - memset(server.cluster->slots,0, sizeof(server.cluster->slots)); + g_pserver->cluster->stats_pfail_nodes = 0; + memset(g_pserver->cluster->slots,0, sizeof(g_pserver->cluster->slots)); clusterCloseAllSlots(); /* Lock the cluster config file to make sure every node uses * its own nodes.conf. */ - if (clusterLockConfig(server.cluster_configfile) == C_ERR) + if (clusterLockConfig(g_pserver->cluster_configfile) == C_ERR) exit(1); /* Load or create a new nodes configuration. */ - if (clusterLoadConfig(server.cluster_configfile) == C_ERR) { + if (clusterLoadConfig(g_pserver->cluster_configfile) == C_ERR) { /* No configuration found. We will just use the random name provided * by the createClusterNode() function. */ - myself = server.cluster->myself = + myself = g_pserver->cluster->myself = createClusterNode(NULL,CLUSTER_NODE_MYSELF|CLUSTER_NODE_MASTER); serverLog(LL_NOTICE,"No cluster configuration found, I'm %.40s", myself->name); @@ -484,12 +484,12 @@ void clusterInit(void) { if (saveconf) clusterSaveConfigOrDie(1); /* We need a listening TCP port for our cluster messaging needs. */ - server.cfd_count = 0; + g_pserver->cfd_count = 0; /* Port sanity check II * The other handshake port check is triggered too late to stop * us from trying to use a too-high cluster port number. */ - if (server.port > (65535-CLUSTER_PORT_INCR)) { + if (g_pserver->port > (65535-CLUSTER_PORT_INCR)) { serverLog(LL_WARNING, "Redis port number too high. " "Cluster communication port is 10,000 port " "numbers higher than your Redis port. 
" @@ -498,15 +498,15 @@ void clusterInit(void) { exit(1); } - if (listenToPort(server.port+CLUSTER_PORT_INCR, - server.cfd,&server.cfd_count, 0 /*fReusePort*/) == C_ERR) + if (listenToPort(g_pserver->port+CLUSTER_PORT_INCR, + g_pserver->cfd,&g_pserver->cfd_count, 0 /*fReusePort*/) == C_ERR) { exit(1); } else { int j; - for (j = 0; j < server.cfd_count; j++) { - if (aeCreateFileEvent(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el, server.cfd[j], AE_READABLE, + for (j = 0; j < g_pserver->cfd_count; j++) { + if (aeCreateFileEvent(g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].el, g_pserver->cfd[j], AE_READABLE, clusterAcceptHandler, NULL) == AE_ERR) serverPanic("Unrecoverable error creating Redis Cluster " "file event."); @@ -514,20 +514,20 @@ void clusterInit(void) { } /* The slots -> keys map is a radix tree. Initialize it here. */ - server.cluster->slots_to_keys = raxNew(); - memset(server.cluster->slots_keys_count,0, - sizeof(server.cluster->slots_keys_count)); + g_pserver->cluster->slots_to_keys = raxNew(); + memset(g_pserver->cluster->slots_keys_count,0, + sizeof(g_pserver->cluster->slots_keys_count)); /* Set myself->port / cport to my listening ports, we'll just need to * discover the IP address via MEET messages. */ - myself->port = server.port; - myself->cport = server.port+CLUSTER_PORT_INCR; - if (server.cluster_announce_port) - myself->port = server.cluster_announce_port; - if (server.cluster_announce_bus_port) - myself->cport = server.cluster_announce_bus_port; - - server.cluster->mf_end = 0; + myself->port = g_pserver->port; + myself->cport = g_pserver->port+CLUSTER_PORT_INCR; + if (g_pserver->cluster_announce_port) + myself->port = g_pserver->cluster_announce_port; + if (g_pserver->cluster_announce_bus_port) + myself->cport = g_pserver->cluster_announce_bus_port; + + g_pserver->cluster->mf_end = 0; resetManualFailover(); clusterUpdateMyselfFlags(); } @@ -549,10 +549,10 @@ void clusterReset(int hard) { /* Turn into master. 
*/ if (nodeIsSlave(myself)) { clusterSetNodeAsMaster(myself); - if (listLength(server.masters) > 0) + if (listLength(g_pserver->masters) > 0) { - serverAssert(listLength(server.masters) == 1); - replicationUnsetMaster((redisMaster*)listFirst(server.masters)->value); + serverAssert(listLength(g_pserver->masters) == 1); + replicationUnsetMaster((redisMaster*)listFirst(g_pserver->masters)->value); } emptyDb(-1,EMPTYDB_NO_FLAGS,NULL); } @@ -565,7 +565,7 @@ void clusterReset(int hard) { for (j = 0; j < CLUSTER_SLOTS; j++) clusterDelSlot(j); /* Forget all the nodes, but myself. */ - di = dictGetSafeIterator(server.cluster->nodes); + di = dictGetSafeIterator(g_pserver->cluster->nodes); while((de = dictNext(di)) != NULL) { clusterNode *node = (clusterNode*)dictGetVal(de); @@ -578,15 +578,15 @@ void clusterReset(int hard) { if (hard) { sds oldname; - server.cluster->currentEpoch = 0; - server.cluster->lastVoteEpoch = 0; + g_pserver->cluster->currentEpoch = 0; + g_pserver->cluster->lastVoteEpoch = 0; myself->configEpoch = 0; serverLog(LL_WARNING, "configEpoch set to 0 via CLUSTER RESET HARD"); /* To change the Node ID we need to remove the old name from the * nodes table, change the ID, and re-add back with new name. */ oldname = sdsnewlen(myself->name, CLUSTER_NAMELEN); - dictDelete(server.cluster->nodes,oldname); + dictDelete(g_pserver->cluster->nodes,oldname); sdsfree(oldname); getRandomHexChars(myself->name, CLUSTER_NAMELEN); clusterAddNode(myself); @@ -618,7 +618,7 @@ clusterLink *createClusterLink(clusterNode *node) { * with this link will have the 'link' field set to NULL. 
*/ void freeClusterLink(clusterLink *link) { if (link->fd != -1) { - aeDeleteFileEvent(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el, link->fd, AE_READABLE|AE_WRITABLE); + aeDeleteFileEvent(g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].el, link->fd, AE_READABLE|AE_WRITABLE); } sdsfree(link->sndbuf); sdsfree(link->rcvbuf); @@ -640,14 +640,14 @@ void clusterAcceptHandler(aeEventLoop *el, int fd, void *privdata, int mask) { /* If the server is starting up, don't accept cluster connections: * UPDATE messages may interact with the database content. */ - if (listLength(server.masters) == 0 && server.loading) return; + if (listLength(g_pserver->masters) == 0 && g_pserver->loading) return; while(max--) { - cfd = anetTcpAccept(server.neterr, fd, cip, sizeof(cip), &cport); + cfd = anetTcpAccept(g_pserver->neterr, fd, cip, sizeof(cip), &cport); if (cfd == ANET_ERR) { if (errno != EWOULDBLOCK) serverLog(LL_VERBOSE, - "Error accepting cluster node: %s", server.neterr); + "Error accepting cluster node: %s", g_pserver->neterr); return; } anetNonBlock(NULL,cfd); @@ -662,7 +662,7 @@ void clusterAcceptHandler(aeEventLoop *el, int fd, void *privdata, int mask) { * node identity. */ link = createClusterLink(NULL); link->fd = cfd; - aeCreateFileEvent(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el,cfd,AE_READABLE,clusterReadHandler,link); + aeCreateFileEvent(g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].el,cfd,AE_READABLE,clusterReadHandler,link); } } @@ -783,7 +783,7 @@ void clusterNodeCleanupFailureReports(clusterNode *node) { listNode *ln; listIter li; clusterNodeFailReport *fr; - mstime_t maxtime = server.cluster_node_timeout * + mstime_t maxtime = g_pserver->cluster_node_timeout * CLUSTER_FAIL_REPORT_VALIDITY_MULT; mstime_t now = mstime(); @@ -889,7 +889,7 @@ void freeClusterNode(clusterNode *n) { /* Unlink from the set of nodes. 
*/ nodename = sdsnewlen(n->name, CLUSTER_NAMELEN); - serverAssert(dictDelete(server.cluster->nodes,nodename) == DICT_OK); + serverAssert(dictDelete(g_pserver->cluster->nodes,nodename) == DICT_OK); sdsfree(nodename); /* Release link and associated data structures. */ @@ -903,7 +903,7 @@ void freeClusterNode(clusterNode *n) { int clusterAddNode(clusterNode *node) { int retval; - retval = dictAdd(server.cluster->nodes, + retval = dictAdd(g_pserver->cluster->nodes, sdsnewlen(node->name,CLUSTER_NAMELEN), node); return (retval == DICT_OK) ? C_OK : C_ERR; } @@ -926,16 +926,16 @@ void clusterDelNode(clusterNode *delnode) { /* 1) Mark slots as unassigned. */ for (j = 0; j < CLUSTER_SLOTS; j++) { - if (server.cluster->importing_slots_from[j] == delnode) - server.cluster->importing_slots_from[j] = NULL; - if (server.cluster->migrating_slots_to[j] == delnode) - server.cluster->migrating_slots_to[j] = NULL; - if (server.cluster->slots[j] == delnode) + if (g_pserver->cluster->importing_slots_from[j] == delnode) + g_pserver->cluster->importing_slots_from[j] = NULL; + if (g_pserver->cluster->migrating_slots_to[j] == delnode) + g_pserver->cluster->migrating_slots_to[j] = NULL; + if (g_pserver->cluster->slots[j] == delnode) clusterDelSlot(j); } /* 2) Remove failure reports. 
*/ - di = dictGetSafeIterator(server.cluster->nodes); + di = dictGetSafeIterator(g_pserver->cluster->nodes); while((de = dictNext(di)) != NULL) { clusterNode *node = (clusterNode*)dictGetVal(de); @@ -953,7 +953,7 @@ clusterNode *clusterLookupNode(const char *name) { sds s = sdsnewlen(name, CLUSTER_NAMELEN); dictEntry *de; - de = dictFind(server.cluster->nodes,s); + de = dictFind(g_pserver->cluster->nodes,s); sdsfree(s); if (de == NULL) return NULL; return (clusterNode*)dictGetVal(de); @@ -969,7 +969,7 @@ void clusterRenameNode(clusterNode *node, char *newname) { serverLog(LL_DEBUG,"Renaming node %.40s into %.40s", node->name, newname); - retval = dictDelete(server.cluster->nodes, s); + retval = dictDelete(g_pserver->cluster->nodes, s); sdsfree(s); serverAssert(retval == DICT_OK); memcpy(node->name, newname, CLUSTER_NAMELEN); @@ -987,13 +987,13 @@ uint64_t clusterGetMaxEpoch(void) { dictIterator *di; dictEntry *de; - di = dictGetSafeIterator(server.cluster->nodes); + di = dictGetSafeIterator(g_pserver->cluster->nodes); while((de = dictNext(di)) != NULL) { clusterNode *node = (clusterNode*)dictGetVal(de); if (node->configEpoch > max) max = node->configEpoch; } dictReleaseIterator(di); - if (max < server.cluster->currentEpoch) max = server.cluster->currentEpoch; + if (max < g_pserver->cluster->currentEpoch) max = g_pserver->cluster->currentEpoch; return max; } @@ -1032,8 +1032,8 @@ int clusterBumpConfigEpochWithoutConsensus(void) { if (myself->configEpoch == 0 || myself->configEpoch != maxEpoch) { - server.cluster->currentEpoch++; - myself->configEpoch = server.cluster->currentEpoch; + g_pserver->cluster->currentEpoch++; + myself->configEpoch = g_pserver->cluster->currentEpoch; clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG| CLUSTER_TODO_FSYNC_CONFIG); serverLog(LL_WARNING, @@ -1098,8 +1098,8 @@ void clusterHandleConfigEpochCollision(clusterNode *sender) { /* Don't act if the colliding node has a smaller Node ID. 
*/ if (memcmp(sender->name,myself->name,CLUSTER_NAMELEN) <= 0) return; /* Get the next ID available at the best of this node knowledge. */ - server.cluster->currentEpoch++; - myself->configEpoch = server.cluster->currentEpoch; + g_pserver->cluster->currentEpoch++; + myself->configEpoch = g_pserver->cluster->currentEpoch; clusterSaveConfigOrDie(1); serverLog(LL_VERBOSE, "WARNING: configEpoch collision with node %.40s." @@ -1143,12 +1143,12 @@ void clusterBlacklistCleanup(void) { dictIterator *di; dictEntry *de; - di = dictGetSafeIterator(server.cluster->nodes_black_list); + di = dictGetSafeIterator(g_pserver->cluster->nodes_black_list); while((de = dictNext(di)) != NULL) { int64_t expire = dictGetUnsignedIntegerVal(de); - if (expire < server.unixtime) - dictDelete(server.cluster->nodes_black_list,dictGetKey(de)); + if (expire < g_pserver->unixtime) + dictDelete(g_pserver->cluster->nodes_black_list,dictGetKey(de)); } dictReleaseIterator(di); } @@ -1159,12 +1159,12 @@ void clusterBlacklistAddNode(clusterNode *node) { sds id = sdsnewlen(node->name,CLUSTER_NAMELEN); clusterBlacklistCleanup(); - if (dictAdd(server.cluster->nodes_black_list,id,NULL) == DICT_OK) { + if (dictAdd(g_pserver->cluster->nodes_black_list,id,NULL) == DICT_OK) { /* If the key was added, duplicate the sds string representation of * the key for the next lookup. We'll free it at the end. 
*/ id = sdsdup(id); } - de = dictFind(server.cluster->nodes_black_list,id); + de = dictFind(g_pserver->cluster->nodes_black_list,id); dictSetUnsignedIntegerVal(de,time(NULL)+CLUSTER_BLACKLIST_TTL); sdsfree(id); } @@ -1177,7 +1177,7 @@ int clusterBlacklistExists(char *nodeid) { int retval; clusterBlacklistCleanup(); - retval = dictFind(server.cluster->nodes_black_list,id) != NULL; + retval = dictFind(g_pserver->cluster->nodes_black_list,id) != NULL; sdsfree(id); return retval; } @@ -1209,7 +1209,7 @@ int clusterBlacklistExists(char *nodeid) { */ void markNodeAsFailingIfNeeded(clusterNode *node) { int failures; - int needed_quorum = (server.cluster->size / 2) + 1; + int needed_quorum = (g_pserver->cluster->size / 2) + 1; if (!nodeTimedOut(node)) return; /* We can reach it. */ if (nodeFailed(node)) return; /* Already FAILing. */ @@ -1258,7 +1258,7 @@ void clearNodeFailureIfNeeded(clusterNode *node) { * Apparently no one is going to fix these slots, clear the FAIL flag. */ if (nodeIsMaster(node) && node->numslots > 0 && (now - node->fail_time) > - (server.cluster_node_timeout * CLUSTER_FAIL_UNDO_TIME_MULT)) + (g_pserver->cluster_node_timeout * CLUSTER_FAIL_UNDO_TIME_MULT)) { serverLog(LL_NOTICE, "Clear FAIL state for node %.40s: is reachable again and nobody is serving its slots after some time.", @@ -1275,7 +1275,7 @@ int clusterHandshakeInProgress(char *ip, int port, int cport) { dictIterator *di; dictEntry *de; - di = dictGetSafeIterator(server.cluster->nodes); + di = dictGetSafeIterator(g_pserver->cluster->nodes); while((de = dictNext(di)) != NULL) { clusterNode *node = (clusterNode*)dictGetVal(de); @@ -1362,7 +1362,7 @@ void clusterProcessGossipSection(clusterMsg *hdr, clusterLink *link) { clusterNode *node; sds ci; - if (server.verbosity == LL_DEBUG) { + if (cserver.verbosity == LL_DEBUG) { ci = representClusterNodeFlags(sdsempty(), flags); serverLog(LL_DEBUG,"GOSSIP %.40s %s:%d@%d %s", g->nodename, @@ -1410,7 +1410,7 @@ void 
clusterProcessGossipSection(clusterMsg *hdr, clusterLink *link) { * it's greater than our view but is not in the future * (with 500 milliseconds tolerance) from the POV of our * clock. */ - if (pongtime <= (server.mstime+500) && + if (pongtime <= (g_pserver->mstime+500) && pongtime > node->pong_received) { node->pong_received = pongtime; @@ -1511,7 +1511,7 @@ int nodeUpdateAddressIfNeeded(clusterNode *node, clusterLink *link, * replication target as well. */ if (nodeIsSlave(myself) && myself->slaveof == node) { - serverAssert(listLength(server.masters) == 1); + serverAssert(listLength(g_pserver->masters) == 1); replicationAddMaster(node->ip, node->port); } @@ -1574,24 +1574,24 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc for (j = 0; j < CLUSTER_SLOTS; j++) { if (bitmapTestBit(slots,j)) { /* The slot is already bound to the sender of this message. */ - if (server.cluster->slots[j] == sender) continue; + if (g_pserver->cluster->slots[j] == sender) continue; /* The slot is in importing state, it should be modified only * manually via keydb-trib (example: a resharding is in progress * and the migrating side slot was already closed and is advertising * a new config. We still want the slot to be closed manually). */ - if (server.cluster->importing_slots_from[j]) continue; + if (g_pserver->cluster->importing_slots_from[j]) continue; /* We rebind the slot to the new node claiming it if: * 1) The slot was unassigned or the new node claims it with a * greater configEpoch. * 2) We are not currently importing the slot. */ - if (server.cluster->slots[j] == NULL || - server.cluster->slots[j]->configEpoch < senderConfigEpoch) + if (g_pserver->cluster->slots[j] == NULL || + g_pserver->cluster->slots[j]->configEpoch < senderConfigEpoch) { /* Was this slot mine, and still contains keys? Mark it as * a dirty slot. 
*/ - if (server.cluster->slots[j] == myself && + if (g_pserver->cluster->slots[j] == myself && countKeysInSlot(j) && sender != myself) { @@ -1599,7 +1599,7 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc dirty_slots_count++; } - if (server.cluster->slots[j] == curmaster) + if (g_pserver->cluster->slots[j] == curmaster) newmaster = sender; clusterDelSlot(j); clusterAddSlot(sender,j); @@ -1613,7 +1613,7 @@ void clusterUpdateSlotsConfigWith(clusterNode *sender, uint64_t senderConfigEpoc /* After updating the slots configuration, don't do any actual change * in the state of the server if a module disabled Redis Cluster * keys redirections. */ - if (server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_REDIRECTION) + if (g_pserver->cluster_module_flags & CLUSTER_MODULE_FLAG_NO_REDIRECTION) return; /* If at least one slot was reassigned from a node to another node @@ -1659,7 +1659,7 @@ int clusterProcessPacket(clusterLink *link) { uint16_t type = ntohs(hdr->type); if (type < CLUSTERMSG_TYPE_COUNT) - server.cluster->stats_bus_messages_received[type]++; + g_pserver->cluster->stats_bus_messages_received[type]++; serverLog(LL_DEBUG,"--- Processing packet of type %d, %lu bytes", type, (unsigned long) totlen); @@ -1724,8 +1724,8 @@ int clusterProcessPacket(clusterLink *link) { /* Update our curretEpoch if we see a newer epoch in the cluster. */ senderCurrentEpoch = ntohu64(hdr->currentEpoch); senderConfigEpoch = ntohu64(hdr->configEpoch); - if (senderCurrentEpoch > server.cluster->currentEpoch) - server.cluster->currentEpoch = senderCurrentEpoch; + if (senderCurrentEpoch > g_pserver->cluster->currentEpoch) + g_pserver->cluster->currentEpoch = senderCurrentEpoch; /* Update the sender configEpoch if it is publishing a newer one. 
*/ if (senderConfigEpoch > sender->configEpoch) { sender->configEpoch = senderConfigEpoch; @@ -1737,17 +1737,17 @@ int clusterProcessPacket(clusterLink *link) { sender->repl_offset_time = mstime(); /* If we are a slave performing a manual failover and our master * sent its offset while already paused, populate the MF state. */ - if (server.cluster->mf_end && + if (g_pserver->cluster->mf_end && nodeIsSlave(myself) && myself->slaveof == sender && hdr->mflags[0] & CLUSTERMSG_FLAG0_PAUSED && - server.cluster->mf_master_offset == 0) + g_pserver->cluster->mf_master_offset == 0) { - server.cluster->mf_master_offset = sender->repl_offset; + g_pserver->cluster->mf_master_offset = sender->repl_offset; serverLog(LL_WARNING, "Received replication offset for paused " "master manual failover: %lld", - server.cluster->mf_master_offset); + g_pserver->cluster->mf_master_offset); } } @@ -1767,7 +1767,7 @@ int clusterProcessPacket(clusterLink *link) { * even with a normal PING packet. If it's wrong it will be fixed * by MEET later. 
*/ if ((type == CLUSTERMSG_TYPE_MEET || myself->ip[0] == '\0') && - server.cluster_announce_ip == NULL) + g_pserver->cluster_announce_ip == NULL) { char ip[NET_IP_STR_LEN]; @@ -1986,17 +1986,17 @@ int clusterProcessPacket(clusterLink *link) { for (j = 0; j < CLUSTER_SLOTS; j++) { if (bitmapTestBit(hdr->myslots,j)) { - if (server.cluster->slots[j] == sender || - server.cluster->slots[j] == NULL) continue; - if (server.cluster->slots[j]->configEpoch > + if (g_pserver->cluster->slots[j] == sender || + g_pserver->cluster->slots[j] == NULL) continue; + if (g_pserver->cluster->slots[j]->configEpoch > senderConfigEpoch) { serverLog(LL_VERBOSE, "Node %.40s has old slots configuration, sending " "an UPDATE message about %.40s", - sender->name, server.cluster->slots[j]->name); + sender->name, g_pserver->cluster->slots[j]->name); clusterSendUpdate(sender->link, - server.cluster->slots[j]); + g_pserver->cluster->slots[j]); /* TODO: instead of exiting the loop send every other * UPDATE packet for other nodes that are the new owner @@ -2046,8 +2046,8 @@ int clusterProcessPacket(clusterLink *link) { /* Don't bother creating useless objects if there are no * Pub/Sub subscribers. */ - if (dictSize(server.pubsub_channels) || - listLength(server.pubsub_patterns)) + if (dictSize(g_pserver->pubsub_channels) || + listLength(g_pserver->pubsub_patterns)) { channel_len = ntohl(hdr->data.publish.msg.channel_len); message_len = ntohl(hdr->data.publish.msg.message_len); @@ -2069,9 +2069,9 @@ int clusterProcessPacket(clusterLink *link) { * a non zero number of slots, and its currentEpoch is greater or * equal to epoch where this node started the election. */ if (nodeIsMaster(sender) && sender->numslots > 0 && - senderCurrentEpoch >= server.cluster->failover_auth_epoch) + senderCurrentEpoch >= g_pserver->cluster->failover_auth_epoch) { - server.cluster->failover_auth_count++; + g_pserver->cluster->failover_auth_count++; /* Maybe we reached a quorum here, set a flag to make sure * we check ASAP. 
*/ clusterDoBeforeSleep(CLUSTER_TODO_HANDLE_FAILOVER); @@ -2083,8 +2083,8 @@ int clusterProcessPacket(clusterLink *link) { /* Manual failover requested from slaves. Initialize the state * accordingly. */ resetManualFailover(); - server.cluster->mf_end = mstime() + CLUSTER_MF_TIMEOUT; - server.cluster->mf_slave = sender; + g_pserver->cluster->mf_end = mstime() + CLUSTER_MF_TIMEOUT; + g_pserver->cluster->mf_slave = sender; pauseClients(mstime()+(CLUSTER_MF_TIMEOUT*2)); serverLog(LL_WARNING,"Manual failover requested by replica %.40s.", sender->name); @@ -2153,7 +2153,7 @@ void clusterWriteHandler(aeEventLoop *el, int fd, void *privdata, int mask) { } sdsrange(link->sndbuf,nwritten,-1); if (sdslen(link->sndbuf) == 0) - aeDeleteFileEvent(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el, link->fd, AE_WRITABLE); + aeDeleteFileEvent(g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].el, link->fd, AE_WRITABLE); } /* Read data. Try to read the first field of the header first to check the @@ -2229,7 +2229,7 @@ void clusterReadHandler(aeEventLoop *el, int fd, void *privdata, int mask) { * from event handlers that will do stuff with the same link later. 
*/ void clusterSendMessage(clusterLink *link, unsigned char *msg, size_t msglen) { if (sdslen(link->sndbuf) == 0 && msglen != 0) - aeCreateFileEvent(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el,link->fd,AE_WRITABLE|AE_BARRIER, + aeCreateFileEvent(g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].el,link->fd,AE_WRITABLE|AE_BARRIER, clusterWriteHandler,link); link->sndbuf = sdscatlen(link->sndbuf, msg, msglen); @@ -2238,7 +2238,7 @@ void clusterSendMessage(clusterLink *link, unsigned char *msg, size_t msglen) { clusterMsg *hdr = (clusterMsg*) msg; uint16_t type = ntohs(hdr->type); if (type < CLUSTERMSG_TYPE_COUNT) - server.cluster->stats_bus_messages_sent[type]++; + g_pserver->cluster->stats_bus_messages_sent[type]++; } /* Send a message to all the nodes that are part of the cluster having @@ -2251,7 +2251,7 @@ void clusterBroadcastMessage(void *buf, size_t len) { dictIterator *di; dictEntry *de; - di = dictGetSafeIterator(server.cluster->nodes); + di = dictGetSafeIterator(g_pserver->cluster->nodes); while((de = dictNext(di)) != NULL) { clusterNode *node = (clusterNode*)dictGetVal(de); @@ -2290,17 +2290,17 @@ void clusterBuildMessageHdr(clusterMsg *hdr, int type) { * packets to use the specified address for this node. Otherwise if the * first byte is zero, they'll do auto discovery. */ memset(hdr->myip,0,NET_IP_STR_LEN); - if (server.cluster_announce_ip) { - strncpy(hdr->myip,server.cluster_announce_ip,NET_IP_STR_LEN); + if (g_pserver->cluster_announce_ip) { + strncpy(hdr->myip,g_pserver->cluster_announce_ip,NET_IP_STR_LEN); hdr->myip[NET_IP_STR_LEN-1] = '\0'; } /* Handle cluster-announce-port as well. */ - int announced_port = server.cluster_announce_port ? - server.cluster_announce_port : server.port; - int announced_cport = server.cluster_announce_bus_port ? - server.cluster_announce_bus_port : - (server.port + CLUSTER_PORT_INCR); + int announced_port = g_pserver->cluster_announce_port ? 
+ g_pserver->cluster_announce_port : g_pserver->port; + int announced_cport = g_pserver->cluster_announce_bus_port ? + g_pserver->cluster_announce_bus_port : + (g_pserver->port + CLUSTER_PORT_INCR); memcpy(hdr->myslots,master->slots,sizeof(hdr->myslots)); memset(hdr->slaveof,0,CLUSTER_NAMELEN); @@ -2309,21 +2309,21 @@ void clusterBuildMessageHdr(clusterMsg *hdr, int type) { hdr->port = htons(announced_port); hdr->cport = htons(announced_cport); hdr->flags = htons(myself->flags); - hdr->state = server.cluster->state; + hdr->state = g_pserver->cluster->state; /* Set the currentEpoch and configEpochs. */ - hdr->currentEpoch = htonu64(server.cluster->currentEpoch); + hdr->currentEpoch = htonu64(g_pserver->cluster->currentEpoch); hdr->configEpoch = htonu64(master->configEpoch); /* Set the replication offset. */ if (nodeIsSlave(myself)) offset = replicationGetSlaveOffset(getFirstMaster()); else - offset = server.master_repl_offset; + offset = g_pserver->master_repl_offset; hdr->offset = htonu64(offset); /* Set the message flags. */ - if (nodeIsMaster(myself) && server.cluster->mf_end) + if (nodeIsMaster(myself) && g_pserver->cluster->mf_end) hdr->mflags[0] |= CLUSTERMSG_FLAG0_PAUSED; /* Compute the message length for certain messages. For other messages @@ -2378,7 +2378,7 @@ void clusterSendPing(clusterLink *link, int type) { * nodes available minus two (ourself and the node we are sending the * message to). However practically there may be less valid nodes since * nodes in handshake state, disconnected, are not considered. */ - int freshnodes = dictSize(server.cluster->nodes)-2; + int freshnodes = dictSize(g_pserver->cluster->nodes)-2; /* How many gossip sections we want to add? 1/10 of the number of nodes * and anyway at least 3. Why 1/10? 
@@ -2406,13 +2406,13 @@ void clusterSendPing(clusterLink *link, int type) { * Since we have non-voting slaves that lower the probability of an entry * to feature our node, we set the number of entries per packet as * 10% of the total nodes we have. */ - wanted = floor(dictSize(server.cluster->nodes)/10); + wanted = floor(dictSize(g_pserver->cluster->nodes)/10); if (wanted < 3) wanted = 3; if (wanted > freshnodes) wanted = freshnodes; /* Include all the nodes in PFAIL state, so that failure reports are * faster to propagate to go from PFAIL to FAIL state. */ - int pfail_wanted = server.cluster->stats_pfail_nodes; + int pfail_wanted = g_pserver->cluster->stats_pfail_nodes; /* Compute the maxium totlen to allocate our buffer. We'll fix the totlen * later according to the number of gossip sections we really were able @@ -2433,7 +2433,7 @@ void clusterSendPing(clusterLink *link, int type) { /* Populate the gossip fields */ int maxiterations = wanted*3; while(freshnodes > 0 && gossipcount < wanted && maxiterations--) { - dictEntry *de = dictGetRandomKey(server.cluster->nodes); + dictEntry *de = dictGetRandomKey(g_pserver->cluster->nodes); clusterNode *thisNode = (clusterNode*)dictGetVal(de); /* Don't include this node: the whole packet header is about us @@ -2469,7 +2469,7 @@ void clusterSendPing(clusterLink *link, int type) { dictIterator *di; dictEntry *de; - di = dictGetSafeIterator(server.cluster->nodes); + di = dictGetSafeIterator(g_pserver->cluster->nodes); while((de = dictNext(di)) != NULL && pfail_wanted > 0) { clusterNode *node = (clusterNode*)dictGetVal(de); if (node->flags & CLUSTER_NODE_HANDSHAKE) continue; @@ -2516,7 +2516,7 @@ void clusterBroadcastPong(int target) { dictIterator *di; dictEntry *de; - di = dictGetSafeIterator(server.cluster->nodes); + di = dictGetSafeIterator(g_pserver->cluster->nodes); while((de = dictNext(di)) != NULL) { clusterNode *node = (clusterNode*)dictGetVal(de); @@ -2691,7 +2691,7 @@ void clusterRequestFailoverAuth(void) { /* If 
this is a manual failover, set the CLUSTERMSG_FLAG0_FORCEACK bit * in the header to communicate the nodes receiving the message that * they should authorized the failover even if the master is working. */ - if (server.cluster->mf_end) hdr->mflags[0] |= CLUSTERMSG_FLAG0_FORCEACK; + if (g_pserver->cluster->mf_end) hdr->mflags[0] |= CLUSTERMSG_FLAG0_FORCEACK; totlen = sizeof(clusterMsg)-sizeof(union clusterMsgData); hdr->totlen = htonl(totlen); clusterBroadcastMessage(buf,totlen); @@ -2742,21 +2742,21 @@ void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request) { * Note that it is impossible for it to actually be greater since * our currentEpoch was updated as a side effect of receiving this * request, if the request epoch was greater. */ - if (requestCurrentEpoch < server.cluster->currentEpoch) { + if (requestCurrentEpoch < g_pserver->cluster->currentEpoch) { serverLog(LL_WARNING, "Failover auth denied to %.40s: reqEpoch (%llu) < curEpoch(%llu)", node->name, (unsigned long long) requestCurrentEpoch, - (unsigned long long) server.cluster->currentEpoch); + (unsigned long long) g_pserver->cluster->currentEpoch); return; } /* I already voted for this epoch? Return ASAP. */ - if (server.cluster->lastVoteEpoch == server.cluster->currentEpoch) { + if (g_pserver->cluster->lastVoteEpoch == g_pserver->cluster->currentEpoch) { serverLog(LL_WARNING, "Failover auth denied to %.40s: already voted for epoch %llu", node->name, - (unsigned long long) server.cluster->currentEpoch); + (unsigned long long) g_pserver->cluster->currentEpoch); return; } @@ -2785,13 +2785,13 @@ void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request) { /* We did not voted for a slave about this master for two * times the node timeout. This is not strictly needed for correctness * of the algorithm but makes the base case more linear. 
*/ - if (mstime() - node->slaveof->voted_time < server.cluster_node_timeout * 2) + if (mstime() - node->slaveof->voted_time < g_pserver->cluster_node_timeout * 2) { serverLog(LL_WARNING, "Failover auth denied to %.40s: " "can't vote about this master before %lld milliseconds", node->name, - (long long) ((server.cluster_node_timeout*2)- + (long long) ((g_pserver->cluster_node_timeout*2)- (mstime() - node->slaveof->voted_time))); return; } @@ -2801,8 +2801,8 @@ void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request) { * slots in the current configuration. */ for (j = 0; j < CLUSTER_SLOTS; j++) { if (bitmapTestBit(claimed_slots, j) == 0) continue; - if (server.cluster->slots[j] == NULL || - server.cluster->slots[j]->configEpoch <= requestConfigEpoch) + if (g_pserver->cluster->slots[j] == NULL || + g_pserver->cluster->slots[j]->configEpoch <= requestConfigEpoch) { continue; } @@ -2813,18 +2813,18 @@ void clusterSendFailoverAuthIfNeeded(clusterNode *node, clusterMsg *request) { "Failover auth denied to %.40s: " "slot %d epoch (%llu) > reqEpoch (%llu)", node->name, j, - (unsigned long long) server.cluster->slots[j]->configEpoch, + (unsigned long long) g_pserver->cluster->slots[j]->configEpoch, (unsigned long long) requestConfigEpoch); return; } /* We can vote for this slave. 
*/ - server.cluster->lastVoteEpoch = server.cluster->currentEpoch; + g_pserver->cluster->lastVoteEpoch = g_pserver->cluster->currentEpoch; node->slaveof->voted_time = mstime(); clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG|CLUSTER_TODO_FSYNC_CONFIG); clusterSendFailoverAuth(node); serverLog(LL_WARNING, "Failover auth granted to %.40s for epoch %llu", - node->name, (unsigned long long) server.cluster->currentEpoch); + node->name, (unsigned long long) g_pserver->cluster->currentEpoch); } /* This function returns the "rank" of this instance, a slave, in the context @@ -2881,14 +2881,14 @@ int clusterGetSlaveRank(void) { void clusterLogCantFailover(int reason) { const char *msg; static time_t lastlog_time = 0; - mstime_t nolog_fail_time = server.cluster_node_timeout + 5000; + mstime_t nolog_fail_time = g_pserver->cluster_node_timeout + 5000; /* Don't log if we have the same reason for some time. */ - if (reason == server.cluster->cant_failover_reason && + if (reason == g_pserver->cluster->cant_failover_reason && time(NULL)-lastlog_time < CLUSTER_CANT_FAILOVER_RELOG_PERIOD) return; - server.cluster->cant_failover_reason = reason; + g_pserver->cluster->cant_failover_reason = reason; /* We also don't emit any log if the master failed no long ago, the * goal of this function is to log slaves in a stalled condition for @@ -2966,13 +2966,13 @@ void clusterFailoverReplaceYourMaster(void) { */ void clusterHandleSlaveFailover(void) { mstime_t data_age; - mstime_t auth_age = mstime() - server.cluster->failover_auth_time; - int needed_quorum = (server.cluster->size / 2) + 1; - int manual_failover = server.cluster->mf_end != 0 && - server.cluster->mf_can_start; + mstime_t auth_age = mstime() - g_pserver->cluster->failover_auth_time; + int needed_quorum = (g_pserver->cluster->size / 2) + 1; + int manual_failover = g_pserver->cluster->mf_end != 0 && + g_pserver->cluster->mf_can_start; mstime_t auth_timeout, auth_retry_time; - server.cluster->todo_before_sleep &= 
~CLUSTER_TODO_HANDLE_FAILOVER; + g_pserver->cluster->todo_before_sleep &= ~CLUSTER_TODO_HANDLE_FAILOVER; /* Compute the failover timeout (the max time we have to send votes * and wait for replies), and the failover retry time (the time to wait @@ -2981,7 +2981,7 @@ void clusterHandleSlaveFailover(void) { * Timeout is MAX(NODE_TIMEOUT*2,2000) milliseconds. * Retry is two times the Timeout. */ - auth_timeout = server.cluster_node_timeout*2; + auth_timeout = g_pserver->cluster_node_timeout*2; if (auth_timeout < 2000) auth_timeout = 2000; auth_retry_time = auth_timeout*2; @@ -2995,38 +2995,38 @@ void clusterHandleSlaveFailover(void) { if (nodeIsMaster(myself) || myself->slaveof == NULL || (!nodeFailed(myself->slaveof) && !manual_failover) || - (server.cluster_slave_no_failover && !manual_failover) || + (g_pserver->cluster_slave_no_failover && !manual_failover) || myself->slaveof->numslots == 0) { /* There are no reasons to failover, so we set the reason why we * are returning without failing over to NONE. */ - server.cluster->cant_failover_reason = CLUSTER_CANT_FAILOVER_NONE; + g_pserver->cluster->cant_failover_reason = CLUSTER_CANT_FAILOVER_NONE; return; } /* Set data_age to the number of seconds we are disconnected from * the master. */ if (getFirstMaster()->repl_state == REPL_STATE_CONNECTED) { - data_age = (mstime_t)(server.unixtime - getFirstMaster()->master->lastinteraction) + data_age = (mstime_t)(g_pserver->unixtime - getFirstMaster()->master->lastinteraction) * 1000; } else { - data_age = (mstime_t)(server.unixtime - getFirstMaster()->repl_down_since) * 1000; + data_age = (mstime_t)(g_pserver->unixtime - getFirstMaster()->repl_down_since) * 1000; } /* Remove the node timeout from the data age as it is fine that we are * disconnected from our master at least for the time it was down to be * flagged as FAIL, that's the baseline. 
*/ - if (data_age > server.cluster_node_timeout) - data_age -= server.cluster_node_timeout; + if (data_age > g_pserver->cluster_node_timeout) + data_age -= g_pserver->cluster_node_timeout; /* Check if our data is recent enough according to the slave validity * factor configured by the user. * * Check bypassed for manual failovers. */ - if (server.cluster_slave_validity_factor && + if (g_pserver->cluster_slave_validity_factor && data_age > - (((mstime_t)server.repl_ping_slave_period * 1000) + - (server.cluster_node_timeout * server.cluster_slave_validity_factor))) + (((mstime_t)g_pserver->repl_ping_slave_period * 1000) + + (g_pserver->cluster_node_timeout * g_pserver->cluster_slave_validity_factor))) { if (!manual_failover) { clusterLogCantFailover(CLUSTER_CANT_FAILOVER_DATA_AGE); @@ -3037,28 +3037,28 @@ void clusterHandleSlaveFailover(void) { /* If the previous failover attempt timedout and the retry time has * elapsed, we can setup a new one. */ if (auth_age > auth_retry_time) { - server.cluster->failover_auth_time = mstime() + + g_pserver->cluster->failover_auth_time = mstime() + 500 + /* Fixed delay of 500 milliseconds, let FAIL msg propagate. */ random() % 500; /* Random delay between 0 and 500 milliseconds. */ - server.cluster->failover_auth_count = 0; - server.cluster->failover_auth_sent = 0; - server.cluster->failover_auth_rank = clusterGetSlaveRank(); + g_pserver->cluster->failover_auth_count = 0; + g_pserver->cluster->failover_auth_sent = 0; + g_pserver->cluster->failover_auth_rank = clusterGetSlaveRank(); /* We add another delay that is proportional to the slave rank. * Specifically 1 second * rank. This way slaves that have a probably * less updated replication offset, are penalized. */ - server.cluster->failover_auth_time += - server.cluster->failover_auth_rank * 1000; + g_pserver->cluster->failover_auth_time += + g_pserver->cluster->failover_auth_rank * 1000; /* However if this is a manual failover, no delay is needed. 
*/ - if (server.cluster->mf_end) { - server.cluster->failover_auth_time = mstime(); - server.cluster->failover_auth_rank = 0; + if (g_pserver->cluster->mf_end) { + g_pserver->cluster->failover_auth_time = mstime(); + g_pserver->cluster->failover_auth_rank = 0; clusterDoBeforeSleep(CLUSTER_TODO_HANDLE_FAILOVER); } serverLog(LL_WARNING, "Start of election delayed for %lld milliseconds " "(rank #%d, offset %lld).", - server.cluster->failover_auth_time - mstime(), - server.cluster->failover_auth_rank, + g_pserver->cluster->failover_auth_time - mstime(), + g_pserver->cluster->failover_auth_rank, replicationGetSlaveOffset(getFirstMaster())); /* Now that we have a scheduled election, broadcast our offset * to all the other slaves so that they'll updated their offsets @@ -3072,15 +3072,15 @@ void clusterHandleSlaveFailover(void) { * Update the delay if our rank changed. * * Not performed if this is a manual failover. */ - if (server.cluster->failover_auth_sent == 0 && - server.cluster->mf_end == 0) + if (g_pserver->cluster->failover_auth_sent == 0 && + g_pserver->cluster->mf_end == 0) { int newrank = clusterGetSlaveRank(); - if (newrank > server.cluster->failover_auth_rank) { + if (newrank > g_pserver->cluster->failover_auth_rank) { long long added_delay = - (newrank - server.cluster->failover_auth_rank) * 1000; - server.cluster->failover_auth_time += added_delay; - server.cluster->failover_auth_rank = newrank; + (newrank - g_pserver->cluster->failover_auth_rank) * 1000; + g_pserver->cluster->failover_auth_time += added_delay; + g_pserver->cluster->failover_auth_rank = newrank; serverLog(LL_WARNING, "Replica rank updated to #%d, added %lld milliseconds of delay.", newrank, added_delay); @@ -3088,7 +3088,7 @@ void clusterHandleSlaveFailover(void) { } /* Return ASAP if we can't still start the election. 
*/ - if (mstime() < server.cluster->failover_auth_time) { + if (mstime() < g_pserver->cluster->failover_auth_time) { clusterLogCantFailover(CLUSTER_CANT_FAILOVER_WAITING_DELAY); return; } @@ -3100,13 +3100,13 @@ void clusterHandleSlaveFailover(void) { } /* Ask for votes if needed. */ - if (server.cluster->failover_auth_sent == 0) { - server.cluster->currentEpoch++; - server.cluster->failover_auth_epoch = server.cluster->currentEpoch; + if (g_pserver->cluster->failover_auth_sent == 0) { + g_pserver->cluster->currentEpoch++; + g_pserver->cluster->failover_auth_epoch = g_pserver->cluster->currentEpoch; serverLog(LL_WARNING,"Starting a failover election for epoch %llu.", - (unsigned long long) server.cluster->currentEpoch); + (unsigned long long) g_pserver->cluster->currentEpoch); clusterRequestFailoverAuth(); - server.cluster->failover_auth_sent = 1; + g_pserver->cluster->failover_auth_sent = 1; clusterDoBeforeSleep(CLUSTER_TODO_SAVE_CONFIG| CLUSTER_TODO_UPDATE_STATE| CLUSTER_TODO_FSYNC_CONFIG); @@ -3114,15 +3114,15 @@ void clusterHandleSlaveFailover(void) { } /* Check if we reached the quorum. */ - if (server.cluster->failover_auth_count >= needed_quorum) { + if (g_pserver->cluster->failover_auth_count >= needed_quorum) { /* We have the quorum, we can finally failover the master. */ serverLog(LL_WARNING, "Failover election won: I'm the new master."); /* Update my configEpoch to the epoch of the election. */ - if (myself->configEpoch < server.cluster->failover_auth_epoch) { - myself->configEpoch = server.cluster->failover_auth_epoch; + if (myself->configEpoch < g_pserver->cluster->failover_auth_epoch) { + myself->configEpoch = g_pserver->cluster->failover_auth_epoch; serverLog(LL_WARNING, "configEpoch set to %llu after successful failover", (unsigned long long) myself->configEpoch); @@ -3169,7 +3169,7 @@ void clusterHandleSlaveMigration(int max_slaves) { dictEntry *de; /* Step 1: Don't migrate if the cluster state is not ok. 
*/ - if (server.cluster->state != CLUSTER_OK) return; + if (g_pserver->cluster->state != CLUSTER_OK) return; /* Step 2: Don't migrate if my master will not be left with at least * 'migration-barrier' slaves after my migration. */ @@ -3177,7 +3177,7 @@ void clusterHandleSlaveMigration(int max_slaves) { for (j = 0; j < mymaster->numslaves; j++) if (!nodeFailed(mymaster->slaves[j]) && !nodeTimedOut(mymaster->slaves[j])) okslaves++; - if (okslaves <= server.cluster_migration_barrier) return; + if (okslaves <= g_pserver->cluster_migration_barrier) return; /* Step 3: Identify a candidate for migration, and check if among the * masters with the greatest number of ok slaves, I'm the one with the @@ -3190,7 +3190,7 @@ void clusterHandleSlaveMigration(int max_slaves) { * slaves migrating at the same time), but this is unlikely to * happen, and harmless when happens. */ candidate = myself; - di = dictGetSafeIterator(server.cluster->nodes); + di = dictGetSafeIterator(g_pserver->cluster->nodes); while((de = dictNext(di)) != NULL) { clusterNode *node = (clusterNode*)dictGetVal(de); int okslaves = 0, is_orphaned = 1; @@ -3239,7 +3239,7 @@ void clusterHandleSlaveMigration(int max_slaves) { * the old master to the new one. */ if (target && candidate == myself && (mstime()-target->orphaned_time) > CLUSTER_SLAVE_MIGRATION_DELAY && - !(server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_FAILOVER)) + !(g_pserver->cluster_module_flags & CLUSTER_MODULE_FLAG_NO_FAILOVER)) { serverLog(LL_WARNING,"Migrating to orphaned master %.40s", target->name); @@ -3282,19 +3282,19 @@ void clusterHandleSlaveMigration(int max_slaves) { * The function can be used both to initialize the manual failover state at * startup or to abort a manual failover in progress. 
*/ void resetManualFailover(void) { - if (server.cluster->mf_end && clientsArePaused()) { - server.clients_pause_end_time = 0; + if (g_pserver->cluster->mf_end && clientsArePaused()) { + g_pserver->clients_pause_end_time = 0; clientsArePaused(); /* Just use the side effect of the function. */ } - server.cluster->mf_end = 0; /* No manual failover in progress. */ - server.cluster->mf_can_start = 0; - server.cluster->mf_slave = NULL; - server.cluster->mf_master_offset = 0; + g_pserver->cluster->mf_end = 0; /* No manual failover in progress. */ + g_pserver->cluster->mf_can_start = 0; + g_pserver->cluster->mf_slave = NULL; + g_pserver->cluster->mf_master_offset = 0; } /* If a manual failover timed out, abort it. */ void manualFailoverCheckTimeout(void) { - if (server.cluster->mf_end && server.cluster->mf_end < mstime()) { + if (g_pserver->cluster->mf_end && g_pserver->cluster->mf_end < mstime()) { serverLog(LL_WARNING,"Manual failover timed out."); resetManualFailover(); } @@ -3304,18 +3304,18 @@ void manualFailoverCheckTimeout(void) { * forward with a manual failover state machine. */ void clusterHandleManualFailover(void) { /* Return ASAP if no manual failover is in progress. */ - if (server.cluster->mf_end == 0) return; + if (g_pserver->cluster->mf_end == 0) return; /* If mf_can_start is non-zero, the failover was already triggered so the * next steps are performed by clusterHandleSlaveFailover(). */ - if (server.cluster->mf_can_start) return; + if (g_pserver->cluster->mf_can_start) return; - if (server.cluster->mf_master_offset == 0) return; /* Wait for offset... */ + if (g_pserver->cluster->mf_master_offset == 0) return; /* Wait for offset... */ - if (server.cluster->mf_master_offset == replicationGetSlaveOffset(getFirstMaster())) { + if (g_pserver->cluster->mf_master_offset == replicationGetSlaveOffset(getFirstMaster())) { /* Our replication offset matches the master replication offset * announced after clients were paused. We can start the failover. 
*/ - server.cluster->mf_can_start = 1; + g_pserver->cluster->mf_can_start = 1; serverLog(LL_WARNING, "All master replication stream processed, " "manual failover can start."); @@ -3346,7 +3346,7 @@ void clusterCron(void) { * if the option changed to reflect this into myself->ip. */ { static char *prev_ip = NULL; - char *curr_ip = server.cluster_announce_ip; + char *curr_ip = g_pserver->cluster_announce_ip; int changed = 0; if (prev_ip == NULL && curr_ip != NULL) changed = 1; @@ -3362,7 +3362,7 @@ void clusterCron(void) { * duplicating the string. This way later we can check if * the address really changed. */ prev_ip = zstrdup(prev_ip); - strncpy(myself->ip,server.cluster_announce_ip,NET_IP_STR_LEN); + strncpy(myself->ip,g_pserver->cluster_announce_ip,NET_IP_STR_LEN); myself->ip[NET_IP_STR_LEN-1] = '\0'; } else { myself->ip[0] = '\0'; /* Force autodetection. */ @@ -3374,7 +3374,7 @@ void clusterCron(void) { * not turned into a normal node is removed from the nodes. Usually it is * just the NODE_TIMEOUT value, but when NODE_TIMEOUT is too small we use * the value of 1 second. */ - handshake_timeout = server.cluster_node_timeout; + handshake_timeout = g_pserver->cluster_node_timeout; if (handshake_timeout < 1000) handshake_timeout = 1000; /* Update myself flags. */ @@ -3383,8 +3383,8 @@ void clusterCron(void) { /* Check if we have disconnected nodes and re-establish the connection. * Also update a few stats while we are here, that can be used to make * better decisions in other part of the code. 
*/ - di = dictGetSafeIterator(server.cluster->nodes); - server.cluster->stats_pfail_nodes = 0; + di = dictGetSafeIterator(g_pserver->cluster->nodes); + g_pserver->cluster->stats_pfail_nodes = 0; while((de = dictNext(di)) != NULL) { clusterNode *node = (clusterNode*)dictGetVal(de); @@ -3393,7 +3393,7 @@ void clusterCron(void) { if (node->flags & (CLUSTER_NODE_MYSELF|CLUSTER_NODE_NOADDR)) continue; if (node->flags & CLUSTER_NODE_PFAIL) - server.cluster->stats_pfail_nodes++; + g_pserver->cluster->stats_pfail_nodes++; /* A Node in HANDSHAKE state has a limited lifespan equal to the * configured node timeout. */ @@ -3407,7 +3407,7 @@ void clusterCron(void) { mstime_t old_ping_sent; clusterLink *link; - fd = anetTcpNonBlockBindConnect(server.neterr, node->ip, + fd = anetTcpNonBlockBindConnect(g_pserver->neterr, node->ip, node->cport, NET_FIRST_BIND_ADDR); if (fd == -1) { /* We got a synchronous error from connect before @@ -3418,13 +3418,13 @@ void clusterCron(void) { if (node->ping_sent == 0) node->ping_sent = mstime(); serverLog(LL_DEBUG, "Unable to connect to " "Cluster Node [%s]:%d -> %s", node->ip, - node->cport, server.neterr); + node->cport, g_pserver->neterr); continue; } link = createClusterLink(node); link->fd = fd; node->link = link; - aeCreateFileEvent(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el,link->fd,AE_READABLE, + aeCreateFileEvent(g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].el,link->fd,AE_READABLE, clusterReadHandler,link); /* Queue a PING in the new connection ASAP: this is crucial * to avoid false positives in failure detection. @@ -3462,7 +3462,7 @@ void clusterCron(void) { /* Check a few random nodes and ping the one with the oldest * pong_received time. */ for (j = 0; j < 5; j++) { - de = dictGetRandomKey(server.cluster->nodes); + de = dictGetRandomKey(g_pserver->cluster->nodes); clusterNode *thisNode = (clusterNode*)dictGetVal(de); /* Don't ping nodes disconnected or with a ping currently active. 
*/ @@ -3489,7 +3489,7 @@ void clusterCron(void) { orphaned_masters = 0; max_slaves = 0; this_slaves = 0; - di = dictGetSafeIterator(server.cluster->nodes); + di = dictGetSafeIterator(g_pserver->cluster->nodes); while((de = dictNext(di)) != NULL) { clusterNode *node = (clusterNode*)dictGetVal(de); now = mstime(); /* Use an updated time at every iteration. */ @@ -3522,11 +3522,11 @@ void clusterCron(void) { * issue even if the node is alive. */ if (node->link && /* is connected */ now - node->link->ctime > - server.cluster_node_timeout && /* was not already reconnected */ + g_pserver->cluster_node_timeout && /* was not already reconnected */ node->ping_sent && /* we already sent a ping */ node->pong_received < node->ping_sent && /* still waiting pong */ /* and we are waiting for the pong more than timeout/2 */ - now - node->ping_sent > server.cluster_node_timeout/2) + now - node->ping_sent > g_pserver->cluster_node_timeout/2) { /* Disconnect the link, it will be reconnected automatically. */ freeClusterLink(node->link); @@ -3538,7 +3538,7 @@ void clusterCron(void) { * a too big delay. */ if (node->link && node->ping_sent == 0 && - (now - node->pong_received) > server.cluster_node_timeout/2) + (now - node->pong_received) > g_pserver->cluster_node_timeout/2) { clusterSendPing(node->link, CLUSTERMSG_TYPE_PING); continue; @@ -3546,9 +3546,9 @@ void clusterCron(void) { /* If we are a master and one of the slaves requested a manual * failover, ping it continuously. */ - if (server.cluster->mf_end && + if (g_pserver->cluster->mf_end && nodeIsMaster(myself) && - server.cluster->mf_slave == node && + g_pserver->cluster->mf_slave == node && node->link) { clusterSendPing(node->link, CLUSTERMSG_TYPE_PING); @@ -3563,7 +3563,7 @@ void clusterCron(void) { * code at all. */ delay = now - node->ping_sent; - if (delay > server.cluster_node_timeout) { + if (delay > g_pserver->cluster_node_timeout) { /* Timeout reached. 
Set the node as possibly failing if it is * not already in this state. */ if (!(node->flags & (CLUSTER_NODE_PFAIL|CLUSTER_NODE_FAIL))) { @@ -3580,7 +3580,7 @@ void clusterCron(void) { * enable it if we know the address of our master and it appears to * be up. */ if (nodeIsSlave(myself) && - listLength(server.masters) == 0 && + listLength(g_pserver->masters) == 0 && myself->slaveof && nodeHasAddr(myself->slaveof)) { @@ -3592,7 +3592,7 @@ void clusterCron(void) { if (nodeIsSlave(myself)) { clusterHandleManualFailover(); - if (!(server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_FAILOVER)) + if (!(g_pserver->cluster_module_flags & CLUSTER_MODULE_FLAG_NO_FAILOVER)) clusterHandleSlaveFailover(); /* If there are orphaned slaves, and we are a slave among the masters * with the max number of non-failing slaves, consider migrating to @@ -3603,7 +3603,7 @@ void clusterCron(void) { clusterHandleSlaveMigration(max_slaves); } - if (update_state || server.cluster->state == CLUSTER_FAIL) + if (update_state || g_pserver->cluster->state == CLUSTER_FAIL) clusterUpdateState(); } @@ -3615,27 +3615,27 @@ void clusterCron(void) { void clusterBeforeSleep(void) { /* Handle failover, this is needed when it is likely that there is already * the quorum from masters in order to react fast. */ - if (server.cluster->todo_before_sleep & CLUSTER_TODO_HANDLE_FAILOVER) + if (g_pserver->cluster->todo_before_sleep & CLUSTER_TODO_HANDLE_FAILOVER) clusterHandleSlaveFailover(); /* Update the cluster state. */ - if (server.cluster->todo_before_sleep & CLUSTER_TODO_UPDATE_STATE) + if (g_pserver->cluster->todo_before_sleep & CLUSTER_TODO_UPDATE_STATE) clusterUpdateState(); /* Save the config, possibly using fsync. 
*/ - if (server.cluster->todo_before_sleep & CLUSTER_TODO_SAVE_CONFIG) { - int fsync = server.cluster->todo_before_sleep & + if (g_pserver->cluster->todo_before_sleep & CLUSTER_TODO_SAVE_CONFIG) { + int fsync = g_pserver->cluster->todo_before_sleep & CLUSTER_TODO_FSYNC_CONFIG; clusterSaveConfigOrDie(fsync); } /* Reset our flags (not strictly needed since every single function * called for flags set should be able to clear its flag). */ - server.cluster->todo_before_sleep = 0; + g_pserver->cluster->todo_before_sleep = 0; } void clusterDoBeforeSleep(int flags) { - server.cluster->todo_before_sleep |= flags; + g_pserver->cluster->todo_before_sleep |= flags; } /* ----------------------------------------------------------------------------- @@ -3668,7 +3668,7 @@ void bitmapClearBit(unsigned char *bitmap, int pos) { * Otherwise zero is returned. Used by clusterNodeSetSlotBit() to set the * MIGRATE_TO flag the when a master gets the first slot. */ int clusterMastersHaveSlaves(void) { - dictIterator *di = dictGetSafeIterator(server.cluster->nodes); + dictIterator *di = dictGetSafeIterator(g_pserver->cluster->nodes); dictEntry *de; int slaves = 0; while((de = dictNext(di)) != NULL) { @@ -3724,9 +3724,9 @@ int clusterNodeGetSlotBit(clusterNode *n, int slot) { * If the slot is already assigned to another instance this is considered * an error and C_ERR is returned. */ int clusterAddSlot(clusterNode *n, int slot) { - if (server.cluster->slots[slot]) return C_ERR; + if (g_pserver->cluster->slots[slot]) return C_ERR; clusterNodeSetSlotBit(n,slot); - server.cluster->slots[slot] = n; + g_pserver->cluster->slots[slot] = n; return C_OK; } @@ -3734,11 +3734,11 @@ int clusterAddSlot(clusterNode *n, int slot) { * Returns C_OK if the slot was assigned, otherwise if the slot was * already unassigned C_ERR is returned. 
*/ int clusterDelSlot(int slot) { - clusterNode *n = server.cluster->slots[slot]; + clusterNode *n = g_pserver->cluster->slots[slot]; if (!n) return C_ERR; serverAssert(clusterNodeClearSlotBit(n,slot) == 1); - server.cluster->slots[slot] = NULL; + g_pserver->cluster->slots[slot] = NULL; return C_OK; } @@ -3759,10 +3759,10 @@ int clusterDelNodeSlots(clusterNode *node) { /* Clear the migrating / importing state for all the slots. * This is useful at initialization and when turning a master into slave. */ void clusterCloseAllSlots(void) { - memset(server.cluster->migrating_slots_to,0, - sizeof(server.cluster->migrating_slots_to)); - memset(server.cluster->importing_slots_from,0, - sizeof(server.cluster->importing_slots_from)); + memset(g_pserver->cluster->migrating_slots_to,0, + sizeof(g_pserver->cluster->migrating_slots_to)); + memset(g_pserver->cluster->importing_slots_from,0, + sizeof(g_pserver->cluster->importing_slots_from)); } /* ----------------------------------------------------------------------------- @@ -3783,7 +3783,7 @@ void clusterUpdateState(void) { static mstime_t among_minority_time; static mstime_t first_call_time = 0; - server.cluster->todo_before_sleep &= ~CLUSTER_TODO_UPDATE_STATE; + g_pserver->cluster->todo_before_sleep &= ~CLUSTER_TODO_UPDATE_STATE; /* If this is a master node, wait some time before turning the state * into OK, since it is not a good idea to rejoin the cluster as a writable @@ -3793,7 +3793,7 @@ void clusterUpdateState(void) { * to don't count the DB loading time. */ if (first_call_time == 0) first_call_time = mstime(); if (nodeIsMaster(myself) && - server.cluster->state == CLUSTER_FAIL && + g_pserver->cluster->state == CLUSTER_FAIL && mstime() - first_call_time < CLUSTER_WRITABLE_DELAY) return; /* Start assuming the state is OK. We'll turn it into FAIL if there @@ -3801,10 +3801,10 @@ void clusterUpdateState(void) { new_state = CLUSTER_OK; /* Check if all the slots are covered. 
*/ - if (server.cluster_require_full_coverage) { + if (g_pserver->cluster_require_full_coverage) { for (j = 0; j < CLUSTER_SLOTS; j++) { - if (server.cluster->slots[j] == NULL || - server.cluster->slots[j]->flags & (CLUSTER_NODE_FAIL)) + if (g_pserver->cluster->slots[j] == NULL || + g_pserver->cluster->slots[j]->flags & (CLUSTER_NODE_FAIL)) { new_state = CLUSTER_FAIL; break; @@ -3821,13 +3821,13 @@ void clusterUpdateState(void) { dictIterator *di; dictEntry *de; - server.cluster->size = 0; - di = dictGetSafeIterator(server.cluster->nodes); + g_pserver->cluster->size = 0; + di = dictGetSafeIterator(g_pserver->cluster->nodes); while((de = dictNext(di)) != NULL) { clusterNode *node = (clusterNode*)dictGetVal(de); if (nodeIsMaster(node) && node->numslots) { - server.cluster->size++; + g_pserver->cluster->size++; if ((node->flags & (CLUSTER_NODE_FAIL|CLUSTER_NODE_PFAIL)) == 0) reachable_masters++; } @@ -3838,7 +3838,7 @@ void clusterUpdateState(void) { /* If we are in a minority partition, change the cluster state * to FAIL. */ { - int needed_quorum = (server.cluster->size / 2) + 1; + int needed_quorum = (g_pserver->cluster->size / 2) + 1; if (reachable_masters < needed_quorum) { new_state = CLUSTER_FAIL; @@ -3847,8 +3847,8 @@ void clusterUpdateState(void) { } /* Log a state change */ - if (new_state != server.cluster->state) { - mstime_t rejoin_delay = server.cluster_node_timeout; + if (new_state != g_pserver->cluster->state) { + mstime_t rejoin_delay = g_pserver->cluster_node_timeout; /* If the instance is a master and was partitioned away with the * minority, don't let it accept queries for some time after the @@ -3869,7 +3869,7 @@ void clusterUpdateState(void) { /* Change the state and log the event. */ serverLog(LL_WARNING,"Cluster state changed: %s", new_state == CLUSTER_OK ? 
"ok" : "fail"); - server.cluster->state = new_state; + g_pserver->cluster->state = new_state; } } @@ -3901,7 +3901,7 @@ int verifyClusterConfigWithData(void) { /* Return ASAP if a module disabled cluster redirections. In that case * every master can store keys about every possible hash slot. */ - if (server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_REDIRECTION) + if (g_pserver->cluster_module_flags & CLUSTER_MODULE_FLAG_NO_REDIRECTION) return C_OK; /* If this node is a slave, don't perform the check at all as we @@ -3909,8 +3909,8 @@ int verifyClusterConfigWithData(void) { if (nodeIsSlave(myself)) return C_OK; /* Make sure we only have keys in DB0. */ - for (j = 1; j < server.dbnum; j++) { - if (dictSize(server.db[j].pdict)) return C_ERR; + for (j = 1; j < cserver.dbnum; j++) { + if (dictSize(g_pserver->db[j].pdict)) return C_ERR; } /* Check that all the slots we see populated memory have a corresponding @@ -3920,8 +3920,8 @@ int verifyClusterConfigWithData(void) { /* Check if we are assigned to this slot or if we are importing it. * In both cases check the next slot as the configuration makes * sense. */ - if (server.cluster->slots[j] == myself || - server.cluster->importing_slots_from[j] != NULL) continue; + if (g_pserver->cluster->slots[j] == myself || + g_pserver->cluster->importing_slots_from[j] != NULL) continue; /* If we are here data and cluster config don't agree, and we have * slot 'j' populated even if we are not importing it, nor we are @@ -3929,7 +3929,7 @@ int verifyClusterConfigWithData(void) { update_config++; /* Case A: slot is unassigned. Take responsibility for it. */ - if (server.cluster->slots[j] == NULL) { + if (g_pserver->cluster->slots[j] == NULL) { serverLog(LL_WARNING, "I have keys for unassigned slot %d. " "Taking responsibility for it.",j); clusterAddSlot(myself,j); @@ -3937,7 +3937,7 @@ int verifyClusterConfigWithData(void) { serverLog(LL_WARNING, "I have keys for slot %d, but the slot is " "assigned to another node. 
" "Setting it to importing state.",j); - server.cluster->importing_slots_from[j] = server.cluster->slots[j]; + g_pserver->cluster->importing_slots_from[j] = g_pserver->cluster->slots[j]; } } if (update_config) clusterSaveConfigOrDie(1); @@ -4060,12 +4060,12 @@ sds clusterGenNodeDescription(clusterNode *node) { * instances. */ if (node->flags & CLUSTER_NODE_MYSELF) { for (j = 0; j < CLUSTER_SLOTS; j++) { - if (server.cluster->migrating_slots_to[j]) { + if (g_pserver->cluster->migrating_slots_to[j]) { ci = sdscatprintf(ci," [%d->-%.40s]",j, - server.cluster->migrating_slots_to[j]->name); - } else if (server.cluster->importing_slots_from[j]) { + g_pserver->cluster->migrating_slots_to[j]->name); + } else if (g_pserver->cluster->importing_slots_from[j]) { ci = sdscatprintf(ci," [%d-<-%.40s]",j, - server.cluster->importing_slots_from[j]->name); + g_pserver->cluster->importing_slots_from[j]->name); } } } @@ -4089,7 +4089,7 @@ sds clusterGenNodesDescription(int filter) { dictIterator *di; dictEntry *de; - di = dictGetSafeIterator(server.cluster->nodes); + di = dictGetSafeIterator(g_pserver->cluster->nodes); while((de = dictNext(di)) != NULL) { clusterNode *node = (clusterNode*)dictGetVal(de); @@ -4151,7 +4151,7 @@ void clusterReplyMultiBulkSlots(client *c) { void *slot_replylen = addReplyDeferredLen(c); dictEntry *de; - dictIterator *di = dictGetSafeIterator(server.cluster->nodes); + dictIterator *di = dictGetSafeIterator(g_pserver->cluster->nodes); while((de = dictNext(di)) != NULL) { clusterNode *node = (clusterNode*)dictGetVal(de); int j = 0, start = -1; @@ -4210,7 +4210,7 @@ void clusterReplyMultiBulkSlots(client *c) { } void clusterCommand(client *c) { - if (server.cluster_enabled == 0) { + if (g_pserver->cluster_enabled == 0) { addReplyError(c,"This instance has cluster support disabled"); return; } @@ -4286,7 +4286,7 @@ NULL clusterReplyMultiBulkSlots(c); } else if (!strcasecmp(szFromObj(c->argv[1]),"flushslots") && c->argc == 2) { /* CLUSTER FLUSHSLOTS */ - if 
(dictSize(server.db[0].pdict) != 0) { + if (dictSize(g_pserver->db[0].pdict) != 0) { addReplyError(c,"DB must be empty to perform CLUSTER FLUSHSLOTS."); return; } @@ -4310,11 +4310,11 @@ NULL zfree(slots); return; } - if (del && server.cluster->slots[slot] == NULL) { + if (del && g_pserver->cluster->slots[slot] == NULL) { addReplyErrorFormat(c,"Slot %d is already unassigned", slot); zfree(slots); return; - } else if (!del && server.cluster->slots[slot]) { + } else if (!del && g_pserver->cluster->slots[slot]) { addReplyErrorFormat(c,"Slot %d is already busy", slot); zfree(slots); return; @@ -4332,8 +4332,8 @@ NULL /* If this slot was set as importing we can clear this * state as now we are the real owner of the slot. */ - if (server.cluster->importing_slots_from[j]) - server.cluster->importing_slots_from[j] = NULL; + if (g_pserver->cluster->importing_slots_from[j]) + g_pserver->cluster->importing_slots_from[j] = NULL; retval = del ? clusterDelSlot(j) : clusterAddSlot(myself,j); @@ -4359,7 +4359,7 @@ NULL if ((slot = getSlotOrReply(c,c->argv[2])) == -1) return; if (!strcasecmp(szFromObj(c->argv[3]),"migrating") && c->argc == 5) { - if (server.cluster->slots[slot] != myself) { + if (g_pserver->cluster->slots[slot] != myself) { addReplyErrorFormat(c,"I'm not the owner of hash slot %u",slot); return; } @@ -4368,9 +4368,9 @@ NULL (char*)ptrFromObj(c->argv[4])); return; } - server.cluster->migrating_slots_to[slot] = n; + g_pserver->cluster->migrating_slots_to[slot] = n; } else if (!strcasecmp(szFromObj(c->argv[3]),"importing") && c->argc == 5) { - if (server.cluster->slots[slot] == myself) { + if (g_pserver->cluster->slots[slot] == myself) { addReplyErrorFormat(c, "I'm already the owner of hash slot %u",slot); return; @@ -4380,11 +4380,11 @@ NULL (char*)ptrFromObj(c->argv[4])); return; } - server.cluster->importing_slots_from[slot] = n; + g_pserver->cluster->importing_slots_from[slot] = n; } else if (!strcasecmp(szFromObj(c->argv[3]),"stable") && c->argc == 4) { /* 
CLUSTER SETSLOT STABLE */ - server.cluster->importing_slots_from[slot] = NULL; - server.cluster->migrating_slots_to[slot] = NULL; + g_pserver->cluster->importing_slots_from[slot] = NULL; + g_pserver->cluster->migrating_slots_to[slot] = NULL; } else if (!strcasecmp(szFromObj(c->argv[3]),"node") && c->argc == 5) { /* CLUSTER SETSLOT NODE */ clusterNode *n = clusterLookupNode(szFromObj(c->argv[4])); @@ -4396,7 +4396,7 @@ NULL } /* If this hash slot was served by 'myself' before to switch * make sure there are no longer local keys for this hash slot. */ - if (server.cluster->slots[slot] == myself && n != myself) { + if (g_pserver->cluster->slots[slot] == myself && n != myself) { if (countKeysInSlot(slot) != 0) { addReplyErrorFormat(c, "Can't assign hashslot %d to a different node " @@ -4408,13 +4408,13 @@ NULL * for it assigning the slot to another node will clear * the migratig status. */ if (countKeysInSlot(slot) == 0 && - server.cluster->migrating_slots_to[slot]) - server.cluster->migrating_slots_to[slot] = NULL; + g_pserver->cluster->migrating_slots_to[slot]) + g_pserver->cluster->migrating_slots_to[slot] = NULL; /* If this node was importing this slot, assigning the slot to * itself also clears the importing status. 
*/ if (n == myself && - server.cluster->importing_slots_from[slot]) + g_pserver->cluster->importing_slots_from[slot]) { /* This slot was manually migrated, set this node configEpoch * to a new epoch so that the new version can be propagated @@ -4429,7 +4429,7 @@ NULL serverLog(LL_WARNING, "configEpoch updated after importing slot %d", slot); } - server.cluster->importing_slots_from[slot] = NULL; + g_pserver->cluster->importing_slots_from[slot] = NULL; } clusterDelSlot(slot); clusterAddSlot(n,slot); @@ -4455,7 +4455,7 @@ NULL int j; for (j = 0; j < CLUSTER_SLOTS; j++) { - clusterNode *n = server.cluster->slots[j]; + clusterNode *n = g_pserver->cluster->slots[j]; if (n == NULL) continue; slots_assigned++; @@ -4481,14 +4481,14 @@ NULL "cluster_size:%d\r\n" "cluster_current_epoch:%llu\r\n" "cluster_my_epoch:%llu\r\n" - , statestr[server.cluster->state], + , statestr[g_pserver->cluster->state], slots_assigned, slots_ok, slots_pfail, slots_fail, - dictSize(server.cluster->nodes), - server.cluster->size, - (unsigned long long) server.cluster->currentEpoch, + dictSize(g_pserver->cluster->nodes), + g_pserver->cluster->size, + (unsigned long long) g_pserver->cluster->currentEpoch, (unsigned long long) myepoch ); @@ -4497,23 +4497,23 @@ NULL long long tot_msg_received = 0; for (int i = 0; i < CLUSTERMSG_TYPE_COUNT; i++) { - if (server.cluster->stats_bus_messages_sent[i] == 0) continue; - tot_msg_sent += server.cluster->stats_bus_messages_sent[i]; + if (g_pserver->cluster->stats_bus_messages_sent[i] == 0) continue; + tot_msg_sent += g_pserver->cluster->stats_bus_messages_sent[i]; info = sdscatprintf(info, "cluster_stats_messages_%s_sent:%lld\r\n", clusterGetMessageTypeString(i), - server.cluster->stats_bus_messages_sent[i]); + g_pserver->cluster->stats_bus_messages_sent[i]); } info = sdscatprintf(info, "cluster_stats_messages_sent:%lld\r\n", tot_msg_sent); for (int i = 0; i < CLUSTERMSG_TYPE_COUNT; i++) { - if (server.cluster->stats_bus_messages_received[i] == 0) continue; - 
tot_msg_received += server.cluster->stats_bus_messages_received[i]; + if (g_pserver->cluster->stats_bus_messages_received[i] == 0) continue; + tot_msg_received += g_pserver->cluster->stats_bus_messages_received[i]; info = sdscatprintf(info, "cluster_stats_messages_%s_received:%lld\r\n", clusterGetMessageTypeString(i), - server.cluster->stats_bus_messages_received[i]); + g_pserver->cluster->stats_bus_messages_received[i]); } info = sdscatprintf(info, "cluster_stats_messages_received:%lld\r\n", tot_msg_received); @@ -4621,7 +4621,7 @@ NULL * slots nor keys to accept to replicate some other node. * Slaves can switch to another master without issues. */ if (nodeIsMaster(myself) && - (myself->numslots != 0 || dictSize(server.db[0].pdict) != 0)) { + (myself->numslots != 0 || dictSize(g_pserver->db[0].pdict) != 0)) { addReplyError(c, "To set a master the node must be empty and " "without assigned slots."); @@ -4701,7 +4701,7 @@ NULL return; } resetManualFailover(); - server.cluster->mf_end = mstime() + CLUSTER_MF_TIMEOUT; + g_pserver->cluster->mf_end = mstime() + CLUSTER_MF_TIMEOUT; if (takeover) { /* A takeover does not perform any initial check. It just @@ -4716,7 +4716,7 @@ NULL * master to agree about the offset. We just failover taking over * it without coordination. 
*/ serverLog(LL_WARNING,"Forced failover user request accepted."); - server.cluster->mf_can_start = 1; + g_pserver->cluster->mf_can_start = 1; } else { serverLog(LL_WARNING,"Manual failover user request accepted."); clusterSendMFStart(myself->slaveof); @@ -4738,7 +4738,7 @@ NULL if (epoch < 0) { addReplyErrorFormat(c,"Invalid config epoch specified: %lld",epoch); - } else if (dictSize(server.cluster->nodes) > 1) { + } else if (dictSize(g_pserver->cluster->nodes) > 1) { addReplyError(c,"The user can assign a config epoch only when the " "node does not know any other node."); } else if (myself->configEpoch != 0) { @@ -4749,8 +4749,8 @@ NULL "configEpoch set to %llu via CLUSTER SET-CONFIG-EPOCH", (unsigned long long) myself->configEpoch); - if (server.cluster->currentEpoch < (uint64_t)epoch) - server.cluster->currentEpoch = epoch; + if (g_pserver->cluster->currentEpoch < (uint64_t)epoch) + g_pserver->cluster->currentEpoch = epoch; /* No need to fsync the config here since in the unlucky event * of a failure to persist the config, the conflict resolution code * will assign an unique config to this node. */ @@ -4936,7 +4936,7 @@ void restoreCommand(client *c) { rioInitWithBuffer(&payload,szFromObj(c->argv[3])); if (((type = rdbLoadObjectType(&payload)) == -1) || - ((obj = rdbLoadObject(type,&payload,c->argv[1])) == NULL)) + ((obj = rdbLoadObject(type,&payload,c->argv[1], OBJ_MVCC_INVALID)) == NULL)) { addReplyError(c,"Bad data format"); return; @@ -4954,7 +4954,7 @@ void restoreCommand(client *c) { objectSetLRUOrLFU(obj,lfu_freq,lru_idle,lru_clock); signalModifiedKey(c->db,c->argv[1]); addReply(c,shared.ok); - server.dirty++; + g_pserver->dirty++; } /* MIGRATE socket cache implementation. 
@@ -4992,33 +4992,33 @@ migrateCachedSocket* migrateGetSocket(client *c, robj *host, robj *port, long ti name = sdscatlen(name,ptrFromObj(host),sdslen(szFromObj(host))); name = sdscatlen(name,":",1); name = sdscatlen(name,ptrFromObj(port),sdslen(szFromObj(port))); - cs = (migrateCachedSocket*)dictFetchValue(server.migrate_cached_sockets,name); + cs = (migrateCachedSocket*)dictFetchValue(g_pserver->migrate_cached_sockets,name); if (cs) { sdsfree(name); - cs->last_use_time = server.unixtime; + cs->last_use_time = g_pserver->unixtime; return cs; } /* No cached socket, create one. */ - if (dictSize(server.migrate_cached_sockets) == MIGRATE_SOCKET_CACHE_ITEMS) { + if (dictSize(g_pserver->migrate_cached_sockets) == MIGRATE_SOCKET_CACHE_ITEMS) { /* Too many items, drop one at random. */ - dictEntry *de = dictGetRandomKey(server.migrate_cached_sockets); + dictEntry *de = dictGetRandomKey(g_pserver->migrate_cached_sockets); cs = (migrateCachedSocket*)dictGetVal(de); close(cs->fd); zfree(cs); - dictDelete(server.migrate_cached_sockets,dictGetKey(de)); + dictDelete(g_pserver->migrate_cached_sockets,dictGetKey(de)); } /* Create the socket */ - fd = anetTcpNonBlockConnect(server.neterr,szFromObj(c->argv[1]), + fd = anetTcpNonBlockConnect(g_pserver->neterr,szFromObj(c->argv[1]), atoi(szFromObj(c->argv[2]))); if (fd == -1) { sdsfree(name); addReplyErrorFormat(c,"Can't connect to target node: %s", - server.neterr); + g_pserver->neterr); return NULL; } - anetEnableTcpNoDelay(server.neterr,fd); + anetEnableTcpNoDelay(g_pserver->neterr,fd); /* Check if it connects within the specified timeout. 
*/ if ((aeWait(fd,AE_WRITABLE,timeout) & AE_WRITABLE) == 0) { @@ -5033,8 +5033,8 @@ migrateCachedSocket* migrateGetSocket(client *c, robj *host, robj *port, long ti cs = (migrateCachedSocket*)zmalloc(sizeof(*cs), MALLOC_LOCAL); cs->fd = fd; cs->last_dbid = -1; - cs->last_use_time = server.unixtime; - dictAdd(server.migrate_cached_sockets,name,cs); + cs->last_use_time = g_pserver->unixtime; + dictAdd(g_pserver->migrate_cached_sockets,name,cs); return cs; } @@ -5046,7 +5046,7 @@ void migrateCloseSocket(robj *host, robj *port) { name = sdscatlen(name,ptrFromObj(host),sdslen(szFromObj(host))); name = sdscatlen(name,":",1); name = sdscatlen(name,ptrFromObj(port),sdslen(szFromObj(port))); - cs = (migrateCachedSocket*)dictFetchValue(server.migrate_cached_sockets,name); + cs = (migrateCachedSocket*)dictFetchValue(g_pserver->migrate_cached_sockets,name); if (!cs) { sdsfree(name); return; @@ -5054,21 +5054,21 @@ void migrateCloseSocket(robj *host, robj *port) { close(cs->fd); zfree(cs); - dictDelete(server.migrate_cached_sockets,name); + dictDelete(g_pserver->migrate_cached_sockets,name); sdsfree(name); } void migrateCloseTimedoutSockets(void) { - dictIterator *di = dictGetSafeIterator(server.migrate_cached_sockets); + dictIterator *di = dictGetSafeIterator(g_pserver->migrate_cached_sockets); dictEntry *de; while((de = dictNext(di)) != NULL) { migrateCachedSocket *cs = (migrateCachedSocket*)dictGetVal(de); - if ((server.unixtime - cs->last_use_time) > MIGRATE_SOCKET_CACHE_TTL) { + if ((g_pserver->unixtime - cs->last_use_time) > MIGRATE_SOCKET_CACHE_TTL) { close(cs->fd); zfree(cs); - dictDelete(server.migrate_cached_sockets,dictGetKey(de)); + dictDelete(g_pserver->migrate_cached_sockets,dictGetKey(de)); } } dictReleaseIterator(di); @@ -5212,7 +5212,7 @@ void migrateCommand(client *c) { serverAssertWithInfo(c,NULL, rioWriteBulkCount(&cmd,'*',replace ? 
5 : 4)); - if (server.cluster_enabled) + if (g_pserver->cluster_enabled) serverAssertWithInfo(c,NULL, rioWriteBulkString(&cmd,"RESTORE-ASKING",14)); else @@ -5306,7 +5306,7 @@ void migrateCommand(client *c) { /* No COPY option: remove the local key, signal the change. */ dbDelete(c->db,kv[j]); signalModifiedKey(c->db,kv[j]); - server.dirty++; + g_pserver->dirty++; /* Populate the argument vector to replace the old one. */ newargv[del_idx++] = kv[j]; @@ -5347,7 +5347,7 @@ void migrateCommand(client *c) { /* If we are here and a socket error happened, we don't want to retry. * Just signal the problem to the client, but only do it if we did not - * already queue a different error reported by the destination server. */ + * already queue a different error reported by the destination server. */ if (!error_from_target && socket_error) { may_retry = 0; goto socket_err; } @@ -5413,7 +5413,7 @@ void migrateCommand(client *c) { * information. */ void askingCommand(client *c) { serverAssert(GlobalLocksAcquired()); - if (server.cluster_enabled == 0) { + if (g_pserver->cluster_enabled == 0) { addReplyError(c,"This instance has cluster support disabled"); return; } @@ -5426,7 +5426,7 @@ void askingCommand(client *c) { * with read-only commands to keys that are served by the slave's master. */ void readonlyCommand(client *c) { serverAssert(GlobalLocksAcquired()); - if (server.cluster_enabled == 0) { + if (g_pserver->cluster_enabled == 0) { addReplyError(c,"This instance has cluster support disabled"); return; } @@ -5483,11 +5483,11 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in serverAssert(GlobalLocksAcquired()); /* Allow any key to be set if a module disabled cluster redirections. */ - if (server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_REDIRECTION) + if (g_pserver->cluster_module_flags & CLUSTER_MODULE_FLAG_NO_REDIRECTION) return myself; /* Allow any key to be set if a module disabled cluster redirections. 
*/ - if (server.cluster_module_flags & CLUSTER_MODULE_FLAG_NO_REDIRECTION) + if (g_pserver->cluster_module_flags & CLUSTER_MODULE_FLAG_NO_REDIRECTION) return myself; /* Set error code optimistically for the base case. */ @@ -5538,7 +5538,7 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in * and node. */ firstkey = thiskey; slot = thisslot; - n = server.cluster->slots[slot]; + n = g_pserver->cluster->slots[slot]; /* Error: If a slot is not served, we are in "cluster down" * state. However the state is yet to be updated, so this was @@ -5557,10 +5557,10 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in * error). To do so we set the importing/migrating state and * increment a counter for every missing key. */ if (n == myself && - server.cluster->migrating_slots_to[slot] != NULL) + g_pserver->cluster->migrating_slots_to[slot] != NULL) { migrating_slot = 1; - } else if (server.cluster->importing_slots_from[slot] != NULL) { + } else if (g_pserver->cluster->importing_slots_from[slot] != NULL) { importing_slot = 1; } } else { @@ -5583,7 +5583,7 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in /* Migarting / Improrting slot? Count keys we don't have. */ if ((migrating_slot || importing_slot) && - lookupKeyRead(&server.db[0],thiskey) == nullptr) + lookupKeyRead(&g_pserver->db[0],thiskey) == nullptr) { missing_keys++; } @@ -5596,7 +5596,7 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in if (n == NULL) return myself; /* Cluster is globally down but we got keys? We can't serve the request. */ - if (server.cluster->state != CLUSTER_OK) { + if (g_pserver->cluster->state != CLUSTER_OK) { if (error_code) *error_code = CLUSTER_REDIR_DOWN_STATE; return NULL; } @@ -5614,7 +5614,7 @@ clusterNode *getNodeByQuery(client *c, struct redisCommand *cmd, robj **argv, in * an ASK redirection. 
*/ if (migrating_slot && missing_keys) { if (error_code) *error_code = CLUSTER_REDIR_ASK; - return server.cluster->migrating_slots_to[slot]; + return g_pserver->cluster->migrating_slots_to[slot]; } /* If we are receiving the slot, and the client correctly flagged the @@ -5703,7 +5703,7 @@ int clusterRedirectBlockedClientIfNeeded(client *c) { dictIterator *di; /* If the cluster is down, unblock the client with the right error. */ - if (server.cluster->state == CLUSTER_FAIL) { + if (g_pserver->cluster->state == CLUSTER_FAIL) { clusterRedirectClient(c,NULL,0,CLUSTER_REDIR_DOWN_STATE); return 1; } @@ -5713,13 +5713,13 @@ int clusterRedirectBlockedClientIfNeeded(client *c) { if ((de = dictNext(di)) != NULL) { robj *key = (robj*)dictGetKey(de); int slot = keyHashSlot((char*)ptrFromObj(key), sdslen(szFromObj(key))); - clusterNode *node = server.cluster->slots[slot]; + clusterNode *node = g_pserver->cluster->slots[slot]; /* We send an error and unblock the client if: * 1) The slot is unassigned, emitting a cluster down error. * 2) The slot is not handled by this node, nor being imported. */ if (node != myself && - server.cluster->importing_slots_from[slot] == NULL) + g_pserver->cluster->importing_slots_from[slot] == NULL) { if (node == NULL) { clusterRedirectClient(c,NULL,0, diff --git a/src/config.cpp b/src/config.cpp index a702a5645..badedcee4 100644 --- a/src/config.cpp +++ b/src/config.cpp @@ -34,6 +34,8 @@ #include #include +const char *KEYDB_SET_VERSION = KEYDB_REAL_VERSION; + /*----------------------------------------------------------------------------- * Config file name-value maps. *----------------------------------------------------------------------------*/ @@ -129,7 +131,7 @@ const char *configEnumGetNameOrUnknown(configEnum *ce, int val) { /* Used for INFO generation. 
*/ const char *evictPolicyToString(void) { - return configEnumGetNameOrUnknown(maxmemory_policy_enum,server.maxmemory_policy); + return configEnumGetNameOrUnknown(maxmemory_policy_enum,g_pserver->maxmemory_policy); } /*----------------------------------------------------------------------------- @@ -143,16 +145,16 @@ int yesnotoi(char *s) { } void appendServerSaveParams(time_t seconds, int changes) { - server.saveparams = (saveparam*)zrealloc(server.saveparams,sizeof(struct saveparam)*(server.saveparamslen+1), MALLOC_LOCAL); - server.saveparams[server.saveparamslen].seconds = seconds; - server.saveparams[server.saveparamslen].changes = changes; - server.saveparamslen++; + g_pserver->saveparams = (saveparam*)zrealloc(g_pserver->saveparams,sizeof(struct saveparam)*(g_pserver->saveparamslen+1), MALLOC_LOCAL); + g_pserver->saveparams[g_pserver->saveparamslen].seconds = seconds; + g_pserver->saveparams[g_pserver->saveparamslen].changes = changes; + g_pserver->saveparamslen++; } void resetServerSaveParams(void) { - zfree(server.saveparams); - server.saveparams = NULL; - server.saveparamslen = 0; + zfree(g_pserver->saveparams); + g_pserver->saveparams = NULL; + g_pserver->saveparamslen = 0; } void queueLoadModule(sds path, sds *argv, int argc) { @@ -166,7 +168,7 @@ void queueLoadModule(sds path, sds *argv, int argc) { for (i = 0; i < argc; i++) { loadmod->argv[i] = createRawStringObject(argv[i],sdslen(argv[i])); } - listAddNodeTail(server.loadmodule_queue,loadmod); + listAddNodeTail(g_pserver->loadmodule_queue,loadmod); } void loadServerConfigFromString(char *config) { @@ -203,27 +205,27 @@ void loadServerConfigFromString(char *config) { /* Execute config directives */ if (!strcasecmp(argv[0],"timeout") && argc == 2) { - server.maxidletime = atoi(argv[1]); - if (server.maxidletime < 0) { + cserver.maxidletime = atoi(argv[1]); + if (cserver.maxidletime < 0) { err = "Invalid timeout value"; goto loaderr; } } else if (!strcasecmp(argv[0],"tcp-keepalive") && argc == 2) { - 
server.tcpkeepalive = atoi(argv[1]); - if (server.tcpkeepalive < 0) { + cserver.tcpkeepalive = atoi(argv[1]); + if (cserver.tcpkeepalive < 0) { err = "Invalid tcp-keepalive value"; goto loaderr; } } else if (!strcasecmp(argv[0],"protected-mode") && argc == 2) { - if ((server.protected_mode = yesnotoi(argv[1])) == -1) { + if ((g_pserver->protected_mode = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"port") && argc == 2) { - server.port = atoi(argv[1]); - if (server.port < 0 || server.port > 65535) { + g_pserver->port = atoi(argv[1]); + if (g_pserver->port < 0 || g_pserver->port > 65535) { err = "Invalid port"; goto loaderr; } } else if (!strcasecmp(argv[0],"tcp-backlog") && argc == 2) { - server.tcp_backlog = atoi(argv[1]); - if (server.tcp_backlog < 0) { + g_pserver->tcp_backlog = atoi(argv[1]); + if (g_pserver->tcp_backlog < 0) { err = "Invalid backlog value"; goto loaderr; } } else if (!strcasecmp(argv[0],"bind") && argc >= 2) { @@ -233,14 +235,14 @@ void loadServerConfigFromString(char *config) { err = "Too many bind addresses specified"; goto loaderr; } for (j = 0; j < addresses; j++) - server.bindaddr[j] = zstrdup(argv[j+1]); - server.bindaddr_count = addresses; + g_pserver->bindaddr[j] = zstrdup(argv[j+1]); + g_pserver->bindaddr_count = addresses; } else if (!strcasecmp(argv[0],"unixsocket") && argc == 2) { - server.unixsocket = zstrdup(argv[1]); + g_pserver->unixsocket = zstrdup(argv[1]); } else if (!strcasecmp(argv[0],"unixsocketperm") && argc == 2) { errno = 0; - server.unixsocketperm = (mode_t)strtol(argv[1], NULL, 8); - if (errno || server.unixsocketperm > 0777) { + g_pserver->unixsocketperm = (mode_t)strtol(argv[1], NULL, 8); + if (errno || g_pserver->unixsocketperm > 0777) { err = "Invalid socket file permissions"; goto loaderr; } } else if (!strcasecmp(argv[0],"save")) { @@ -261,8 +263,8 @@ void loadServerConfigFromString(char *config) { exit(1); } } else if 
(!strcasecmp(argv[0],"loglevel") && argc == 2) { - server.verbosity = configEnumGetValue(loglevel_enum,argv[1]); - if (server.verbosity == INT_MIN) { + cserver.verbosity = configEnumGetValue(loglevel_enum,argv[1]); + if (cserver.verbosity == INT_MIN) { err = "Invalid log level. " "Must be one of debug, verbose, notice, warning"; goto loaderr; @@ -270,12 +272,12 @@ void loadServerConfigFromString(char *config) { } else if (!strcasecmp(argv[0],"logfile") && argc == 2) { FILE *logfp; - zfree(server.logfile); - server.logfile = zstrdup(argv[1]); - if (server.logfile[0] != '\0') { + zfree(g_pserver->logfile); + g_pserver->logfile = zstrdup(argv[1]); + if (g_pserver->logfile[0] != '\0') { /* Test if we are able to open the file. The server will not * be able to abort just for this problem later... */ - logfp = fopen(server.logfile,"a"); + logfp = fopen(g_pserver->logfile,"a"); if (logfp == NULL) { err = sdscatprintf(sdsempty(), "Can't open the log file: %s", strerror(errno)); @@ -284,66 +286,66 @@ void loadServerConfigFromString(char *config) { fclose(logfp); } } else if (!strcasecmp(argv[0],"aclfile") && argc == 2) { - zfree(server.acl_filename); - server.acl_filename = zstrdup(argv[1]); + zfree(g_pserver->acl_filename); + g_pserver->acl_filename = zstrdup(argv[1]); } else if (!strcasecmp(argv[0],"always-show-logo") && argc == 2) { - if ((server.always_show_logo = yesnotoi(argv[1])) == -1) { + if ((g_pserver->always_show_logo = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"syslog-enabled") && argc == 2) { - if ((server.syslog_enabled = yesnotoi(argv[1])) == -1) { + if ((g_pserver->syslog_enabled = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"syslog-ident") && argc == 2) { - if (server.syslog_ident) zfree(server.syslog_ident); - server.syslog_ident = zstrdup(argv[1]); + if (g_pserver->syslog_ident) zfree(g_pserver->syslog_ident); + 
g_pserver->syslog_ident = zstrdup(argv[1]); } else if (!strcasecmp(argv[0],"syslog-facility") && argc == 2) { - server.syslog_facility = + g_pserver->syslog_facility = configEnumGetValue(syslog_facility_enum,argv[1]); - if (server.syslog_facility == INT_MIN) { + if (g_pserver->syslog_facility == INT_MIN) { err = "Invalid log facility. Must be one of USER or between LOCAL0-LOCAL7"; goto loaderr; } } else if (!strcasecmp(argv[0],"databases") && argc == 2) { - server.dbnum = atoi(argv[1]); - if (server.dbnum < 1) { + cserver.dbnum = atoi(argv[1]); + if (cserver.dbnum < 1) { err = "Invalid number of databases"; goto loaderr; } } else if (!strcasecmp(argv[0],"include") && argc == 2) { loadServerConfig(argv[1],NULL); } else if (!strcasecmp(argv[0],"maxclients") && argc == 2) { - server.maxclients = atoi(argv[1]); - if (server.maxclients < 1) { + g_pserver->maxclients = atoi(argv[1]); + if (g_pserver->maxclients < 1) { err = "Invalid max clients limit"; goto loaderr; } } else if (!strcasecmp(argv[0],"maxmemory") && argc == 2) { - server.maxmemory = memtoll(argv[1],NULL); + g_pserver->maxmemory = memtoll(argv[1],NULL); } else if (!strcasecmp(argv[0],"maxmemory-policy") && argc == 2) { - server.maxmemory_policy = + g_pserver->maxmemory_policy = configEnumGetValue(maxmemory_policy_enum,argv[1]); - if (server.maxmemory_policy == INT_MIN) { + if (g_pserver->maxmemory_policy == INT_MIN) { err = "Invalid maxmemory policy"; goto loaderr; } } else if (!strcasecmp(argv[0],"maxmemory-samples") && argc == 2) { - server.maxmemory_samples = atoi(argv[1]); - if (server.maxmemory_samples <= 0) { + g_pserver->maxmemory_samples = atoi(argv[1]); + if (g_pserver->maxmemory_samples <= 0) { err = "maxmemory-samples must be 1 or greater"; goto loaderr; } } else if ((!strcasecmp(argv[0],"proto-max-bulk-len")) && argc == 2) { - server.proto_max_bulk_len = memtoll(argv[1],NULL); + g_pserver->proto_max_bulk_len = memtoll(argv[1],NULL); } else if ((!strcasecmp(argv[0],"client-query-buffer-limit")) 
&& argc == 2) { - server.client_max_querybuf_len = memtoll(argv[1],NULL); + cserver.client_max_querybuf_len = memtoll(argv[1],NULL); } else if (!strcasecmp(argv[0],"lfu-log-factor") && argc == 2) { - server.lfu_log_factor = atoi(argv[1]); - if (server.lfu_log_factor < 0) { + g_pserver->lfu_log_factor = atoi(argv[1]); + if (g_pserver->lfu_log_factor < 0) { err = "lfu-log-factor must be 0 or greater"; goto loaderr; } } else if (!strcasecmp(argv[0],"lfu-decay-time") && argc == 2) { - server.lfu_decay_time = atoi(argv[1]); - if (server.lfu_decay_time < 0) { + g_pserver->lfu_decay_time = atoi(argv[1]); + if (g_pserver->lfu_decay_time < 0) { err = "lfu-decay-time must be 0 or greater"; goto loaderr; } @@ -355,28 +357,28 @@ void loadServerConfigFromString(char *config) { !strcasecmp(argv[0],"repl-ping-replica-period")) && argc == 2) { - server.repl_ping_slave_period = atoi(argv[1]); - if (server.repl_ping_slave_period <= 0) { + g_pserver->repl_ping_slave_period = atoi(argv[1]); + if (g_pserver->repl_ping_slave_period <= 0) { err = "repl-ping-replica-period must be 1 or greater"; goto loaderr; } } else if (!strcasecmp(argv[0],"repl-timeout") && argc == 2) { - server.repl_timeout = atoi(argv[1]); - if (server.repl_timeout <= 0) { + g_pserver->repl_timeout = atoi(argv[1]); + if (g_pserver->repl_timeout <= 0) { err = "repl-timeout must be 1 or greater"; goto loaderr; } } else if (!strcasecmp(argv[0],"repl-disable-tcp-nodelay") && argc==2) { - if ((server.repl_disable_tcp_nodelay = yesnotoi(argv[1])) == -1) { + if ((g_pserver->repl_disable_tcp_nodelay = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"repl-diskless-sync") && argc==2) { - if ((server.repl_diskless_sync = yesnotoi(argv[1])) == -1) { + if ((g_pserver->repl_diskless_sync = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"repl-diskless-sync-delay") && argc==2) { - 
server.repl_diskless_sync_delay = atoi(argv[1]); - if (server.repl_diskless_sync_delay < 0) { + g_pserver->repl_diskless_sync_delay = atoi(argv[1]); + if (g_pserver->repl_diskless_sync_delay < 0) { err = "repl-diskless-sync-delay can't be negative"; goto loaderr; } @@ -388,148 +390,148 @@ void loadServerConfigFromString(char *config) { } resizeReplicationBacklog(size); } else if (!strcasecmp(argv[0],"repl-backlog-ttl") && argc == 2) { - server.repl_backlog_time_limit = atoi(argv[1]); - if (server.repl_backlog_time_limit < 0) { + g_pserver->repl_backlog_time_limit = atoi(argv[1]); + if (g_pserver->repl_backlog_time_limit < 0) { err = "repl-backlog-ttl can't be negative "; goto loaderr; } } else if (!strcasecmp(argv[0],"masteruser") && argc == 2) { - zfree(server.default_masteruser); - server.default_masteruser = argv[1][0] ? zstrdup(argv[1]) : NULL; + zfree(cserver.default_masteruser); + cserver.default_masteruser = argv[1][0] ? zstrdup(argv[1]) : NULL; } else if (!strcasecmp(argv[0],"masterauth") && argc == 2) { - zfree(server.default_masterauth); - server.default_masterauth = argv[1][0] ? zstrdup(argv[1]) : NULL; + zfree(cserver.default_masterauth); + cserver.default_masterauth = argv[1][0] ? 
zstrdup(argv[1]) : NULL; // Loop through all existing master infos and update them (in case this came after the replicaof config) updateMasterAuth(); } else if ((!strcasecmp(argv[0],"slave-serve-stale-data") || !strcasecmp(argv[0],"replica-serve-stale-data")) && argc == 2) { - if ((server.repl_serve_stale_data = yesnotoi(argv[1])) == -1) { + if ((g_pserver->repl_serve_stale_data = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if ((!strcasecmp(argv[0],"slave-read-only") || !strcasecmp(argv[0],"replica-read-only")) && argc == 2) { - if ((server.repl_slave_ro = yesnotoi(argv[1])) == -1) { + if ((g_pserver->repl_slave_ro = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if ((!strcasecmp(argv[0],"slave-ignore-maxmemory") || !strcasecmp(argv[0],"replica-ignore-maxmemory")) && argc == 2) { - if ((server.repl_slave_ignore_maxmemory = yesnotoi(argv[1])) == -1) { + if ((g_pserver->repl_slave_ignore_maxmemory = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"rdbcompression") && argc == 2) { - if ((server.rdb_compression = yesnotoi(argv[1])) == -1) { + if ((g_pserver->rdb_compression = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"rdbchecksum") && argc == 2) { - if ((server.rdb_checksum = yesnotoi(argv[1])) == -1) { + if ((g_pserver->rdb_checksum = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"activerehashing") && argc == 2) { - if ((server.activerehashing = yesnotoi(argv[1])) == -1) { + if ((g_pserver->activerehashing = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"lazyfree-lazy-eviction") && argc == 2) { - if ((server.lazyfree_lazy_eviction = yesnotoi(argv[1])) == -1) { + if ((g_pserver->lazyfree_lazy_eviction = 
yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"lazyfree-lazy-expire") && argc == 2) { - if ((server.lazyfree_lazy_expire = yesnotoi(argv[1])) == -1) { + if ((g_pserver->lazyfree_lazy_expire = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"lazyfree-lazy-server-del") && argc == 2){ - if ((server.lazyfree_lazy_server_del = yesnotoi(argv[1])) == -1) { + if ((g_pserver->lazyfree_lazy_server_del = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if ((!strcasecmp(argv[0],"slave-lazy-flush") || !strcasecmp(argv[0],"replica-lazy-flush")) && argc == 2) { - if ((server.repl_slave_lazy_flush = yesnotoi(argv[1])) == -1) { + if ((g_pserver->repl_slave_lazy_flush = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"activedefrag") && argc == 2) { - if ((server.active_defrag_enabled = yesnotoi(argv[1])) == -1) { + if ((cserver.active_defrag_enabled = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } - if (server.active_defrag_enabled) { + if (cserver.active_defrag_enabled) { #ifndef HAVE_DEFRAG err = "active defrag can't be enabled without proper jemalloc support"; goto loaderr; #endif } } else if (!strcasecmp(argv[0],"daemonize") && argc == 2) { - if ((server.daemonize = yesnotoi(argv[1])) == -1) { + if ((cserver.daemonize = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"dynamic-hz") && argc == 2) { - if ((server.dynamic_hz = yesnotoi(argv[1])) == -1) { + if ((g_pserver->dynamic_hz = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"hz") && argc == 2) { - server.config_hz = atoi(argv[1]); - if (server.config_hz < CONFIG_MIN_HZ) server.config_hz = CONFIG_MIN_HZ; - if 
(server.config_hz > CONFIG_MAX_HZ) server.config_hz = CONFIG_MAX_HZ; + g_pserver->config_hz = atoi(argv[1]); + if (g_pserver->config_hz < CONFIG_MIN_HZ) g_pserver->config_hz = CONFIG_MIN_HZ; + if (g_pserver->config_hz > CONFIG_MAX_HZ) g_pserver->config_hz = CONFIG_MAX_HZ; } else if (!strcasecmp(argv[0],"appendonly") && argc == 2) { int yes; if ((yes = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } - server.aof_state = yes ? AOF_ON : AOF_OFF; + g_pserver->aof_state = yes ? AOF_ON : AOF_OFF; } else if (!strcasecmp(argv[0],"appendfilename") && argc == 2) { if (!pathIsBaseName(argv[1])) { err = "appendfilename can't be a path, just a filename"; goto loaderr; } - zfree(server.aof_filename); - server.aof_filename = zstrdup(argv[1]); + zfree(g_pserver->aof_filename); + g_pserver->aof_filename = zstrdup(argv[1]); } else if (!strcasecmp(argv[0],"no-appendfsync-on-rewrite") && argc == 2) { - if ((server.aof_no_fsync_on_rewrite= yesnotoi(argv[1])) == -1) { + if ((g_pserver->aof_no_fsync_on_rewrite= yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"appendfsync") && argc == 2) { - server.aof_fsync = configEnumGetValue(aof_fsync_enum,argv[1]); - if (server.aof_fsync == INT_MIN) { + g_pserver->aof_fsync = configEnumGetValue(aof_fsync_enum,argv[1]); + if (g_pserver->aof_fsync == INT_MIN) { err = "argument must be 'no', 'always' or 'everysec'"; goto loaderr; } } else if (!strcasecmp(argv[0],"auto-aof-rewrite-percentage") && argc == 2) { - server.aof_rewrite_perc = atoi(argv[1]); - if (server.aof_rewrite_perc < 0) { + g_pserver->aof_rewrite_perc = atoi(argv[1]); + if (g_pserver->aof_rewrite_perc < 0) { err = "Invalid negative percentage for AOF auto rewrite"; goto loaderr; } } else if (!strcasecmp(argv[0],"auto-aof-rewrite-min-size") && argc == 2) { - server.aof_rewrite_min_size = memtoll(argv[1],NULL); + g_pserver->aof_rewrite_min_size = memtoll(argv[1],NULL); } else if 
(!strcasecmp(argv[0],"aof-rewrite-incremental-fsync") && argc == 2) { - if ((server.aof_rewrite_incremental_fsync = + if ((g_pserver->aof_rewrite_incremental_fsync = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"rdb-save-incremental-fsync") && argc == 2) { - if ((server.rdb_save_incremental_fsync = + if ((g_pserver->rdb_save_incremental_fsync = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"aof-load-truncated") && argc == 2) { - if ((server.aof_load_truncated = yesnotoi(argv[1])) == -1) { + if ((g_pserver->aof_load_truncated = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"aof-use-rdb-preamble") && argc == 2) { - if ((server.aof_use_rdb_preamble = yesnotoi(argv[1])) == -1) { + if ((g_pserver->aof_use_rdb_preamble = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"requirepass") && argc == 2) { @@ -544,80 +546,80 @@ void loadServerConfigFromString(char *config) { ACLSetUser(DefaultUser,aclop,sdslen(aclop)); sdsfree(aclop); } else if (!strcasecmp(argv[0],"pidfile") && argc == 2) { - zfree(server.pidfile); - server.pidfile = zstrdup(argv[1]); + zfree(cserver.pidfile); + cserver.pidfile = zstrdup(argv[1]); } else if (!strcasecmp(argv[0],"dbfilename") && argc == 2) { if (!pathIsBaseName(argv[1])) { err = "dbfilename can't be a path, just a filename"; goto loaderr; } - zfree(server.rdb_filename); - server.rdb_filename = zstrdup(argv[1]); + zfree(g_pserver->rdb_filename); + g_pserver->rdb_filename = zstrdup(argv[1]); } else if(!strcasecmp(argv[0],"db-s3-object") && argc == 2) { - zfree(server.rdb_s3bucketpath); - server.rdb_s3bucketpath = zstrdup(argv[1]); + zfree(g_pserver->rdb_s3bucketpath); + g_pserver->rdb_s3bucketpath = zstrdup(argv[1]); } else if 
(!strcasecmp(argv[0],"active-defrag-threshold-lower") && argc == 2) { - server.active_defrag_threshold_lower = atoi(argv[1]); - if (server.active_defrag_threshold_lower < 0 || - server.active_defrag_threshold_lower > 1000) { + cserver.active_defrag_threshold_lower = atoi(argv[1]); + if (cserver.active_defrag_threshold_lower < 0 || + cserver.active_defrag_threshold_lower > 1000) { err = "active-defrag-threshold-lower must be between 0 and 1000"; goto loaderr; } } else if (!strcasecmp(argv[0],"active-defrag-threshold-upper") && argc == 2) { - server.active_defrag_threshold_upper = atoi(argv[1]); - if (server.active_defrag_threshold_upper < 0 || - server.active_defrag_threshold_upper > 1000) { + cserver.active_defrag_threshold_upper = atoi(argv[1]); + if (cserver.active_defrag_threshold_upper < 0 || + cserver.active_defrag_threshold_upper > 1000) { err = "active-defrag-threshold-upper must be between 0 and 1000"; goto loaderr; } } else if (!strcasecmp(argv[0],"active-defrag-ignore-bytes") && argc == 2) { - server.active_defrag_ignore_bytes = memtoll(argv[1], NULL); - if (server.active_defrag_ignore_bytes <= 0) { + cserver.active_defrag_ignore_bytes = memtoll(argv[1], NULL); + if (cserver.active_defrag_ignore_bytes <= 0) { err = "active-defrag-ignore-bytes must above 0"; goto loaderr; } } else if (!strcasecmp(argv[0],"active-defrag-cycle-min") && argc == 2) { - server.active_defrag_cycle_min = atoi(argv[1]); - if (server.active_defrag_cycle_min < 1 || server.active_defrag_cycle_min > 99) { + cserver.active_defrag_cycle_min = atoi(argv[1]); + if (cserver.active_defrag_cycle_min < 1 || cserver.active_defrag_cycle_min > 99) { err = "active-defrag-cycle-min must be between 1 and 99"; goto loaderr; } } else if (!strcasecmp(argv[0],"active-defrag-cycle-max") && argc == 2) { - server.active_defrag_cycle_max = atoi(argv[1]); - if (server.active_defrag_cycle_max < 1 || server.active_defrag_cycle_max > 99) { + cserver.active_defrag_cycle_max = atoi(argv[1]); + if 
(cserver.active_defrag_cycle_max < 1 || cserver.active_defrag_cycle_max > 99) { err = "active-defrag-cycle-max must be between 1 and 99"; goto loaderr; } } else if (!strcasecmp(argv[0],"active-defrag-max-scan-fields") && argc == 2) { - server.active_defrag_max_scan_fields = strtoll(argv[1],NULL,10); - if (server.active_defrag_max_scan_fields < 1) { + cserver.active_defrag_max_scan_fields = strtoll(argv[1],NULL,10); + if (cserver.active_defrag_max_scan_fields < 1) { err = "active-defrag-max-scan-fields must be positive"; goto loaderr; } } else if (!strcasecmp(argv[0],"hash-max-ziplist-entries") && argc == 2) { - server.hash_max_ziplist_entries = memtoll(argv[1], NULL); + g_pserver->hash_max_ziplist_entries = memtoll(argv[1], NULL); } else if (!strcasecmp(argv[0],"hash-max-ziplist-value") && argc == 2) { - server.hash_max_ziplist_value = memtoll(argv[1], NULL); + g_pserver->hash_max_ziplist_value = memtoll(argv[1], NULL); } else if (!strcasecmp(argv[0],"stream-node-max-bytes") && argc == 2) { - server.stream_node_max_bytes = memtoll(argv[1], NULL); + g_pserver->stream_node_max_bytes = memtoll(argv[1], NULL); } else if (!strcasecmp(argv[0],"stream-node-max-entries") && argc == 2) { - server.stream_node_max_entries = atoi(argv[1]); + g_pserver->stream_node_max_entries = atoi(argv[1]); } else if (!strcasecmp(argv[0],"list-max-ziplist-entries") && argc == 2){ /* DEAD OPTION */ } else if (!strcasecmp(argv[0],"list-max-ziplist-value") && argc == 2) { /* DEAD OPTION */ } else if (!strcasecmp(argv[0],"list-max-ziplist-size") && argc == 2) { - server.list_max_ziplist_size = atoi(argv[1]); + g_pserver->list_max_ziplist_size = atoi(argv[1]); } else if (!strcasecmp(argv[0],"list-compress-depth") && argc == 2) { - server.list_compress_depth = atoi(argv[1]); + g_pserver->list_compress_depth = atoi(argv[1]); } else if (!strcasecmp(argv[0],"set-max-intset-entries") && argc == 2) { - server.set_max_intset_entries = memtoll(argv[1], NULL); + g_pserver->set_max_intset_entries = 
memtoll(argv[1], NULL); } else if (!strcasecmp(argv[0],"zset-max-ziplist-entries") && argc == 2) { - server.zset_max_ziplist_entries = memtoll(argv[1], NULL); + g_pserver->zset_max_ziplist_entries = memtoll(argv[1], NULL); } else if (!strcasecmp(argv[0],"zset-max-ziplist-value") && argc == 2) { - server.zset_max_ziplist_value = memtoll(argv[1], NULL); + g_pserver->zset_max_ziplist_value = memtoll(argv[1], NULL); } else if (!strcasecmp(argv[0],"hll-sparse-max-bytes") && argc == 2) { - server.hll_sparse_max_bytes = memtoll(argv[1], NULL); + g_pserver->hll_sparse_max_bytes = memtoll(argv[1], NULL); } else if (!strcasecmp(argv[0],"rename-command") && argc == 3) { struct redisCommand *cmd = lookupCommand(argv[1]); int retval; @@ -629,62 +631,62 @@ void loadServerConfigFromString(char *config) { /* If the target command name is the empty string we just * remove it from the command table. */ - retval = dictDelete(server.commands, argv[1]); + retval = dictDelete(g_pserver->commands, argv[1]); serverAssert(retval == DICT_OK); /* Otherwise we re-add the command under a different name. 
*/ if (sdslen(argv[2]) != 0) { sds copy = sdsdup(argv[2]); - retval = dictAdd(server.commands, copy, cmd); + retval = dictAdd(g_pserver->commands, copy, cmd); if (retval != DICT_OK) { sdsfree(copy); err = "Target command name already exists"; goto loaderr; } } } else if (!strcasecmp(argv[0],"cluster-enabled") && argc == 2) { - if ((server.cluster_enabled = yesnotoi(argv[1])) == -1) { + if ((g_pserver->cluster_enabled = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"cluster-config-file") && argc == 2) { - zfree(server.cluster_configfile); - server.cluster_configfile = zstrdup(argv[1]); + zfree(g_pserver->cluster_configfile); + g_pserver->cluster_configfile = zstrdup(argv[1]); } else if (!strcasecmp(argv[0],"cluster-announce-ip") && argc == 2) { - zfree(server.cluster_announce_ip); - server.cluster_announce_ip = zstrdup(argv[1]); + zfree(g_pserver->cluster_announce_ip); + g_pserver->cluster_announce_ip = zstrdup(argv[1]); } else if (!strcasecmp(argv[0],"cluster-announce-port") && argc == 2) { - server.cluster_announce_port = atoi(argv[1]); - if (server.cluster_announce_port < 0 || - server.cluster_announce_port > 65535) + g_pserver->cluster_announce_port = atoi(argv[1]); + if (g_pserver->cluster_announce_port < 0 || + g_pserver->cluster_announce_port > 65535) { err = "Invalid port"; goto loaderr; } } else if (!strcasecmp(argv[0],"cluster-announce-bus-port") && argc == 2) { - server.cluster_announce_bus_port = atoi(argv[1]); - if (server.cluster_announce_bus_port < 0 || - server.cluster_announce_bus_port > 65535) + g_pserver->cluster_announce_bus_port = atoi(argv[1]); + if (g_pserver->cluster_announce_bus_port < 0 || + g_pserver->cluster_announce_bus_port > 65535) { err = "Invalid port"; goto loaderr; } } else if (!strcasecmp(argv[0],"cluster-require-full-coverage") && argc == 2) { - if ((server.cluster_require_full_coverage = yesnotoi(argv[1])) == -1) + if ((g_pserver->cluster_require_full_coverage 
= yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"cluster-node-timeout") && argc == 2) { - server.cluster_node_timeout = strtoll(argv[1],NULL,10); - if (server.cluster_node_timeout <= 0) { + g_pserver->cluster_node_timeout = strtoll(argv[1],NULL,10); + if (g_pserver->cluster_node_timeout <= 0) { err = "cluster node timeout must be 1 or greater"; goto loaderr; } } else if (!strcasecmp(argv[0],"cluster-migration-barrier") && argc == 2) { - server.cluster_migration_barrier = atoi(argv[1]); - if (server.cluster_migration_barrier < 0) { + g_pserver->cluster_migration_barrier = atoi(argv[1]); + if (g_pserver->cluster_migration_barrier < 0) { err = "cluster migration barrier must zero or positive"; goto loaderr; } @@ -692,8 +694,8 @@ void loadServerConfigFromString(char *config) { !strcasecmp(argv[0],"cluster-replica-validity-factor")) && argc == 2) { - server.cluster_slave_validity_factor = atoi(argv[1]); - if (server.cluster_slave_validity_factor < 0) { + g_pserver->cluster_slave_validity_factor = atoi(argv[1]); + if (g_pserver->cluster_slave_validity_factor < 0) { err = "cluster replica validity factor must be zero or positive"; goto loaderr; } @@ -701,29 +703,29 @@ void loadServerConfigFromString(char *config) { !strcasecmp(argv[0],"cluster-replica-no-failover")) && argc == 2) { - server.cluster_slave_no_failover = yesnotoi(argv[1]); - if (server.cluster_slave_no_failover == -1) { + g_pserver->cluster_slave_no_failover = yesnotoi(argv[1]); + if (g_pserver->cluster_slave_no_failover == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"lua-time-limit") && argc == 2) { - server.lua_time_limit = strtoll(argv[1],NULL,10); + g_pserver->lua_time_limit = strtoll(argv[1],NULL,10); } else if (!strcasecmp(argv[0],"lua-replicate-commands") && argc == 2) { - server.lua_always_replicate_commands = yesnotoi(argv[1]); + g_pserver->lua_always_replicate_commands = 
yesnotoi(argv[1]); } else if (!strcasecmp(argv[0],"slowlog-log-slower-than") && argc == 2) { - server.slowlog_log_slower_than = strtoll(argv[1],NULL,10); + g_pserver->slowlog_log_slower_than = strtoll(argv[1],NULL,10); } else if (!strcasecmp(argv[0],"latency-monitor-threshold") && argc == 2) { - server.latency_monitor_threshold = strtoll(argv[1],NULL,10); - if (server.latency_monitor_threshold < 0) { + g_pserver->latency_monitor_threshold = strtoll(argv[1],NULL,10); + if (g_pserver->latency_monitor_threshold < 0) { err = "The latency threshold can't be negative"; goto loaderr; } } else if (!strcasecmp(argv[0],"slowlog-max-len") && argc == 2) { - server.slowlog_max_len = strtoll(argv[1],NULL,10); + g_pserver->slowlog_max_len = strtoll(argv[1],NULL,10); } else if (!strcasecmp(argv[0],"client-output-buffer-limit") && argc == 5) { @@ -743,44 +745,44 @@ void loadServerConfigFromString(char *config) { err = "Negative number of seconds in soft limit is invalid"; goto loaderr; } - server.client_obuf_limits[type].hard_limit_bytes = hard; - server.client_obuf_limits[type].soft_limit_bytes = soft; - server.client_obuf_limits[type].soft_limit_seconds = soft_seconds; + cserver.client_obuf_limits[type].hard_limit_bytes = hard; + cserver.client_obuf_limits[type].soft_limit_bytes = soft; + cserver.client_obuf_limits[type].soft_limit_seconds = soft_seconds; } else if (!strcasecmp(argv[0],"stop-writes-on-bgsave-error") && argc == 2) { - if ((server.stop_writes_on_bgsave_err = yesnotoi(argv[1])) == -1) { + if ((g_pserver->stop_writes_on_bgsave_err = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if ((!strcasecmp(argv[0],"slave-priority") || !strcasecmp(argv[0],"replica-priority")) && argc == 2) { - server.slave_priority = atoi(argv[1]); + g_pserver->slave_priority = atoi(argv[1]); } else if ((!strcasecmp(argv[0],"slave-announce-ip") || !strcasecmp(argv[0],"replica-announce-ip")) && argc == 2) { - zfree(server.slave_announce_ip); - 
server.slave_announce_ip = zstrdup(argv[1]); + zfree(g_pserver->slave_announce_ip); + g_pserver->slave_announce_ip = zstrdup(argv[1]); } else if ((!strcasecmp(argv[0],"slave-announce-port") || !strcasecmp(argv[0],"replica-announce-port")) && argc == 2) { - server.slave_announce_port = atoi(argv[1]); - if (server.slave_announce_port < 0 || - server.slave_announce_port > 65535) + g_pserver->slave_announce_port = atoi(argv[1]); + if (g_pserver->slave_announce_port < 0 || + g_pserver->slave_announce_port > 65535) { err = "Invalid port"; goto loaderr; } } else if ((!strcasecmp(argv[0],"min-slaves-to-write") || !strcasecmp(argv[0],"min-replicas-to-write")) && argc == 2) { - server.repl_min_slaves_to_write = atoi(argv[1]); - if (server.repl_min_slaves_to_write < 0) { + g_pserver->repl_min_slaves_to_write = atoi(argv[1]); + if (g_pserver->repl_min_slaves_to_write < 0) { err = "Invalid value for min-replicas-to-write."; goto loaderr; } } else if ((!strcasecmp(argv[0],"min-slaves-max-lag") || !strcasecmp(argv[0],"min-replicas-max-lag")) && argc == 2) { - server.repl_min_slaves_max_lag = atoi(argv[1]); - if (server.repl_min_slaves_max_lag < 0) { + g_pserver->repl_min_slaves_max_lag = atoi(argv[1]); + if (g_pserver->repl_min_slaves_max_lag < 0) { err = "Invalid value for min-replicas-max-lag."; goto loaderr; } } else if (!strcasecmp(argv[0],"notify-keyspace-events") && argc == 2) { @@ -790,12 +792,12 @@ void loadServerConfigFromString(char *config) { err = "Invalid event class character. Use 'g$lshzxeA'."; goto loaderr; } - server.notify_keyspace_events = flags; + g_pserver->notify_keyspace_events = flags; } else if (!strcasecmp(argv[0],"supervised") && argc == 2) { - server.supervised_mode = + cserver.supervised_mode = configEnumGetValue(supervised_mode_enum,argv[1]); - if (server.supervised_mode == INT_MIN) { + if (cserver.supervised_mode == INT_MIN) { err = "Invalid option for 'supervised'. 
" "Allowed values: 'upstart', 'systemd', 'auto', or 'no'"; goto loaderr; @@ -816,7 +818,7 @@ void loadServerConfigFromString(char *config) { /* argc == 1 is handled by main() as we need to enter the sentinel * mode ASAP. */ if (argc != 1) { - if (!server.sentinel_mode) { + if (!g_pserver->sentinel_mode) { err = "sentinel directive while not in sentinel mode"; goto loaderr; } @@ -825,40 +827,43 @@ void loadServerConfigFromString(char *config) { } } else if (!strcasecmp(argv[0],"scratch-file-path")) { #ifdef USE_MEMKIND - storage_init(argv[1], server.maxmemory); + storage_init(argv[1], g_pserver->maxmemory); #else err = "KeyDB not compliled with scratch-file support."; goto loaderr; #endif } else if (!strcasecmp(argv[0],"server-threads") && argc == 2) { - server.cthreads = atoi(argv[1]); - if (server.cthreads <= 0 || server.cthreads > MAX_EVENT_LOOPS) { + cserver.cthreads = atoi(argv[1]); + if (cserver.cthreads <= 0 || cserver.cthreads > MAX_EVENT_LOOPS) { err = "Invalid number of threads specified"; goto loaderr; } } else if (!strcasecmp(argv[0],"server-thread-affinity") && argc == 2) { if (strcasecmp(argv[1], "true") == 0) { - server.fThreadAffinity = TRUE; + cserver.fThreadAffinity = TRUE; } else if (strcasecmp(argv[1], "false") == 0) { - server.fThreadAffinity = FALSE; + cserver.fThreadAffinity = FALSE; } else { err = "Unknown argument: server-thread-affinity expects either true or false"; goto loaderr; } } else if (!strcasecmp(argv[0], "active-replica") && argc == 2) { - server.fActiveReplica = yesnotoi(argv[1]); - if (server.repl_slave_ro) { - server.repl_slave_ro = FALSE; + g_pserver->fActiveReplica = yesnotoi(argv[1]); + if (g_pserver->repl_slave_ro) { + g_pserver->repl_slave_ro = FALSE; serverLog(LL_NOTICE, "Notice: \"active-replica yes\" implies \"replica-read-only no\""); } - if (server.fActiveReplica == -1) { - server.fActiveReplica = CONFIG_DEFAULT_ACTIVE_REPLICA; + if (g_pserver->fActiveReplica == -1) { + g_pserver->fActiveReplica = 
CONFIG_DEFAULT_ACTIVE_REPLICA; err = "argument must be 'yes' or 'no'"; goto loaderr; } } else if (!strcasecmp(argv[0],"multi-master") && argc == 2){ - if ((server.enable_multimaster = yesnotoi(argv[1])) == -1) { + if ((g_pserver->enable_multimaster = yesnotoi(argv[1])) == -1) { err = "argument must be 'yes' or 'no'"; goto loaderr; } + } else if (!strcasecmp(argv[0], "version-override") && argc == 2) { + KEYDB_SET_VERSION = zstrdup(argv[1]); + serverLog(LL_WARNING, "Warning version is overriden to: %s\n", KEYDB_SET_VERSION); } else { err = "Bad directive or wrong number of arguments"; goto loaderr; } @@ -866,7 +871,7 @@ void loadServerConfigFromString(char *config) { } /* Sanity checks. */ - if (server.cluster_enabled && listLength(server.masters)) { + if (g_pserver->cluster_enabled && listLength(g_pserver->masters)) { linenum = slaveof_linenum; i = linenum-1; err = "replicaof directive not allowed in cluster mode"; @@ -975,8 +980,8 @@ void configSetCommand(client *c) { addReplyError(c, "dbfilename can't be a path, just a filename"); return; } - zfree(server.rdb_filename); - server.rdb_filename = zstrdup(szFromObj(o)); + zfree(g_pserver->rdb_filename); + g_pserver->rdb_filename = zstrdup(szFromObj(o)); } config_set_special_field("requirepass") { if (sdslen(szFromObj(o)) > CONFIG_AUTHPASS_MAX_LEN) goto badfmt; /* The old "requirepass" directive just translates to setting @@ -986,39 +991,39 @@ void configSetCommand(client *c) { ACLSetUser(DefaultUser,aclop,sdslen(aclop)); sdsfree(aclop); } config_set_special_field("masteruser") { - zfree(server.default_masteruser); - server.default_masteruser = ((char*)ptrFromObj(o))[0] ? zstrdup(szFromObj(o)) : NULL; + zfree(cserver.default_masteruser); + cserver.default_masteruser = ((char*)ptrFromObj(o))[0] ? zstrdup(szFromObj(o)) : NULL; } config_set_special_field("masterauth") { - zfree(server.default_masterauth); - server.default_masterauth = ((char*)ptrFromObj(o))[0] ? 
zstrdup(szFromObj(o)) : NULL; + zfree(cserver.default_masterauth); + cserver.default_masterauth = ((char*)ptrFromObj(o))[0] ? zstrdup(szFromObj(o)) : NULL; } config_set_special_field("cluster-announce-ip") { - zfree(server.cluster_announce_ip); - server.cluster_announce_ip = ((char*)ptrFromObj(o))[0] ? zstrdup(szFromObj(o)) : NULL; + zfree(g_pserver->cluster_announce_ip); + g_pserver->cluster_announce_ip = ((char*)ptrFromObj(o))[0] ? zstrdup(szFromObj(o)) : NULL; } config_set_special_field("maxclients") { - int orig_value = server.maxclients; + int orig_value = g_pserver->maxclients; if (getLongLongFromObject(o,&ll) == C_ERR || ll < 1) goto badfmt; /* Try to check if the OS is capable of supporting so many FDs. */ - server.maxclients = ll; + g_pserver->maxclients = ll; serverAssert(FALSE); if (ll > orig_value) { adjustOpenFilesLimit(); - if (server.maxclients != ll) { - addReplyErrorFormat(c,"The operating system is not able to handle the specified number of clients, try with %d", server.maxclients); - server.maxclients = orig_value; + if (g_pserver->maxclients != ll) { + addReplyErrorFormat(c,"The operating system is not able to handle the specified number of clients, try with %d", g_pserver->maxclients); + g_pserver->maxclients = orig_value; return; } - if ((unsigned int) aeGetSetSize(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el) < - server.maxclients + CONFIG_FDSET_INCR) + if ((unsigned int) aeGetSetSize(g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].el) < + g_pserver->maxclients + CONFIG_FDSET_INCR) { - for (int iel = 0; iel < server.cthreads; ++iel) + for (int iel = 0; iel < cserver.cthreads; ++iel) { - if (aeResizeSetSize(server.rgthreadvar[iel].el, - server.maxclients + CONFIG_FDSET_INCR) == AE_ERR) + if (aeResizeSetSize(g_pserver->rgthreadvar[iel].el, + g_pserver->maxclients + CONFIG_FDSET_INCR) == AE_ERR) { addReplyError(c,"The event loop API used by Redis is not able to handle the specified number of clients"); - server.maxclients = orig_value; + 
g_pserver->maxclients = orig_value; return; } } @@ -1028,9 +1033,9 @@ void configSetCommand(client *c) { int enable = yesnotoi(szFromObj(o)); if (enable == -1) goto badfmt; - if (enable == 0 && server.aof_state != AOF_OFF) { + if (enable == 0 && g_pserver->aof_state != AOF_OFF) { stopAppendOnly(); - } else if (enable && server.aof_state == AOF_OFF) { + } else if (enable && g_pserver->aof_state == AOF_OFF) { if (startAppendOnly() == C_ERR) { addReplyError(c, "Unable to turn on AOF. Check server logs."); @@ -1116,63 +1121,63 @@ void configSetCommand(client *c) { soft = memtoll(v[j+2],NULL); soft_seconds = strtoll(v[j+3],NULL,10); - server.client_obuf_limits[type].hard_limit_bytes = hard; - server.client_obuf_limits[type].soft_limit_bytes = soft; - server.client_obuf_limits[type].soft_limit_seconds = soft_seconds; + cserver.client_obuf_limits[type].hard_limit_bytes = hard; + cserver.client_obuf_limits[type].soft_limit_bytes = soft; + cserver.client_obuf_limits[type].soft_limit_seconds = soft_seconds; } sdsfreesplitres(v,vlen); } config_set_special_field("notify-keyspace-events") { int flags = keyspaceEventsStringToFlags(szFromObj(o)); if (flags == -1) goto badfmt; - server.notify_keyspace_events = flags; + g_pserver->notify_keyspace_events = flags; } config_set_special_field_with_alias("slave-announce-ip", "replica-announce-ip") { - zfree(server.slave_announce_ip); - server.slave_announce_ip = ((char*)ptrFromObj(o))[0] ? zstrdup(szFromObj(o)) : NULL; + zfree(g_pserver->slave_announce_ip); + g_pserver->slave_announce_ip = ((char*)ptrFromObj(o))[0] ? zstrdup(szFromObj(o)) : NULL; /* Boolean fields. * config_set_bool_field(name,var). 
*/ } config_set_bool_field( - "rdbcompression", server.rdb_compression) { + "rdbcompression", g_pserver->rdb_compression) { } config_set_bool_field( - "repl-disable-tcp-nodelay",server.repl_disable_tcp_nodelay) { + "repl-disable-tcp-nodelay",g_pserver->repl_disable_tcp_nodelay) { } config_set_bool_field( - "repl-diskless-sync",server.repl_diskless_sync) { + "repl-diskless-sync",g_pserver->repl_diskless_sync) { } config_set_bool_field( - "cluster-require-full-coverage",server.cluster_require_full_coverage) { + "cluster-require-full-coverage",g_pserver->cluster_require_full_coverage) { } config_set_bool_field( - "cluster-slave-no-failover",server.cluster_slave_no_failover) { + "cluster-slave-no-failover",g_pserver->cluster_slave_no_failover) { } config_set_bool_field( - "cluster-replica-no-failover",server.cluster_slave_no_failover) { + "cluster-replica-no-failover",g_pserver->cluster_slave_no_failover) { } config_set_bool_field( - "aof-rewrite-incremental-fsync",server.aof_rewrite_incremental_fsync) { + "aof-rewrite-incremental-fsync",g_pserver->aof_rewrite_incremental_fsync) { } config_set_bool_field( - "rdb-save-incremental-fsync",server.rdb_save_incremental_fsync) { + "rdb-save-incremental-fsync",g_pserver->rdb_save_incremental_fsync) { } config_set_bool_field( - "aof-load-truncated",server.aof_load_truncated) { + "aof-load-truncated",g_pserver->aof_load_truncated) { } config_set_bool_field( - "aof-use-rdb-preamble",server.aof_use_rdb_preamble) { + "aof-use-rdb-preamble",g_pserver->aof_use_rdb_preamble) { } config_set_bool_field( - "slave-serve-stale-data",server.repl_serve_stale_data) { + "slave-serve-stale-data",g_pserver->repl_serve_stale_data) { } config_set_bool_field( - "replica-serve-stale-data",server.repl_serve_stale_data) { + "replica-serve-stale-data",g_pserver->repl_serve_stale_data) { } config_set_bool_field( - "slave-read-only",server.repl_slave_ro) { + "slave-read-only",g_pserver->repl_slave_ro) { } config_set_bool_field( - 
"replica-read-only",server.repl_slave_ro) { + "replica-read-only",g_pserver->repl_slave_ro) { } config_set_bool_field( - "slave-ignore-maxmemory",server.repl_slave_ignore_maxmemory) { + "slave-ignore-maxmemory",g_pserver->repl_slave_ignore_maxmemory) { } config_set_bool_field( - "replica-ignore-maxmemory",server.repl_slave_ignore_maxmemory) { + "replica-ignore-maxmemory",g_pserver->repl_slave_ignore_maxmemory) { } config_set_bool_field( - "activerehashing",server.activerehashing) { + "activerehashing",g_pserver->activerehashing) { } config_set_bool_field( - "activedefrag",server.active_defrag_enabled) { + "activedefrag",cserver.active_defrag_enabled) { #ifndef HAVE_DEFRAG - if (server.active_defrag_enabled) { - server.active_defrag_enabled = 0; + if (cserver.active_defrag_enabled) { + cserver.active_defrag_enabled = 0; addReplyError(c, "-DISABLED Active defragmentation cannot be enabled: it " "requires a Redis server compiled with a modified Jemalloc " @@ -1182,128 +1187,128 @@ void configSetCommand(client *c) { } #endif } config_set_bool_field( - "protected-mode",server.protected_mode) { + "protected-mode",g_pserver->protected_mode) { } config_set_bool_field( - "stop-writes-on-bgsave-error",server.stop_writes_on_bgsave_err) { + "stop-writes-on-bgsave-error",g_pserver->stop_writes_on_bgsave_err) { } config_set_bool_field( - "lazyfree-lazy-eviction",server.lazyfree_lazy_eviction) { + "lazyfree-lazy-eviction",g_pserver->lazyfree_lazy_eviction) { } config_set_bool_field( - "lazyfree-lazy-expire",server.lazyfree_lazy_expire) { + "lazyfree-lazy-expire",g_pserver->lazyfree_lazy_expire) { } config_set_bool_field( - "lazyfree-lazy-server-del",server.lazyfree_lazy_server_del) { + "lazyfree-lazy-server-del",g_pserver->lazyfree_lazy_server_del) { } config_set_bool_field( - "slave-lazy-flush",server.repl_slave_lazy_flush) { + "slave-lazy-flush",g_pserver->repl_slave_lazy_flush) { } config_set_bool_field( - "replica-lazy-flush",server.repl_slave_lazy_flush) { + 
"replica-lazy-flush",g_pserver->repl_slave_lazy_flush) { } config_set_bool_field( - "no-appendfsync-on-rewrite",server.aof_no_fsync_on_rewrite) { + "no-appendfsync-on-rewrite",g_pserver->aof_no_fsync_on_rewrite) { } config_set_bool_field( - "dynamic-hz",server.dynamic_hz) { + "dynamic-hz",g_pserver->dynamic_hz) { /* Numerical fields. * config_set_numerical_field(name,var,min,max) */ } config_set_numerical_field( - "tcp-keepalive",server.tcpkeepalive,0,INT_MAX) { + "tcp-keepalive",cserver.tcpkeepalive,0,INT_MAX) { } config_set_numerical_field( - "maxmemory-samples",server.maxmemory_samples,1,INT_MAX) { + "maxmemory-samples",g_pserver->maxmemory_samples,1,INT_MAX) { } config_set_numerical_field( - "lfu-log-factor",server.lfu_log_factor,0,INT_MAX) { + "lfu-log-factor",g_pserver->lfu_log_factor,0,INT_MAX) { } config_set_numerical_field( - "lfu-decay-time",server.lfu_decay_time,0,INT_MAX) { + "lfu-decay-time",g_pserver->lfu_decay_time,0,INT_MAX) { } config_set_numerical_field( - "timeout",server.maxidletime,0,INT_MAX) { + "timeout",cserver.maxidletime,0,INT_MAX) { } config_set_numerical_field( - "active-defrag-threshold-lower",server.active_defrag_threshold_lower,0,1000) { + "active-defrag-threshold-lower",cserver.active_defrag_threshold_lower,0,1000) { } config_set_numerical_field( - "active-defrag-threshold-upper",server.active_defrag_threshold_upper,0,1000) { + "active-defrag-threshold-upper",cserver.active_defrag_threshold_upper,0,1000) { } config_set_memory_field( - "active-defrag-ignore-bytes",server.active_defrag_ignore_bytes) { + "active-defrag-ignore-bytes",cserver.active_defrag_ignore_bytes) { } config_set_numerical_field( - "active-defrag-cycle-min",server.active_defrag_cycle_min,1,99) { + "active-defrag-cycle-min",cserver.active_defrag_cycle_min,1,99) { } config_set_numerical_field( - "active-defrag-cycle-max",server.active_defrag_cycle_max,1,99) { + "active-defrag-cycle-max",cserver.active_defrag_cycle_max,1,99) { } config_set_numerical_field( - 
"active-defrag-max-scan-fields",server.active_defrag_max_scan_fields,1,LONG_MAX) { + "active-defrag-max-scan-fields",cserver.active_defrag_max_scan_fields,1,LONG_MAX) { } config_set_numerical_field( - "auto-aof-rewrite-percentage",server.aof_rewrite_perc,0,INT_MAX){ + "auto-aof-rewrite-percentage",g_pserver->aof_rewrite_perc,0,INT_MAX){ } config_set_numerical_field( - "hash-max-ziplist-entries",server.hash_max_ziplist_entries,0,LONG_MAX) { + "hash-max-ziplist-entries",g_pserver->hash_max_ziplist_entries,0,LONG_MAX) { } config_set_numerical_field( - "hash-max-ziplist-value",server.hash_max_ziplist_value,0,LONG_MAX) { + "hash-max-ziplist-value",g_pserver->hash_max_ziplist_value,0,LONG_MAX) { } config_set_numerical_field( - "stream-node-max-bytes",server.stream_node_max_bytes,0,LONG_MAX) { + "stream-node-max-bytes",g_pserver->stream_node_max_bytes,0,LONG_MAX) { } config_set_numerical_field( - "stream-node-max-entries",server.stream_node_max_entries,0,LLONG_MAX) { + "stream-node-max-entries",g_pserver->stream_node_max_entries,0,LLONG_MAX) { } config_set_numerical_field( - "list-max-ziplist-size",server.list_max_ziplist_size,INT_MIN,INT_MAX) { + "list-max-ziplist-size",g_pserver->list_max_ziplist_size,INT_MIN,INT_MAX) { } config_set_numerical_field( - "list-compress-depth",server.list_compress_depth,0,INT_MAX) { + "list-compress-depth",g_pserver->list_compress_depth,0,INT_MAX) { } config_set_numerical_field( - "set-max-intset-entries",server.set_max_intset_entries,0,LONG_MAX) { + "set-max-intset-entries",g_pserver->set_max_intset_entries,0,LONG_MAX) { } config_set_numerical_field( - "zset-max-ziplist-entries",server.zset_max_ziplist_entries,0,LONG_MAX) { + "zset-max-ziplist-entries",g_pserver->zset_max_ziplist_entries,0,LONG_MAX) { } config_set_numerical_field( - "zset-max-ziplist-value",server.zset_max_ziplist_value,0,LONG_MAX) { + "zset-max-ziplist-value",g_pserver->zset_max_ziplist_value,0,LONG_MAX) { } config_set_numerical_field( - 
"hll-sparse-max-bytes",server.hll_sparse_max_bytes,0,LONG_MAX) { + "hll-sparse-max-bytes",g_pserver->hll_sparse_max_bytes,0,LONG_MAX) { } config_set_numerical_field( - "lua-time-limit",server.lua_time_limit,0,LONG_MAX) { + "lua-time-limit",g_pserver->lua_time_limit,0,LONG_MAX) { } config_set_numerical_field( - "slowlog-log-slower-than",server.slowlog_log_slower_than,-1,LLONG_MAX) { + "slowlog-log-slower-than",g_pserver->slowlog_log_slower_than,-1,LLONG_MAX) { } config_set_numerical_field( "slowlog-max-len",ll,0,LONG_MAX) { /* Cast to unsigned. */ - server.slowlog_max_len = (unsigned long)ll; + g_pserver->slowlog_max_len = (unsigned long)ll; } config_set_numerical_field( - "latency-monitor-threshold",server.latency_monitor_threshold,0,LLONG_MAX){ + "latency-monitor-threshold",g_pserver->latency_monitor_threshold,0,LLONG_MAX){ } config_set_numerical_field( - "repl-ping-slave-period",server.repl_ping_slave_period,1,INT_MAX) { + "repl-ping-slave-period",g_pserver->repl_ping_slave_period,1,INT_MAX) { } config_set_numerical_field( - "repl-ping-replica-period",server.repl_ping_slave_period,1,INT_MAX) { + "repl-ping-replica-period",g_pserver->repl_ping_slave_period,1,INT_MAX) { } config_set_numerical_field( - "repl-timeout",server.repl_timeout,1,INT_MAX) { + "repl-timeout",g_pserver->repl_timeout,1,INT_MAX) { } config_set_numerical_field( - "repl-backlog-ttl",server.repl_backlog_time_limit,0,LONG_MAX) { + "repl-backlog-ttl",g_pserver->repl_backlog_time_limit,0,LONG_MAX) { } config_set_numerical_field( - "repl-diskless-sync-delay",server.repl_diskless_sync_delay,0,INT_MAX) { + "repl-diskless-sync-delay",g_pserver->repl_diskless_sync_delay,0,INT_MAX) { } config_set_numerical_field( - "slave-priority",server.slave_priority,0,INT_MAX) { + "slave-priority",g_pserver->slave_priority,0,INT_MAX) { } config_set_numerical_field( - "replica-priority",server.slave_priority,0,INT_MAX) { + "replica-priority",g_pserver->slave_priority,0,INT_MAX) { } config_set_numerical_field( - 
"slave-announce-port",server.slave_announce_port,0,65535) { + "slave-announce-port",g_pserver->slave_announce_port,0,65535) { } config_set_numerical_field( - "replica-announce-port",server.slave_announce_port,0,65535) { + "replica-announce-port",g_pserver->slave_announce_port,0,65535) { } config_set_numerical_field( - "min-slaves-to-write",server.repl_min_slaves_to_write,0,INT_MAX) { + "min-slaves-to-write",g_pserver->repl_min_slaves_to_write,0,INT_MAX) { refreshGoodSlavesCount(); } config_set_numerical_field( - "min-replicas-to-write",server.repl_min_slaves_to_write,0,INT_MAX) { + "min-replicas-to-write",g_pserver->repl_min_slaves_to_write,0,INT_MAX) { refreshGoodSlavesCount(); } config_set_numerical_field( - "min-slaves-max-lag",server.repl_min_slaves_max_lag,0,INT_MAX) { + "min-slaves-max-lag",g_pserver->repl_min_slaves_max_lag,0,INT_MAX) { refreshGoodSlavesCount(); } config_set_numerical_field( - "min-replicas-max-lag",server.repl_min_slaves_max_lag,0,INT_MAX) { + "min-replicas-max-lag",g_pserver->repl_min_slaves_max_lag,0,INT_MAX) { refreshGoodSlavesCount(); } config_set_numerical_field( - "cluster-node-timeout",server.cluster_node_timeout,0,LLONG_MAX) { + "cluster-node-timeout",g_pserver->cluster_node_timeout,0,LLONG_MAX) { } config_set_numerical_field( - "cluster-announce-port",server.cluster_announce_port,0,65535) { + "cluster-announce-port",g_pserver->cluster_announce_port,0,65535) { } config_set_numerical_field( - "cluster-announce-bus-port",server.cluster_announce_bus_port,0,65535) { + "cluster-announce-bus-port",g_pserver->cluster_announce_bus_port,0,65535) { } config_set_numerical_field( - "cluster-migration-barrier",server.cluster_migration_barrier,0,INT_MAX){ + "cluster-migration-barrier",g_pserver->cluster_migration_barrier,0,INT_MAX){ } config_set_numerical_field( - "cluster-slave-validity-factor",server.cluster_slave_validity_factor,0,INT_MAX) { + "cluster-slave-validity-factor",g_pserver->cluster_slave_validity_factor,0,INT_MAX) { } 
config_set_numerical_field( - "cluster-replica-validity-factor",server.cluster_slave_validity_factor,0,INT_MAX) { + "cluster-replica-validity-factor",g_pserver->cluster_slave_validity_factor,0,INT_MAX) { } config_set_numerical_field( - "hz",server.config_hz,0,INT_MAX) { + "hz",g_pserver->config_hz,0,INT_MAX) { /* Hz is more an hint from the user, so we accept values out of range * but cap them to reasonable values. */ - if (server.config_hz < CONFIG_MIN_HZ) server.config_hz = CONFIG_MIN_HZ; - if (server.config_hz > CONFIG_MAX_HZ) server.config_hz = CONFIG_MAX_HZ; + if (g_pserver->config_hz < CONFIG_MIN_HZ) g_pserver->config_hz = CONFIG_MIN_HZ; + if (g_pserver->config_hz > CONFIG_MAX_HZ) g_pserver->config_hz = CONFIG_MAX_HZ; } config_set_numerical_field( "watchdog-period",ll,0,INT_MAX) { if (ll) @@ -1313,32 +1318,32 @@ void configSetCommand(client *c) { /* Memory fields. * config_set_memory_field(name,var) */ - } config_set_memory_field("maxmemory",server.maxmemory) { - if (server.maxmemory) { - if (server.maxmemory < zmalloc_used_memory()) { + } config_set_memory_field("maxmemory",g_pserver->maxmemory) { + if (g_pserver->maxmemory) { + if (g_pserver->maxmemory < zmalloc_used_memory()) { serverLog(LL_WARNING,"WARNING: the new maxmemory value set via CONFIG SET is smaller than the current memory usage. 
This will result in key eviction and/or the inability to accept new write commands depending on the maxmemory-policy."); } freeMemoryIfNeededAndSafe(); } } config_set_memory_field( - "proto-max-bulk-len",server.proto_max_bulk_len) { + "proto-max-bulk-len",g_pserver->proto_max_bulk_len) { } config_set_memory_field( - "client-query-buffer-limit",server.client_max_querybuf_len) { + "client-query-buffer-limit",cserver.client_max_querybuf_len) { } config_set_memory_field("repl-backlog-size",ll) { resizeReplicationBacklog(ll); } config_set_memory_field("auto-aof-rewrite-min-size",ll) { - server.aof_rewrite_min_size = ll; + g_pserver->aof_rewrite_min_size = ll; /* Enumeration fields. * config_set_enum_field(name,var,enum_var) */ } config_set_enum_field( - "loglevel",server.verbosity,loglevel_enum) { + "loglevel",cserver.verbosity,loglevel_enum) { } config_set_enum_field( - "maxmemory-policy",server.maxmemory_policy,maxmemory_policy_enum) { + "maxmemory-policy",g_pserver->maxmemory_policy,maxmemory_policy_enum) { } config_set_bool_field( - "multi-master", server.enable_multimaster) { + "multi-master", g_pserver->enable_multimaster) { } config_set_enum_field( - "appendfsync",server.aof_fsync,aof_fsync_enum) { + "appendfsync",g_pserver->aof_fsync,aof_fsync_enum) { /* Everyhing else is an error... 
*/ } config_set_else { @@ -1403,161 +1408,162 @@ void configGetCommand(client *c) { serverAssertWithInfo(c,o,sdsEncodedObject(o)); /* String values */ - config_get_string_field("dbfilename",server.rdb_filename); - config_get_string_field("masteruser",server.default_masteruser); - config_get_string_field("masterauth",server.default_masterauth); - config_get_string_field("cluster-announce-ip",server.cluster_announce_ip); - config_get_string_field("unixsocket",server.unixsocket); - config_get_string_field("logfile",server.logfile); - config_get_string_field("aclfile",server.acl_filename); - config_get_string_field("pidfile",server.pidfile); - config_get_string_field("slave-announce-ip",server.slave_announce_ip); - config_get_string_field("replica-announce-ip",server.slave_announce_ip); + config_get_string_field("dbfilename",g_pserver->rdb_filename); + config_get_string_field("masteruser",cserver.default_masteruser); + config_get_string_field("masterauth",cserver.default_masterauth); + config_get_string_field("cluster-announce-ip",g_pserver->cluster_announce_ip); + config_get_string_field("unixsocket",g_pserver->unixsocket); + config_get_string_field("logfile",g_pserver->logfile); + config_get_string_field("aclfile",g_pserver->acl_filename); + config_get_string_field("pidfile",cserver.pidfile); + config_get_string_field("slave-announce-ip",g_pserver->slave_announce_ip); + config_get_string_field("replica-announce-ip",g_pserver->slave_announce_ip); + config_get_string_field("version-override",KEYDB_SET_VERSION); /* Numerical values */ - config_get_numerical_field("maxmemory",server.maxmemory); - config_get_numerical_field("proto-max-bulk-len",server.proto_max_bulk_len); - config_get_numerical_field("client-query-buffer-limit",server.client_max_querybuf_len); - config_get_numerical_field("maxmemory-samples",server.maxmemory_samples); - config_get_numerical_field("lfu-log-factor",server.lfu_log_factor); - config_get_numerical_field("lfu-decay-time",server.lfu_decay_time); 
- config_get_numerical_field("timeout",server.maxidletime); - config_get_numerical_field("active-defrag-threshold-lower",server.active_defrag_threshold_lower); - config_get_numerical_field("active-defrag-threshold-upper",server.active_defrag_threshold_upper); - config_get_numerical_field("active-defrag-ignore-bytes",server.active_defrag_ignore_bytes); - config_get_numerical_field("active-defrag-cycle-min",server.active_defrag_cycle_min); - config_get_numerical_field("active-defrag-cycle-max",server.active_defrag_cycle_max); - config_get_numerical_field("active-defrag-max-scan-fields",server.active_defrag_max_scan_fields); + config_get_numerical_field("maxmemory",g_pserver->maxmemory); + config_get_numerical_field("proto-max-bulk-len",g_pserver->proto_max_bulk_len); + config_get_numerical_field("client-query-buffer-limit",cserver.client_max_querybuf_len); + config_get_numerical_field("maxmemory-samples",g_pserver->maxmemory_samples); + config_get_numerical_field("lfu-log-factor",g_pserver->lfu_log_factor); + config_get_numerical_field("lfu-decay-time",g_pserver->lfu_decay_time); + config_get_numerical_field("timeout",cserver.maxidletime); + config_get_numerical_field("active-defrag-threshold-lower",cserver.active_defrag_threshold_lower); + config_get_numerical_field("active-defrag-threshold-upper",cserver.active_defrag_threshold_upper); + config_get_numerical_field("active-defrag-ignore-bytes",cserver.active_defrag_ignore_bytes); + config_get_numerical_field("active-defrag-cycle-min",cserver.active_defrag_cycle_min); + config_get_numerical_field("active-defrag-cycle-max",cserver.active_defrag_cycle_max); + config_get_numerical_field("active-defrag-max-scan-fields",cserver.active_defrag_max_scan_fields); config_get_numerical_field("auto-aof-rewrite-percentage", - server.aof_rewrite_perc); + g_pserver->aof_rewrite_perc); config_get_numerical_field("auto-aof-rewrite-min-size", - server.aof_rewrite_min_size); + g_pserver->aof_rewrite_min_size); 
config_get_numerical_field("hash-max-ziplist-entries", - server.hash_max_ziplist_entries); + g_pserver->hash_max_ziplist_entries); config_get_numerical_field("hash-max-ziplist-value", - server.hash_max_ziplist_value); + g_pserver->hash_max_ziplist_value); config_get_numerical_field("stream-node-max-bytes", - server.stream_node_max_bytes); + g_pserver->stream_node_max_bytes); config_get_numerical_field("stream-node-max-entries", - server.stream_node_max_entries); + g_pserver->stream_node_max_entries); config_get_numerical_field("list-max-ziplist-size", - server.list_max_ziplist_size); + g_pserver->list_max_ziplist_size); config_get_numerical_field("list-compress-depth", - server.list_compress_depth); + g_pserver->list_compress_depth); config_get_numerical_field("set-max-intset-entries", - server.set_max_intset_entries); + g_pserver->set_max_intset_entries); config_get_numerical_field("zset-max-ziplist-entries", - server.zset_max_ziplist_entries); + g_pserver->zset_max_ziplist_entries); config_get_numerical_field("zset-max-ziplist-value", - server.zset_max_ziplist_value); + g_pserver->zset_max_ziplist_value); config_get_numerical_field("hll-sparse-max-bytes", - server.hll_sparse_max_bytes); - config_get_numerical_field("lua-time-limit",server.lua_time_limit); + g_pserver->hll_sparse_max_bytes); + config_get_numerical_field("lua-time-limit",g_pserver->lua_time_limit); config_get_numerical_field("slowlog-log-slower-than", - server.slowlog_log_slower_than); + g_pserver->slowlog_log_slower_than); config_get_numerical_field("latency-monitor-threshold", - server.latency_monitor_threshold); + g_pserver->latency_monitor_threshold); config_get_numerical_field("slowlog-max-len", - server.slowlog_max_len); - config_get_numerical_field("port",server.port); - config_get_numerical_field("cluster-announce-port",server.cluster_announce_port); - config_get_numerical_field("cluster-announce-bus-port",server.cluster_announce_bus_port); - 
config_get_numerical_field("tcp-backlog",server.tcp_backlog); - config_get_numerical_field("databases",server.dbnum); - config_get_numerical_field("repl-ping-slave-period",server.repl_ping_slave_period); - config_get_numerical_field("repl-ping-replica-period",server.repl_ping_slave_period); - config_get_numerical_field("repl-timeout",server.repl_timeout); - config_get_numerical_field("repl-backlog-size",server.repl_backlog_size); - config_get_numerical_field("repl-backlog-ttl",server.repl_backlog_time_limit); - config_get_numerical_field("maxclients",server.maxclients); - config_get_numerical_field("watchdog-period",server.watchdog_period); - config_get_numerical_field("slave-priority",server.slave_priority); - config_get_numerical_field("replica-priority",server.slave_priority); - config_get_numerical_field("slave-announce-port",server.slave_announce_port); - config_get_numerical_field("replica-announce-port",server.slave_announce_port); - config_get_numerical_field("min-slaves-to-write",server.repl_min_slaves_to_write); - config_get_numerical_field("min-replicas-to-write",server.repl_min_slaves_to_write); - config_get_numerical_field("min-slaves-max-lag",server.repl_min_slaves_max_lag); - config_get_numerical_field("min-replicas-max-lag",server.repl_min_slaves_max_lag); - config_get_numerical_field("hz",server.config_hz); - config_get_numerical_field("cluster-node-timeout",server.cluster_node_timeout); - config_get_numerical_field("cluster-migration-barrier",server.cluster_migration_barrier); - config_get_numerical_field("cluster-slave-validity-factor",server.cluster_slave_validity_factor); - config_get_numerical_field("cluster-replica-validity-factor",server.cluster_slave_validity_factor); - config_get_numerical_field("repl-diskless-sync-delay",server.repl_diskless_sync_delay); - config_get_numerical_field("tcp-keepalive",server.tcpkeepalive); + g_pserver->slowlog_max_len); + config_get_numerical_field("port",g_pserver->port); + 
config_get_numerical_field("cluster-announce-port",g_pserver->cluster_announce_port); + config_get_numerical_field("cluster-announce-bus-port",g_pserver->cluster_announce_bus_port); + config_get_numerical_field("tcp-backlog",g_pserver->tcp_backlog); + config_get_numerical_field("databases",cserver.dbnum); + config_get_numerical_field("repl-ping-slave-period",g_pserver->repl_ping_slave_period); + config_get_numerical_field("repl-ping-replica-period",g_pserver->repl_ping_slave_period); + config_get_numerical_field("repl-timeout",g_pserver->repl_timeout); + config_get_numerical_field("repl-backlog-size",g_pserver->repl_backlog_size); + config_get_numerical_field("repl-backlog-ttl",g_pserver->repl_backlog_time_limit); + config_get_numerical_field("maxclients",g_pserver->maxclients); + config_get_numerical_field("watchdog-period",g_pserver->watchdog_period); + config_get_numerical_field("slave-priority",g_pserver->slave_priority); + config_get_numerical_field("replica-priority",g_pserver->slave_priority); + config_get_numerical_field("slave-announce-port",g_pserver->slave_announce_port); + config_get_numerical_field("replica-announce-port",g_pserver->slave_announce_port); + config_get_numerical_field("min-slaves-to-write",g_pserver->repl_min_slaves_to_write); + config_get_numerical_field("min-replicas-to-write",g_pserver->repl_min_slaves_to_write); + config_get_numerical_field("min-slaves-max-lag",g_pserver->repl_min_slaves_max_lag); + config_get_numerical_field("min-replicas-max-lag",g_pserver->repl_min_slaves_max_lag); + config_get_numerical_field("hz",g_pserver->config_hz); + config_get_numerical_field("cluster-node-timeout",g_pserver->cluster_node_timeout); + config_get_numerical_field("cluster-migration-barrier",g_pserver->cluster_migration_barrier); + config_get_numerical_field("cluster-slave-validity-factor",g_pserver->cluster_slave_validity_factor); + config_get_numerical_field("cluster-replica-validity-factor",g_pserver->cluster_slave_validity_factor); + 
config_get_numerical_field("repl-diskless-sync-delay",g_pserver->repl_diskless_sync_delay); + config_get_numerical_field("tcp-keepalive",cserver.tcpkeepalive); /* Bool (yes/no) values */ config_get_bool_field("cluster-require-full-coverage", - server.cluster_require_full_coverage); + g_pserver->cluster_require_full_coverage); config_get_bool_field("cluster-slave-no-failover", - server.cluster_slave_no_failover); + g_pserver->cluster_slave_no_failover); config_get_bool_field("cluster-replica-no-failover", - server.cluster_slave_no_failover); + g_pserver->cluster_slave_no_failover); config_get_bool_field("no-appendfsync-on-rewrite", - server.aof_no_fsync_on_rewrite); + g_pserver->aof_no_fsync_on_rewrite); config_get_bool_field("slave-serve-stale-data", - server.repl_serve_stale_data); + g_pserver->repl_serve_stale_data); config_get_bool_field("replica-serve-stale-data", - server.repl_serve_stale_data); + g_pserver->repl_serve_stale_data); config_get_bool_field("slave-read-only", - server.repl_slave_ro); + g_pserver->repl_slave_ro); config_get_bool_field("replica-read-only", - server.repl_slave_ro); + g_pserver->repl_slave_ro); config_get_bool_field("slave-ignore-maxmemory", - server.repl_slave_ignore_maxmemory); + g_pserver->repl_slave_ignore_maxmemory); config_get_bool_field("replica-ignore-maxmemory", - server.repl_slave_ignore_maxmemory); + g_pserver->repl_slave_ignore_maxmemory); config_get_bool_field("stop-writes-on-bgsave-error", - server.stop_writes_on_bgsave_err); - config_get_bool_field("daemonize", server.daemonize); - config_get_bool_field("rdbcompression", server.rdb_compression); - config_get_bool_field("rdbchecksum", server.rdb_checksum); - config_get_bool_field("activerehashing", server.activerehashing); - config_get_bool_field("activedefrag", server.active_defrag_enabled); - config_get_bool_field("protected-mode", server.protected_mode); + g_pserver->stop_writes_on_bgsave_err); + config_get_bool_field("daemonize", cserver.daemonize); + 
config_get_bool_field("rdbcompression", g_pserver->rdb_compression); + config_get_bool_field("rdbchecksum", g_pserver->rdb_checksum); + config_get_bool_field("activerehashing", g_pserver->activerehashing); + config_get_bool_field("activedefrag", cserver.active_defrag_enabled); + config_get_bool_field("protected-mode", g_pserver->protected_mode); config_get_bool_field("repl-disable-tcp-nodelay", - server.repl_disable_tcp_nodelay); + g_pserver->repl_disable_tcp_nodelay); config_get_bool_field("repl-diskless-sync", - server.repl_diskless_sync); + g_pserver->repl_diskless_sync); config_get_bool_field("aof-rewrite-incremental-fsync", - server.aof_rewrite_incremental_fsync); + g_pserver->aof_rewrite_incremental_fsync); config_get_bool_field("rdb-save-incremental-fsync", - server.rdb_save_incremental_fsync); + g_pserver->rdb_save_incremental_fsync); config_get_bool_field("aof-load-truncated", - server.aof_load_truncated); + g_pserver->aof_load_truncated); config_get_bool_field("aof-use-rdb-preamble", - server.aof_use_rdb_preamble); + g_pserver->aof_use_rdb_preamble); config_get_bool_field("lazyfree-lazy-eviction", - server.lazyfree_lazy_eviction); + g_pserver->lazyfree_lazy_eviction); config_get_bool_field("lazyfree-lazy-expire", - server.lazyfree_lazy_expire); + g_pserver->lazyfree_lazy_expire); config_get_bool_field("lazyfree-lazy-server-del", - server.lazyfree_lazy_server_del); + g_pserver->lazyfree_lazy_server_del); config_get_bool_field("slave-lazy-flush", - server.repl_slave_lazy_flush); + g_pserver->repl_slave_lazy_flush); config_get_bool_field("replica-lazy-flush", - server.repl_slave_lazy_flush); + g_pserver->repl_slave_lazy_flush); config_get_bool_field("dynamic-hz", - server.dynamic_hz); + g_pserver->dynamic_hz); /* Enum values */ config_get_enum_field("maxmemory-policy", - server.maxmemory_policy,maxmemory_policy_enum); + g_pserver->maxmemory_policy,maxmemory_policy_enum); config_get_enum_field("loglevel", - server.verbosity,loglevel_enum); + 
cserver.verbosity,loglevel_enum); config_get_enum_field("supervised", - server.supervised_mode,supervised_mode_enum); + cserver.supervised_mode,supervised_mode_enum); config_get_enum_field("appendfsync", - server.aof_fsync,aof_fsync_enum); + g_pserver->aof_fsync,aof_fsync_enum); config_get_enum_field("syslog-facility", - server.syslog_facility,syslog_facility_enum); + g_pserver->syslog_facility,syslog_facility_enum); /* Everything we can't handle with macros follows. */ if (stringmatch(pattern,"appendonly",1)) { addReplyBulkCString(c,"appendonly"); - addReplyBulkCString(c,server.aof_state == AOF_OFF ? "no" : "yes"); + addReplyBulkCString(c,g_pserver->aof_state == AOF_OFF ? "no" : "yes"); matches++; } if (stringmatch(pattern,"dir",1)) { @@ -1574,11 +1580,11 @@ void configGetCommand(client *c) { sds buf = sdsempty(); int j; - for (j = 0; j < server.saveparamslen; j++) { + for (j = 0; j < g_pserver->saveparamslen; j++) { buf = sdscatprintf(buf,"%jd %d", - (intmax_t)server.saveparams[j].seconds, - server.saveparams[j].changes); - if (j != server.saveparamslen-1) + (intmax_t)g_pserver->saveparams[j].seconds, + g_pserver->saveparams[j].changes); + if (j != g_pserver->saveparamslen-1) buf = sdscatlen(buf," ",1); } addReplyBulkCString(c,"save"); @@ -1593,9 +1599,9 @@ void configGetCommand(client *c) { for (j = 0; j < CLIENT_TYPE_OBUF_COUNT; j++) { buf = sdscatprintf(buf,"%s %llu %llu %ld", getClientTypeName(j), - server.client_obuf_limits[j].hard_limit_bytes, - server.client_obuf_limits[j].soft_limit_bytes, - (long) server.client_obuf_limits[j].soft_limit_seconds); + cserver.client_obuf_limits[j].hard_limit_bytes, + cserver.client_obuf_limits[j].soft_limit_bytes, + (long) cserver.client_obuf_limits[j].soft_limit_seconds); if (j != CLIENT_TYPE_OBUF_COUNT-1) buf = sdscatlen(buf," ",1); } @@ -1606,7 +1612,7 @@ void configGetCommand(client *c) { } if (stringmatch(pattern,"unixsocketperm",1)) { char buf[32]; - snprintf(buf,sizeof(buf),"%o",server.unixsocketperm); + 
snprintf(buf,sizeof(buf),"%o",g_pserver->unixsocketperm); addReplyBulkCString(c,"unixsocketperm"); addReplyBulkCString(c,buf); matches++; @@ -1618,7 +1624,7 @@ void configGetCommand(client *c) { "slaveof" : "replicaof"; char buf[256]; addReplyBulkCString(c,optname); - if (listLength(server.masters) == 0) + if (listLength(g_pserver->masters) == 0) { buf[0] = '\0'; addReplyBulkCString(c,buf); @@ -1627,7 +1633,7 @@ void configGetCommand(client *c) { { listIter li; listNode *ln; - listRewind(server.masters, &li); + listRewind(g_pserver->masters, &li); while ((ln = listNext(&li))) { struct redisMaster *mi = (struct redisMaster*)listNodeValue(ln); @@ -1640,7 +1646,7 @@ void configGetCommand(client *c) { } if (stringmatch(pattern,"notify-keyspace-events",1)) { robj *flagsobj = createObject(OBJ_STRING, - keyspaceEventsFlagsToString(server.notify_keyspace_events)); + keyspaceEventsFlagsToString(g_pserver->notify_keyspace_events)); addReplyBulkCString(c,"notify-keyspace-events"); addReplyBulk(c,flagsobj); @@ -1648,7 +1654,7 @@ void configGetCommand(client *c) { matches++; } if (stringmatch(pattern,"bind",1)) { - sds aux = sdsjoin(server.bindaddr,server.bindaddr_count," "); + sds aux = sdsjoin(g_pserver->bindaddr,g_pserver->bindaddr_count," "); addReplyBulkCString(c,"bind"); addReplyBulkCString(c,aux); @@ -1956,7 +1962,7 @@ void rewriteConfigEnumOption(struct rewriteConfigState *state, const char *optio /* Rewrite the syslog-facility option. */ void rewriteConfigSyslogfacilityOption(struct rewriteConfigState *state) { - int value = server.syslog_facility; + int value = g_pserver->syslog_facility; int force = value != LOG_LOCAL0; const char *name = NULL, *option = "syslog-facility"; sds line; @@ -1974,12 +1980,12 @@ void rewriteConfigSaveOption(struct rewriteConfigState *state) { /* Note that if there are no save parameters at all, all the current * config line with "save" will be detected as orphaned and deleted, * resulting into no RDB persistence as expected. 
*/ - for (j = 0; j < server.saveparamslen; j++) { + for (j = 0; j < g_pserver->saveparamslen; j++) { line = sdscatprintf(sdsempty(),"save %ld %d", - (long) server.saveparams[j].seconds, server.saveparams[j].changes); + (long) g_pserver->saveparams[j].seconds, g_pserver->saveparams[j].changes); rewriteConfigRewriteLine(state,"save",line,1); } - /* Mark "save" as processed in case server.saveparamslen is zero. */ + /* Mark "save" as processed in case g_pserver->saveparamslen is zero. */ rewriteConfigMarkAsProcessed(state,"save"); } @@ -1988,7 +1994,7 @@ void rewriteConfigUserOption(struct rewriteConfigState *state) { /* If there is a user file defined we just mark this configuration * directive as processed, so that all the lines containing users * inside the config file gets discarded. */ - if (server.acl_filename[0] != '\0') { + if (g_pserver->acl_filename[0] != '\0') { rewriteConfigMarkAsProcessed(state,"user"); return; } @@ -2031,14 +2037,14 @@ void rewriteConfigSlaveofOption(struct rewriteConfigState *state, const char *op /* If this is a master, we want all the slaveof config options * in the file to be removed. Note that if this is a cluster instance * we don't want a slaveof directive inside redis.conf. */ - if (server.cluster_enabled || listLength(server.masters) == 0) { + if (g_pserver->cluster_enabled || listLength(g_pserver->masters) == 0) { rewriteConfigMarkAsProcessed(state,option); return; } listIter li; listNode *ln; - listRewind(server.masters, &li); + listRewind(g_pserver->masters, &li); while ((ln = listNext(&li))) { struct redisMaster *mi = (struct redisMaster*)listNodeValue(ln); @@ -2052,11 +2058,11 @@ void rewriteConfigSlaveofOption(struct rewriteConfigState *state, const char *op /* Rewrite the notify-keyspace-events option. 
*/ void rewriteConfigNotifykeyspaceeventsOption(struct rewriteConfigState *state) { - int force = server.notify_keyspace_events != 0; + int force = g_pserver->notify_keyspace_events != 0; const char *option = "notify-keyspace-events"; sds line, flags; - flags = keyspaceEventsFlagsToString(server.notify_keyspace_events); + flags = keyspaceEventsFlagsToString(g_pserver->notify_keyspace_events); line = sdsnew(option); line = sdscatlen(line, " ", 1); line = sdscatrepr(line, flags, sdslen(flags)); @@ -2070,25 +2076,25 @@ void rewriteConfigClientoutputbufferlimitOption(struct rewriteConfigState *state const char *option = "client-output-buffer-limit"; for (j = 0; j < CLIENT_TYPE_OBUF_COUNT; j++) { - int force = (server.client_obuf_limits[j].hard_limit_bytes != + int force = (cserver.client_obuf_limits[j].hard_limit_bytes != clientBufferLimitsDefaults[j].hard_limit_bytes) || - (server.client_obuf_limits[j].soft_limit_bytes != + (cserver.client_obuf_limits[j].soft_limit_bytes != clientBufferLimitsDefaults[j].soft_limit_bytes) || - (server.client_obuf_limits[j].soft_limit_seconds != + (cserver.client_obuf_limits[j].soft_limit_seconds != clientBufferLimitsDefaults[j].soft_limit_seconds); sds line; char hard[64], soft[64]; rewriteConfigFormatMemory(hard,sizeof(hard), - server.client_obuf_limits[j].hard_limit_bytes); + cserver.client_obuf_limits[j].hard_limit_bytes); rewriteConfigFormatMemory(soft,sizeof(soft), - server.client_obuf_limits[j].soft_limit_bytes); + cserver.client_obuf_limits[j].soft_limit_bytes); const char *tname = getClientTypeName(j); if (!strcmp(tname,"slave")) tname = "replica"; line = sdscatprintf(sdsempty(),"%s %s %s %s %ld", option, tname, hard, soft, - (long) server.client_obuf_limits[j].soft_limit_seconds); + (long) cserver.client_obuf_limits[j].soft_limit_seconds); rewriteConfigRewriteLine(state,option,line,force); } } @@ -2100,13 +2106,13 @@ void rewriteConfigBindOption(struct rewriteConfigState *state) { const char *option = "bind"; /* Nothing to 
rewrite if we don't have bind addresses. */ - if (server.bindaddr_count == 0) { + if (g_pserver->bindaddr_count == 0) { rewriteConfigMarkAsProcessed(state,option); return; } /* Rewrite as bind ... */ - addresses = sdsjoin(server.bindaddr,server.bindaddr_count," "); + addresses = sdsjoin(g_pserver->bindaddr,g_pserver->bindaddr_count," "); line = sdsnew(option); line = sdscatlen(line, " ", 1); line = sdscatsds(line, addresses); @@ -2274,113 +2280,114 @@ int rewriteConfig(char *path) { /* Step 2: rewrite every single option, replacing or appending it inside * the rewrite state. */ - rewriteConfigYesNoOption(state,"daemonize",server.daemonize,0); - rewriteConfigStringOption(state,"pidfile",server.pidfile,CONFIG_DEFAULT_PID_FILE); - rewriteConfigNumericalOption(state,"port",server.port,CONFIG_DEFAULT_SERVER_PORT); - rewriteConfigNumericalOption(state,"cluster-announce-port",server.cluster_announce_port,CONFIG_DEFAULT_CLUSTER_ANNOUNCE_PORT); - rewriteConfigNumericalOption(state,"cluster-announce-bus-port",server.cluster_announce_bus_port,CONFIG_DEFAULT_CLUSTER_ANNOUNCE_BUS_PORT); - rewriteConfigNumericalOption(state,"tcp-backlog",server.tcp_backlog,CONFIG_DEFAULT_TCP_BACKLOG); + rewriteConfigYesNoOption(state,"daemonize",cserver.daemonize,0); + rewriteConfigStringOption(state,"pidfile",cserver.pidfile,CONFIG_DEFAULT_PID_FILE); + rewriteConfigNumericalOption(state,"port",g_pserver->port,CONFIG_DEFAULT_SERVER_PORT); + rewriteConfigNumericalOption(state,"cluster-announce-port",g_pserver->cluster_announce_port,CONFIG_DEFAULT_CLUSTER_ANNOUNCE_PORT); + rewriteConfigNumericalOption(state,"cluster-announce-bus-port",g_pserver->cluster_announce_bus_port,CONFIG_DEFAULT_CLUSTER_ANNOUNCE_BUS_PORT); + rewriteConfigNumericalOption(state,"tcp-backlog",g_pserver->tcp_backlog,CONFIG_DEFAULT_TCP_BACKLOG); rewriteConfigBindOption(state); - rewriteConfigStringOption(state,"unixsocket",server.unixsocket,NULL); - 
rewriteConfigOctalOption(state,"unixsocketperm",server.unixsocketperm,CONFIG_DEFAULT_UNIX_SOCKET_PERM); - rewriteConfigNumericalOption(state,"timeout",server.maxidletime,CONFIG_DEFAULT_CLIENT_TIMEOUT); - rewriteConfigNumericalOption(state,"tcp-keepalive",server.tcpkeepalive,CONFIG_DEFAULT_TCP_KEEPALIVE); - rewriteConfigNumericalOption(state,"replica-announce-port",server.slave_announce_port,CONFIG_DEFAULT_SLAVE_ANNOUNCE_PORT); - rewriteConfigEnumOption(state,"loglevel",server.verbosity,loglevel_enum,CONFIG_DEFAULT_VERBOSITY); - rewriteConfigStringOption(state,"logfile",server.logfile,CONFIG_DEFAULT_LOGFILE); - rewriteConfigStringOption(state,"aclfile",server.acl_filename,CONFIG_DEFAULT_ACL_FILENAME); - rewriteConfigYesNoOption(state,"syslog-enabled",server.syslog_enabled,CONFIG_DEFAULT_SYSLOG_ENABLED); - rewriteConfigStringOption(state,"syslog-ident",server.syslog_ident,CONFIG_DEFAULT_SYSLOG_IDENT); + rewriteConfigStringOption(state,"unixsocket",g_pserver->unixsocket,NULL); + rewriteConfigOctalOption(state,"unixsocketperm",g_pserver->unixsocketperm,CONFIG_DEFAULT_UNIX_SOCKET_PERM); + rewriteConfigNumericalOption(state,"timeout",cserver.maxidletime,CONFIG_DEFAULT_CLIENT_TIMEOUT); + rewriteConfigNumericalOption(state,"tcp-keepalive",cserver.tcpkeepalive,CONFIG_DEFAULT_TCP_KEEPALIVE); + rewriteConfigNumericalOption(state,"replica-announce-port",g_pserver->slave_announce_port,CONFIG_DEFAULT_SLAVE_ANNOUNCE_PORT); + rewriteConfigEnumOption(state,"loglevel",cserver.verbosity,loglevel_enum,CONFIG_DEFAULT_VERBOSITY); + rewriteConfigStringOption(state,"logfile",g_pserver->logfile,CONFIG_DEFAULT_LOGFILE); + rewriteConfigStringOption(state,"aclfile",g_pserver->acl_filename,CONFIG_DEFAULT_ACL_FILENAME); + rewriteConfigYesNoOption(state,"syslog-enabled",g_pserver->syslog_enabled,CONFIG_DEFAULT_SYSLOG_ENABLED); + rewriteConfigStringOption(state,"syslog-ident",g_pserver->syslog_ident,CONFIG_DEFAULT_SYSLOG_IDENT); rewriteConfigSyslogfacilityOption(state); 
rewriteConfigSaveOption(state); rewriteConfigUserOption(state); - rewriteConfigNumericalOption(state,"databases",server.dbnum,CONFIG_DEFAULT_DBNUM); - rewriteConfigYesNoOption(state,"stop-writes-on-bgsave-error",server.stop_writes_on_bgsave_err,CONFIG_DEFAULT_STOP_WRITES_ON_BGSAVE_ERROR); - rewriteConfigYesNoOption(state,"rdbcompression",server.rdb_compression,CONFIG_DEFAULT_RDB_COMPRESSION); - rewriteConfigYesNoOption(state,"rdbchecksum",server.rdb_checksum,CONFIG_DEFAULT_RDB_CHECKSUM); - rewriteConfigStringOption(state,"dbfilename",server.rdb_filename,CONFIG_DEFAULT_RDB_FILENAME); + rewriteConfigNumericalOption(state,"databases",cserver.dbnum,CONFIG_DEFAULT_DBNUM); + rewriteConfigYesNoOption(state,"stop-writes-on-bgsave-error",g_pserver->stop_writes_on_bgsave_err,CONFIG_DEFAULT_STOP_WRITES_ON_BGSAVE_ERROR); + rewriteConfigYesNoOption(state,"rdbcompression",g_pserver->rdb_compression,CONFIG_DEFAULT_RDB_COMPRESSION); + rewriteConfigYesNoOption(state,"rdbchecksum",g_pserver->rdb_checksum,CONFIG_DEFAULT_RDB_CHECKSUM); + rewriteConfigStringOption(state,"dbfilename",g_pserver->rdb_filename,CONFIG_DEFAULT_RDB_FILENAME); rewriteConfigDirOption(state); rewriteConfigSlaveofOption(state,"replicaof"); - rewriteConfigStringOption(state,"replica-announce-ip",server.slave_announce_ip,CONFIG_DEFAULT_SLAVE_ANNOUNCE_IP); - rewriteConfigStringOption(state,"masteruser",server.default_masteruser,NULL); - rewriteConfigStringOption(state,"masterauth",server.default_masterauth,NULL); - rewriteConfigStringOption(state,"cluster-announce-ip",server.cluster_announce_ip,NULL); - rewriteConfigYesNoOption(state,"replica-serve-stale-data",server.repl_serve_stale_data,CONFIG_DEFAULT_SLAVE_SERVE_STALE_DATA); - rewriteConfigYesNoOption(state,"replica-read-only",server.repl_slave_ro,CONFIG_DEFAULT_SLAVE_READ_ONLY); - rewriteConfigYesNoOption(state,"replica-ignore-maxmemory",server.repl_slave_ignore_maxmemory,CONFIG_DEFAULT_SLAVE_IGNORE_MAXMEMORY); - 
rewriteConfigNumericalOption(state,"repl-ping-replica-period",server.repl_ping_slave_period,CONFIG_DEFAULT_REPL_PING_SLAVE_PERIOD); - rewriteConfigNumericalOption(state,"repl-timeout",server.repl_timeout,CONFIG_DEFAULT_REPL_TIMEOUT); - rewriteConfigBytesOption(state,"repl-backlog-size",server.repl_backlog_size,CONFIG_DEFAULT_REPL_BACKLOG_SIZE); - rewriteConfigBytesOption(state,"repl-backlog-ttl",server.repl_backlog_time_limit,CONFIG_DEFAULT_REPL_BACKLOG_TIME_LIMIT); - rewriteConfigYesNoOption(state,"repl-disable-tcp-nodelay",server.repl_disable_tcp_nodelay,CONFIG_DEFAULT_REPL_DISABLE_TCP_NODELAY); - rewriteConfigYesNoOption(state,"repl-diskless-sync",server.repl_diskless_sync,CONFIG_DEFAULT_REPL_DISKLESS_SYNC); - rewriteConfigNumericalOption(state,"repl-diskless-sync-delay",server.repl_diskless_sync_delay,CONFIG_DEFAULT_REPL_DISKLESS_SYNC_DELAY); - rewriteConfigNumericalOption(state,"replica-priority",server.slave_priority,CONFIG_DEFAULT_SLAVE_PRIORITY); - rewriteConfigNumericalOption(state,"min-replicas-to-write",server.repl_min_slaves_to_write,CONFIG_DEFAULT_MIN_SLAVES_TO_WRITE); - rewriteConfigNumericalOption(state,"min-replicas-max-lag",server.repl_min_slaves_max_lag,CONFIG_DEFAULT_MIN_SLAVES_MAX_LAG); + rewriteConfigStringOption(state,"replica-announce-ip",g_pserver->slave_announce_ip,CONFIG_DEFAULT_SLAVE_ANNOUNCE_IP); + rewriteConfigStringOption(state,"masteruser",cserver.default_masteruser,NULL); + rewriteConfigStringOption(state,"masterauth",cserver.default_masterauth,NULL); + rewriteConfigStringOption(state,"cluster-announce-ip",g_pserver->cluster_announce_ip,NULL); + rewriteConfigYesNoOption(state,"replica-serve-stale-data",g_pserver->repl_serve_stale_data,CONFIG_DEFAULT_SLAVE_SERVE_STALE_DATA); + rewriteConfigYesNoOption(state,"replica-read-only",g_pserver->repl_slave_ro,CONFIG_DEFAULT_SLAVE_READ_ONLY); + rewriteConfigYesNoOption(state,"replica-ignore-maxmemory",g_pserver->repl_slave_ignore_maxmemory,CONFIG_DEFAULT_SLAVE_IGNORE_MAXMEMORY); + 
rewriteConfigNumericalOption(state,"repl-ping-replica-period",g_pserver->repl_ping_slave_period,CONFIG_DEFAULT_REPL_PING_SLAVE_PERIOD); + rewriteConfigNumericalOption(state,"repl-timeout",g_pserver->repl_timeout,CONFIG_DEFAULT_REPL_TIMEOUT); + rewriteConfigBytesOption(state,"repl-backlog-size",g_pserver->repl_backlog_size,CONFIG_DEFAULT_REPL_BACKLOG_SIZE); + rewriteConfigBytesOption(state,"repl-backlog-ttl",g_pserver->repl_backlog_time_limit,CONFIG_DEFAULT_REPL_BACKLOG_TIME_LIMIT); + rewriteConfigYesNoOption(state,"repl-disable-tcp-nodelay",g_pserver->repl_disable_tcp_nodelay,CONFIG_DEFAULT_REPL_DISABLE_TCP_NODELAY); + rewriteConfigYesNoOption(state,"repl-diskless-sync",g_pserver->repl_diskless_sync,CONFIG_DEFAULT_REPL_DISKLESS_SYNC); + rewriteConfigNumericalOption(state,"repl-diskless-sync-delay",g_pserver->repl_diskless_sync_delay,CONFIG_DEFAULT_REPL_DISKLESS_SYNC_DELAY); + rewriteConfigNumericalOption(state,"replica-priority",g_pserver->slave_priority,CONFIG_DEFAULT_SLAVE_PRIORITY); + rewriteConfigNumericalOption(state,"min-replicas-to-write",g_pserver->repl_min_slaves_to_write,CONFIG_DEFAULT_MIN_SLAVES_TO_WRITE); + rewriteConfigNumericalOption(state,"min-replicas-max-lag",g_pserver->repl_min_slaves_max_lag,CONFIG_DEFAULT_MIN_SLAVES_MAX_LAG); rewriteConfigRequirepassOption(state,"requirepass"); - rewriteConfigNumericalOption(state,"maxclients",server.maxclients,CONFIG_DEFAULT_MAX_CLIENTS); - rewriteConfigBytesOption(state,"maxmemory",server.maxmemory,CONFIG_DEFAULT_MAXMEMORY); - rewriteConfigBytesOption(state,"proto-max-bulk-len",server.proto_max_bulk_len,CONFIG_DEFAULT_PROTO_MAX_BULK_LEN); - rewriteConfigBytesOption(state,"client-query-buffer-limit",server.client_max_querybuf_len,PROTO_MAX_QUERYBUF_LEN); - rewriteConfigEnumOption(state,"maxmemory-policy",server.maxmemory_policy,maxmemory_policy_enum,CONFIG_DEFAULT_MAXMEMORY_POLICY); - rewriteConfigNumericalOption(state,"maxmemory-samples",server.maxmemory_samples,CONFIG_DEFAULT_MAXMEMORY_SAMPLES); - 
rewriteConfigNumericalOption(state,"lfu-log-factor",server.lfu_log_factor,CONFIG_DEFAULT_LFU_LOG_FACTOR); - rewriteConfigNumericalOption(state,"lfu-decay-time",server.lfu_decay_time,CONFIG_DEFAULT_LFU_DECAY_TIME); - rewriteConfigNumericalOption(state,"active-defrag-threshold-lower",server.active_defrag_threshold_lower,CONFIG_DEFAULT_DEFRAG_THRESHOLD_LOWER); - rewriteConfigNumericalOption(state,"active-defrag-threshold-upper",server.active_defrag_threshold_upper,CONFIG_DEFAULT_DEFRAG_THRESHOLD_UPPER); - rewriteConfigBytesOption(state,"active-defrag-ignore-bytes",server.active_defrag_ignore_bytes,CONFIG_DEFAULT_DEFRAG_IGNORE_BYTES); - rewriteConfigNumericalOption(state,"active-defrag-cycle-min",server.active_defrag_cycle_min,CONFIG_DEFAULT_DEFRAG_CYCLE_MIN); - rewriteConfigNumericalOption(state,"active-defrag-cycle-max",server.active_defrag_cycle_max,CONFIG_DEFAULT_DEFRAG_CYCLE_MAX); - rewriteConfigNumericalOption(state,"active-defrag-max-scan-fields",server.active_defrag_max_scan_fields,CONFIG_DEFAULT_DEFRAG_MAX_SCAN_FIELDS); - rewriteConfigYesNoOption(state,"appendonly",server.aof_state != AOF_OFF,0); - rewriteConfigStringOption(state,"appendfilename",server.aof_filename,CONFIG_DEFAULT_AOF_FILENAME); - rewriteConfigEnumOption(state,"appendfsync",server.aof_fsync,aof_fsync_enum,CONFIG_DEFAULT_AOF_FSYNC); - rewriteConfigYesNoOption(state,"no-appendfsync-on-rewrite",server.aof_no_fsync_on_rewrite,CONFIG_DEFAULT_AOF_NO_FSYNC_ON_REWRITE); - rewriteConfigNumericalOption(state,"auto-aof-rewrite-percentage",server.aof_rewrite_perc,AOF_REWRITE_PERC); - rewriteConfigBytesOption(state,"auto-aof-rewrite-min-size",server.aof_rewrite_min_size,AOF_REWRITE_MIN_SIZE); - rewriteConfigNumericalOption(state,"lua-time-limit",server.lua_time_limit,LUA_SCRIPT_TIME_LIMIT); - rewriteConfigYesNoOption(state,"cluster-enabled",server.cluster_enabled,0); - rewriteConfigStringOption(state,"cluster-config-file",server.cluster_configfile,CONFIG_DEFAULT_CLUSTER_CONFIG_FILE); - 
rewriteConfigYesNoOption(state,"cluster-require-full-coverage",server.cluster_require_full_coverage,CLUSTER_DEFAULT_REQUIRE_FULL_COVERAGE); - rewriteConfigYesNoOption(state,"cluster-replica-no-failover",server.cluster_slave_no_failover,CLUSTER_DEFAULT_SLAVE_NO_FAILOVER); - rewriteConfigNumericalOption(state,"cluster-node-timeout",server.cluster_node_timeout,CLUSTER_DEFAULT_NODE_TIMEOUT); - rewriteConfigNumericalOption(state,"cluster-migration-barrier",server.cluster_migration_barrier,CLUSTER_DEFAULT_MIGRATION_BARRIER); - rewriteConfigNumericalOption(state,"cluster-replica-validity-factor",server.cluster_slave_validity_factor,CLUSTER_DEFAULT_SLAVE_VALIDITY); - rewriteConfigNumericalOption(state,"slowlog-log-slower-than",server.slowlog_log_slower_than,CONFIG_DEFAULT_SLOWLOG_LOG_SLOWER_THAN); - rewriteConfigNumericalOption(state,"latency-monitor-threshold",server.latency_monitor_threshold,CONFIG_DEFAULT_LATENCY_MONITOR_THRESHOLD); - rewriteConfigNumericalOption(state,"slowlog-max-len",server.slowlog_max_len,CONFIG_DEFAULT_SLOWLOG_MAX_LEN); + rewriteConfigNumericalOption(state,"maxclients",g_pserver->maxclients,CONFIG_DEFAULT_MAX_CLIENTS); + rewriteConfigBytesOption(state,"maxmemory",g_pserver->maxmemory,CONFIG_DEFAULT_MAXMEMORY); + rewriteConfigBytesOption(state,"proto-max-bulk-len",g_pserver->proto_max_bulk_len,CONFIG_DEFAULT_PROTO_MAX_BULK_LEN); + rewriteConfigBytesOption(state,"client-query-buffer-limit",cserver.client_max_querybuf_len,PROTO_MAX_QUERYBUF_LEN); + rewriteConfigEnumOption(state,"maxmemory-policy",g_pserver->maxmemory_policy,maxmemory_policy_enum,CONFIG_DEFAULT_MAXMEMORY_POLICY); + rewriteConfigNumericalOption(state,"maxmemory-samples",g_pserver->maxmemory_samples,CONFIG_DEFAULT_MAXMEMORY_SAMPLES); + rewriteConfigNumericalOption(state,"lfu-log-factor",g_pserver->lfu_log_factor,CONFIG_DEFAULT_LFU_LOG_FACTOR); + rewriteConfigNumericalOption(state,"lfu-decay-time",g_pserver->lfu_decay_time,CONFIG_DEFAULT_LFU_DECAY_TIME); + 
rewriteConfigNumericalOption(state,"active-defrag-threshold-lower",cserver.active_defrag_threshold_lower,CONFIG_DEFAULT_DEFRAG_THRESHOLD_LOWER); + rewriteConfigNumericalOption(state,"active-defrag-threshold-upper",cserver.active_defrag_threshold_upper,CONFIG_DEFAULT_DEFRAG_THRESHOLD_UPPER); + rewriteConfigBytesOption(state,"active-defrag-ignore-bytes",cserver.active_defrag_ignore_bytes,CONFIG_DEFAULT_DEFRAG_IGNORE_BYTES); + rewriteConfigNumericalOption(state,"active-defrag-cycle-min",cserver.active_defrag_cycle_min,CONFIG_DEFAULT_DEFRAG_CYCLE_MIN); + rewriteConfigNumericalOption(state,"active-defrag-cycle-max",cserver.active_defrag_cycle_max,CONFIG_DEFAULT_DEFRAG_CYCLE_MAX); + rewriteConfigNumericalOption(state,"active-defrag-max-scan-fields",cserver.active_defrag_max_scan_fields,CONFIG_DEFAULT_DEFRAG_MAX_SCAN_FIELDS); + rewriteConfigYesNoOption(state,"appendonly",g_pserver->aof_state != AOF_OFF,0); + rewriteConfigStringOption(state,"appendfilename",g_pserver->aof_filename,CONFIG_DEFAULT_AOF_FILENAME); + rewriteConfigEnumOption(state,"appendfsync",g_pserver->aof_fsync,aof_fsync_enum,CONFIG_DEFAULT_AOF_FSYNC); + rewriteConfigYesNoOption(state,"no-appendfsync-on-rewrite",g_pserver->aof_no_fsync_on_rewrite,CONFIG_DEFAULT_AOF_NO_FSYNC_ON_REWRITE); + rewriteConfigNumericalOption(state,"auto-aof-rewrite-percentage",g_pserver->aof_rewrite_perc,AOF_REWRITE_PERC); + rewriteConfigBytesOption(state,"auto-aof-rewrite-min-size",g_pserver->aof_rewrite_min_size,AOF_REWRITE_MIN_SIZE); + rewriteConfigNumericalOption(state,"lua-time-limit",g_pserver->lua_time_limit,LUA_SCRIPT_TIME_LIMIT); + rewriteConfigYesNoOption(state,"cluster-enabled",g_pserver->cluster_enabled,0); + rewriteConfigStringOption(state,"cluster-config-file",g_pserver->cluster_configfile,CONFIG_DEFAULT_CLUSTER_CONFIG_FILE); + rewriteConfigYesNoOption(state,"cluster-require-full-coverage",g_pserver->cluster_require_full_coverage,CLUSTER_DEFAULT_REQUIRE_FULL_COVERAGE); + 
rewriteConfigYesNoOption(state,"cluster-replica-no-failover",g_pserver->cluster_slave_no_failover,CLUSTER_DEFAULT_SLAVE_NO_FAILOVER); + rewriteConfigNumericalOption(state,"cluster-node-timeout",g_pserver->cluster_node_timeout,CLUSTER_DEFAULT_NODE_TIMEOUT); + rewriteConfigNumericalOption(state,"cluster-migration-barrier",g_pserver->cluster_migration_barrier,CLUSTER_DEFAULT_MIGRATION_BARRIER); + rewriteConfigNumericalOption(state,"cluster-replica-validity-factor",g_pserver->cluster_slave_validity_factor,CLUSTER_DEFAULT_SLAVE_VALIDITY); + rewriteConfigNumericalOption(state,"slowlog-log-slower-than",g_pserver->slowlog_log_slower_than,CONFIG_DEFAULT_SLOWLOG_LOG_SLOWER_THAN); + rewriteConfigNumericalOption(state,"latency-monitor-threshold",g_pserver->latency_monitor_threshold,CONFIG_DEFAULT_LATENCY_MONITOR_THRESHOLD); + rewriteConfigNumericalOption(state,"slowlog-max-len",g_pserver->slowlog_max_len,CONFIG_DEFAULT_SLOWLOG_MAX_LEN); rewriteConfigNotifykeyspaceeventsOption(state); - rewriteConfigNumericalOption(state,"hash-max-ziplist-entries",server.hash_max_ziplist_entries,OBJ_HASH_MAX_ZIPLIST_ENTRIES); - rewriteConfigNumericalOption(state,"hash-max-ziplist-value",server.hash_max_ziplist_value,OBJ_HASH_MAX_ZIPLIST_VALUE); - rewriteConfigNumericalOption(state,"stream-node-max-bytes",server.stream_node_max_bytes,OBJ_STREAM_NODE_MAX_BYTES); - rewriteConfigNumericalOption(state,"stream-node-max-entries",server.stream_node_max_entries,OBJ_STREAM_NODE_MAX_ENTRIES); - rewriteConfigNumericalOption(state,"list-max-ziplist-size",server.list_max_ziplist_size,OBJ_LIST_MAX_ZIPLIST_SIZE); - rewriteConfigNumericalOption(state,"list-compress-depth",server.list_compress_depth,OBJ_LIST_COMPRESS_DEPTH); - rewriteConfigNumericalOption(state,"set-max-intset-entries",server.set_max_intset_entries,OBJ_SET_MAX_INTSET_ENTRIES); - rewriteConfigNumericalOption(state,"zset-max-ziplist-entries",server.zset_max_ziplist_entries,OBJ_ZSET_MAX_ZIPLIST_ENTRIES); - 
rewriteConfigNumericalOption(state,"zset-max-ziplist-value",server.zset_max_ziplist_value,OBJ_ZSET_MAX_ZIPLIST_VALUE); - rewriteConfigNumericalOption(state,"hll-sparse-max-bytes",server.hll_sparse_max_bytes,CONFIG_DEFAULT_HLL_SPARSE_MAX_BYTES); - rewriteConfigYesNoOption(state,"activerehashing",server.activerehashing,CONFIG_DEFAULT_ACTIVE_REHASHING); - rewriteConfigYesNoOption(state,"activedefrag",server.active_defrag_enabled,CONFIG_DEFAULT_ACTIVE_DEFRAG); - rewriteConfigYesNoOption(state,"protected-mode",server.protected_mode,CONFIG_DEFAULT_PROTECTED_MODE); + rewriteConfigNumericalOption(state,"hash-max-ziplist-entries",g_pserver->hash_max_ziplist_entries,OBJ_HASH_MAX_ZIPLIST_ENTRIES); + rewriteConfigNumericalOption(state,"hash-max-ziplist-value",g_pserver->hash_max_ziplist_value,OBJ_HASH_MAX_ZIPLIST_VALUE); + rewriteConfigNumericalOption(state,"stream-node-max-bytes",g_pserver->stream_node_max_bytes,OBJ_STREAM_NODE_MAX_BYTES); + rewriteConfigNumericalOption(state,"stream-node-max-entries",g_pserver->stream_node_max_entries,OBJ_STREAM_NODE_MAX_ENTRIES); + rewriteConfigNumericalOption(state,"list-max-ziplist-size",g_pserver->list_max_ziplist_size,OBJ_LIST_MAX_ZIPLIST_SIZE); + rewriteConfigNumericalOption(state,"list-compress-depth",g_pserver->list_compress_depth,OBJ_LIST_COMPRESS_DEPTH); + rewriteConfigNumericalOption(state,"set-max-intset-entries",g_pserver->set_max_intset_entries,OBJ_SET_MAX_INTSET_ENTRIES); + rewriteConfigNumericalOption(state,"zset-max-ziplist-entries",g_pserver->zset_max_ziplist_entries,OBJ_ZSET_MAX_ZIPLIST_ENTRIES); + rewriteConfigNumericalOption(state,"zset-max-ziplist-value",g_pserver->zset_max_ziplist_value,OBJ_ZSET_MAX_ZIPLIST_VALUE); + rewriteConfigNumericalOption(state,"hll-sparse-max-bytes",g_pserver->hll_sparse_max_bytes,CONFIG_DEFAULT_HLL_SPARSE_MAX_BYTES); + rewriteConfigYesNoOption(state,"activerehashing",g_pserver->activerehashing,CONFIG_DEFAULT_ACTIVE_REHASHING); + 
rewriteConfigYesNoOption(state,"activedefrag",cserver.active_defrag_enabled,CONFIG_DEFAULT_ACTIVE_DEFRAG); + rewriteConfigYesNoOption(state,"protected-mode",g_pserver->protected_mode,CONFIG_DEFAULT_PROTECTED_MODE); rewriteConfigClientoutputbufferlimitOption(state); - rewriteConfigNumericalOption(state,"hz",server.config_hz,CONFIG_DEFAULT_HZ); - rewriteConfigYesNoOption(state,"aof-rewrite-incremental-fsync",server.aof_rewrite_incremental_fsync,CONFIG_DEFAULT_AOF_REWRITE_INCREMENTAL_FSYNC); - rewriteConfigYesNoOption(state,"rdb-save-incremental-fsync",server.rdb_save_incremental_fsync,CONFIG_DEFAULT_RDB_SAVE_INCREMENTAL_FSYNC); - rewriteConfigYesNoOption(state,"aof-load-truncated",server.aof_load_truncated,CONFIG_DEFAULT_AOF_LOAD_TRUNCATED); - rewriteConfigYesNoOption(state,"aof-use-rdb-preamble",server.aof_use_rdb_preamble,CONFIG_DEFAULT_AOF_USE_RDB_PREAMBLE); - rewriteConfigEnumOption(state,"supervised",server.supervised_mode,supervised_mode_enum,SUPERVISED_NONE); - rewriteConfigYesNoOption(state,"lazyfree-lazy-eviction",server.lazyfree_lazy_eviction,CONFIG_DEFAULT_LAZYFREE_LAZY_EVICTION); - rewriteConfigYesNoOption(state,"lazyfree-lazy-expire",server.lazyfree_lazy_expire,CONFIG_DEFAULT_LAZYFREE_LAZY_EXPIRE); - rewriteConfigYesNoOption(state,"lazyfree-lazy-server-del",server.lazyfree_lazy_server_del,CONFIG_DEFAULT_LAZYFREE_LAZY_SERVER_DEL); - rewriteConfigYesNoOption(state,"replica-lazy-flush",server.repl_slave_lazy_flush,CONFIG_DEFAULT_SLAVE_LAZY_FLUSH); - rewriteConfigYesNoOption(state,"dynamic-hz",server.dynamic_hz,CONFIG_DEFAULT_DYNAMIC_HZ); - rewriteConfigYesNoOption(state,"active-replica",server.fActiveReplica,CONFIG_DEFAULT_ACTIVE_REPLICA); - rewriteConfigYesNoOption(state,"multi-master",server.enable_multimaster,CONFIG_DEFAULT_ENABLE_MULTIMASTER); + rewriteConfigNumericalOption(state,"hz",g_pserver->config_hz,CONFIG_DEFAULT_HZ); + 
rewriteConfigYesNoOption(state,"aof-rewrite-incremental-fsync",g_pserver->aof_rewrite_incremental_fsync,CONFIG_DEFAULT_AOF_REWRITE_INCREMENTAL_FSYNC); + rewriteConfigYesNoOption(state,"rdb-save-incremental-fsync",g_pserver->rdb_save_incremental_fsync,CONFIG_DEFAULT_RDB_SAVE_INCREMENTAL_FSYNC); + rewriteConfigYesNoOption(state,"aof-load-truncated",g_pserver->aof_load_truncated,CONFIG_DEFAULT_AOF_LOAD_TRUNCATED); + rewriteConfigYesNoOption(state,"aof-use-rdb-preamble",g_pserver->aof_use_rdb_preamble,CONFIG_DEFAULT_AOF_USE_RDB_PREAMBLE); + rewriteConfigEnumOption(state,"supervised",cserver.supervised_mode,supervised_mode_enum,SUPERVISED_NONE); + rewriteConfigYesNoOption(state,"lazyfree-lazy-eviction",g_pserver->lazyfree_lazy_eviction,CONFIG_DEFAULT_LAZYFREE_LAZY_EVICTION); + rewriteConfigYesNoOption(state,"lazyfree-lazy-expire",g_pserver->lazyfree_lazy_expire,CONFIG_DEFAULT_LAZYFREE_LAZY_EXPIRE); + rewriteConfigYesNoOption(state,"lazyfree-lazy-server-del",g_pserver->lazyfree_lazy_server_del,CONFIG_DEFAULT_LAZYFREE_LAZY_SERVER_DEL); + rewriteConfigYesNoOption(state,"replica-lazy-flush",g_pserver->repl_slave_lazy_flush,CONFIG_DEFAULT_SLAVE_LAZY_FLUSH); + rewriteConfigYesNoOption(state,"dynamic-hz",g_pserver->dynamic_hz,CONFIG_DEFAULT_DYNAMIC_HZ); + rewriteConfigYesNoOption(state,"active-replica",g_pserver->fActiveReplica,CONFIG_DEFAULT_ACTIVE_REPLICA); + rewriteConfigYesNoOption(state,"multi-master",g_pserver->enable_multimaster,CONFIG_DEFAULT_ENABLE_MULTIMASTER); + rewriteConfigStringOption(state, "version-override",KEYDB_SET_VERSION,KEYDB_REAL_VERSION); /* Rewrite Sentinel config if in Sentinel mode. 
*/ - if (server.sentinel_mode) rewriteConfigSentinelOption(state); + if (g_pserver->sentinel_mode) rewriteConfigSentinelOption(state); /* Step 3: remove all the orphaned lines in the old file, that is, lines * that were used by a config option and are no longer used, like in case @@ -2390,7 +2397,7 @@ int rewriteConfig(char *path) { /* Step 4: generate a new configuration file from the modified state * and write it into the original file. */ newcontent = rewriteConfigGetContentFromState(state); - retval = rewriteConfigOverwriteFile(server.configfile,newcontent); + retval = rewriteConfigOverwriteFile(cserver.configfile,newcontent); sdsfree(newcontent); rewriteConfigReleaseState(state); @@ -2403,7 +2410,7 @@ int rewriteConfig(char *path) { void configCommand(client *c) { /* Only allow CONFIG GET while loading. */ - if (server.loading && strcasecmp(szFromObj(c->argv[1]),"get")) { + if (g_pserver->loading && strcasecmp(szFromObj(c->argv[1]),"get")) { addReplyError(c,"Only CONFIG GET is allowed during loading"); return; } @@ -2426,11 +2433,11 @@ NULL resetCommandTableStats(); addReply(c,shared.ok); } else if (!strcasecmp(szFromObj(c->argv[1]),"rewrite") && c->argc == 2) { - if (server.configfile == NULL) { + if (cserver.configfile == NULL) { addReplyError(c,"The server is running without a config file"); return; } - if (rewriteConfig(server.configfile) == -1) { + if (rewriteConfig(cserver.configfile) == -1) { serverLog(LL_WARNING,"CONFIG REWRITE failed: %s", strerror(errno)); addReplyErrorFormat(c,"Rewriting config file: %s", strerror(errno)); } else { diff --git a/src/db.cpp b/src/db.cpp index 685d56bc8..3e0be4f88 100644 --- a/src/db.cpp +++ b/src/db.cpp @@ -60,16 +60,20 @@ static robj *lookupKey(redisDb *db, robj *key, int flags) { /* Update the access time for the ageing algorithm. * Don't do it if we have a saving child, as this will trigger * a copy on write madness. 
*/ - if (server.rdb_child_pid == -1 && - server.aof_child_pid == -1 && + if (g_pserver->rdb_child_pid == -1 && + g_pserver->aof_child_pid == -1 && !(flags & LOOKUP_NOTOUCH)) { - if (server.maxmemory_policy & MAXMEMORY_FLAG_LFU) { + if (g_pserver->maxmemory_policy & MAXMEMORY_FLAG_LFU) { updateLFU(val); } else { val->lru = LRU_CLOCK(); } } + + if (flags & LOOKUP_UPDATEMVCC) { + val->mvcc_tstamp = getMvccTstamp(); + } return val; } else { return NULL; @@ -106,8 +110,8 @@ robj_roptr lookupKeyReadWithFlags(redisDb *db, robj *key, int flags) { /* Key expired. If we are in the context of a master, expireIfNeeded() * returns 0 only when the key does not exist at all, so it's safe * to return NULL ASAP. */ - if (listLength(server.masters) == 0) { - server.stat_keyspace_misses++; + if (listLength(g_pserver->masters) == 0) { + g_pserver->stat_keyspace_misses++; notifyKeyspaceEvent(NOTIFY_KEY_MISS, "keymiss", key, db->id); return NULL; } @@ -124,23 +128,23 @@ robj_roptr lookupKeyReadWithFlags(redisDb *db, robj *key, int flags) { * will say the key as non existing. * * Notably this covers GETs when slaves are used to scale reads. */ - if (server.current_client && - !FActiveMaster(server.current_client) && - server.current_client->cmd && - server.current_client->cmd->flags & CMD_READONLY) + if (serverTL->current_client && + !FActiveMaster(serverTL->current_client) && + serverTL->current_client->cmd && + serverTL->current_client->cmd->flags & CMD_READONLY) { - server.stat_keyspace_misses++; + g_pserver->stat_keyspace_misses++; notifyKeyspaceEvent(NOTIFY_KEY_MISS, "keymiss", key, db->id); return NULL; } } val = lookupKey(db,key,flags); if (val == NULL) { - server.stat_keyspace_misses++; + g_pserver->stat_keyspace_misses++; notifyKeyspaceEvent(NOTIFY_KEY_MISS, "keymiss", key, db->id); } else - server.stat_keyspace_hits++; + g_pserver->stat_keyspace_hits++; return val; } @@ -157,7 +161,7 @@ robj_roptr lookupKeyRead(redisDb *db, robj *key) { * does not exist in the specified DB. 
*/ robj *lookupKeyWrite(redisDb *db, robj *key) { expireIfNeeded(db,key); - return lookupKey(db,key,LOOKUP_NONE); + return lookupKey(db,key,LOOKUP_UPDATEMVCC); } robj_roptr lookupKeyReadOrReply(client *c, robj *key, robj *reply) { @@ -175,16 +179,14 @@ robj *lookupKeyWriteOrReply(client *c, robj *key, robj *reply) { int dbAddCore(redisDb *db, robj *key, robj *val) { sds copy = sdsdup(szFromObj(key)); int retval = dictAdd(db->pdict, copy, val); -#ifdef ENABLE_MVCC val->mvcc_tstamp = key->mvcc_tstamp = getMvccTstamp(); -#endif if (retval == DICT_OK) { if (val->type == OBJ_LIST || val->type == OBJ_ZSET) signalKeyAsReady(db, key); - if (server.cluster_enabled) slotToKeyAdd(key); + if (g_pserver->cluster_enabled) slotToKeyAdd(key); } else { @@ -204,18 +206,24 @@ void dbAdd(redisDb *db, robj *key, robj *val) serverAssertWithInfo(NULL,key,retval == DICT_OK); } -/* Insert a key, handling duplicate keys according to fReplace */ -int dbMerge(redisDb *db, robj *key, robj *val, int fReplace) +void dbOverwriteCore(redisDb *db, dictEntry *de, robj *val, bool fUpdateMvcc) { - if (fReplace) - { - setKey(db, key, val); - return TRUE; + dictEntry auxentry = *de; + robj *old = (robj*)dictGetVal(de); + if (g_pserver->maxmemory_policy & MAXMEMORY_FLAG_LFU) { + val->lru = old->lru; } - else - { - return (dbAddCore(db, key, val) == DICT_OK); + if (fUpdateMvcc) + val->mvcc_tstamp = getMvccTstamp(); + + dictSetVal(db->pdict, de, val); + + if (g_pserver->lazyfree_lazy_server_del) { + freeObjAsync(old); + dictSetVal(db->pdict, &auxentry, NULL); } + + dictFreeVal(db->pdict, &auxentry); } /* Overwrite an existing key with a new value. 
Incrementing the reference @@ -227,22 +235,31 @@ void dbOverwrite(redisDb *db, robj *key, robj *val) { dictEntry *de = dictFind(db->pdict,ptrFromObj(key)); serverAssertWithInfo(NULL,key,de != NULL); - dictEntry auxentry = *de; - robj *old = (robj*)dictGetVal(de); - if (server.maxmemory_policy & MAXMEMORY_FLAG_LFU) { - val->lru = old->lru; - } -#ifdef ENABLE_MVCC - val->mvcc_tstamp = getMvccTstamp(); -#endif - dictSetVal(db->pdict, de, val); + dbOverwriteCore(db, de, val, true); +} - if (server.lazyfree_lazy_server_del) { - freeObjAsync(old); - dictSetVal(db->pdict, &auxentry, NULL); - } +/* Insert a key, handling duplicate keys according to fReplace */ +int dbMerge(redisDb *db, robj *key, robj *val, int fReplace) +{ + if (fReplace) + { + dictEntry *de = dictFind(db->pdict, ptrFromObj(key)); + if (de == nullptr) + return (dbAddCore(db, key, val) == DICT_OK); - dictFreeVal(db->pdict, &auxentry); + robj *old = (robj*)dictGetVal(de); + if (old->mvcc_tstamp <= val->mvcc_tstamp) + { + dbOverwriteCore(db, de, val, false); + return true; + } + + return false; + } + else + { + return (dbAddCore(db, key, val) == DICT_OK); + } } /* High level Set operation. This function can be used in order to set @@ -287,7 +304,7 @@ robj *dbRandomKey(redisDb *db) { key = (sds)dictGetKey(de); keyobj = createStringObject(key,sdslen(key)); if (dictFind(db->expires,key)) { - if (allvolatile && listLength(server.masters) && --maxtries == 0) { + if (allvolatile && listLength(g_pserver->masters) && --maxtries == 0) { /* If the DB is composed only of keys with an expire set, * it could happen that all the keys are already logically * expired in the slave, so the function cannot stop because @@ -313,7 +330,7 @@ int dbSyncDelete(redisDb *db, robj *key) { * the key, because it is shared with the main dictionary. 
*/ if (dictSize(db->expires) > 0) dictDelete(db->expires,ptrFromObj(key)); if (dictDelete(db->pdict,ptrFromObj(key)) == DICT_OK) { - if (server.cluster_enabled) slotToKeyDel(key); + if (g_pserver->cluster_enabled) slotToKeyDel(key); return 1; } else { return 0; @@ -323,7 +340,7 @@ int dbSyncDelete(redisDb *db, robj *key) { /* This is a wrapper whose behavior depends on the Redis lazy free * configuration. Deletes the key synchronously or asynchronously. */ int dbDelete(redisDb *db, robj *key) { - return server.lazyfree_lazy_server_del ? dbAsyncDelete(db,key) : + return g_pserver->lazyfree_lazy_server_del ? dbAsyncDelete(db,key) : dbSyncDelete(db,key); } @@ -365,7 +382,7 @@ robj *dbUnshareStringValue(redisDb *db, robj *key, robj *o) { return o; } -/* Remove all keys from all the databases in a Redis server. +/* Remove all keys from all the databases in a Redis g_pserver-> * If callback is given the function is called from time to time to * signal that work is in progress. * @@ -383,7 +400,7 @@ long long emptyDb(int dbnum, int flags, void(callback)(void*)) { int async = (flags & EMPTYDB_ASYNC); long long removed = 0; - if (dbnum < -1 || dbnum >= server.dbnum) { + if (dbnum < -1 || dbnum >= cserver.dbnum) { errno = EINVAL; return -1; } @@ -391,21 +408,21 @@ long long emptyDb(int dbnum, int flags, void(callback)(void*)) { int startdb, enddb; if (dbnum == -1) { startdb = 0; - enddb = server.dbnum-1; + enddb = cserver.dbnum-1; } else { startdb = enddb = dbnum; } for (int j = startdb; j <= enddb; j++) { - removed += dictSize(server.db[j].pdict); + removed += dictSize(g_pserver->db[j].pdict); if (async) { - emptyDbAsync(&server.db[j]); + emptyDbAsync(&g_pserver->db[j]); } else { - dictEmpty(server.db[j].pdict,callback); - dictEmpty(server.db[j].expires,callback); + dictEmpty(g_pserver->db[j].pdict,callback); + dictEmpty(g_pserver->db[j].expires,callback); } } - if (server.cluster_enabled) { + if (g_pserver->cluster_enabled) { if (async) { slotToKeyFlushAsync(); } else { @@ 
-417,9 +434,9 @@ long long emptyDb(int dbnum, int flags, void(callback)(void*)) { } int selectDb(client *c, int id) { - if (id < 0 || id >= server.dbnum) + if (id < 0 || id >= cserver.dbnum) return C_ERR; - c->db = &server.db[id]; + c->db = &g_pserver->db[id]; return C_OK; } @@ -474,7 +491,7 @@ void flushdbCommand(client *c) { if (getFlushCommandFlags(c,&flags) == C_ERR) return; signalFlushedDb(c->db->id); - server.dirty += emptyDb(c->db->id,flags,NULL); + g_pserver->dirty += emptyDb(c->db->id,flags,NULL); addReply(c,shared.ok); } @@ -486,19 +503,19 @@ void flushallCommand(client *c) { if (getFlushCommandFlags(c,&flags) == C_ERR) return; signalFlushedDb(-1); - server.dirty += emptyDb(-1,flags,NULL); + g_pserver->dirty += emptyDb(-1,flags,NULL); addReply(c,shared.ok); - if (server.rdb_child_pid != -1) killRDBChild(); - if (server.saveparamslen > 0) { + if (g_pserver->rdb_child_pid != -1) killRDBChild(); + if (g_pserver->saveparamslen > 0) { /* Normally rdbSave() will reset dirty, but we don't want this here * as otherwise FLUSHALL will not be replicated nor put into the AOF. */ - int saved_dirty = server.dirty; + int saved_dirty = g_pserver->dirty; rdbSaveInfo rsi, *rsiptr; rsiptr = rdbPopulateSaveInfo(&rsi); rdbSave(rsiptr); - server.dirty = saved_dirty; + g_pserver->dirty = saved_dirty; } - server.dirty++; + g_pserver->dirty++; } /* This command implements DEL and LAZYDEL. 
*/ @@ -513,7 +530,7 @@ void delGenericCommand(client *c, int lazy) { signalModifiedKey(c->db,c->argv[j]); notifyKeyspaceEvent(NOTIFY_GENERIC, "del",c->argv[j],c->db->id); - server.dirty++; + g_pserver->dirty++; numdel++; } } @@ -547,7 +564,7 @@ void selectCommand(client *c) { "invalid DB index") != C_OK) return; - if (server.cluster_enabled && id != 0) { + if (g_pserver->cluster_enabled && id != 0) { addReplyError(c,"SELECT is not allowed in cluster mode"); return; } @@ -847,7 +864,7 @@ void dbsizeCommand(client *c) { } void lastsaveCommand(client *c) { - addReplyLongLong(c,server.lastsave); + addReplyLongLong(c,g_pserver->lastsave); } void typeCommand(client *c) { @@ -896,7 +913,7 @@ void shutdownCommand(client *c) { * with half-read data). * * Also when in Sentinel mode clear the SAVE flag and force NOSAVE. */ - if (server.loading || server.sentinel_mode) + if (g_pserver->loading || g_pserver->sentinel_mode) flags = (flags & ~SHUTDOWN_SAVE) | SHUTDOWN_NOSAVE; if (prepareForShutdown(flags) == C_OK) exit(0); addReplyError(c,"Errors trying to SHUTDOWN. Check logs."); @@ -940,7 +957,7 @@ void renameGenericCommand(client *c, int nx) { c->argv[1],c->db->id); notifyKeyspaceEvent(NOTIFY_GENERIC,"rename_to", c->argv[2],c->db->id); - server.dirty++; + g_pserver->dirty++; addReply(c,nx ? shared.cone : shared.ok); } @@ -958,7 +975,7 @@ void moveCommand(client *c) { int srcid; long long dbid, expire; - if (server.cluster_enabled) { + if (g_pserver->cluster_enabled) { addReplyError(c,"MOVE is not allowed in cluster mode"); return; } @@ -1003,7 +1020,7 @@ void moveCommand(client *c) { /* OK! key moved, free the entry in the source DB */ dbDelete(src,c->argv[1]); - server.dirty++; + g_pserver->dirty++; addReply(c,shared.cone); } @@ -1034,11 +1051,11 @@ void scanDatabaseForReadyLists(redisDb *db) { * Returns C_ERR if at least one of the DB ids are out of range, otherwise * C_OK is returned. 
*/ int dbSwapDatabases(int id1, int id2) { - if (id1 < 0 || id1 >= server.dbnum || - id2 < 0 || id2 >= server.dbnum) return C_ERR; + if (id1 < 0 || id1 >= cserver.dbnum || + id2 < 0 || id2 >= cserver.dbnum) return C_ERR; if (id1 == id2) return C_OK; - redisDb aux = server.db[id1]; - redisDb *db1 = &server.db[id1], *db2 = &server.db[id2]; + redisDb aux = g_pserver->db[id1]; + redisDb *db1 = &g_pserver->db[id1], *db2 = &g_pserver->db[id2]; /* Swap hash tables. Note that we don't swap blocking_keys, * ready_keys and watched_keys, since we want clients to @@ -1070,7 +1087,7 @@ void swapdbCommand(client *c) { long id1, id2; /* Not allowed in cluster mode: we have just DB 0 there. */ - if (server.cluster_enabled) { + if (g_pserver->cluster_enabled) { addReplyError(c,"SWAPDB is not allowed in cluster mode"); return; } @@ -1089,7 +1106,7 @@ void swapdbCommand(client *c) { addReplyError(c,"DB index is out of range"); return; } else { - server.dirty++; + g_pserver->dirty++; addReply(c,shared.ok); } } @@ -1119,7 +1136,7 @@ void setExpire(client *c, redisDb *db, robj *key, long long when) { de = dictAddOrFind(db->expires,dictGetKey(kde)); dictSetSignedIntegerVal(de,when); - int writable_slave = listLength(server.masters) && server.repl_slave_ro == 0; + int writable_slave = listLength(g_pserver->masters) && g_pserver->repl_slave_ro == 0; if (c && writable_slave && !(c->flags & CLIENT_MASTER)) rememberSlaveKeyWithExpire(db,key); } @@ -1156,9 +1173,9 @@ void propagateExpire(redisDb *db, robj *key, int lazy) { incrRefCount(argv[0]); incrRefCount(argv[1]); - if (server.aof_state != AOF_OFF) - feedAppendOnlyFile(server.delCommand,db->id,argv,2); - replicationFeedSlaves(server.slaves,db->id,argv,2); + if (g_pserver->aof_state != AOF_OFF) + feedAppendOnlyFile(cserver.delCommand,db->id,argv,2); + replicationFeedSlaves(g_pserver->slaves,db->id,argv,2); decrRefCount(argv[0]); decrRefCount(argv[1]); @@ -1171,14 +1188,14 @@ int keyIsExpired(redisDb *db, robj *key) { if (when < 0) return 0; 
/* No expire for this key */ /* Don't expire anything while loading. It will be done later. */ - if (server.loading) return 0; + if (g_pserver->loading) return 0; /* If we are in the context of a Lua script, we pretend that time is * blocked to when the Lua script started. This way a key can expire * only the first time it is accessed and not in the middle of the * script execution, making propagation to slaves / AOF consistent. * See issue #1525 on Github for more information. */ - mstime_t now = server.lua_caller ? server.lua_time_start : mstime(); + mstime_t now = g_pserver->lua_caller ? g_pserver->lua_time_start : mstime(); return now > when; } @@ -1213,14 +1230,14 @@ int expireIfNeeded(redisDb *db, robj *key) { * Still we try to return the right information to the caller, * that is, 0 if we think the key should be still valid, 1 if * we think the key is expired at this time. */ - if (listLength(server.masters)) return 1; + if (listLength(g_pserver->masters)) return 1; /* Delete the key */ - server.stat_expiredkeys++; - propagateExpire(db,key,server.lazyfree_lazy_expire); + g_pserver->stat_expiredkeys++; + propagateExpire(db,key,g_pserver->lazyfree_lazy_expire); notifyKeyspaceEvent(NOTIFY_EXPIRED, "expired",key,db->id); - return server.lazyfree_lazy_expire ? dbAsyncDelete(db,key) : + return g_pserver->lazyfree_lazy_expire ? dbAsyncDelete(db,key) : dbSyncDelete(db,key); } @@ -1511,15 +1528,15 @@ void slotToKeyUpdateKey(robj *key, int add) { unsigned char buf[64]; unsigned char *indexed = buf; - server.cluster->slots_keys_count[hashslot] += add ? 1 : -1; + g_pserver->cluster->slots_keys_count[hashslot] += add ? 
1 : -1; if (keylen+2 > 64) indexed = (unsigned char*)zmalloc(keylen+2, MALLOC_SHARED); indexed[0] = (hashslot >> 8) & 0xff; indexed[1] = hashslot & 0xff; memcpy(indexed+2,ptrFromObj(key),keylen); if (add) { - raxInsert(server.cluster->slots_to_keys,indexed,keylen+2,NULL,NULL); + raxInsert(g_pserver->cluster->slots_to_keys,indexed,keylen+2,NULL,NULL); } else { - raxRemove(server.cluster->slots_to_keys,indexed,keylen+2,NULL); + raxRemove(g_pserver->cluster->slots_to_keys,indexed,keylen+2,NULL); } if (indexed != buf) zfree(indexed); } @@ -1533,10 +1550,10 @@ void slotToKeyDel(robj *key) { } void slotToKeyFlush(void) { - raxFree(server.cluster->slots_to_keys); - server.cluster->slots_to_keys = raxNew(); - memset(server.cluster->slots_keys_count,0, - sizeof(server.cluster->slots_keys_count)); + raxFree(g_pserver->cluster->slots_to_keys); + g_pserver->cluster->slots_to_keys = raxNew(); + memset(g_pserver->cluster->slots_keys_count,0, + sizeof(g_pserver->cluster->slots_keys_count)); } /* Pupulate the specified array of objects with keys in the specified slot. 
@@ -1549,7 +1566,7 @@ unsigned int getKeysInSlot(unsigned int hashslot, robj **keys, unsigned int coun indexed[0] = (hashslot >> 8) & 0xff; indexed[1] = hashslot & 0xff; - raxStart(&iter,server.cluster->slots_to_keys); + raxStart(&iter,g_pserver->cluster->slots_to_keys); raxSeek(&iter,">=",indexed,2); while(count-- && raxNext(&iter)) { if (iter.key[0] != indexed[0] || iter.key[1] != indexed[1]) break; @@ -1568,13 +1585,13 @@ unsigned int delKeysInSlot(unsigned int hashslot) { indexed[0] = (hashslot >> 8) & 0xff; indexed[1] = hashslot & 0xff; - raxStart(&iter,server.cluster->slots_to_keys); - while(server.cluster->slots_keys_count[hashslot]) { + raxStart(&iter,g_pserver->cluster->slots_to_keys); + while(g_pserver->cluster->slots_keys_count[hashslot]) { raxSeek(&iter,">=",indexed,2); raxNext(&iter); robj *key = createStringObject((char*)iter.key+2,iter.key_len-2); - dbDelete(&server.db[0],key); + dbDelete(&g_pserver->db[0],key); decrRefCount(key); j++; } @@ -1583,5 +1600,5 @@ unsigned int delKeysInSlot(unsigned int hashslot) { } unsigned int countKeysInSlot(unsigned int hashslot) { - return server.cluster->slots_keys_count[hashslot]; + return g_pserver->cluster->slots_keys_count[hashslot]; } diff --git a/src/debug.cpp b/src/debug.cpp index edcc1c631..4e588a254 100644 --- a/src/debug.cpp +++ b/src/debug.cpp @@ -27,6 +27,7 @@ * POSSIBILITY OF SUCH DAMAGE. 
*/ +#define NO_DEPRECATE_FREE 1 // we are required to call the real free() in this CU #include "server.h" #include "sha1.h" /* SHA1 is used for DEBUG DIGEST */ #include "crc64.h" @@ -266,8 +267,8 @@ void computeDatasetDigest(unsigned char *final) { memset(final,0,20); /* Start with a clean result */ - for (j = 0; j < server.dbnum; j++) { - redisDb *db = server.db+j; + for (j = 0; j < cserver.dbnum; j++) { + redisDb *db = g_pserver->db+j; if (dictSize(db->pdict) == 0) continue; di = dictGetSafeIterator(db->pdict); @@ -345,7 +346,7 @@ NULL (RESTART_SERVER_GRACEFULLY|RESTART_SERVER_CONFIG_REWRITE) : RESTART_SERVER_NONE; restartServer(flags,delay); - addReplyError(c,"failed to restart the server. Check server logs."); + addReplyError(c,"failed to restart the g_pserver-> Check server logs."); } else if (!strcasecmp(szFromObj(c->argv[1]),"oom")) { void *ptr = zmalloc(ULONG_MAX, MALLOC_LOCAL); /* Should trigger an out of memory. */ zfree(ptr); @@ -374,16 +375,16 @@ NULL serverLog(LL_WARNING,"DB reloaded by DEBUG RELOAD"); addReply(c,shared.ok); } else if (!strcasecmp(szFromObj(c->argv[1]),"loadaof")) { - if (server.aof_state != AOF_OFF) flushAppendOnlyFile(1); + if (g_pserver->aof_state != AOF_OFF) flushAppendOnlyFile(1); emptyDb(-1,EMPTYDB_NO_FLAGS,NULL); protectClient(c); - int ret = loadAppendOnlyFile(server.aof_filename); + int ret = loadAppendOnlyFile(g_pserver->aof_filename); unprotectClient(c); if (ret != C_OK) { addReply(c,shared.err); return; } - server.dirty = 0; /* Prevent AOF / replication */ + g_pserver->dirty = 0; /* Prevent AOF / replication */ serverLog(LL_WARNING,"Append Only File loaded by DEBUG LOADAOF"); addReply(c,shared.ok); } else if (!strcasecmp(szFromObj(c->argv[1]),"object") && c->argc == 3) { @@ -596,12 +597,12 @@ NULL } else if (!strcasecmp(szFromObj(c->argv[1]),"set-active-expire") && c->argc == 3) { - server.active_expire_enabled = atoi(szFromObj(c->argv[2])); + g_pserver->active_expire_enabled = atoi(szFromObj(c->argv[2])); 
addReply(c,shared.ok); } else if (!strcasecmp(szFromObj(c->argv[1]),"lua-always-replicate-commands") && c->argc == 3) { - server.lua_always_replicate_commands = atoi(szFromObj(c->argv[2])); + g_pserver->lua_always_replicate_commands = atoi(szFromObj(c->argv[2])); addReply(c,shared.ok); } else if (!strcasecmp(szFromObj(c->argv[1]),"error") && c->argc == 3) { sds errstr = sdsnewlen("-",1); @@ -628,17 +629,17 @@ NULL if (getLongFromObjectOrReply(c, c->argv[2], &dbid, NULL) != C_OK) return; - if (dbid < 0 || dbid >= server.dbnum) { + if (dbid < 0 || dbid >= cserver.dbnum) { addReplyError(c,"Out of range database"); return; } stats = sdscatprintf(stats,"[Dictionary HT]\n"); - dictGetStats(buf,sizeof(buf),server.db[dbid].pdict); + dictGetStats(buf,sizeof(buf),g_pserver->db[dbid].pdict); stats = sdscat(stats,buf); stats = sdscatprintf(stats,"[Expires HT]\n"); - dictGetStats(buf,sizeof(buf),server.db[dbid].expires); + dictGetStats(buf,sizeof(buf),g_pserver->db[dbid].expires); stats = sdscat(stats,buf); addReplyBulkSds(c,stats); @@ -692,9 +693,9 @@ void _serverAssert(const char *estr, const char *file, int line) { serverLog(LL_WARNING,"=== ASSERTION FAILED ==="); serverLog(LL_WARNING,"==> %s:%d '%s' is not true",file,line,estr); #ifdef HAVE_BACKTRACE - server.assert_failed = estr; - server.assert_file = file; - server.assert_line = line; + g_pserver->assert_failed = estr; + g_pserver->assert_file = file; + g_pserver->assert_line = line; serverLog(LL_WARNING,"(forcing SIGSEGV to print the bug report.)"); #endif *((char*)-1) = 'x'; @@ -779,10 +780,10 @@ void _serverPanic(const char *file, int line, const char *msg, ...) 
{ } void bugReportStart(void) { - if (server.bug_report_start == 0) { + if (g_pserver->bug_report_start == 0) { serverLogRaw(LL_WARNING|LL_RAW, - "\n\n=== REDIS BUG REPORT START: Cut & paste starting from here ===\n"); - server.bug_report_start = 1; + "\n\n=== KEYDB BUG REPORT START: Cut & paste starting from here ===\n"); + g_pserver->bug_report_start = 1; } } @@ -1126,16 +1127,16 @@ void logRegisters(ucontext_t *uc) { * * Close it with closeDirectLogFiledes(). */ int openDirectLogFiledes(void) { - int log_to_stdout = server.logfile[0] == '\0'; + int log_to_stdout = g_pserver->logfile[0] == '\0'; int fd = log_to_stdout ? STDOUT_FILENO : - open(server.logfile, O_APPEND|O_CREAT|O_WRONLY, 0644); + open(g_pserver->logfile, O_APPEND|O_CREAT|O_WRONLY, 0644); return fd; } /* Used to close what closeDirectLogFiledes() returns. */ void closeDirectLogFiledes(int fd) { - int log_to_stdout = server.logfile[0] == '\0'; + int log_to_stdout = g_pserver->logfile[0] == '\0'; if (!log_to_stdout) close(fd); } @@ -1225,9 +1226,9 @@ void logStackTrace(ucontext_t *uc) { * currently being served by Redis. May be NULL if Redis is not serving a * client right now. 
*/ void logCurrentClient(void) { - if (server.current_client == NULL) return; + if (serverTL->current_client == NULL) return; - client *cc = server.current_client; + client *cc = serverTL->current_client; sds client; int j; @@ -1362,7 +1363,7 @@ void sigsegvHandler(int sig, siginfo_t *info, void *secret) { bugReportStart(); serverLog(LL_WARNING, - "Redis %s crashed by signal: %d", REDIS_VERSION, sig); + "KeyDB %s crashed by signal: %d", KEYDB_REAL_VERSION, sig); if (eip != NULL) { serverLog(LL_WARNING, "Crashed running the instruction at: %p", eip); @@ -1372,8 +1373,8 @@ void sigsegvHandler(int sig, siginfo_t *info, void *secret) { "Accessing address: %p", (void*)info->si_addr); } serverLog(LL_WARNING, - "Failed assertion: %s (%s:%d)", server.assert_failed, - server.assert_file, server.assert_line); + "Failed assertion: %s (%s:%d)", g_pserver->assert_failed, + g_pserver->assert_file, g_pserver->assert_line); /* Log the stack trace */ serverLogRaw(LL_WARNING|LL_RAW, "\n------ STACK TRACE ------\n"); @@ -1438,14 +1439,14 @@ void sigsegvHandler(int sig, siginfo_t *info, void *secret) { } serverLogRaw(LL_WARNING|LL_RAW, -"\n=== REDIS BUG REPORT END. Make sure to include from START to END. ===\n\n" +"\n=== KEYDB BUG REPORT END. Make sure to include from START to END. ===\n\n" " Please report the crash by opening an issue on github:\n\n" -" http://github.com/antirez/redis/issues\n\n" +" https://github.com/JohnSully/KeyDB/issues\n\n" " Suspect RAM error? Use keydb-server --test-memory to verify it.\n\n" ); /* free(messages); Don't call free() with possibly corrupted memory. */ - if (server.daemonize && server.supervised == 0) unlink(server.pidfile); + if (cserver.daemonize && cserver.supervised == 0) unlink(cserver.pidfile); /* Make sure we exit with the right signal at the end. So for instance * the core will be dumped if enabled. 
*/ @@ -1521,7 +1522,7 @@ void watchdogScheduleSignal(int period) { void enableWatchdog(int period) { int min_period; - if (server.watchdog_period == 0) { + if (g_pserver->watchdog_period == 0) { struct sigaction act; /* Watchdog was actually disabled, so we have to setup the signal @@ -1534,16 +1535,16 @@ void enableWatchdog(int period) { /* If the configured period is smaller than twice the timer period, it is * too short for the software watchdog to work reliably. Fix it now * if needed. */ - min_period = (1000/server.hz)*2; + min_period = (1000/g_pserver->hz)*2; if (period < min_period) period = min_period; watchdogScheduleSignal(period); /* Adjust the current timer. */ - server.watchdog_period = period; + g_pserver->watchdog_period = period; } /* Disable the software watchdog. */ void disableWatchdog(void) { struct sigaction act; - if (server.watchdog_period == 0) return; /* Already disabled. */ + if (g_pserver->watchdog_period == 0) return; /* Already disabled. */ watchdogScheduleSignal(0); /* Stop the current timer. */ /* Set the signal handler to SIG_IGN, this will also remove pending @@ -1552,5 +1553,5 @@ void disableWatchdog(void) { act.sa_flags = 0; act.sa_handler = SIG_IGN; sigaction(SIGALRM, &act, NULL); - server.watchdog_period = 0; + g_pserver->watchdog_period = 0; } diff --git a/src/defrag.cpp b/src/defrag.cpp index 2cdf2c286..64f7a3bb2 100644 --- a/src/defrag.cpp +++ b/src/defrag.cpp @@ -59,14 +59,14 @@ void* activeDefragAlloc(void *ptr) { size_t size; void *newptr; if(!je_get_defrag_hint(ptr, &bin_util, &run_util)) { - server.stat_active_defrag_misses++; + g_pserver->stat_active_defrag_misses++; return NULL; } /* if this run is more utilized than the average utilization in this bin * (or it is full), skip it. This will eventually move all the allocations * from relatively empty runs into relatively full runs. 
*/ if (run_util > bin_util || run_util == 1<<16) { - server.stat_active_defrag_misses++; + g_pserver->stat_active_defrag_misses++; return NULL; } /* move this allocation to a new allocation. @@ -442,7 +442,7 @@ long scanLaterList(robj *ob) { quicklist *ql = (quicklist*)ptrFromObj(ob); if (ob->type != OBJ_LIST || ob->encoding != OBJ_ENCODING_QUICKLIST) return 0; - server.stat_active_defrag_scanned+=ql->len; + g_pserver->stat_active_defrag_scanned+=ql->len; return activeDefragQuickListNodes(ql); } @@ -455,7 +455,7 @@ void scanLaterZsetCallback(void *privdata, const dictEntry *_de) { dictEntry *de = (dictEntry*)_de; scanLaterZsetData *data = (scanLaterZsetData*)privdata; data->defragged += activeDefragZsetEntry(data->zs, de); - server.stat_active_defrag_scanned++; + g_pserver->stat_active_defrag_scanned++; } long scanLaterZset(robj *ob, unsigned long *cursor) { @@ -474,7 +474,7 @@ void scanLaterSetCallback(void *privdata, const dictEntry *_de) { sds sdsele = (sds)dictGetKey(de), newsds; if ((newsds = activeDefragSds(sdsele))) (*defragged)++, de->key = newsds; - server.stat_active_defrag_scanned++; + g_pserver->stat_active_defrag_scanned++; } long scanLaterSet(robj *ob, unsigned long *cursor) { @@ -495,7 +495,7 @@ void scanLaterHashCallback(void *privdata, const dictEntry *_de) { sdsele = (sds)dictGetVal(de); if ((newsds = activeDefragSds(sdsele))) (*defragged)++, de->v.val = newsds; - server.stat_active_defrag_scanned++; + g_pserver->stat_active_defrag_scanned++; } long scanLaterHash(robj *ob, unsigned long *cursor) { @@ -514,7 +514,7 @@ long defragQuicklist(redisDb *db, dictEntry *kde) { serverAssert(ob->type == OBJ_LIST && ob->encoding == OBJ_ENCODING_QUICKLIST); if ((newql = (quicklist*)activeDefragAlloc(ql))) defragged++, ob->m_ptr = ql = newql; - if (ql->len > server.active_defrag_max_scan_fields) + if (ql->len > cserver.active_defrag_max_scan_fields) defragLater(db, kde); else defragged += activeDefragQuickListNodes(ql); @@ -537,7 +537,7 @@ long 
defragZsetSkiplist(redisDb *db, dictEntry *kde) { defragged++, zs->zsl = newzsl; if ((newheader = (zskiplistNode*)activeDefragAlloc(zs->zsl->header))) defragged++, zs->zsl->header = newheader; - if (dictSize(zs->pdict) > server.active_defrag_max_scan_fields) + if (dictSize(zs->pdict) > cserver.active_defrag_max_scan_fields) defragLater(db, kde); else { dictIterator *di = dictGetIterator(zs->pdict); @@ -560,7 +560,7 @@ long defragHash(redisDb *db, dictEntry *kde) { dict *d, *newd; serverAssert(ob->type == OBJ_HASH && ob->encoding == OBJ_ENCODING_HT); d = (dict*)ptrFromObj(ob); - if (dictSize(d) > server.active_defrag_max_scan_fields) + if (dictSize(d) > cserver.active_defrag_max_scan_fields) defragLater(db, kde); else defragged += activeDefragSdsDict(d, DEFRAG_SDS_DICT_VAL_IS_SDS); @@ -578,7 +578,7 @@ long defragSet(redisDb *db, dictEntry *kde) { dict *d, *newd; serverAssert(ob->type == OBJ_SET && ob->encoding == OBJ_ENCODING_HT); d = (dict*)ptrFromObj(ob); - if (dictSize(d) > server.active_defrag_max_scan_fields) + if (dictSize(d) > cserver.active_defrag_max_scan_fields) defragLater(db, kde); else defragged += activeDefragSdsDict(d, DEFRAG_SDS_DICT_NO_VAL); @@ -742,7 +742,7 @@ long defragStream(redisDb *db, dictEntry *kde) { if ((news = (stream*)activeDefragAlloc(s))) defragged++, ob->m_ptr = s = news; - if (raxSize(s->prax) > server.active_defrag_max_scan_fields) { + if (raxSize(s->prax) > cserver.active_defrag_max_scan_fields) { rax *newrax = (rax*)activeDefragAlloc(s->prax); if (newrax) defragged++, s->prax = newrax; @@ -837,12 +837,12 @@ long defragKey(redisDb *db, dictEntry *de) { /* Defrag scan callback for the main db dictionary. 
*/ void defragScanCallback(void *privdata, const dictEntry *de) { long defragged = defragKey((redisDb*)privdata, (dictEntry*)de); - server.stat_active_defrag_hits += defragged; + g_pserver->stat_active_defrag_hits += defragged; if(defragged) - server.stat_active_defrag_key_hits++; + g_pserver->stat_active_defrag_key_hits++; else - server.stat_active_defrag_key_misses++; - server.stat_active_defrag_scanned++; + g_pserver->stat_active_defrag_key_misses++; + g_pserver->stat_active_defrag_scanned++; } /* Defrag scan callback for each hash table bicket, @@ -887,8 +887,8 @@ long defragOtherGlobals() { /* there are many more pointers to defrag (e.g. client argv, output / aof buffers, etc. * but we assume most of these are short lived, we only need to defrag allocations * that remain static for a long time */ - defragged += activeDefragSdsDict(server.lua_scripts, DEFRAG_SDS_DICT_VAL_IS_STROB); - defragged += activeDefragSdsListAndDict(server.repl_scriptcache_fifo, server.repl_scriptcache_dict, DEFRAG_SDS_DICT_NO_VAL); + defragged += activeDefragSdsDict(g_pserver->lua_scripts, DEFRAG_SDS_DICT_VAL_IS_STROB); + defragged += activeDefragSdsListAndDict(g_pserver->repl_scriptcache_fifo, g_pserver->repl_scriptcache_dict, DEFRAG_SDS_DICT_NO_VAL); return defragged; } @@ -898,16 +898,16 @@ int defragLaterItem(dictEntry *de, unsigned long *cursor, long long endtime) { if (de) { robj *ob = (robj*)dictGetVal(de); if (ob->type == OBJ_LIST) { - server.stat_active_defrag_hits += scanLaterList(ob); + g_pserver->stat_active_defrag_hits += scanLaterList(ob); *cursor = 0; /* list has no scan, we must finish it in one go */ } else if (ob->type == OBJ_SET) { - server.stat_active_defrag_hits += scanLaterSet(ob, cursor); + g_pserver->stat_active_defrag_hits += scanLaterSet(ob, cursor); } else if (ob->type == OBJ_ZSET) { - server.stat_active_defrag_hits += scanLaterZset(ob, cursor); + g_pserver->stat_active_defrag_hits += scanLaterZset(ob, cursor); } else if (ob->type == OBJ_HASH) { - 
server.stat_active_defrag_hits += scanLaterHash(ob, cursor); + g_pserver->stat_active_defrag_hits += scanLaterHash(ob, cursor); } else if (ob->type == OBJ_STREAM) { - return scanLaterStraemListpacks(ob, cursor, endtime, &server.stat_active_defrag_hits); + return scanLaterStraemListpacks(ob, cursor, endtime, &g_pserver->stat_active_defrag_hits); } else { *cursor = 0; /* object type may have changed since we schedule it for later */ } @@ -922,8 +922,8 @@ int defragLaterStep(redisDb *db, long long endtime) { static sds current_key = NULL; static unsigned long cursor = 0; unsigned int iterations = 0; - unsigned long long prev_defragged = server.stat_active_defrag_hits; - unsigned long long prev_scanned = server.stat_active_defrag_scanned; + unsigned long long prev_defragged = g_pserver->stat_active_defrag_hits; + unsigned long long prev_scanned = g_pserver->stat_active_defrag_scanned; long long key_defragged; do { @@ -952,7 +952,7 @@ int defragLaterStep(redisDb *db, long long endtime) { /* each time we enter this function we need to fetch the key from the dict again (if it still exists) */ dictEntry *de = dictFind(db->pdict, current_key); - key_defragged = server.stat_active_defrag_hits; + key_defragged = g_pserver->stat_active_defrag_hits; do { int quit = 0; if (defragLaterItem(de, &cursor, endtime)) @@ -967,24 +967,24 @@ int defragLaterStep(redisDb *db, long long endtime) { * (if we have a lot of pointers in one hash bucket, or rehashing), * check if we reached the time limit. 
*/ if (quit || (++iterations > 16 || - server.stat_active_defrag_hits - prev_defragged > 512 || - server.stat_active_defrag_scanned - prev_scanned > 64)) { + g_pserver->stat_active_defrag_hits - prev_defragged > 512 || + g_pserver->stat_active_defrag_scanned - prev_scanned > 64)) { if (quit || ustime() > endtime) { - if(key_defragged != server.stat_active_defrag_hits) - server.stat_active_defrag_key_hits++; + if(key_defragged != g_pserver->stat_active_defrag_hits) + g_pserver->stat_active_defrag_key_hits++; else - server.stat_active_defrag_key_misses++; + g_pserver->stat_active_defrag_key_misses++; return 1; } iterations = 0; - prev_defragged = server.stat_active_defrag_hits; - prev_scanned = server.stat_active_defrag_scanned; + prev_defragged = g_pserver->stat_active_defrag_hits; + prev_scanned = g_pserver->stat_active_defrag_scanned; } } while(cursor); - if(key_defragged != server.stat_active_defrag_hits) - server.stat_active_defrag_key_hits++; + if(key_defragged != g_pserver->stat_active_defrag_hits) + g_pserver->stat_active_defrag_key_hits++; else - server.stat_active_defrag_key_misses++; + g_pserver->stat_active_defrag_key_misses++; } while(1); } @@ -996,26 +996,26 @@ void computeDefragCycles() { size_t frag_bytes; float frag_pct = getAllocatorFragmentation(&frag_bytes); /* If we're not already running, and below the threshold, exit. 
*/ - if (!server.active_defrag_running) { - if(frag_pct < server.active_defrag_threshold_lower || frag_bytes < server.active_defrag_ignore_bytes) + if (!g_pserver->active_defrag_running) { + if(frag_pct < cserver.active_defrag_threshold_lower || frag_bytes < cserver.active_defrag_ignore_bytes) return; } /* Calculate the adaptive aggressiveness of the defrag */ int cpu_pct = INTERPOLATE(frag_pct, - server.active_defrag_threshold_lower, - server.active_defrag_threshold_upper, - server.active_defrag_cycle_min, - server.active_defrag_cycle_max); + cserver.active_defrag_threshold_lower, + cserver.active_defrag_threshold_upper, + cserver.active_defrag_cycle_min, + cserver.active_defrag_cycle_max); cpu_pct = LIMIT(cpu_pct, - server.active_defrag_cycle_min, - server.active_defrag_cycle_max); + cserver.active_defrag_cycle_min, + cserver.active_defrag_cycle_max); /* We allow increasing the aggressiveness during a scan, but don't * reduce it. */ - if (!server.active_defrag_running || - cpu_pct > server.active_defrag_running) + if (!g_pserver->active_defrag_running || + cpu_pct > g_pserver->active_defrag_running) { - server.active_defrag_running = cpu_pct; + g_pserver->active_defrag_running = cpu_pct; serverLog(LL_VERBOSE, "Starting active defrag, frag=%.0f%%, frag_bytes=%zu, cpu=%d%%", frag_pct, frag_bytes, cpu_pct); @@ -1031,13 +1031,13 @@ void activeDefragCycle(void) { static redisDb *db = NULL; static long long start_scan, start_stat; unsigned int iterations = 0; - unsigned long long prev_defragged = server.stat_active_defrag_hits; - unsigned long long prev_scanned = server.stat_active_defrag_scanned; + unsigned long long prev_defragged = g_pserver->stat_active_defrag_hits; + unsigned long long prev_scanned = g_pserver->stat_active_defrag_scanned; long long start, timelimit, endtime; mstime_t latency; int quit = 0; - if (server.aof_child_pid!=-1 || server.rdb_child_pid!=-1) + if (g_pserver->aof_child_pid!=-1 || g_pserver->rdb_child_pid!=-1) return; /* Defragging memory 
while there's a fork will just do damage. */ /* Once a second, check if we the fragmentation justfies starting a scan @@ -1045,12 +1045,12 @@ void activeDefragCycle(void) { run_with_period(1000) { computeDefragCycles(); } - if (!server.active_defrag_running) + if (!g_pserver->active_defrag_running) return; /* See activeExpireCycle for how timelimit is handled. */ start = ustime(); - timelimit = 1000000*server.active_defrag_running/server.hz/100; + timelimit = 1000000*g_pserver->active_defrag_running/g_pserver->hz/100; if (timelimit <= 0) timelimit = 1; endtime = start + timelimit; latencyStartMonitor(latency); @@ -1065,7 +1065,7 @@ void activeDefragCycle(void) { } /* Move on to next database, and stop if we reached the last one. */ - if (++current_db >= server.dbnum) { + if (++current_db >= cserver.dbnum) { /* defrag other items not part of the db / keys */ defragOtherGlobals(); @@ -1074,26 +1074,26 @@ void activeDefragCycle(void) { float frag_pct = getAllocatorFragmentation(&frag_bytes); serverLog(LL_VERBOSE, "Active defrag done in %dms, reallocated=%d, frag=%.0f%%, frag_bytes=%zu", - (int)((now - start_scan)/1000), (int)(server.stat_active_defrag_hits - start_stat), frag_pct, frag_bytes); + (int)((now - start_scan)/1000), (int)(g_pserver->stat_active_defrag_hits - start_stat), frag_pct, frag_bytes); start_scan = now; current_db = -1; cursor = 0; db = NULL; - server.active_defrag_running = 0; + g_pserver->active_defrag_running = 0; computeDefragCycles(); /* if another scan is needed, start it right away */ - if (server.active_defrag_running != 0 && ustime() < endtime) + if (g_pserver->active_defrag_running != 0 && ustime() < endtime) continue; break; } else if (current_db==0) { /* Start a scan from the first database. 
*/ start_scan = ustime(); - start_stat = server.stat_active_defrag_hits; + start_stat = g_pserver->stat_active_defrag_hits; } - db = &server.db[current_db]; + db = &g_pserver->db[current_db]; cursor = 0; } @@ -1112,15 +1112,15 @@ void activeDefragCycle(void) { * But regardless, don't start a new db in this loop, this is because after * the last db we call defragOtherGlobals, which must be done in once cycle */ if (!cursor || (++iterations > 16 || - server.stat_active_defrag_hits - prev_defragged > 512 || - server.stat_active_defrag_scanned - prev_scanned > 64)) { + g_pserver->stat_active_defrag_hits - prev_defragged > 512 || + g_pserver->stat_active_defrag_scanned - prev_scanned > 64)) { if (!cursor || ustime() > endtime) { quit = 1; break; } iterations = 0; - prev_defragged = server.stat_active_defrag_hits; - prev_scanned = server.stat_active_defrag_scanned; + prev_defragged = g_pserver->stat_active_defrag_hits; + prev_scanned = g_pserver->stat_active_defrag_scanned; } } while(cursor && !quit); } while(!quit); diff --git a/src/evict.cpp b/src/evict.cpp index 1affbe445..f7b99f389 100644 --- a/src/evict.cpp +++ b/src/evict.cpp @@ -77,8 +77,8 @@ unsigned int getLRUClock(void) { * precomputed value, otherwise we need to resort to a system call. */ unsigned int LRU_CLOCK(void) { unsigned int lruclock; - if (1000/server.hz <= LRU_CLOCK_RESOLUTION) { - atomicGet(server.lruclock,lruclock); + if (1000/g_pserver->hz <= LRU_CLOCK_RESOLUTION) { + atomicGet(g_pserver->lruclock,lruclock); } else { lruclock = getLRUClock(); } @@ -111,7 +111,7 @@ unsigned long long estimateObjectIdleTime(robj *o) { * If all the bytes needed to return back under the limit were freed the * function returns C_OK, otherwise C_ERR is returned, and the caller * should block the execution of commands that will result in more memory - * used by the server. 
+ * used by the server. * * ------------------------------------------------------------------------ * @@ -161,13 +161,13 @@ void evictionPoolAlloc(void) { void evictionPoolPopulate(int dbid, dict *sampledict, dict *keydict, struct evictionPoolEntry *pool) { int j, k, count; - dictEntry **samples = (dictEntry**)alloca(server.maxmemory_samples * sizeof(dictEntry*)); + dictEntry **samples = (dictEntry**)alloca(g_pserver->maxmemory_samples * sizeof(dictEntry*)); - count = dictGetSomeKeys(sampledict,samples,server.maxmemory_samples); + count = dictGetSomeKeys(sampledict,samples,g_pserver->maxmemory_samples); for (j = 0; j < count; j++) { unsigned long long idle; sds key; - robj *o; + robj *o = nullptr; dictEntry *de; de = samples[j]; @@ -176,7 +176,7 @@ void evictionPoolPopulate(int dbid, dict *sampledict, dict *keydict, struct evic /* If the dictionary we are sampling from is not the main * dictionary (but the expires one) we need to lookup the key * again in the key dictionary to obtain the value object. */ - if (server.maxmemory_policy != MAXMEMORY_VOLATILE_TTL) { + if (g_pserver->maxmemory_policy != MAXMEMORY_VOLATILE_TTL) { if (sampledict != keydict) de = dictFind(keydict, key); o = (robj*)dictGetVal(de); } @@ -184,9 +184,9 @@ void evictionPoolPopulate(int dbid, dict *sampledict, dict *keydict, struct evic /* Calculate the idle time according to the policy. This is called * idle just because the code initially handled LRU, but is in fact * just a score where an higher score means better candidate. */ - if (server.maxmemory_policy & MAXMEMORY_FLAG_LRU) { - idle = estimateObjectIdleTime(o); - } else if (server.maxmemory_policy & MAXMEMORY_FLAG_LFU) { + if (g_pserver->maxmemory_policy & MAXMEMORY_FLAG_LRU) { + idle = (o != nullptr) ? estimateObjectIdleTime(o) : 0; + } else if (g_pserver->maxmemory_policy & MAXMEMORY_FLAG_LFU) { /* When we use an LRU policy, we sort the keys by idle time * so that we expire keys starting from greater idle time.
* However when the policy is an LFU one, we have a frequency @@ -195,7 +195,7 @@ void evictionPoolPopulate(int dbid, dict *sampledict, dict *keydict, struct evic * frequency subtracting the actual frequency to the maximum * frequency of 255. */ idle = 255-LFUDecrAndReturn(o); - } else if (server.maxmemory_policy == MAXMEMORY_VOLATILE_TTL) { + } else if (g_pserver->maxmemory_policy == MAXMEMORY_VOLATILE_TTL) { /* In this case the sooner the expire the better. */ idle = ULLONG_MAX - (long)dictGetVal(de); } else { @@ -297,7 +297,7 @@ void evictionPoolPopulate(int dbid, dict *sampledict, dict *keydict, struct evic * 16 bits. The returned time is suitable to be stored as LDT (last decrement * time) for the LFU implementation. */ unsigned long LFUGetTimeInMinutes(void) { - return (server.unixtime/60) & 65535; + return (g_pserver->unixtime/60) & 65535; } /* Given an object last access time, compute the minimum number of minutes @@ -317,7 +317,7 @@ uint8_t LFULogIncr(uint8_t counter) { double r = (double)rand()/RAND_MAX; double baseval = counter - LFU_INIT_VAL; if (baseval < 0) baseval = 0; - double p = 1.0/(baseval*server.lfu_log_factor+1); + double p = 1.0/(baseval*g_pserver->lfu_log_factor+1); if (r < p) counter++; return counter; } @@ -326,7 +326,7 @@ uint8_t LFULogIncr(uint8_t counter) { * do not update LFU fields of the object, we update the access time * and counter in an explicit way when the object is really accessed. * And we will times halve the counter according to the times of - * elapsed time than server.lfu_decay_time. + * elapsed time than g_pserver->lfu_decay_time. * Return the object frequency counter. * * This function is used in order to scan the dataset for the best object @@ -335,7 +335,7 @@ uint8_t LFULogIncr(uint8_t counter) { unsigned long LFUDecrAndReturn(robj *o) { unsigned long ldt = o->lru >> 8; unsigned long counter = o->lru & 255; - unsigned long num_periods = server.lfu_decay_time ? 
LFUTimeElapsed(ldt) / server.lfu_decay_time : 0; + unsigned long num_periods = g_pserver->lfu_decay_time ? LFUTimeElapsed(ldt) / g_pserver->lfu_decay_time : 0; if (num_periods) counter = (num_periods > counter) ? 0 : counter - num_periods; return counter; @@ -352,20 +352,20 @@ unsigned long LFUDecrAndReturn(robj *o) { size_t freeMemoryGetNotCountedMemory(void) { serverAssert(GlobalLocksAcquired()); size_t overhead = 0; - int slaves = listLength(server.slaves); + int slaves = listLength(g_pserver->slaves); if (slaves) { listIter li; listNode *ln; - listRewind(server.slaves,&li); + listRewind(g_pserver->slaves,&li); while((ln = listNext(&li))) { client *slave = (client*)listNodeValue(ln); overhead += getClientOutputBufferMemoryUsage(slave); } } - if (server.aof_state != AOF_OFF) { - overhead += sdsalloc(server.aof_buf)+aofRewriteBufferSize(); + if (g_pserver->aof_state != AOF_OFF) { + overhead += sdsalloc(g_pserver->aof_buf)+aofRewriteBufferSize(); } return overhead; } @@ -403,7 +403,7 @@ int getMaxmemoryState(size_t *total, size_t *logical, size_t *tofree, float *lev if (total) *total = mem_reported; /* We may return ASAP if there is no need to compute the level. */ - int return_ok_asap = !server.maxmemory || mem_reported <= server.maxmemory; + int return_ok_asap = !g_pserver->maxmemory || mem_reported <= g_pserver->maxmemory; if (return_ok_asap && !level) return C_OK; /* Remove the size of slaves output buffers and AOF buffer from the @@ -414,20 +414,20 @@ int getMaxmemoryState(size_t *total, size_t *logical, size_t *tofree, float *lev /* Compute the ratio of memory usage. */ if (level) { - if (!server.maxmemory) { + if (!g_pserver->maxmemory) { *level = 0; } else { - *level = (float)mem_used / (float)server.maxmemory; + *level = (float)mem_used / (float)g_pserver->maxmemory; } } if (return_ok_asap) return C_OK; /* Check if we are still over the memory limit. 
*/ - if (mem_used <= server.maxmemory) return C_OK; + if (mem_used <= g_pserver->maxmemory) return C_OK; /* Compute how much memory we need to free. */ - mem_tofree = mem_used - server.maxmemory; + mem_tofree = mem_used - g_pserver->maxmemory; if (logical) *logical = mem_used; if (tofree) *tofree = mem_tofree; @@ -448,12 +448,12 @@ int freeMemoryIfNeeded(void) { serverAssert(GlobalLocksAcquired()); /* By default replicas should ignore maxmemory * and just be masters exact copies. */ - if (listLength(server.masters) && server.repl_slave_ignore_maxmemory) return C_OK; + if (listLength(g_pserver->masters) && g_pserver->repl_slave_ignore_maxmemory) return C_OK; size_t mem_reported, mem_tofree, mem_freed; mstime_t latency, eviction_latency; long long delta; - int slaves = listLength(server.slaves); + int slaves = listLength(g_pserver->slaves); /* When clients are paused the dataset should be static not just from the * POV of clients not being able to write, but also from the POV of @@ -464,7 +464,7 @@ int freeMemoryIfNeeded(void) { mem_freed = 0; - if (server.maxmemory_policy == MAXMEMORY_NO_EVICTION) + if (g_pserver->maxmemory_policy == MAXMEMORY_NO_EVICTION) goto cant_free; /* We need to free memory, but policy forbids. */ latencyStartMonitor(latency); @@ -477,8 +477,8 @@ int freeMemoryIfNeeded(void) { dict *dict; dictEntry *de; - if (server.maxmemory_policy & (MAXMEMORY_FLAG_LRU|MAXMEMORY_FLAG_LFU) || - server.maxmemory_policy == MAXMEMORY_VOLATILE_TTL) + if (g_pserver->maxmemory_policy & (MAXMEMORY_FLAG_LRU|MAXMEMORY_FLAG_LFU) || + g_pserver->maxmemory_policy == MAXMEMORY_VOLATILE_TTL) { struct evictionPoolEntry *pool = EvictionPoolLRU; @@ -488,9 +488,9 @@ int freeMemoryIfNeeded(void) { /* We don't want to make local-db choices when expiring keys, * so to start populate the eviction pool sampling keys from * every DB. */ - for (i = 0; i < server.dbnum; i++) { - db = server.db+i; - dict = (server.maxmemory_policy & MAXMEMORY_FLAG_ALLKEYS) ? 
+ for (i = 0; i < cserver.dbnum; i++) { + db = g_pserver->db+i; + dict = (g_pserver->maxmemory_policy & MAXMEMORY_FLAG_ALLKEYS) ? db->pdict : db->expires; if ((keys = dictSize(dict)) != 0) { evictionPoolPopulate(i, dict, db->pdict, pool); @@ -504,11 +504,11 @@ int freeMemoryIfNeeded(void) { if (pool[k].key == NULL) continue; bestdbid = pool[k].dbid; - if (server.maxmemory_policy & MAXMEMORY_FLAG_ALLKEYS) { - de = dictFind(server.db[pool[k].dbid].pdict, + if (g_pserver->maxmemory_policy & MAXMEMORY_FLAG_ALLKEYS) { + de = dictFind(g_pserver->db[pool[k].dbid].pdict, pool[k].key); } else { - de = dictFind(server.db[pool[k].dbid].expires, + de = dictFind(g_pserver->db[pool[k].dbid].expires, pool[k].key); } @@ -531,16 +531,16 @@ int freeMemoryIfNeeded(void) { } /* volatile-random and allkeys-random policy */ - else if (server.maxmemory_policy == MAXMEMORY_ALLKEYS_RANDOM || - server.maxmemory_policy == MAXMEMORY_VOLATILE_RANDOM) + else if (g_pserver->maxmemory_policy == MAXMEMORY_ALLKEYS_RANDOM || + g_pserver->maxmemory_policy == MAXMEMORY_VOLATILE_RANDOM) { /* When evicting a random key, we try to evict a key for * each DB, so we use the static 'next_db' variable to * incrementally visit all DBs. */ - for (i = 0; i < server.dbnum; i++) { - j = (++next_db) % server.dbnum; - db = server.db+j; - dict = (server.maxmemory_policy == MAXMEMORY_ALLKEYS_RANDOM) ? + for (i = 0; i < cserver.dbnum; i++) { + j = (++next_db) % cserver.dbnum; + db = g_pserver->db+j; + dict = (g_pserver->maxmemory_policy == MAXMEMORY_ALLKEYS_RANDOM) ? db->pdict : db->expires; if (dictSize(dict) != 0) { de = dictGetRandomKey(dict); @@ -553,9 +553,9 @@ int freeMemoryIfNeeded(void) { /* Finally remove the selected key. 
*/ if (bestkey) { - db = server.db+bestdbid; + db = g_pserver->db+bestdbid; robj *keyobj = createStringObject(bestkey,sdslen(bestkey)); - propagateExpire(db,keyobj,server.lazyfree_lazy_eviction); + propagateExpire(db,keyobj,g_pserver->lazyfree_lazy_eviction); /* We compute the amount of memory freed by db*Delete() alone. * It is possible that actually the memory needed to propagate * the DEL in AOF and replication link is greater than the one @@ -566,7 +566,7 @@ int freeMemoryIfNeeded(void) { * we only care about memory used by the key space. */ delta = (long long) zmalloc_used_memory(); latencyStartMonitor(eviction_latency); - if (server.lazyfree_lazy_eviction) + if (g_pserver->lazyfree_lazy_eviction) dbAsyncDelete(db,keyobj); else dbSyncDelete(db,keyobj); @@ -575,7 +575,7 @@ int freeMemoryIfNeeded(void) { latencyRemoveNestedEvent(latency,eviction_latency); delta -= (long long) zmalloc_used_memory(); mem_freed += delta; - server.stat_evictedkeys++; + g_pserver->stat_evictedkeys++; notifyKeyspaceEvent(NOTIFY_EVICTED, "evicted", keyobj, db->id); decrRefCount(keyobj); @@ -594,7 +594,7 @@ int freeMemoryIfNeeded(void) { * memory, since the "mem_freed" amount is computed only * across the dbAsyncDelete() call, while the thread can * release the memory all the time. */ - if (server.lazyfree_lazy_eviction && !(keys_freed % 16)) { + if (g_pserver->lazyfree_lazy_eviction && !(keys_freed % 16)) { if (getMaxmemoryState(NULL,NULL,NULL,NULL) == C_OK) { /* Let's satisfy our stop condition. 
*/ mem_freed = mem_tofree; @@ -632,6 +632,6 @@ int freeMemoryIfNeeded(void) { * */ int freeMemoryIfNeededAndSafe(void) { - if (server.lua_timedout || server.loading) return C_OK; + if (g_pserver->lua_timedout || g_pserver->loading) return C_OK; return freeMemoryIfNeeded(); } diff --git a/src/expire.cpp b/src/expire.cpp index 0e87a05b6..64a430389 100644 --- a/src/expire.cpp +++ b/src/expire.cpp @@ -47,7 +47,7 @@ * If the key is found to be expired, it is removed from the database and * 1 is returned. Otherwise no operation is performed and 0 is returned. * - * When a key is expired, server.stat_expiredkeys is incremented. + * When a key is expired, g_pserver->stat_expiredkeys is incremented. * * The parameter 'now' is the current time in milliseconds as is passed * to the function to avoid too many gettimeofday() syscalls. */ @@ -57,15 +57,15 @@ int activeExpireCycleTryExpire(redisDb *db, dictEntry *de, long long now) { sds key = (sds)dictGetKey(de); robj *keyobj = createStringObject(key,sdslen(key)); - propagateExpire(db,keyobj,server.lazyfree_lazy_expire); - if (server.lazyfree_lazy_expire) + propagateExpire(db,keyobj,g_pserver->lazyfree_lazy_expire); + if (g_pserver->lazyfree_lazy_expire) dbAsyncDelete(db,keyobj); else dbSyncDelete(db,keyobj); notifyKeyspaceEvent(NOTIFY_EXPIRED, "expired",keyobj,db->id); decrRefCount(keyobj); - server.stat_expiredkeys++; + g_pserver->stat_expiredkeys++; return 1; } else { return 0; @@ -126,14 +126,14 @@ void activeExpireCycle(int type) { * 2) If last time we hit the time limit, we want to scan all DBs * in this iteration, as there is work to do in some DB and we don't want * expired keys to use memory for too much time. */ - if (dbs_per_call > server.dbnum || timelimit_exit) - dbs_per_call = server.dbnum; + if (dbs_per_call > cserver.dbnum || timelimit_exit) + dbs_per_call = cserver.dbnum; /* We can use at max ACTIVE_EXPIRE_CYCLE_SLOW_TIME_PERC percentage of CPU time * per iteration. 
Since this function gets called with a frequency of - * server.hz times per second, the following is the max amount of + * g_pserver->hz times per second, the following is the max amount of * microseconds we can spend in this function. */ - timelimit = 1000000*ACTIVE_EXPIRE_CYCLE_SLOW_TIME_PERC/server.hz/100; + timelimit = 1000000*ACTIVE_EXPIRE_CYCLE_SLOW_TIME_PERC/g_pserver->hz/100; timelimit_exit = 0; if (timelimit <= 0) timelimit = 1; @@ -148,7 +148,7 @@ void activeExpireCycle(int type) { for (j = 0; j < dbs_per_call && timelimit_exit == 0; j++) { int expired; - redisDb *db = server.db+(current_db % server.dbnum); + redisDb *db = g_pserver->db+(current_db % cserver.dbnum); /* Increment the DB now so we are sure if we run out of time * in the current DB we'll restart from the next. This allows to @@ -220,7 +220,7 @@ void activeExpireCycle(int type) { elapsed = ustime()-start; if (elapsed > timelimit) { timelimit_exit = 1; - server.stat_expired_time_cap_reached_count++; + g_pserver->stat_expired_time_cap_reached_count++; break; } } @@ -239,8 +239,8 @@ void activeExpireCycle(int type) { current_perc = (double)total_expired/total_sampled; } else current_perc = 0; - server.stat_expired_stale_perc = (current_perc*0.05)+ - (server.stat_expired_stale_perc*0.95); + g_pserver->stat_expired_stale_perc = (current_perc*0.05)+ + (g_pserver->stat_expired_stale_perc*0.95); } /*----------------------------------------------------------------------------- @@ -297,14 +297,14 @@ void expireSlaveKeys(void) { /* Check the key against every database corresponding to the * bits set in the value bitmap. 
*/ int dbid = 0; - while(dbids && dbid < server.dbnum) { + while(dbids && dbid < cserver.dbnum) { if ((dbids & 1) != 0) { - redisDb *db = server.db+dbid; + redisDb *db = g_pserver->db+dbid; dictEntry *expire = dictFind(db->expires,keyname); int expired = 0; if (expire && - activeExpireCycleTryExpire(server.db+dbid,expire,start)) + activeExpireCycleTryExpire(g_pserver->db+dbid,expire,start)) { expired = 1; } @@ -377,7 +377,7 @@ size_t getSlaveKeyWithExpireCount(void) { } /* Remove the keys in the hash table. We need to do that when data is - * flushed from the server. We may receive new keys from the master with + * flushed from the g_pserver-> We may receive new keys from the master with * the same name/db and it is no longer a good idea to expire them. * * Note: technically we should handle the case of a single DB being flushed @@ -424,16 +424,16 @@ void expireGenericCommand(client *c, long long basetime, int unit) { * * Instead we take the other branch of the IF statement setting an expire * (possibly in the past) and wait for an explicit DEL from the master. */ - if (when <= mstime() && !server.loading && !listLength(server.masters)) { + if (when <= mstime() && !g_pserver->loading && !listLength(g_pserver->masters)) { robj *aux; - int deleted = server.lazyfree_lazy_expire ? dbAsyncDelete(c->db,key) : + int deleted = g_pserver->lazyfree_lazy_expire ? dbAsyncDelete(c->db,key) : dbSyncDelete(c->db,key); serverAssertWithInfo(c,key,deleted); - server.dirty++; + g_pserver->dirty++; /* Replicate/AOF this as an explicit DEL or UNLINK. */ - aux = server.lazyfree_lazy_expire ? shared.unlink : shared.del; + aux = g_pserver->lazyfree_lazy_expire ? 
shared.unlink : shared.del; rewriteClientCommandVector(c,2,aux,key); signalModifiedKey(c->db,key); notifyKeyspaceEvent(NOTIFY_GENERIC,"del",key,c->db->id); @@ -444,7 +444,7 @@ void expireGenericCommand(client *c, long long basetime, int unit) { addReply(c,shared.cone); signalModifiedKey(c->db,key); notifyKeyspaceEvent(NOTIFY_GENERIC,"expire",key,c->db->id); - server.dirty++; + g_pserver->dirty++; return; } } @@ -507,7 +507,7 @@ void persistCommand(client *c) { if (lookupKeyWrite(c->db,c->argv[1])) { if (removeExpire(c->db,c->argv[1])) { addReply(c,shared.cone); - server.dirty++; + g_pserver->dirty++; } else { addReply(c,shared.czero); } diff --git a/src/geo.cpp b/src/geo.cpp index a88949966..55c52fc88 100644 --- a/src/geo.cpp +++ b/src/geo.cpp @@ -661,11 +661,11 @@ void georadiusGeneric(client *c, int flags) { decrRefCount(zobj); notifyKeyspaceEvent(NOTIFY_ZSET,"georadiusstore",storekey, c->db->id); - server.dirty += returned_items; + g_pserver->dirty += returned_items; } else if (dbDelete(c->db,storekey)) { signalModifiedKey(c->db,storekey); notifyKeyspaceEvent(NOTIFY_GENERIC,"del",storekey,c->db->id); - server.dirty++; + g_pserver->dirty++; } addReplyLongLong(c, returned_items); } diff --git a/src/hyperloglog.cpp b/src/hyperloglog.cpp index 32c35d38e..344fd219f 100644 --- a/src/hyperloglog.cpp +++ b/src/hyperloglog.cpp @@ -176,7 +176,7 @@ * involved in updating the sparse representation is not justified by the * memory savings. The exact maximum length of the sparse representation * when this implementation switches to the dense representation is - * configured via the define server.hll_sparse_max_bytes. + * configured via the define g_pserver->hll_sparse_max_bytes. 
*/ struct hllhdr { @@ -652,7 +652,7 @@ int hllSparseToDense(robj *o) { * As a side effect the function may promote the HLL representation from * sparse to dense: this happens when a register requires to be set to a value * not representable with the sparse representation, or when the resulting - * size would be greater than server.hll_sparse_max_bytes. */ + * size would be greater than g_pserver->hll_sparse_max_bytes. */ int hllSparseSet(robj *o, long index, uint8_t count) { struct hllhdr *hdr; uint8_t oldcount, *sparse, *end, *p, *prev, *next; @@ -837,7 +837,7 @@ int hllSparseSet(robj *o, long index, uint8_t count) { deltalen = seqlen-oldlen; if (deltalen > 0 && - sdslen(szFromObj(o))+deltalen > server.hll_sparse_max_bytes) goto promote; + sdslen(szFromObj(o))+deltalen > g_pserver->hll_sparse_max_bytes) goto promote; if (deltalen && next) memmove(next+deltalen,next,end-next); sdsIncrLen(szFromObj(o),deltalen); memcpy(p,seq,seqlen); @@ -1221,7 +1221,7 @@ void pfaddCommand(client *c) { if (updated) { signalModifiedKey(c->db,c->argv[1]); notifyKeyspaceEvent(NOTIFY_STRING,"pfadd",c->argv[1],c->db->id); - server.dirty++; + g_pserver->dirty++; HLL_INVALIDATE_CACHE(hdr); } addReply(c, updated ? shared.cone : shared.czero); @@ -1311,7 +1311,7 @@ void pfcountCommand(client *c) { * may be modified and given that the HLL is a Redis string * we need to propagate the change. */ signalModifiedKey(c->db,c->argv[1]); - server.dirty++; + g_pserver->dirty++; } addReplyLongLong(c,card); } @@ -1387,7 +1387,7 @@ void pfmergeCommand(client *c) { /* We generate a PFADD event for PFMERGE for semantical simplicity * since in theory this is a mass-add of elements. */ notifyKeyspaceEvent(NOTIFY_STRING,"pfadd",c->argv[1],c->db->id); - server.dirty++; + g_pserver->dirty++; addReply(c,shared.ok); } @@ -1457,7 +1457,7 @@ void pfselftestCommand(client *c) { /* Make sure that for small cardinalities we use sparse * encoding. 
*/ - if (j == checkpoint && j < server.hll_sparse_max_bytes/2) { + if (j == checkpoint && j < g_pserver->hll_sparse_max_bytes/2) { hdr2 = (hllhdr*)ptrFromObj(o); if (hdr2->encoding != HLL_SPARSE) { addReplyError(c, "TESTFAILED sparse encoding not used"); @@ -1528,7 +1528,7 @@ void pfdebugCommand(client *c) { addReplySds(c,sdsnew(invalid_hll_err)); return; } - server.dirty++; /* Force propagation on encoding change. */ + g_pserver->dirty++; /* Force propagation on encoding change. */ } hdr = (hllhdr*)ptrFromObj(o); @@ -1593,7 +1593,7 @@ void pfdebugCommand(client *c) { return; } conv = 1; - server.dirty++; /* Force propagation on encoding change. */ + g_pserver->dirty++; /* Force propagation on encoding change. */ } addReply(c,conv ? shared.cone : shared.czero); } else { diff --git a/src/latency.cpp b/src/latency.cpp index cfa0f496d..0dd751a3a 100644 --- a/src/latency.cpp +++ b/src/latency.cpp @@ -88,15 +88,15 @@ int THPGetAnonHugePagesSize(void) { * of time series, each time serie is craeted on demand in order to avoid * having a fixed list to maintain. */ void latencyMonitorInit(void) { - server.latency_events = dictCreate(&latencyTimeSeriesDictType,NULL); + g_pserver->latency_events = dictCreate(&latencyTimeSeriesDictType,NULL); } /* Add the specified sample to the specified time series "event". * This function is usually called via latencyAddSampleIfNeeded(), that * is a macro that only adds the sample if the latency is higher than - * server.latency_monitor_threshold. */ + * g_pserver->latency_monitor_threshold. 
*/ void latencyAddSample(const char *event, mstime_t latency) { - struct latencyTimeSeries *ts = (latencyTimeSeries*)dictFetchValue(server.latency_events,event); + struct latencyTimeSeries *ts = (latencyTimeSeries*)dictFetchValue(g_pserver->latency_events,event); time_t now = time(NULL); int prev; @@ -106,7 +106,7 @@ void latencyAddSample(const char *event, mstime_t latency) { ts->idx = 0; ts->max = 0; memset(ts->samples,0,sizeof(ts->samples)); - dictAdd(server.latency_events,zstrdup(event),ts); + dictAdd(g_pserver->latency_events,zstrdup(event),ts); } if (latency > ts->max) ts->max = latency; @@ -137,12 +137,12 @@ int latencyResetEvent(char *event_to_reset) { dictEntry *de; int resets = 0; - di = dictGetSafeIterator(server.latency_events); + di = dictGetSafeIterator(g_pserver->latency_events); while((de = dictNext(di)) != NULL) { char *event = (char*)dictGetKey(de); if (event_to_reset == NULL || strcasecmp(event,event_to_reset) == 0) { - dictDelete(server.latency_events, event); + dictDelete(g_pserver->latency_events, event); resets++; } } @@ -158,7 +158,7 @@ int latencyResetEvent(char *event_to_reset) { * If the specified event has no elements the structure is populate with * zero values. */ void analyzeLatencyForEvent(char *event, struct latencyStats *ls) { - struct latencyTimeSeries *ts = (latencyTimeSeries*)dictFetchValue(server.latency_events,event); + struct latencyTimeSeries *ts = (latencyTimeSeries*)dictFetchValue(g_pserver->latency_events,event); int j; uint64_t sum; @@ -236,8 +236,8 @@ sds createLatencyReport(void) { /* Return ASAP if the latency engine is disabled and it looks like it * was never enabled so far. */ - if (dictSize(server.latency_events) == 0 && - server.latency_monitor_threshold == 0) + if (dictSize(g_pserver->latency_events) == 0 && + g_pserver->latency_monitor_threshold == 0) { report = sdscat(report,"I'm sorry, Dave, I can't do that. Latency monitoring is disabled in this Redis instance. 
You may use \"CONFIG SET latency-monitor-threshold .\" in order to enable it. If we weren't in a deep space mission I'd suggest to take a look at http://redis.io/topics/latency-monitor.\n"); return report; @@ -249,7 +249,7 @@ sds createLatencyReport(void) { dictEntry *de; int eventnum = 0; - di = dictGetSafeIterator(server.latency_events); + di = dictGetSafeIterator(g_pserver->latency_events); while((de = dictNext(di)) != NULL) { char *event = (char*)dictGetKey(de); struct latencyTimeSeries *ts = (latencyTimeSeries*)dictGetVal(de); @@ -274,31 +274,31 @@ sds createLatencyReport(void) { /* Fork */ if (!strcasecmp(event,"fork")) { const char *fork_quality; - if (server.stat_fork_rate < 10) { + if (g_pserver->stat_fork_rate < 10) { fork_quality = "terrible"; advise_better_vm = 1; advices++; - } else if (server.stat_fork_rate < 25) { + } else if (g_pserver->stat_fork_rate < 25) { fork_quality = "poor"; advise_better_vm = 1; advices++; - } else if (server.stat_fork_rate < 100) { + } else if (g_pserver->stat_fork_rate < 100) { fork_quality = "good"; } else { fork_quality = "excellent"; } report = sdscatprintf(report, - " Fork rate is %.2f GB/sec (%s).", server.stat_fork_rate, + " Fork rate is %.2f GB/sec (%s).", g_pserver->stat_fork_rate, fork_quality); } /* Potentially commands. */ if (!strcasecmp(event,"command")) { - if (server.slowlog_log_slower_than < 0) { + if (g_pserver->slowlog_log_slower_than < 0) { advise_slowlog_enabled = 1; advices++; - } else if (server.slowlog_log_slower_than/1000 > - server.latency_monitor_threshold) + } else if (g_pserver->slowlog_log_slower_than/1000 > + g_pserver->latency_monitor_threshold) { advise_slowlog_tuning = 1; advices++; @@ -401,11 +401,11 @@ sds createLatencyReport(void) { /* Slow log. */ if (advise_slowlog_enabled) { - report = sdscatprintf(report,"- There are latency issues with potentially slow commands you are using. Try to enable the Slow Log Redis feature using the command 'CONFIG SET slowlog-log-slower-than %llu'. 
If the Slow log is disabled Redis is not able to log slow commands execution for you.\n", (unsigned long long)server.latency_monitor_threshold*1000); + report = sdscatprintf(report,"- There are latency issues with potentially slow commands you are using. Try to enable the Slow Log Redis feature using the command 'CONFIG SET slowlog-log-slower-than %llu'. If the Slow log is disabled Redis is not able to log slow commands execution for you.\n", (unsigned long long)g_pserver->latency_monitor_threshold*1000); } if (advise_slowlog_tuning) { - report = sdscatprintf(report,"- Your current Slow Log configuration only logs events that are slower than your configured latency monitor threshold. Please use 'CONFIG SET slowlog-log-slower-than %llu'.\n", (unsigned long long)server.latency_monitor_threshold*1000); + report = sdscatprintf(report,"- Your current Slow Log configuration only logs events that are slower than your configured latency monitor threshold. Please use 'CONFIG SET slowlog-log-slower-than %llu'.\n", (unsigned long long)g_pserver->latency_monitor_threshold*1000); } if (advise_slowlog_inspect) { @@ -443,7 +443,7 @@ sds createLatencyReport(void) { report = sdscat(report,"- Assuming from the point of view of data safety this is viable in your environment, you could try to enable the 'no-appendfsync-on-rewrite' option, so that fsync will not be performed while there is a child rewriting the AOF file or producing an RDB file (the moment where there is high disk contention).\n"); } - if (advise_relax_fsync_policy && server.aof_fsync == AOF_FSYNC_ALWAYS) { + if (advise_relax_fsync_policy && g_pserver->aof_fsync == AOF_FSYNC_ALWAYS) { report = sdscat(report,"- Your fsync policy is set to 'always'. 
It is very hard to get good performances with such a setup, if possible try to relax the fsync policy to 'onesec'.\n"); } @@ -451,7 +451,7 @@ sds createLatencyReport(void) { report = sdscat(report,"- Latency during the AOF atomic rename operation or when the final difference is flushed to the AOF file at the end of the rewrite, sometimes is caused by very high write load, causing the AOF buffer to get very large. If possible try to send less commands to accomplish the same work, or use Lua scripts to group multiple operations into a single EVALSHA call.\n"); } - if (advise_hz && server.hz < 100) { + if (advise_hz && g_pserver->hz < 100) { report = sdscat(report,"- In order to make the Redis keys expiring process more incremental, try to set the 'hz' configuration parameter to 100 using 'CONFIG SET hz 100'.\n"); } @@ -497,8 +497,8 @@ void latencyCommandReplyWithLatestEvents(client *c) { dictIterator *di; dictEntry *de; - addReplyArrayLen(c,dictSize(server.latency_events)); - di = dictGetIterator(server.latency_events); + addReplyArrayLen(c,dictSize(g_pserver->latency_events)); + di = dictGetIterator(g_pserver->latency_events); while((de = dictNext(di)) != NULL) { char *event = (char*)dictGetKey(de); struct latencyTimeSeries *ts = (latencyTimeSeries*)dictGetVal(de); @@ -581,7 +581,7 @@ NULL if (!strcasecmp(szFromObj(c->argv[1]),"history") && c->argc == 3) { /* LATENCY HISTORY */ - ts = (latencyTimeSeries*)dictFetchValue(server.latency_events,ptrFromObj(c->argv[2])); + ts = (latencyTimeSeries*)dictFetchValue(g_pserver->latency_events,ptrFromObj(c->argv[2])); if (ts == NULL) { addReplyArrayLen(c,0); } else { @@ -593,7 +593,7 @@ NULL dictEntry *de; char *event; - de = dictFind(server.latency_events,ptrFromObj(c->argv[2])); + de = dictFind(g_pserver->latency_events,ptrFromObj(c->argv[2])); if (de == NULL) goto nodataerr; ts = (latencyTimeSeries*)dictGetVal(de); event = (char*)dictGetKey(de); diff --git a/src/latency.h b/src/latency.h index e4f31b5d6..0191e487f 100644 --- 
a/src/latency.h +++ b/src/latency.h @@ -72,7 +72,7 @@ int THPIsEnabled(void); /* Latency monitoring macros. */ /* Start monitoring an event. We just set the current time. */ -#define latencyStartMonitor(var) if (server.latency_monitor_threshold) { \ +#define latencyStartMonitor(var) if (g_pserver->latency_monitor_threshold) { \ var = mstime(); \ } else { \ var = 0; \ @@ -80,14 +80,14 @@ int THPIsEnabled(void); /* End monitoring an event, compute the difference with the current time * to check the amount of time elapsed. */ -#define latencyEndMonitor(var) if (server.latency_monitor_threshold) { \ +#define latencyEndMonitor(var) if (g_pserver->latency_monitor_threshold) { \ var = mstime() - var; \ } /* Add the sample only if the elapsed time is >= to the configured threshold. */ #define latencyAddSampleIfNeeded(event,var) \ - if (server.latency_monitor_threshold && \ - (var) >= server.latency_monitor_threshold) \ + if (g_pserver->latency_monitor_threshold && \ + (var) >= g_pserver->latency_monitor_threshold) \ latencyAddSample((event),(var)); /* Remove time from a nested event. */ diff --git a/src/lazyfree.cpp b/src/lazyfree.cpp index 7f6d9220d..6d56ec86d 100644 --- a/src/lazyfree.cpp +++ b/src/lazyfree.cpp @@ -83,7 +83,7 @@ int dbAsyncDelete(redisDb *db, robj *key) { * field to NULL in order to lazy free it later. */ if (de) { dictFreeUnlinkedEntry(db->pdict,de); - if (server.cluster_enabled) slotToKeyDel(key); + if (g_pserver->cluster_enabled) slotToKeyDel(key); return 1; } else { return 0; @@ -115,11 +115,11 @@ void emptyDbAsync(redisDb *db) { /* Empty the slots-keys map of Redis CLuster by creating a new empty one * and scheduiling the old for lazy freeing. 
*/ void slotToKeyFlushAsync(void) { - rax *old = server.cluster->slots_to_keys; + rax *old = g_pserver->cluster->slots_to_keys; - server.cluster->slots_to_keys = raxNew(); - memset(server.cluster->slots_keys_count,0, - sizeof(server.cluster->slots_keys_count)); + g_pserver->cluster->slots_to_keys = raxNew(); + memset(g_pserver->cluster->slots_keys_count,0, + sizeof(g_pserver->cluster->slots_keys_count)); atomicIncr(lazyfree_objects,old->numele); bioCreateBackgroundJob(BIO_LAZY_FREE,NULL,NULL,old); } diff --git a/src/module.cpp b/src/module.cpp index d2b7cd64d..04ca21a97 100644 --- a/src/module.cpp +++ b/src/module.cpp @@ -30,6 +30,8 @@ #include "server.h" #include "cluster.h" #include +#include +#include #define REDISMODULE_CORE 1 #include "redismodule.h" @@ -55,7 +57,7 @@ struct RedisModule { typedef struct RedisModule RedisModule; /* This represents a shared API. Shared APIs will be used to populate - * the server.sharedapi dictionary, mapping names of APIs exported by + * the g_pserver->sharedapi dictionary, mapping names of APIs exported by * modules for other modules to use, to their structure specifying the * function pointer that can be called. */ struct RedisModuleSharedAPI { @@ -235,7 +237,6 @@ static list *moduleUnblockedClients; /* We need a mutex that is unlocked / relocked in beforeSleep() in order to * allow thread safe contexts to execute commands at a safe moment. */ -static pthread_rwlock_t moduleGIL = PTHREAD_RWLOCK_INITIALIZER; int fModuleGILWlocked = FALSE; /* Function pointer type for keyspace event notification subscriptions from modules. 
*/ @@ -293,6 +294,12 @@ typedef struct RedisModuleCommandFilter { /* Registered filters */ static list *moduleCommandFilters; +/* Module GIL Variables */ +static int s_cAcquisitionsServer = 0; +static int s_cAcquisitionsModule = 0; +static std::mutex s_mutex; +static std::condition_variable s_cv; + /* -------------------------------------------------------------------------- * Prototypes * -------------------------------------------------------------------------- */ @@ -427,8 +434,8 @@ int moduleCreateEmptyKey(RedisModuleKey *key, int type) { switch(type) { case REDISMODULE_KEYTYPE_LIST: obj = createQuicklistObject(); - quicklistSetOptions((quicklist*)obj->m_ptr, server.list_max_ziplist_size, - server.list_compress_depth); + quicklistSetOptions((quicklist*)obj->m_ptr, g_pserver->list_max_ziplist_size, + g_pserver->list_compress_depth); break; case REDISMODULE_KEYTYPE_ZSET: obj = createZsetZiplistObject(); @@ -492,7 +499,7 @@ int moduleDelKeyIfEmpty(RedisModuleKey *key) { * This function is not meant to be used by modules developer, it is only * used implicitly by including redismodule.h. */ int RM_GetApi(const char *funcname, void **targetPtrPtr) { - dictEntry *he = dictFind(server.moduleapi, funcname); + dictEntry *he = dictFind(g_pserver->moduleapi, funcname); if (!he) return REDISMODULE_ERR; *targetPtrPtr = dictGetVal(he); return REDISMODULE_OK; @@ -528,7 +535,7 @@ void moduleHandlePropagationAfterCommandCallback(RedisModuleCtx *ctx) { if (ctx->flags & REDISMODULE_CTX_MULTI_EMITTED) { robj *propargv[1]; propargv[0] = createStringObject("EXEC",4); - alsoPropagate(server.execCommand,c->db->id,propargv,1, + alsoPropagate(cserver.execCommand,c->db->id,propargv,1, PROPAGATE_AOF|PROPAGATE_REPL); decrRefCount(propargv[0]); } @@ -700,7 +707,7 @@ int commandFlagsFromString(char *s) { int RM_CreateCommand(RedisModuleCtx *ctx, const char *name, RedisModuleCmdFunc cmdfunc, const char *strflags, int firstkey, int lastkey, int keystep) { int flags = strflags ? 
commandFlagsFromString((char*)strflags) : 0; if (flags == -1) return REDISMODULE_ERR; - if ((flags & CMD_MODULE_NO_CLUSTER) && server.cluster_enabled) + if ((flags & CMD_MODULE_NO_CLUSTER) && g_pserver->cluster_enabled) return REDISMODULE_ERR; struct redisCommand *rediscmd; @@ -734,8 +741,8 @@ int RM_CreateCommand(RedisModuleCtx *ctx, const char *name, RedisModuleCmdFunc c cp->rediscmd->keystep = keystep; cp->rediscmd->microseconds = 0; cp->rediscmd->calls = 0; - dictAdd(server.commands,sdsdup(cmdname),cp->rediscmd); - dictAdd(server.orig_commands,sdsdup(cmdname),cp->rediscmd); + dictAdd(g_pserver->commands,sdsdup(cmdname),cp->rediscmd); + dictAdd(g_pserver->orig_commands,sdsdup(cmdname),cp->rediscmd); cp->rediscmd->id = ACLGetCommandID(cmdname); /* ID used for ACL. */ return REDISMODULE_OK; } @@ -1355,7 +1362,7 @@ int RM_Replicate(RedisModuleCtx *ctx, const char *cmdname, const char *fmt, ...) /* Release the argv. */ for (j = 0; j < argc; j++) decrRefCount(argv[j]); zfree(argv); - server.dirty++; + g_pserver->dirty++; return REDISMODULE_OK; } @@ -1374,7 +1381,7 @@ int RM_ReplicateVerbatim(RedisModuleCtx *ctx) { alsoPropagate(ctx->client->cmd,ctx->client->db->id, ctx->client->argv,ctx->client->argc, PROPAGATE_AOF|PROPAGATE_REPL); - server.dirty++; + g_pserver->dirty++; return REDISMODULE_OK; } @@ -1454,29 +1461,29 @@ int RM_GetContextFlags(RedisModuleCtx *ctx) { flags |= REDISMODULE_CTX_FLAGS_REPLICATED; } - if (server.cluster_enabled) + if (g_pserver->cluster_enabled) flags |= REDISMODULE_CTX_FLAGS_CLUSTER; /* Maxmemory and eviction policy */ - if (server.maxmemory > 0) { + if (g_pserver->maxmemory > 0) { flags |= REDISMODULE_CTX_FLAGS_MAXMEMORY; - if (server.maxmemory_policy != MAXMEMORY_NO_EVICTION) + if (g_pserver->maxmemory_policy != MAXMEMORY_NO_EVICTION) flags |= REDISMODULE_CTX_FLAGS_EVICT; } /* Persistence flags */ - if (server.aof_state != AOF_OFF) + if (g_pserver->aof_state != AOF_OFF) flags |= REDISMODULE_CTX_FLAGS_AOF; - if (server.saveparamslen > 0) + 
if (g_pserver->saveparamslen > 0) flags |= REDISMODULE_CTX_FLAGS_RDB; /* Replication flags */ - if (listLength(server.masters) == 0) { + if (listLength(g_pserver->masters) == 0) { flags |= REDISMODULE_CTX_FLAGS_MASTER; } else { flags |= REDISMODULE_CTX_FLAGS_SLAVE; - if (server.repl_slave_ro) + if (g_pserver->repl_slave_ro) flags |= REDISMODULE_CTX_FLAGS_READONLY; } @@ -2750,7 +2757,7 @@ RedisModuleCallReply *RM_Call(RedisModuleCtx *ctx, const char *cmdname, const ch int replicate = 0; /* Replicate this command? */ int call_flags; sds proto = nullptr; - + /* Create the client and dispatch the command. */ va_start(ap, fmt); c = createClient(-1, IDX_EVENT_LOOP_MAIN); @@ -2792,12 +2799,12 @@ RedisModuleCallReply *RM_Call(RedisModuleCtx *ctx, const char *cmdname, const ch /* If this is a Redis Cluster node, we need to make sure the module is not * trying to access non-local keys, with the exception of commands * received from our master. */ - if (server.cluster_enabled && !(ctx->client->flags & CLIENT_MASTER)) { + if (g_pserver->cluster_enabled && !(ctx->client->flags & CLIENT_MASTER)) { /* Duplicate relevant flags in the module client. */ c->flags &= ~(CLIENT_READONLY|CLIENT_ASKING); c->flags |= ctx->client->flags & (CLIENT_READONLY|CLIENT_ASKING); if (getNodeByQuery(c,c->cmd,c->argv,c->argc,NULL,NULL) != - server.cluster->myself) + g_pserver->cluster->myself) { errno = EPERM; goto cleanup; @@ -3507,7 +3514,7 @@ void RM_LogRaw(RedisModule *module, const char *levelstr, const char *fmt, va_li else if (!strcasecmp(levelstr,"warning")) level = LL_WARNING; else level = LL_VERBOSE; /* Default. 
*/ - if (level < server.verbosity) return; + if (level < cserver.verbosity) return; name_len = snprintf(msg, sizeof(msg),"<%s> ", module->name); vsnprintf(msg + name_len, sizeof(msg) - name_len, fmt, ap); @@ -3663,9 +3670,22 @@ int RM_UnblockClient(RedisModuleBlockedClient *bc, void *privdata) { pthread_mutex_lock(&moduleUnblockedClientsMutex); bc->privdata = privdata; listAddNodeTail(moduleUnblockedClients,bc); - if (write(server.module_blocked_pipe[1],"A",1) != 1) { - /* Ignore the error, this is best-effort. */ + if (bc->client != nullptr) + { + if (write(g_pserver->rgthreadvar[bc->client->iel].module_blocked_pipe[1],"A",1) != 1) { + /* Ignore the error, this is best-effort. */ + } + } + else + { + for (int iel = 0; iel < cserver.cthreads; ++iel) + { + if (write(g_pserver->rgthreadvar[iel].module_blocked_pipe[1],"A",1) != 1) { + /* Ignore the error, this is best-effort. */ + } + } } + pthread_mutex_unlock(&moduleUnblockedClientsMutex); return REDISMODULE_OK; } @@ -3706,8 +3726,7 @@ void RM_SetDisconnectCallback(RedisModuleBlockedClient *bc, RedisModuleDisconnec * blocked client, it was terminated by Redis (for timeout or other reasons). * When this happens the RedisModuleBlockedClient structure in the queue * will have the 'client' field set to NULL. */ -void moduleHandleBlockedClients(void) { - listNode *ln; +void moduleHandleBlockedClients(int iel) { RedisModuleBlockedClient *bc; serverAssert(GlobalLocksAcquired()); @@ -3715,12 +3734,16 @@ void moduleHandleBlockedClients(void) { /* Here we unblock all the pending clients blocked in modules operations * so we can read every pending "awake byte" in the pipe. 
*/ char buf[1]; - while (read(server.module_blocked_pipe[0],buf,1) == 1); - while (listLength(moduleUnblockedClients)) { - ln = listFirst(moduleUnblockedClients); + while (read(serverTL->module_blocked_pipe[0],buf,1) == 1); + listIter li; + listNode *ln; + listRewind(moduleUnblockedClients, &li); + while ((ln = listNext(&li))) { bc = (RedisModuleBlockedClient*)ln->value; client *c = bc->client; - serverAssert(c->iel == IDX_EVENT_LOOP_MAIN); + if ((c != nullptr) && (iel != c->iel)) + continue; + listDelNode(moduleUnblockedClients,ln); pthread_mutex_unlock(&moduleUnblockedClientsMutex); @@ -3788,9 +3811,9 @@ void moduleHandleBlockedClients(void) { c->flags |= CLIENT_PENDING_WRITE; AssertCorrectThread(c); - fastlock_lock(&server.rgthreadvar[c->iel].lockPendingWrite); - listAddNodeHead(server.rgthreadvar[c->iel].clients_pending_write,c); - fastlock_unlock(&server.rgthreadvar[c->iel].lockPendingWrite); + fastlock_lock(&g_pserver->rgthreadvar[c->iel].lockPendingWrite); + listAddNodeHead(g_pserver->rgthreadvar[c->iel].clients_pending_write,c); + fastlock_unlock(&g_pserver->rgthreadvar[c->iel].lockPendingWrite); } } @@ -3919,23 +3942,36 @@ void RM_ThreadSafeContextUnlock(RedisModuleCtx *ctx) { } void moduleAcquireGIL(int fServerThread) { + std::unique_lock lock(s_mutex); + int *pcheck = fServerThread ? 
&s_cAcquisitionsModule : &s_cAcquisitionsServer; + + while (*pcheck > 0) + s_cv.wait(lock); + if (fServerThread) { - pthread_rwlock_rdlock(&moduleGIL); + ++s_cAcquisitionsServer; } else { - pthread_rwlock_wrlock(&moduleGIL); - fModuleGILWlocked = TRUE; + ++s_cAcquisitionsModule; + fModuleGILWlocked++; } } void moduleReleaseGIL(int fServerThread) { - pthread_rwlock_unlock(&moduleGIL); - if (!fServerThread) + std::unique_lock lock(s_mutex); + + if (fServerThread) + { + --s_cAcquisitionsServer; + } + else { - fModuleGILWlocked = FALSE; + --s_cAcquisitionsModule; + fModuleGILWlocked--; } + s_cv.notify_all(); } int moduleGILAcquiredByModule(void) { @@ -4107,7 +4143,7 @@ void moduleCallClusterReceivers(const char *sender_id, uint64_t module_id, uint8 * is already a callback for this function, the callback is unregistered * (so this API call is also used in order to delete the receiver). */ void RM_RegisterClusterMessageReceiver(RedisModuleCtx *ctx, uint8_t type, RedisModuleClusterMessageReceiver callback) { - if (!server.cluster_enabled) return; + if (!g_pserver->cluster_enabled) return; uint64_t module_id = moduleTypeEncodeId(ctx->module->name,0); moduleClusterReceiver *r = clusterReceivers[type], *prev = NULL; @@ -4151,7 +4187,7 @@ void RM_RegisterClusterMessageReceiver(RedisModuleCtx *ctx, uint8_t type, RedisM * otherwise if the node is not connected or such node ID does not map to any * known cluster node, REDISMODULE_ERR is returned. 
*/ int RM_SendClusterMessage(RedisModuleCtx *ctx, char *target_id, uint8_t type, unsigned char *msg, uint32_t len) { - if (!server.cluster_enabled) return REDISMODULE_ERR; + if (!g_pserver->cluster_enabled) return REDISMODULE_ERR; uint64_t module_id = moduleTypeEncodeId(ctx->module->name,0); if (clusterSendModuleMessageToTarget(target_id,module_id,type,msg,len) == C_OK) return REDISMODULE_OK; @@ -4184,10 +4220,10 @@ int RM_SendClusterMessage(RedisModuleCtx *ctx, char *target_id, uint8_t type, un char **RM_GetClusterNodesList(RedisModuleCtx *ctx, size_t *numnodes) { UNUSED(ctx); - if (!server.cluster_enabled) return NULL; - size_t count = dictSize(server.cluster->nodes); + if (!g_pserver->cluster_enabled) return NULL; + size_t count = dictSize(g_pserver->cluster->nodes); char **ids = (char**)zmalloc((count+1)*REDISMODULE_NODE_ID_LEN, MALLOC_LOCAL); - dictIterator *di = dictGetIterator(server.cluster->nodes); + dictIterator *di = dictGetIterator(g_pserver->cluster->nodes); dictEntry *de; int j = 0; while((de = dictNext(di)) != NULL) { @@ -4214,8 +4250,8 @@ void RM_FreeClusterNodesList(char **ids) { /* Return this node ID (REDISMODULE_CLUSTER_ID_LEN bytes) or NULL if the cluster * is disabled. */ const char *RM_GetMyClusterID(void) { - if (!server.cluster_enabled) return NULL; - return server.cluster->myself->name; + if (!g_pserver->cluster_enabled) return NULL; + return g_pserver->cluster->myself->name; } /* Return the number of nodes in the cluster, regardless of their state @@ -4223,8 +4259,8 @@ const char *RM_GetMyClusterID(void) { * be smaller, but not greater than this number. If the instance is not in * cluster mode, zero is returned. 
*/ size_t RM_GetClusterSize(void) { - if (!server.cluster_enabled) return 0; - return dictSize(server.cluster->nodes); + if (!g_pserver->cluster_enabled) return 0; + return dictSize(g_pserver->cluster->nodes); } /* Populate the specified info for the node having as ID the specified 'id', @@ -4304,9 +4340,9 @@ int RM_GetClusterNodeInfo(RedisModuleCtx *ctx, const char *id, char *ip, char *m void RM_SetClusterFlags(RedisModuleCtx *ctx, uint64_t flags) { UNUSED(ctx); if (flags & REDISMODULE_CLUSTER_FLAG_NO_FAILOVER) - server.cluster_module_flags |= CLUSTER_MODULE_FLAG_NO_FAILOVER; + g_pserver->cluster_module_flags |= CLUSTER_MODULE_FLAG_NO_FAILOVER; if (flags & REDISMODULE_CLUSTER_FLAG_NO_REDIRECTION) - server.cluster_module_flags |= CLUSTER_MODULE_FLAG_NO_REDIRECTION; + g_pserver->cluster_module_flags |= CLUSTER_MODULE_FLAG_NO_REDIRECTION; } /* -------------------------------------------------------------------------- @@ -4415,7 +4451,7 @@ RedisModuleTimerID RM_CreateTimer(RedisModuleCtx *ctx, mstime_t period, RedisMod if (memcmp(ri.key,&key,sizeof(key)) == 0) { /* This is the first key, we need to re-install the timer according * to the just added event. */ - aeDeleteTimeEvent(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el,aeTimer); + aeDeleteTimeEvent(g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].el,aeTimer); aeTimer = -1; } raxStop(&ri); @@ -4424,7 +4460,7 @@ RedisModuleTimerID RM_CreateTimer(RedisModuleCtx *ctx, mstime_t period, RedisMod /* If we have no main timer (the old one was invalidated, or this is the * first module timer we have), install one. 
*/ if (aeTimer == -1) - aeTimer = aeCreateTimeEvent(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el,period,moduleTimerHandler,NULL,NULL); + aeTimer = aeCreateTimeEvent(g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].el,period,moduleTimerHandler,NULL,NULL); return key; } @@ -4752,7 +4788,7 @@ int RM_ExportSharedAPI(RedisModuleCtx *ctx, const char *apiname, void *func) { RedisModuleSharedAPI *sapi = (RedisModuleSharedAPI*)zmalloc(sizeof(*sapi), MALLOC_LOCAL); sapi->module = ctx->module; sapi->func = func; - if (dictAdd(server.sharedapi, (char*)apiname, sapi) != DICT_OK) { + if (dictAdd(g_pserver->sharedapi, (char*)apiname, sapi) != DICT_OK) { zfree(sapi); return REDISMODULE_ERR; } @@ -4793,7 +4829,7 @@ int RM_ExportSharedAPI(RedisModuleCtx *ctx, const char *apiname, void *func) { * } */ void *RM_GetSharedAPI(RedisModuleCtx *ctx, const char *apiname) { - dictEntry *de = dictFind(server.sharedapi, apiname); + dictEntry *de = dictFind(g_pserver->sharedapi, apiname); if (de == NULL) return NULL; RedisModuleSharedAPI *sapi = (RedisModuleSharedAPI*)dictGetVal(de); if (listSearchKey(sapi->module->usedby,ctx->module) == NULL) { @@ -4811,13 +4847,13 @@ void *RM_GetSharedAPI(RedisModuleCtx *ctx, const char *apiname) { * The number of unregistered APIs is returned. 
*/ int moduleUnregisterSharedAPI(RedisModule *module) { int count = 0; - dictIterator *di = dictGetSafeIterator(server.sharedapi); + dictIterator *di = dictGetSafeIterator(g_pserver->sharedapi); dictEntry *de; while ((de = dictNext(di)) != NULL) { const char *apiname = (const char*)dictGetKey(de); RedisModuleSharedAPI *sapi = (RedisModuleSharedAPI*)dictGetVal(de); if (sapi->module == module) { - dictDelete(server.sharedapi,apiname); + dictDelete(g_pserver->sharedapi,apiname); zfree(sapi); count++; } @@ -5056,7 +5092,7 @@ int RM_CommandFilterArgDelete(RedisModuleCommandFilterCtx *fctx, int pos) * Modules API internals * -------------------------------------------------------------------------- */ -/* server.moduleapi dictionary type. Only uses plain C strings since +/* g_pserver->moduleapi dictionary type. Only uses plain C strings since * this gets queries from modules. */ uint64_t dictCStringKeyHash(const void *key) { @@ -5078,7 +5114,7 @@ dictType moduleAPIDictType = { }; extern "C" int moduleRegisterApi(const char *funcname, void *funcptr) { - return dictAdd(server.moduleapi, (char*)funcname, funcptr); + return dictAdd(g_pserver->moduleapi, (char*)funcname, funcptr); } #define REGISTER_API(name) \ @@ -5089,7 +5125,7 @@ void moduleRegisterCoreAPI(void); void moduleInitModulesSystem(void) { moduleUnblockedClients = listCreate(); - server.loadmodule_queue = listCreate(); + g_pserver->loadmodule_queue = listCreate(); modules = dictCreate(&modulesDictType,NULL); /* Set up the keyspace notification susbscriber list and static client */ @@ -5102,27 +5138,16 @@ void moduleInitModulesSystem(void) { moduleCommandFilters = listCreate(); moduleRegisterCoreAPI(); - if (pipe(server.module_blocked_pipe) == -1) { - serverLog(LL_WARNING, - "Can't create the pipe for module blocking commands: %s", - strerror(errno)); - exit(1); - } - /* Make the pipe non blocking. This is just a best effort aware mechanism - * and we do not want to block not in the read nor in the write half. 
*/ - anetNonBlock(NULL,server.module_blocked_pipe[0]); - anetNonBlock(NULL,server.module_blocked_pipe[1]); /* Create the timers radix tree. */ Timers = raxNew(); /* Our thread-safe contexts GIL must start with already locked: * it is just unlocked when it's safe. */ - pthread_rwlock_init(&moduleGIL, NULL); - pthread_rwlock_rdlock(&moduleGIL); + moduleAcquireGIL(true); } -/* Load all the modules in the server.loadmodule_queue list, which is +/* Load all the modules in the g_pserver->loadmodule_queue list, which is * populated by `loadmodule` directives in the configuration file. * We can't load modules directly when processing the configuration file * because the server must be fully initialized before loading modules. @@ -5135,7 +5160,7 @@ void moduleLoadFromQueue(void) { listIter li; listNode *ln; - listRewind(server.loadmodule_queue,&li); + listRewind(g_pserver->loadmodule_queue,&li); while((ln = listNext(&li))) { struct moduleLoadQueueEntry *loadmod = (moduleLoadQueueEntry*)ln->value; if (moduleLoad(loadmod->path,(void **)loadmod->argv,loadmod->argc) @@ -5158,7 +5183,7 @@ void moduleFreeModuleStructure(struct RedisModule *module) { void moduleUnregisterCommands(struct RedisModule *module) { /* Unregister all the commands registered by this module. 
*/ - dictIterator *di = dictGetSafeIterator(server.commands); + dictIterator *di = dictGetSafeIterator(g_pserver->commands); dictEntry *de; while ((de = dictNext(di)) != NULL) { struct redisCommand *cmd = (redisCommand*)dictGetVal(de); @@ -5167,8 +5192,8 @@ void moduleUnregisterCommands(struct RedisModule *module) { (RedisModuleCommandProxy*)(unsigned long)cmd->getkeys_proc; sds cmdname = (sds)cp->rediscmd->name; if (cp->module == module) { - dictDelete(server.commands,cmdname); - dictDelete(server.orig_commands,cmdname); + dictDelete(g_pserver->commands,cmdname); + dictDelete(g_pserver->orig_commands,cmdname); sdsfree(cmdname); zfree(cp->rediscmd); zfree(cp); @@ -5353,8 +5378,8 @@ size_t moduleCount(void) { /* Register all the APIs we export. Keep this function at the end of the * file so that's easy to seek it to add new entries. */ void moduleRegisterCoreAPI(void) { - server.moduleapi = dictCreate(&moduleAPIDictType,NULL); - server.sharedapi = dictCreate(&moduleAPIDictType,NULL); + g_pserver->moduleapi = dictCreate(&moduleAPIDictType,NULL); + g_pserver->sharedapi = dictCreate(&moduleAPIDictType,NULL); REGISTER_API(Alloc); REGISTER_API(Calloc); REGISTER_API(Realloc); diff --git a/src/multi.cpp b/src/multi.cpp index caf8555b2..262a8f1d0 100644 --- a/src/multi.cpp +++ b/src/multi.cpp @@ -111,7 +111,7 @@ void discardCommand(client *c) { void execCommandPropagateMulti(client *c) { robj *multistring = createStringObject("MULTI",5); - propagate(server.multiCommand,c->db->id,&multistring,1, + propagate(cserver.multiCommand,c->db->id,&multistring,1, PROPAGATE_AOF|PROPAGATE_REPL); decrRefCount(multistring); } @@ -122,7 +122,7 @@ void execCommand(client *c) { int orig_argc; struct redisCommand *orig_cmd; int must_propagate = 0; /* Need to propagate MULTI/EXEC to AOF / slaves? 
*/ - int was_master = listLength(server.masters) == 0; + int was_master = listLength(g_pserver->masters) == 0; if (!(c->flags & CLIENT_MULTI)) { addReplyError(c,"EXEC without MULTI"); @@ -147,7 +147,7 @@ void execCommand(client *c) { * was initiated when the instance was a master or a writable replica and * then the configuration changed (for example instance was turned into * a replica). */ - if (!server.loading && listLength(server.masters) && server.repl_slave_ro && + if (!g_pserver->loading && listLength(g_pserver->masters) && g_pserver->repl_slave_ro && !(c->flags & CLIENT_MASTER) && c->mstate.cmd_flags & CMD_WRITE) { addReplyError(c, @@ -178,7 +178,7 @@ void execCommand(client *c) { must_propagate = 1; } - call(c,server.loading ? CMD_CALL_NONE : CMD_CALL_FULL); + call(c,g_pserver->loading ? CMD_CALL_NONE : CMD_CALL_FULL); /* Commands may alter argc/argv, restore mstate. */ c->mstate.commands[j].argc = c->argc; @@ -193,14 +193,14 @@ void execCommand(client *c) { /* Make sure the EXEC command will be propagated as well if MULTI * was already propagated. */ if (must_propagate) { - int is_master = listLength(server.masters) == 0; - server.dirty++; + int is_master = listLength(g_pserver->masters) == 0; + g_pserver->dirty++; /* If inside the MULTI/EXEC block this instance was suddenly * switched from master to slave (using the SLAVEOF command), the * initial MULTI was propagated into the replication backlog, but the * rest was not. We need to make sure to at least terminate the * backlog with the final EXEC. */ - if (server.repl_backlog && was_master && !is_master) { + if (g_pserver->repl_backlog && was_master && !is_master) { const char *execcmd = "*1\r\n$4\r\nEXEC\r\n"; feedReplicationBacklog(execcmd,strlen(execcmd)); } @@ -212,8 +212,8 @@ void execCommand(client *c) { * MUTLI, EXEC, ... commands inside transaction ... * Instead EXEC is flagged as CMD_SKIP_MONITOR in the command * table, and we do it here with correct ordering. 
*/ - if (listLength(server.monitors) && !server.loading) - replicationFeedMonitors(c,server.monitors,c->db->id,c->argv,c->argc); + if (listLength(g_pserver->monitors) && !g_pserver->loading) + replicationFeedMonitors(c,g_pserver->monitors,c->db->id,c->argv,c->argc); } /* ===================== WATCH (CAS alike for MULTI/EXEC) =================== @@ -323,7 +323,7 @@ void touchWatchedKeysOnFlush(int dbid) { serverAssert(GlobalLocksAcquired()); /* For every client, check all the waited keys */ - listRewind(server.clients,&li1); + listRewind(g_pserver->clients,&li1); while((ln = listNext(&li1))) { client *c = (client*)listNodeValue(ln); listRewind(c->watched_keys,&li2); diff --git a/src/networking.cpp b/src/networking.cpp index 88cc7afb2..29d17c8a3 100644 --- a/src/networking.cpp +++ b/src/networking.cpp @@ -133,14 +133,14 @@ int listMatchObjects(void *a, void *b) { /* This function links the client to the global linked list of clients. * unlinkClient() does the opposite, among other things. */ void linkClient(client *c) { - listAddNodeTail(server.clients,c); + listAddNodeTail(g_pserver->clients,c); /* Note that we remember the linked list node where the client is stored, * this way removing the client in unlinkClient() will not require * a linear scan, but just a constant time operation. 
*/ - c->client_list_node = listLast(server.clients); - if (c->fd != -1) atomicIncr(server.rgthreadvar[c->iel].cclients, 1); + c->client_list_node = listLast(g_pserver->clients); + if (c->fd != -1) atomicIncr(g_pserver->rgthreadvar[c->iel].cclients, 1); uint64_t id = htonu64(c->id); - raxInsert(server.clients_index,(unsigned char*)&id,sizeof(id),c,NULL); + raxInsert(g_pserver->clients_index,(unsigned char*)&id,sizeof(id),c,NULL); } client *createClient(int fd, int iel) { @@ -154,9 +154,9 @@ client *createClient(int fd, int iel) { if (fd != -1) { anetNonBlock(NULL,fd); anetEnableTcpNoDelay(NULL,fd); - if (server.tcpkeepalive) - anetKeepAlive(NULL,fd,server.tcpkeepalive); - if (aeCreateFileEvent(server.rgthreadvar[iel].el,fd,AE_READABLE|AE_READ_THREADSAFE, + if (cserver.tcpkeepalive) + anetKeepAlive(NULL,fd,cserver.tcpkeepalive); + if (aeCreateFileEvent(g_pserver->rgthreadvar[iel].el,fd,AE_READABLE|AE_READ_THREADSAFE, readQueryFromClient, c) == AE_ERR) { close(fd); @@ -167,7 +167,7 @@ client *createClient(int fd, int iel) { selectDb(c,0); uint64_t client_id; - atomicGetIncr(server.next_client_id,client_id,1); + atomicGetIncr(g_pserver->next_client_id,client_id,1); c->iel = iel; fastlock_init(&c->lock); c->id = client_id; @@ -190,7 +190,7 @@ client *createClient(int fd, int iel) { c->sentlenAsync = 0; c->flags = 0; c->fPendingAsyncWrite = FALSE; - c->ctime = c->lastinteraction = server.unixtime; + c->ctime = c->lastinteraction = g_pserver->unixtime; /* If the default user does not require authentication, the user is * directly authenticated. */ c->authenticated = (c->puser->flags & USER_FLAG_NOPASS) != 0; @@ -260,8 +260,8 @@ void clientInstallWriteHandler(client *c) { * a system call. We'll only really install the write handler if * we'll not be able to write the whole reply at once. 
*/ c->flags |= CLIENT_PENDING_WRITE; - std::unique_lock lockf(server.rgthreadvar[c->iel].lockPendingWrite); - listAddNodeHead(server.rgthreadvar[c->iel].clients_pending_write,c); + std::unique_lock lockf(g_pserver->rgthreadvar[c->iel].lockPendingWrite); + listAddNodeHead(g_pserver->rgthreadvar[c->iel].clients_pending_write,c); } } @@ -1042,7 +1042,7 @@ static void acceptCommonHandler(int fd, int flags, char *ip, int iel) { #ifdef HAVE_SO_INCOMING_CPU // Set thread affinity - if (server.fThreadAffinity) + if (cserver.fThreadAffinity) { int cpu = iel; if (setsockopt(fd, SOL_SOCKET, SO_INCOMING_CPU, &cpu, sizeof(iel)) != 0) @@ -1056,14 +1056,14 @@ static void acceptCommonHandler(int fd, int flags, char *ip, int iel) { * connection. Note that we create the client instead to check before * for this condition, since now the socket is already set in non-blocking * mode and we can send an error for free using the Kernel I/O */ - if (listLength(server.clients) > server.maxclients) { + if (listLength(g_pserver->clients) > g_pserver->maxclients) { const char *err = "-ERR max number of clients reached\r\n"; /* That's a best effort error message, don't check write errors */ if (write(c->fd,err,strlen(err)) == -1) { /* Nothing to do, Just to avoid the warning... */ } - server.stat_rejected_conn++; + g_pserver->stat_rejected_conn++; freeClient(c); return; } @@ -1072,8 +1072,8 @@ static void acceptCommonHandler(int fd, int flags, char *ip, int iel) { * is no password set, nor a specific interface is bound, we don't accept * requests from non loopback interfaces. Instead we try to explain the * user what to do to fix it if needed. */ - if (server.protected_mode && - server.bindaddr_count == 0 && + if (g_pserver->protected_mode && + g_pserver->bindaddr_count == 0 && DefaultUser->flags & USER_FLAG_NOPASS && !(flags & CLIENT_UNIX_SOCKET) && ip != NULL) @@ -1094,7 +1094,7 @@ static void acceptCommonHandler(int fd, int flags, char *ip, int iel) { "change permanent. 
" "2) Alternatively you can just disable the protected mode by " "editing the Redis configuration file, and setting the protected " - "mode option to 'no', and then restarting the server. " + "mode option to 'no', and then restarting the g_pserver-> " "3) If you started the server manually just for testing, restart " "it with the '--protected-mode no' option. " "4) Setup a bind address or an authentication password. " @@ -1103,13 +1103,13 @@ static void acceptCommonHandler(int fd, int flags, char *ip, int iel) { if (write(c->fd,err,strlen(err)) == -1) { /* Nothing to do, Just to avoid the warning... */ } - server.stat_rejected_conn++; + g_pserver->stat_rejected_conn++; freeClient(c); return; } } - server.stat_numconnections++; + g_pserver->stat_numconnections++; c->flags |= flags; } @@ -1120,11 +1120,11 @@ void acceptTcpHandler(aeEventLoop *el, int fd, void *privdata, int mask) { UNUSED(privdata); while(max--) { - cfd = anetTcpAccept(server.neterr, fd, cip, sizeof(cip), &cport); + cfd = anetTcpAccept(g_pserver->neterr, fd, cip, sizeof(cip), &cport); if (cfd == ANET_ERR) { if (errno != EWOULDBLOCK) serverLog(LL_WARNING, - "Accepting client connection: %s", server.neterr); + "Accepting client connection: %s", g_pserver->neterr); return; } serverLog(LL_VERBOSE,"Accepted %s:%d", cip, cport); @@ -1144,15 +1144,15 @@ void acceptUnixHandler(aeEventLoop *el, int fd, void *privdata, int mask) { UNUSED(privdata); while(max--) { - cfd = anetUnixAccept(server.neterr, fd); + cfd = anetUnixAccept(g_pserver->neterr, fd); if (cfd == ANET_ERR) { if (errno != EWOULDBLOCK) serverLog(LL_WARNING, - "Accepting client connection: %s", server.neterr); + "Accepting client connection: %s", g_pserver->neterr); return; } int ielCur = ielFromEventLoop(el); - serverLog(LL_VERBOSE,"Accepted connection to %s", server.unixsocket); + serverLog(LL_VERBOSE,"Accepted connection to %s", g_pserver->unixsocket); aeAcquireLock(); acceptCommonHandler(cfd,CLIENT_UNIX_SOCKET,NULL, ielCur); @@ -1175,7 +1175,7 
@@ void disconnectSlavesExcept(unsigned char *uuid) listIter li; listNode *ln; - listRewind(server.slaves, &li); + listRewind(g_pserver->slaves, &li); while ((ln = listNext(&li))) { client *c = (client*)listNodeValue(ln); if (uuid == nullptr || !FUuidEqual(c->uuid, uuid)) @@ -1200,7 +1200,7 @@ void unlinkClient(client *c) { serverAssert(c->fd == -1 || c->lock.fOwnLock()); /* If this is marked as current client unset it. */ - if (server.current_client == c) server.current_client = NULL; + if (serverTL && serverTL->current_client == c) serverTL->current_client = NULL; /* Certain operations must be done only if the client has an active socket. * If the client was already unlinked or if it's a "fake client" the @@ -1209,8 +1209,8 @@ void unlinkClient(client *c) { /* Remove from the list of active clients. */ if (c->client_list_node) { uint64_t id = htonu64(c->id); - raxRemove(server.clients_index,(unsigned char*)&id,sizeof(id),NULL); - listDelNode(server.clients,c->client_list_node); + raxRemove(g_pserver->clients_index,(unsigned char*)&id,sizeof(id),NULL); + listDelNode(g_pserver->clients,c->client_list_node); c->client_list_node = NULL; } @@ -1225,42 +1225,42 @@ void unlinkClient(client *c) { } /* Unregister async I/O handlers and close the socket. */ - aeDeleteFileEvent(server.rgthreadvar[c->iel].el,c->fd,AE_READABLE); - aeDeleteFileEvent(server.rgthreadvar[c->iel].el,c->fd,AE_WRITABLE); + aeDeleteFileEvent(g_pserver->rgthreadvar[c->iel].el,c->fd,AE_READABLE); + aeDeleteFileEvent(g_pserver->rgthreadvar[c->iel].el,c->fd,AE_WRITABLE); close(c->fd); c->fd = -1; - atomicDecr(server.rgthreadvar[c->iel].cclients, 1); + atomicDecr(g_pserver->rgthreadvar[c->iel].cclients, 1); } /* Remove from the list of pending writes if needed. 
*/ if (c->flags & CLIENT_PENDING_WRITE) { - std::unique_lock lockf(server.rgthreadvar[c->iel].lockPendingWrite); - ln = listSearchKey(server.rgthreadvar[c->iel].clients_pending_write,c); + std::unique_lock lockf(g_pserver->rgthreadvar[c->iel].lockPendingWrite); + ln = listSearchKey(g_pserver->rgthreadvar[c->iel].clients_pending_write,c); serverAssert(ln != NULL); - listDelNode(server.rgthreadvar[c->iel].clients_pending_write,ln); + listDelNode(g_pserver->rgthreadvar[c->iel].clients_pending_write,ln); c->flags &= ~CLIENT_PENDING_WRITE; } /* When client was just unblocked because of a blocking operation, * remove it from the list of unblocked clients. */ if (c->flags & CLIENT_UNBLOCKED) { - ln = listSearchKey(server.rgthreadvar[c->iel].unblocked_clients,c); + ln = listSearchKey(g_pserver->rgthreadvar[c->iel].unblocked_clients,c); serverAssert(ln != NULL); - listDelNode(server.rgthreadvar[c->iel].unblocked_clients,ln); + listDelNode(g_pserver->rgthreadvar[c->iel].unblocked_clients,ln); c->flags &= ~CLIENT_UNBLOCKED; } if (c->fPendingAsyncWrite) { ln = NULL; bool fFound = false; - for (int iel = 0; iel < server.cthreads; ++iel) + for (int iel = 0; iel < cserver.cthreads; ++iel) { - ln = listSearchKey(server.rgthreadvar[iel].clients_pending_asyncwrite,c); + ln = listSearchKey(g_pserver->rgthreadvar[iel].clients_pending_asyncwrite,c); if (ln) { fFound = true; - listDelNode(server.rgthreadvar[iel].clients_pending_asyncwrite,ln); + listDelNode(g_pserver->rgthreadvar[iel].clients_pending_asyncwrite,ln); } } serverAssert(fFound); @@ -1338,15 +1338,15 @@ void freeClient(client *c) { if (c->repldbfd != -1) close(c->repldbfd); if (c->replpreamble) sdsfree(c->replpreamble); } - list *l = (c->flags & CLIENT_MONITOR) ? server.monitors : server.slaves; + list *l = (c->flags & CLIENT_MONITOR) ? 
g_pserver->monitors : g_pserver->slaves; ln = listSearchKey(l,c); serverAssert(ln != NULL); listDelNode(l,ln); /* We need to remember the time when we started to have zero * attached slaves, as after some time we'll free the replication * backlog. */ - if (c->flags & CLIENT_SLAVE && listLength(server.slaves) == 0) - server.repl_no_slaves_since = server.unixtime; + if (c->flags & CLIENT_SLAVE && listLength(g_pserver->slaves) == 0) + g_pserver->repl_no_slaves_since = g_pserver->unixtime; refreshGoodSlavesCount(); } @@ -1357,9 +1357,9 @@ /* If this client was scheduled for async freeing we need to remove it * from the queue. */ if (c->flags & CLIENT_CLOSE_ASAP) { - ln = listSearchKey(server.clients_to_close,c); + ln = listSearchKey(g_pserver->clients_to_close,c); serverAssert(ln != NULL); - listDelNode(server.clients_to_close,ln); + listDelNode(g_pserver->clients_to_close,ln); } /* Release other dynamically allocated client structure fields, @@ -1384,13 +1384,13 @@ void freeClientAsync(client *c) { lock.arm(nullptr); std::lock_guard<decltype(c->lock)> clientlock(c->lock); c->flags |= CLIENT_CLOSE_ASAP; - listAddNodeTail(server.clients_to_close,c); + listAddNodeTail(g_pserver->clients_to_close,c); } void freeClientsInAsyncFreeQueue(int iel) { listIter li; listNode *ln; - listRewind(server.clients_to_close,&li); + listRewind(g_pserver->clients_to_close,&li); while((ln = listNext(&li))) { client *c = (client*)listNodeValue(ln); @@ -1399,8 +1399,8 @@ c->flags &= ~CLIENT_CLOSE_ASAP; freeClient(c); - listDelNode(server.clients_to_close,ln); - listRewind(server.clients_to_close,&li); + listDelNode(g_pserver->clients_to_close,ln); + listRewind(g_pserver->clients_to_close,&li); } } @@ -1409,7 +1409,7 @@ void freeClientsInAsyncFreeQueue(int iel) { * are not registered clients. 
*/ client *lookupClientByID(uint64_t id) { id = htonu64(id); - client *c = (client*)raxFind(server.clients_index,(unsigned char*)&id,sizeof(id)); + client *c = (client*)raxFind(g_pserver->clients_index,(unsigned char*)&id,sizeof(id)); return (c == raxNotFound) ? NULL : c; } @@ -1475,12 +1475,12 @@ int writeToClient(int fd, client *c, int handler_installed) { * a slave (otherwise, on high-speed traffic, the replication * buffer will grow indefinitely) */ if (totwritten > NET_MAX_WRITES_PER_EVENT && - (server.maxmemory == 0 || - zmalloc_used_memory() < server.maxmemory) && + (g_pserver->maxmemory == 0 || + zmalloc_used_memory() < g_pserver->maxmemory) && !(c->flags & CLIENT_SLAVE)) break; } - __atomic_fetch_add(&server.stat_net_output_bytes, totwritten, __ATOMIC_RELAXED); + __atomic_fetch_add(&g_pserver->stat_net_output_bytes, totwritten, __ATOMIC_RELAXED); if (nwritten == -1) { if (errno == EAGAIN) { nwritten = 0; @@ -1506,11 +1506,11 @@ int writeToClient(int fd, client *c, int handler_installed) { * as an interaction, since we always send REPLCONF ACK commands * that take some time to just fill the socket output buffer. * We just rely on data / pings received for timeout detection. */ - if (!(c->flags & CLIENT_MASTER)) c->lastinteraction = server.unixtime; + if (!(c->flags & CLIENT_MASTER)) c->lastinteraction = g_pserver->unixtime; } if (!clientHasPendingReplies(c)) { c->sentlen = 0; - if (handler_installed) aeDeleteFileEvent(server.rgthreadvar[c->iel].el,c->fd,AE_WRITABLE); + if (handler_installed) aeDeleteFileEvent(g_pserver->rgthreadvar[c->iel].el,c->fd,AE_WRITABLE); /* Close connection after entire reply has been sent. */ if (c->flags & CLIENT_CLOSE_AFTER_REPLY) { @@ -1578,8 +1578,8 @@ void ProcessPendingAsyncWrites() * so that in the middle of receiving the query, and serving it * to the client, we'll call beforeSleep() that will do the * actual fsync of AOF to disk. AE_BARRIER ensures that. 
*/ - if (server.aof_state == AOF_ON && - server.aof_fsync == AOF_FSYNC_ALWAYS) + if (g_pserver->aof_state == AOF_ON && + g_pserver->aof_fsync == AOF_FSYNC_ALWAYS) { ae_flags |= AE_BARRIER; } @@ -1589,7 +1589,7 @@ void ProcessPendingAsyncWrites() continue; asyncCloseClientOnOutputBufferLimitReached(c); - if (aeCreateRemoteFileEvent(server.rgthreadvar[c->iel].el, c->fd, ae_flags, sendReplyToClient, c, FALSE) == AE_ERR) + if (aeCreateRemoteFileEvent(g_pserver->rgthreadvar[c->iel].el, c->fd, ae_flags, sendReplyToClient, c, FALSE) == AE_ERR) continue; // We can retry later in the cron } } @@ -1602,10 +1602,10 @@ int handleClientsWithPendingWrites(int iel) { listIter li; listNode *ln; - std::unique_lock lockf(server.rgthreadvar[iel].lockPendingWrite); - list *list = server.rgthreadvar[iel].clients_pending_write; + std::unique_lock lockf(g_pserver->rgthreadvar[iel].lockPendingWrite); + list *list = g_pserver->rgthreadvar[iel].clients_pending_write; int processed = listLength(list); - serverAssert(iel == (serverTL - server.rgthreadvar)); + serverAssert(iel == (serverTL - g_pserver->rgthreadvar)); listRewind(list,&li); while((ln = listNext(&li))) { @@ -1635,13 +1635,13 @@ int handleClientsWithPendingWrites(int iel) { * so that in the middle of receiving the query, and serving it * to the client, we'll call beforeSleep() that will do the * actual fsync of AOF to disk. AE_BARRIER ensures that. 
*/ - if (server.aof_state == AOF_ON && - server.aof_fsync == AOF_FSYNC_ALWAYS) + if (g_pserver->aof_state == AOF_ON && + g_pserver->aof_fsync == AOF_FSYNC_ALWAYS) { ae_flags |= AE_BARRIER; } - if (aeCreateFileEvent(server.rgthreadvar[c->iel].el, c->fd, ae_flags, sendReplyToClient, c) == AE_ERR) + if (aeCreateFileEvent(g_pserver->rgthreadvar[c->iel].el, c->fd, ae_flags, sendReplyToClient, c) == AE_ERR) freeClientAsync(c); } } @@ -1693,8 +1693,8 @@ void resetClient(client *c) { void protectClient(client *c) { c->flags |= CLIENT_PROTECTED; AssertCorrectThread(c); - aeDeleteFileEvent(server.rgthreadvar[c->iel].el,c->fd,AE_READABLE); - aeDeleteFileEvent(server.rgthreadvar[c->iel].el,c->fd,AE_WRITABLE); + aeDeleteFileEvent(g_pserver->rgthreadvar[c->iel].el,c->fd,AE_READABLE); + aeDeleteFileEvent(g_pserver->rgthreadvar[c->iel].el,c->fd,AE_WRITABLE); } /* This will undo the client protection done by protectClient() */ @@ -1702,7 +1702,7 @@ void unprotectClient(client *c) { AssertCorrectThread(c); if (c->flags & CLIENT_PROTECTED) { c->flags &= ~CLIENT_PROTECTED; - aeCreateFileEvent(server.rgthreadvar[c->iel].el,c->fd,AE_READABLE|AE_READ_THREADSAFE,readQueryFromClient,c); + aeCreateFileEvent(g_pserver->rgthreadvar[c->iel].el,c->fd,AE_READABLE|AE_READ_THREADSAFE,readQueryFromClient,c); if (clientHasPendingReplies(c)) clientInstallWriteHandler(c); } } @@ -1751,7 +1751,7 @@ int processInlineBuffer(client *c) { * This is useful for a slave to ping back while loading a big * RDB file. */ if (querylen == 0 && c->flags & CLIENT_SLAVE) - c->repl_ack_time = server.unixtime; + c->repl_ack_time = g_pserver->unixtime; /* Move querybuffer position to the next query in the buffer. */ c->qb_pos += querylen+linefeed_chars; @@ -1779,7 +1779,7 @@ int processInlineBuffer(client *c) { * and set the client as CLIENT_CLOSE_AFTER_REPLY. 
*/ #define PROTO_DUMP_LEN 128 static void setProtocolError(const char *errstr, client *c) { - if (server.verbosity <= LL_VERBOSE) { + if (cserver.verbosity <= LL_VERBOSE) { sds client = catClientInfoString(sdsempty(),c); /* Sample some protocol to given an idea about what was inside. */ @@ -1888,7 +1888,7 @@ int processMultibulkBuffer(client *c) { } ok = string2ll(c->querybuf+c->qb_pos+1,newline-(c->querybuf+c->qb_pos+1),&ll); - if (!ok || ll < 0 || ll > server.proto_max_bulk_len) { + if (!ok || ll < 0 || ll > g_pserver->proto_max_bulk_len) { addReplyError(c,"Protocol error: invalid bulk length"); setProtocolError("invalid bulk length",c); return C_ERR; @@ -1971,7 +1971,7 @@ void processInputBuffer(client *c, int callFlags) { * condition on the slave. We want just to accumulate the replication * stream (instead of replying -BUSY like we do with other clients) and * later resume the processing. */ - if (server.lua_timedout && c->flags & CLIENT_MASTER) break; + if (g_pserver->lua_timedout && c->flags & CLIENT_MASTER) break; /* CLIENT_CLOSE_AFTER_REPLY closes the connection once the reply is * written to the client. Make sure to not let the reply grow after @@ -2003,7 +2003,7 @@ void processInputBuffer(client *c, int callFlags) { } else { AeLocker locker; locker.arm(c); - server.current_client = c; + serverTL->current_client = c; /* Only reset the client when the command was executed. */ if (processCommand(c, callFlags) == C_OK) { @@ -2022,11 +2022,11 @@ void processInputBuffer(client *c, int callFlags) { /* freeMemoryIfNeeded may flush slave output buffers. This may * result into a slave, that may be the active client, to be * freed. 
*/ - if (server.current_client == NULL) { + if (serverTL->current_client == NULL) { fFreed = true; break; } - server.current_client = NULL; + serverTL->current_client = NULL; } } @@ -2049,10 +2049,10 @@ void processInputBufferAndReplicate(client *c) { processInputBuffer(c, CMD_CALL_FULL); size_t applied = c->reploff - prev_offset; if (applied) { - if (!server.fActiveReplica) + if (!g_pserver->fActiveReplica) { aeAcquireLock(); - replicationFeedSlavesFromMasterStream(server.slaves, + replicationFeedSlavesFromMasterStream(g_pserver->slaves, c->pending_querybuf, applied); aeReleaseLock(); } @@ -2124,10 +2124,10 @@ void readQueryFromClient(aeEventLoop *el, int fd, void *privdata, int mask) { } sdsIncrLen(c->querybuf,nread); - c->lastinteraction = server.unixtime; + c->lastinteraction = g_pserver->unixtime; if (c->flags & CLIENT_MASTER) c->read_reploff += nread; - server.stat_net_input_bytes += nread; - if (sdslen(c->querybuf) > server.client_max_querybuf_len) { + g_pserver->stat_net_input_bytes += nread; + if (sdslen(c->querybuf) > cserver.client_max_querybuf_len) { sds ci = catClientInfoString(sdsempty(),c), bytes = sdsempty(); bytes = sdscatrepr(bytes,c->querybuf,64); @@ -2158,7 +2158,7 @@ void getClientsMaxBuffers(unsigned long *longest_output_list, listIter li; unsigned long lol = 0, bib = 0; - listRewind(server.clients,&li); + listRewind(g_pserver->clients,&li); while ((ln = listNext(&li)) != NULL) { c = (client*)listNodeValue(ln); @@ -2184,7 +2184,7 @@ void genClientPeerId(client *client, char *peerid, size_t peerid_len) { if (client->flags & CLIENT_UNIX_SOCKET) { /* Unix socket client. */ - snprintf(peerid,peerid_len,"%s:0",server.unixsocket); + snprintf(peerid,peerid_len,"%s:0",g_pserver->unixsocket); } else { /* TCP client. */ anetFormatPeer(client->fd,peerid,peerid_len); @@ -2231,7 +2231,7 @@ sds catClientInfoString(sds s, client *client) { if (p == flags) *p++ = 'N'; *p++ = '\0'; - emask = client->fd == -1 ? 
0 : aeGetFileEvents(server.rgthreadvar[client->iel].el,client->fd); + emask = client->fd == -1 ? 0 : aeGetFileEvents(g_pserver->rgthreadvar[client->iel].el,client->fd); p = events; if (emask & AE_READABLE) *p++ = 'r'; if (emask & AE_WRITABLE) *p++ = 'w'; @@ -2242,8 +2242,8 @@ sds catClientInfoString(sds s, client *client) { getClientPeerId(client), client->fd, client->name ? (char*)ptrFromObj(client->name) : "", - (long long)(server.unixtime - client->ctime), - (long long)(server.unixtime - client->lastinteraction), + (long long)(g_pserver->unixtime - client->ctime), + (long long)(g_pserver->unixtime - client->lastinteraction), flags, client->db->id, (int) dictSize(client->pubsub_channels), @@ -2262,9 +2262,9 @@ sds getAllClientsInfoString(int type) { listNode *ln; listIter li; client *client; - sds o = sdsnewlen(SDS_NOINIT,200*listLength(server.clients)); + sds o = sdsnewlen(SDS_NOINIT,200*listLength(g_pserver->clients)); sdsclear(o); - listRewind(server.clients,&li); + listRewind(g_pserver->clients,&li); while ((ln = listNext(&li)) != NULL) { client = reinterpret_cast<client*>(listNodeValue(ln)); if (type != -1 && getClientType(client) != type) continue; @@ -2426,7 +2426,7 @@ NULL } /* Iterate clients killing all the matching clients. 
*/ - listRewind(server.clients,&li); + listRewind(g_pserver->clients,&li); while ((ln = listNext(&li)) != NULL) { client = (struct client*)listNodeValue(ln); if (addr && strcmp(getClientPeerId(client),addr) != 0) continue; @@ -2557,7 +2557,7 @@ void helloCommand(client *c) { addReplyBulkCString(c,"redis"); addReplyBulkCString(c,"version"); - addReplyBulkCString(c,REDIS_VERSION); + addReplyBulkCString(c,KEYDB_SET_VERSION); addReplyBulkCString(c,"proto"); addReplyLongLong(c,3); @@ -2566,13 +2566,13 @@ void helloCommand(client *c) { addReplyLongLong(c,c->id); addReplyBulkCString(c,"mode"); - if (server.sentinel_mode) addReplyBulkCString(c,"sentinel"); - if (server.cluster_enabled) addReplyBulkCString(c,"cluster"); + if (g_pserver->sentinel_mode) addReplyBulkCString(c,"sentinel"); + if (g_pserver->cluster_enabled) addReplyBulkCString(c,"cluster"); else addReplyBulkCString(c,"standalone"); - if (!server.sentinel_mode) { + if (!g_pserver->sentinel_mode) { addReplyBulkCString(c,"role"); - addReplyBulkCString(c,listLength(server.masters) ? "replica" : "master"); + addReplyBulkCString(c,listLength(g_pserver->masters) ? "replica" : "master"); } addReplyBulkCString(c,"modules"); @@ -2739,24 +2739,24 @@ int checkClientOutputBufferLimits(client *c) { * like normal clients. 
*/ if (clientType == CLIENT_TYPE_MASTER) clientType = CLIENT_TYPE_NORMAL; - if (server.client_obuf_limits[clientType].hard_limit_bytes && - used_mem >= server.client_obuf_limits[clientType].hard_limit_bytes) + if (cserver.client_obuf_limits[clientType].hard_limit_bytes && + used_mem >= cserver.client_obuf_limits[clientType].hard_limit_bytes) hard = 1; - if (server.client_obuf_limits[clientType].soft_limit_bytes && - used_mem >= server.client_obuf_limits[clientType].soft_limit_bytes) + if (cserver.client_obuf_limits[clientType].soft_limit_bytes && + used_mem >= cserver.client_obuf_limits[clientType].soft_limit_bytes) soft = 1; /* We need to check if the soft limit is reached continuously for the * specified amount of seconds. */ if (soft) { if (c->obuf_soft_limit_reached_time == 0) { - c->obuf_soft_limit_reached_time = server.unixtime; + c->obuf_soft_limit_reached_time = g_pserver->unixtime; soft = 0; /* First time we see the soft limit reached */ } else { - time_t elapsed = server.unixtime - c->obuf_soft_limit_reached_time; + time_t elapsed = g_pserver->unixtime - c->obuf_soft_limit_reached_time; if (elapsed <= - server.client_obuf_limits[clientType].soft_limit_seconds) { + cserver.client_obuf_limits[clientType].soft_limit_seconds) { soft = 0; /* The client still did not reached the max number of seconds for the soft limit to be considered reached. */ @@ -2797,7 +2797,7 @@ void flushSlavesOutputBuffers(void) { listIter li; listNode *ln; - listRewind(server.slaves,&li); + listRewind(g_pserver->slaves,&li); while((ln = listNext(&li))) { client *slave = (client*)listNodeValue(ln); int events; @@ -2811,7 +2811,7 @@ void flushSlavesOutputBuffers(void) { * of put_online_on_ack is to postpone the moment it is installed. * This is what we want since slaves in this state should not receive * writes before the first ACK. 
*/ - events = aeGetFileEvents(server.rgthreadvar[slave->iel].el,slave->fd); + events = aeGetFileEvents(g_pserver->rgthreadvar[slave->iel].el,slave->fd); if (events & AE_WRITABLE && slave->replstate == SLAVE_STATE_ONLINE && clientHasPendingReplies(slave)) @@ -2839,27 +2839,27 @@ void flushSlavesOutputBuffers(void) { * than the time left for the previous pause, no change is made to the * left duration. */ void pauseClients(mstime_t end) { - if (!server.clients_paused || end > server.clients_pause_end_time) - server.clients_pause_end_time = end; - server.clients_paused = 1; + if (!g_pserver->clients_paused || end > g_pserver->clients_pause_end_time) + g_pserver->clients_pause_end_time = end; + g_pserver->clients_paused = 1; } /* Return non-zero if clients are currently paused. As a side effect the * function checks if the pause time was reached and clear it. */ int clientsArePaused(void) { - if (server.clients_paused && - server.clients_pause_end_time < server.mstime) + if (g_pserver->clients_paused && + g_pserver->clients_pause_end_time < g_pserver->mstime) { aeAcquireLock(); listNode *ln; listIter li; client *c; - server.clients_paused = 0; + g_pserver->clients_paused = 0; /* Put all the clients in the unblocked clients queue in order to * force the re-processing of the input buffer if any. 
*/ - listRewind(server.clients,&li); + listRewind(g_pserver->clients,&li); while ((ln = listNext(&li)) != NULL) { c = (client*)listNodeValue(ln); @@ -2870,7 +2870,7 @@ int clientsArePaused(void) { } aeReleaseLock(); } - return server.clients_paused; + return g_pserver->clients_paused; } /* This function is called by Redis in order to process a few events from @@ -2892,7 +2892,7 @@ int processEventsWhileBlocked(int iel) { aeReleaseLock(); while (iterations--) { int events = 0; - events += aeProcessEvents(server.rgthreadvar[iel].el, AE_FILE_EVENTS|AE_DONT_WAIT); + events += aeProcessEvents(g_pserver->rgthreadvar[iel].el, AE_FILE_EVENTS|AE_DONT_WAIT); events += handleClientsWithPendingWrites(iel); if (!events) break; count += events; diff --git a/src/new.h b/src/new.h new file mode 100644 index 000000000..e2ec0032f --- /dev/null +++ b/src/new.h @@ -0,0 +1,23 @@ +#pragma once +#include // std::size_t + +[[deprecated]] +inline void *operator new(size_t size) +{ + return zmalloc(size, MALLOC_LOCAL); +} + +inline void *operator new(size_t size, enum MALLOC_CLASS mclass) +{ + return zmalloc(size, mclass); +} + +inline void operator delete(void * p) +{ + zfree(p); +} + +inline void operator delete(void *p, std::size_t) +{ + zfree(p); +} \ No newline at end of file diff --git a/src/notify.cpp b/src/notify.cpp index da7b53407..d14da1990 100644 --- a/src/notify.cpp +++ b/src/notify.cpp @@ -109,12 +109,12 @@ void notifyKeyspaceEvent(int type, const char *event, robj *key, int dbid) { moduleNotifyKeyspaceEvent(type, event, key, dbid); /* If notifications for this class of events are off, return ASAP. */ - if (!(server.notify_keyspace_events & type)) return; + if (!(g_pserver->notify_keyspace_events & type)) return; eventobj = createStringObject(event,strlen(event)); /* __keyspace@__: notifications. 
*/ - if (server.notify_keyspace_events & NOTIFY_KEYSPACE) { + if (g_pserver->notify_keyspace_events & NOTIFY_KEYSPACE) { chan = sdsnewlen("__keyspace@",11); len = ll2string(buf,sizeof(buf),dbid); chan = sdscatlen(chan, buf, len); @@ -126,7 +126,7 @@ void notifyKeyspaceEvent(int type, const char *event, robj *key, int dbid) { } /* __keyevent@__: notifications. */ - if (server.notify_keyspace_events & NOTIFY_KEYEVENT) { + if (g_pserver->notify_keyspace_events & NOTIFY_KEYEVENT) { chan = sdsnewlen("__keyevent@",11); if (len == -1) len = ll2string(buf,sizeof(buf),dbid); chan = sdscatlen(chan, buf, len); diff --git a/src/object.cpp b/src/object.cpp index 169a6e08d..0ca578de1 100644 --- a/src/object.cpp +++ b/src/object.cpp @@ -44,13 +44,11 @@ robj *createObject(int type, void *ptr) { o->encoding = OBJ_ENCODING_RAW; o->m_ptr = ptr; o->refcount = 1; -#ifdef ENABLE_MVCC o->mvcc_tstamp = OBJ_MVCC_INVALID; -#endif /* Set the LRU to the current lruclock (minutes resolution), or * alternatively the LFU counter. 
*/ - if (server.maxmemory_policy & MAXMEMORY_FLAG_LFU) { + if (g_pserver->maxmemory_policy & MAXMEMORY_FLAG_LFU) { o->lru = (LFUGetTimeInMinutes()<<8) | LFU_INIT_VAL; } else { o->lru = LRU_CLOCK(); @@ -94,10 +92,9 @@ robj *createEmbeddedStringObject(const char *ptr, size_t len) { o->type = OBJ_STRING; o->encoding = OBJ_ENCODING_EMBSTR; o->refcount = 1; -#ifdef ENABLE_MVCC o->mvcc_tstamp = OBJ_MVCC_INVALID; -#endif - if (server.maxmemory_policy & MAXMEMORY_FLAG_LFU) { + + if (g_pserver->maxmemory_policy & MAXMEMORY_FLAG_LFU) { o->lru = (LFUGetTimeInMinutes()<<8) | LFU_INIT_VAL; } else { o->lru = LRU_CLOCK(); @@ -141,8 +138,8 @@ robj *createStringObject(const char *ptr, size_t len) { robj *createStringObjectFromLongLongWithOptions(long long value, int valueobj) { robj *o; - if (server.maxmemory == 0 || - !(server.maxmemory_policy & MAXMEMORY_FLAG_NO_SHARED_INTEGERS)) + if (g_pserver->maxmemory == 0 || + !(g_pserver->maxmemory_policy & MAXMEMORY_FLAG_NO_SHARED_INTEGERS)) { /* If the maxmemory policy permits, we can still return shared integers * even if valueobj is true. */ @@ -466,8 +463,8 @@ robj *tryObjectEncoding(robj *o) { * Note that we avoid using shared integers when maxmemory is used * because every object needs to have a private LRU field for the LRU * algorithm to work well. 
*/ - if ((server.maxmemory == 0 || - !(server.maxmemory_policy & MAXMEMORY_FLAG_NO_SHARED_INTEGERS)) && + if ((g_pserver->maxmemory == 0 || + !(g_pserver->maxmemory_policy & MAXMEMORY_FLAG_NO_SHARED_INTEGERS)) && value >= 0 && value < OBJ_SHARED_INTEGERS) { @@ -969,39 +966,39 @@ struct redisMemOverhead *getMemoryOverheadData(void) { struct redisMemOverhead *mh = (redisMemOverhead*)zcalloc(sizeof(*mh), MALLOC_LOCAL); mh->total_allocated = zmalloc_used; - mh->startup_allocated = server.initial_memory_usage; - mh->peak_allocated = server.stat_peak_memory; + mh->startup_allocated = g_pserver->initial_memory_usage; + mh->peak_allocated = g_pserver->stat_peak_memory; mh->total_frag = - (float)server.cron_malloc_stats.process_rss / server.cron_malloc_stats.zmalloc_used; + (float)g_pserver->cron_malloc_stats.process_rss / g_pserver->cron_malloc_stats.zmalloc_used; mh->total_frag_bytes = - server.cron_malloc_stats.process_rss - server.cron_malloc_stats.zmalloc_used; + g_pserver->cron_malloc_stats.process_rss - g_pserver->cron_malloc_stats.zmalloc_used; mh->allocator_frag = - (float)server.cron_malloc_stats.allocator_active / server.cron_malloc_stats.allocator_allocated; + (float)g_pserver->cron_malloc_stats.allocator_active / g_pserver->cron_malloc_stats.allocator_allocated; mh->allocator_frag_bytes = - server.cron_malloc_stats.allocator_active - server.cron_malloc_stats.allocator_allocated; + g_pserver->cron_malloc_stats.allocator_active - g_pserver->cron_malloc_stats.allocator_allocated; mh->allocator_rss = - (float)server.cron_malloc_stats.allocator_resident / server.cron_malloc_stats.allocator_active; + (float)g_pserver->cron_malloc_stats.allocator_resident / g_pserver->cron_malloc_stats.allocator_active; mh->allocator_rss_bytes = - server.cron_malloc_stats.allocator_resident - server.cron_malloc_stats.allocator_active; + g_pserver->cron_malloc_stats.allocator_resident - g_pserver->cron_malloc_stats.allocator_active; mh->rss_extra = - 
(float)server.cron_malloc_stats.process_rss / server.cron_malloc_stats.allocator_resident; + (float)g_pserver->cron_malloc_stats.process_rss / g_pserver->cron_malloc_stats.allocator_resident; mh->rss_extra_bytes = - server.cron_malloc_stats.process_rss - server.cron_malloc_stats.allocator_resident; + g_pserver->cron_malloc_stats.process_rss - g_pserver->cron_malloc_stats.allocator_resident; - mem_total += server.initial_memory_usage; + mem_total += g_pserver->initial_memory_usage; mem = 0; - if (server.repl_backlog) - mem += zmalloc_size(server.repl_backlog); + if (g_pserver->repl_backlog) + mem += zmalloc_size(g_pserver->repl_backlog); mh->repl_backlog = mem; mem_total += mem; mem = 0; - if (listLength(server.slaves)) { + if (listLength(g_pserver->slaves)) { listIter li; listNode *ln; - listRewind(server.slaves,&li); + listRewind(g_pserver->slaves,&li); while((ln = listNext(&li))) { client *c = (client*)listNodeValue(ln); if (c->flags & CLIENT_CLOSE_ASAP) @@ -1015,11 +1012,11 @@ struct redisMemOverhead *getMemoryOverheadData(void) { mem_total+=mem; mem = 0; - if (listLength(server.clients)) { + if (listLength(g_pserver->clients)) { listIter li; listNode *ln; - listRewind(server.clients,&li); + listRewind(g_pserver->clients,&li); while((ln = listNext(&li))) { client *c = (client*)listNodeValue(ln); if (c->flags & CLIENT_SLAVE && !(c->flags & CLIENT_MONITOR)) @@ -1033,27 +1030,27 @@ struct redisMemOverhead *getMemoryOverheadData(void) { mem_total+=mem; mem = 0; - if (server.aof_state != AOF_OFF) { - mem += sdsalloc(server.aof_buf); + if (g_pserver->aof_state != AOF_OFF) { + mem += sdsalloc(g_pserver->aof_buf); mem += aofRewriteBufferSize(); } mh->aof_buffer = mem; mem_total+=mem; - mem = server.lua_scripts_mem; - mem += dictSize(server.lua_scripts) * sizeof(dictEntry) + - dictSlots(server.lua_scripts) * sizeof(dictEntry*); - mem += dictSize(server.repl_scriptcache_dict) * sizeof(dictEntry) + - dictSlots(server.repl_scriptcache_dict) * sizeof(dictEntry*); - if 
(listLength(server.repl_scriptcache_fifo) > 0) { - mem += listLength(server.repl_scriptcache_fifo) * (sizeof(listNode) + - sdsZmallocSize((sds)listNodeValue(listFirst(server.repl_scriptcache_fifo)))); + mem = g_pserver->lua_scripts_mem; + mem += dictSize(g_pserver->lua_scripts) * sizeof(dictEntry) + + dictSlots(g_pserver->lua_scripts) * sizeof(dictEntry*); + mem += dictSize(g_pserver->repl_scriptcache_dict) * sizeof(dictEntry) + + dictSlots(g_pserver->repl_scriptcache_dict) * sizeof(dictEntry*); + if (listLength(g_pserver->repl_scriptcache_fifo) > 0) { + mem += listLength(g_pserver->repl_scriptcache_fifo) * (sizeof(listNode) + + sdsZmallocSize((sds)listNodeValue(listFirst(g_pserver->repl_scriptcache_fifo)))); } mh->lua_caches = mem; mem_total+=mem; - for (j = 0; j < server.dbnum; j++) { - redisDb *db = server.db+j; + for (j = 0; j < cserver.dbnum; j++) { + redisDb *db = g_pserver->db+j; long long keyscount = dictSize(db->pdict); if (keyscount==0) continue; @@ -1149,8 +1146,8 @@ sds getMemoryDoctorReport(void) { } /* Clients using more than 200k each average? */ - long numslaves = listLength(server.slaves); - long numclients = listLength(server.clients)-numslaves; + long numslaves = listLength(g_pserver->slaves); + long numclients = listLength(g_pserver->clients)-numslaves; if ((numclients > 0) && mh->clients_normal / numclients > (1024*200)) { big_client_buf = 1; num_reports++; @@ -1163,7 +1160,7 @@ sds getMemoryDoctorReport(void) { } /* Too many scripts are cached? */ - if (dictSize(server.lua_scripts) > 1000) { + if (dictSize(g_pserver->lua_scripts) > 1000) { many_scripts = 1; num_reports++; } @@ -1213,14 +1210,14 @@ sds getMemoryDoctorReport(void) { return s; } -/* Set the object LRU/LFU depending on server.maxmemory_policy. +/* Set the object LRU/LFU depending on g_pserver->maxmemory_policy. * The lfu_freq arg is only relevant if policy is MAXMEMORY_FLAG_LFU. * The lru_idle and lru_clock args are only relevant if policy * is MAXMEMORY_FLAG_LRU. 
* Either or both of them may be <0, in that case, nothing is set. */ void objectSetLRUOrLFU(robj *val, long long lfu_freq, long long lru_idle, long long lru_clock) { - if (server.maxmemory_policy & MAXMEMORY_FLAG_LFU) { + if (g_pserver->maxmemory_policy & MAXMEMORY_FLAG_LFU) { if (lfu_freq >= 0) { serverAssert(lfu_freq <= 255); val->lru = (LFUGetTimeInMinutes()<<8) | lfu_freq; @@ -1286,7 +1283,7 @@ NULL } else if (!strcasecmp(szFromObj(c->argv[1]),"idletime") && c->argc == 3) { if ((o = objectCommandLookupOrReply(c,c->argv[2],shared.null[c->resp])) == NULL) return; - if (server.maxmemory_policy & MAXMEMORY_FLAG_LFU) { + if (g_pserver->maxmemory_policy & MAXMEMORY_FLAG_LFU) { addReplyError(c,"An LFU maxmemory policy is selected, idle time not tracked. Please note that when switching between policies at runtime LRU and LFU data will take some time to adjust."); return; } @@ -1294,7 +1291,7 @@ NULL } else if (!strcasecmp(szFromObj(c->argv[1]),"freq") && c->argc == 3) { if ((o = objectCommandLookupOrReply(c,c->argv[2],shared.null[c->resp])) == NULL) return; - if (!(server.maxmemory_policy & MAXMEMORY_FLAG_LFU)) { + if (!(g_pserver->maxmemory_policy & MAXMEMORY_FLAG_LFU)) { addReplyError(c,"An LFU maxmemory policy is not selected, access frequency not tracked. Please note that when switching between policies at runtime LRU and LFU data will take some time to adjust."); return; } @@ -1318,7 +1315,7 @@ void memoryCommand(client *c) { "DOCTOR - Return memory problems reports.", "MALLOC-STATS -- Return internal statistics report from the memory allocator.", "PURGE -- Attempt to purge dirty pages for reclamation by the allocator.", -"STATS -- Return information about the memory usage of the server.", +"STATS -- Return information about the memory usage of the g_pserver->", "USAGE [SAMPLES ] -- Return memory in bytes used by and its value. 
Nested values are sampled up to times (default: 5).", NULL }; @@ -1412,13 +1409,13 @@ NULL addReplyDouble(c,mh->peak_perc); addReplyBulkCString(c,"allocator.allocated"); - addReplyLongLong(c,server.cron_malloc_stats.allocator_allocated); + addReplyLongLong(c,g_pserver->cron_malloc_stats.allocator_allocated); addReplyBulkCString(c,"allocator.active"); - addReplyLongLong(c,server.cron_malloc_stats.allocator_active); + addReplyLongLong(c,g_pserver->cron_malloc_stats.allocator_active); addReplyBulkCString(c,"allocator.resident"); - addReplyLongLong(c,server.cron_malloc_stats.allocator_resident); + addReplyLongLong(c,g_pserver->cron_malloc_stats.allocator_resident); addReplyBulkCString(c,"allocator-fragmentation.ratio"); addReplyDouble(c,mh->allocator_frag); diff --git a/src/pubsub.cpp b/src/pubsub.cpp index 900c931d4..61a6dc373 100644 --- a/src/pubsub.cpp +++ b/src/pubsub.cpp @@ -152,10 +152,10 @@ int pubsubSubscribeChannel(client *c, robj *channel) { retval = 1; incrRefCount(channel); /* Add the client to the channel -> list of clients hash table */ - de = dictFind(server.pubsub_channels,channel); + de = dictFind(g_pserver->pubsub_channels,channel); if (de == NULL) { clients = listCreate(); - dictAdd(server.pubsub_channels,channel,clients); + dictAdd(g_pserver->pubsub_channels,channel,clients); incrRefCount(channel); } else { clients = (list*)dictGetVal(de); @@ -181,7 +181,7 @@ int pubsubUnsubscribeChannel(client *c, robj *channel, int notify) { if (dictDelete(c->pubsub_channels,channel) == DICT_OK) { retval = 1; /* Remove the client from the channel -> clients list hash table */ - de = dictFind(server.pubsub_channels,channel); + de = dictFind(g_pserver->pubsub_channels,channel); serverAssertWithInfo(c,NULL,de != NULL); clients = (list*)dictGetVal(de); ln = listSearchKey(clients,c); @@ -191,7 +191,7 @@ int pubsubUnsubscribeChannel(client *c, robj *channel, int notify) { /* Free the list and associated hash entry at all if this was * the latest client, so that it will 
be possible to abuse * Redis PUBSUB creating millions of channels. */ - dictDelete(server.pubsub_channels,channel); + dictDelete(g_pserver->pubsub_channels,channel); } } /* Notify the client */ @@ -212,7 +212,7 @@ int pubsubSubscribePattern(client *c, robj *pattern) { pat = (pubsubPattern*)zmalloc(sizeof(*pat), MALLOC_LOCAL); pat->pattern = getDecodedObject(pattern); pat->pclient = c; - listAddNodeTail(server.pubsub_patterns,pat); + listAddNodeTail(g_pserver->pubsub_patterns,pat); } /* Notify the client */ addReplyPubsubPatSubscribed(c,pattern); @@ -232,8 +232,8 @@ int pubsubUnsubscribePattern(client *c, robj *pattern, int notify) { listDelNode(c->pubsub_patterns,ln); pat.pclient = c; pat.pattern = pattern; - ln = listSearchKey(server.pubsub_patterns,&pat); - listDelNode(server.pubsub_patterns,ln); + ln = listSearchKey(g_pserver->pubsub_patterns,&pat); + listDelNode(g_pserver->pubsub_patterns,ln); } /* Notify the client */ if (notify) addReplyPubsubPatUnsubscribed(c,pattern); @@ -284,7 +284,7 @@ int pubsubPublishMessage(robj *channel, robj *message) { listIter li; /* Send to clients listening for that channel */ - de = dictFind(server.pubsub_channels,channel); + de = dictFind(g_pserver->pubsub_channels,channel); if (de) { list *list = reinterpret_cast<::list*>(dictGetVal(de)); listNode *ln; @@ -300,8 +300,8 @@ int pubsubPublishMessage(robj *channel, robj *message) { } } /* Send to clients listening to matching channels */ - if (listLength(server.pubsub_patterns)) { - listRewind(server.pubsub_patterns,&li); + if (listLength(g_pserver->pubsub_patterns)) { + listRewind(g_pserver->pubsub_patterns,&li); channel = getDecodedObject(channel); while ((ln = listNext(&li)) != NULL) { pubsubPattern *pat = (pubsubPattern*)ln->value; @@ -371,7 +371,7 @@ void punsubscribeCommand(client *c) { void publishCommand(client *c) { int receivers = pubsubPublishMessage(c->argv[1],c->argv[2]); - if (server.cluster_enabled) + if (g_pserver->cluster_enabled) 
clusterPropagatePublish(c->argv[1],c->argv[2]); else forceCommandPropagation(c,PROPAGATE_REPL); @@ -393,7 +393,7 @@ NULL { /* PUBSUB CHANNELS [] */ sds pat = (c->argc == 2) ? NULL : szFromObj(c->argv[2]); - dictIterator *di = dictGetIterator(server.pubsub_channels); + dictIterator *di = dictGetIterator(g_pserver->pubsub_channels); dictEntry *de; long mblen = 0; void *replylen; @@ -418,14 +418,14 @@ NULL addReplyArrayLen(c,(c->argc-2)*2); for (j = 2; j < c->argc; j++) { - list *l = (list*)dictFetchValue(server.pubsub_channels,c->argv[j]); + list *l = (list*)dictFetchValue(g_pserver->pubsub_channels,c->argv[j]); addReplyBulk(c,c->argv[j]); addReplyLongLong(c,l ? listLength(l) : 0); } } else if (!strcasecmp(szFromObj(c->argv[1]),"numpat") && c->argc == 2) { /* PUBSUB NUMPAT */ - addReplyLongLong(c,listLength(server.pubsub_patterns)); + addReplyLongLong(c,listLength(g_pserver->pubsub_patterns)); } else { addReplySubcommandSyntaxError(c); } diff --git a/src/rdb.cpp b/src/rdb.cpp index 12bbc8333..a5f36b95a 100644 --- a/src/rdb.cpp +++ b/src/rdb.cpp @@ -62,7 +62,7 @@ void rdbCheckThenExit(int linenum, const char *reason, ...) { if (!rdbCheckMode) { serverLog(LL_WARNING, "%s", msg); - const char * argv[2] = {"",server.rdb_filename}; + const char * argv[2] = {"",g_pserver->rdb_filename}; redis_check_rdb_main(2,argv,NULL); } else { rdbCheckError("%s",msg); @@ -418,7 +418,7 @@ ssize_t rdbSaveRawString(rio *rdb, const unsigned char *s, size_t len) { /* Try LZF compression - under 20 bytes it's unable to compress even * aaaaaaaaaaaaaaaaaa so skip it */ - if (server.rdb_compression && len > 20) { + if (g_pserver->rdb_compression && len > 20) { n = rdbSaveLzfStringObject(rdb,(const unsigned char*)s,len); if (n == -1) return -1; if (n > 0) return n; @@ -992,6 +992,31 @@ ssize_t rdbSaveObject(rio *rdb, robj_roptr o, robj *key) { return nwritten; } +/* Save an AUX field. 
*/ +ssize_t rdbSaveAuxField(rio *rdb, const void *key, size_t keylen, const void *val, size_t vallen) { + ssize_t ret, len = 0; + if ((ret = rdbSaveType(rdb,RDB_OPCODE_AUX)) == -1) return -1; + len += ret; + if ((ret = rdbSaveRawString(rdb,(const unsigned char*)key,keylen)) == -1) return -1; + len += ret; + if ((ret = rdbSaveRawString(rdb,(const unsigned char*)val,vallen)) == -1) return -1; + len += ret; + return len; +} + +/* Wrapper for rdbSaveAuxField() used when key/val length can be obtained + * with strlen(). */ +ssize_t rdbSaveAuxFieldStrStr(rio *rdb, const char *key, const char *val) { + return rdbSaveAuxField(rdb,key,strlen(key),val,strlen(val)); +} + +/* Wrapper for strlen(key) + integer type (up to long long range). */ +ssize_t rdbSaveAuxFieldStrInt(rio *rdb, const char *key, long long val) { + char buf[LONG_STR_SIZE]; + int vlen = ll2string(buf,sizeof(buf),val); + return rdbSaveAuxField(rdb,key,strlen(key),buf,vlen); +} + /* Return the length the object will have on disk if saved with * the rdbSaveObject() function. Currently we use a trick to get * this length with very little changes to the code. In the future @@ -1007,8 +1032,8 @@ size_t rdbSavedObjectLen(robj *o) { * On success if the key was actually saved 1 is returned, otherwise 0 * is returned (the key was already expired). 
*/ int rdbSaveKeyValuePair(rio *rdb, robj *key, robj *val, long long expiretime) { - int savelru = server.maxmemory_policy & MAXMEMORY_FLAG_LRU; - int savelfu = server.maxmemory_policy & MAXMEMORY_FLAG_LFU; + int savelru = g_pserver->maxmemory_policy & MAXMEMORY_FLAG_LRU; + int savelfu = g_pserver->maxmemory_policy & MAXMEMORY_FLAG_LFU; /* Save the expire time */ if (expiretime != -1) { @@ -1036,6 +1061,10 @@ int rdbSaveKeyValuePair(rio *rdb, robj *key, robj *val, long long expiretime) { if (rdbWriteRaw(rdb,buf,1) == -1) return -1; } + char szMvcc[32]; + snprintf(szMvcc, 32, "%" PRIu64, val->mvcc_tstamp); + if (rdbSaveAuxFieldStrStr(rdb,"mvcc-tstamp", szMvcc) == -1) return -1; + /* Save type, key, value */ if (rdbSaveObjectType(rdb,val) == -1) return -1; if (rdbSaveStringObject(rdb,key) == -1) return -1; @@ -1043,38 +1072,13 @@ int rdbSaveKeyValuePair(rio *rdb, robj *key, robj *val, long long expiretime) { return 1; } -/* Save an AUX field. */ -ssize_t rdbSaveAuxField(rio *rdb, const void *key, size_t keylen, const void *val, size_t vallen) { - ssize_t ret, len = 0; - if ((ret = rdbSaveType(rdb,RDB_OPCODE_AUX)) == -1) return -1; - len += ret; - if ((ret = rdbSaveRawString(rdb,(const unsigned char*)key,keylen)) == -1) return -1; - len += ret; - if ((ret = rdbSaveRawString(rdb,(const unsigned char*)val,vallen)) == -1) return -1; - len += ret; - return len; -} - -/* Wrapper for rdbSaveAuxField() used when key/val length can be obtained - * with strlen(). */ -ssize_t rdbSaveAuxFieldStrStr(rio *rdb, const char *key, const char *val) { - return rdbSaveAuxField(rdb,key,strlen(key),val,strlen(val)); -} - -/* Wrapper for strlen(key) + integer type (up to long long range). */ -ssize_t rdbSaveAuxFieldStrInt(rio *rdb, const char *key, long long val) { - char buf[LONG_STR_SIZE]; - int vlen = ll2string(buf,sizeof(buf),val); - return rdbSaveAuxField(rdb,key,strlen(key),buf,vlen); -} - /* Save a few default AUX fields with information about the RDB generated. 
*/ int rdbSaveInfoAuxFields(rio *rdb, int flags, rdbSaveInfo *rsi) { int redis_bits = (sizeof(void*) == 8) ? 64 : 32; int aof_preamble = (flags & RDB_SAVE_AOF_PREAMBLE) != 0; /* Add a few fields about the state when the RDB was created. */ - if (rdbSaveAuxFieldStrStr(rdb,"redis-ver",REDIS_VERSION) == -1) return -1; + if (rdbSaveAuxFieldStrStr(rdb,"redis-ver",KEYDB_REAL_VERSION) == -1) return -1; if (rdbSaveAuxFieldStrInt(rdb,"redis-bits",redis_bits) == -1) return -1; if (rdbSaveAuxFieldStrInt(rdb,"ctime",time(NULL)) == -1) return -1; if (rdbSaveAuxFieldStrInt(rdb,"used-mem",zmalloc_used_memory()) == -1) return -1; @@ -1083,9 +1087,9 @@ int rdbSaveInfoAuxFields(rio *rdb, int flags, rdbSaveInfo *rsi) { if (rsi) { if (rdbSaveAuxFieldStrInt(rdb,"repl-stream-db",rsi->repl_stream_db) == -1) return -1; - if (rdbSaveAuxFieldStrStr(rdb,"repl-id",server.replid) + if (rdbSaveAuxFieldStrStr(rdb,"repl-id",g_pserver->replid) == -1) return -1; - if (rdbSaveAuxFieldStrInt(rdb,"repl-offset",server.master_repl_offset) + if (rdbSaveAuxFieldStrInt(rdb,"repl-offset",g_pserver->master_repl_offset) == -1) return -1; } if (rdbSaveAuxFieldStrInt(rdb,"aof-preamble",aof_preamble) == -1) return -1; @@ -1108,14 +1112,14 @@ int rdbSaveRio(rio *rdb, int *error, int flags, rdbSaveInfo *rsi) { uint64_t cksum; size_t processed = 0; - if (server.rdb_checksum) + if (g_pserver->rdb_checksum) rdb->update_cksum = rioGenericUpdateChecksum; snprintf(magic,sizeof(magic),"REDIS%04d",RDB_VERSION); if (rdbWriteRaw(rdb,magic,9) == -1) goto werr; if (rdbSaveInfoAuxFields(rdb,flags,rsi) == -1) goto werr; - for (j = 0; j < server.dbnum; j++) { - redisDb *db = server.db+j; + for (j = 0; j < cserver.dbnum; j++) { + redisDb *db = g_pserver->db+j; dict *d = db->pdict; if (dictSize(d) == 0) continue; di = dictGetSafeIterator(d); @@ -1163,8 +1167,8 @@ int rdbSaveRio(rio *rdb, int *error, int flags, rdbSaveInfo *rsi) { * the script cache as well: on successful PSYNC after a restart, we need * to be able to process any 
EVALSHA inside the replication backlog the * master will send us. */ - if (rsi && dictSize(server.lua_scripts)) { - di = dictGetIterator(server.lua_scripts); + if (rsi && dictSize(g_pserver->lua_scripts)) { + di = dictGetIterator(g_pserver->lua_scripts); while((de = dictNext(di)) != NULL) { robj *body = (robj*)dictGetVal(de); if (rdbSaveAuxField(rdb,"lua",3,szFromObj(body),sdslen(szFromObj(body))) == -1) @@ -1223,7 +1227,7 @@ int rdbSaveFd(int fd, rdbSaveInfo *rsi) rioInitWithFile(&rdb,fd); - if (server.rdb_save_incremental_fsync) + if (g_pserver->rdb_save_incremental_fsync) rioSetAutoSync(&rdb,REDIS_AUTOSYNC_BYTES); if (rdbSaveRio(&rdb,&error,RDB_SAVE_NONE,rsi) == C_ERR) { @@ -1236,11 +1240,11 @@ int rdbSaveFd(int fd, rdbSaveInfo *rsi) int rdbSave(rdbSaveInfo *rsi) { int err = C_OK; - if (server.rdb_filename != NULL) - err = rdbSaveFile(server.rdb_filename, rsi); + if (g_pserver->rdb_filename != NULL) + err = rdbSaveFile(g_pserver->rdb_filename, rsi); - if (err == C_OK && server.rdb_s3bucketpath != NULL) - err = rdbSaveS3(server.rdb_s3bucketpath, rsi); + if (err == C_OK && g_pserver->rdb_s3bucketpath != NULL) + err = rdbSaveS3(g_pserver->rdb_s3bucketpath, rsi); return err; } @@ -1288,9 +1292,9 @@ int rdbSaveFile(char *filename, rdbSaveInfo *rsi) { } serverLog(LL_NOTICE,"DB saved on disk"); - server.dirty = 0; - server.lastsave = time(NULL); - server.lastbgsave_status = C_OK; + g_pserver->dirty = 0; + g_pserver->lastsave = time(NULL); + g_pserver->lastbgsave_status = C_OK; return C_OK; werr: @@ -1304,10 +1308,10 @@ int rdbSaveBackground(rdbSaveInfo *rsi) { pid_t childpid; long long start; - if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) return C_ERR; + if (g_pserver->aof_child_pid != -1 || g_pserver->rdb_child_pid != -1) return C_ERR; - server.dirty_before_bgsave = server.dirty; - server.lastbgsave_try = time(NULL); + g_pserver->dirty_before_bgsave = g_pserver->dirty; + g_pserver->lastbgsave_try = time(NULL); openChildInfoPipe(); start = ustime(); @@ 
-1327,26 +1331,26 @@ int rdbSaveBackground(rdbSaveInfo *rsi) { private_dirty/(1024*1024)); } - server.child_info_data.cow_size = private_dirty; + g_pserver->child_info_data.cow_size = private_dirty; sendChildInfo(CHILD_INFO_TYPE_RDB); } exitFromChild((retval == C_OK) ? 0 : 1); } else { /* Parent */ - server.stat_fork_time = ustime()-start; - server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_time / (1024*1024*1024); /* GB per second. */ - latencyAddSampleIfNeeded("fork",server.stat_fork_time/1000); + g_pserver->stat_fork_time = ustime()-start; + g_pserver->stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / g_pserver->stat_fork_time / (1024*1024*1024); /* GB per second. */ + latencyAddSampleIfNeeded("fork",g_pserver->stat_fork_time/1000); if (childpid == -1) { closeChildInfoPipe(); - server.lastbgsave_status = C_ERR; + g_pserver->lastbgsave_status = C_ERR; serverLog(LL_WARNING,"Can't save in background: fork: %s", strerror(errno)); return C_ERR; } serverLog(LL_NOTICE,"Background saving started by pid %d",childpid); - server.rdb_save_time_start = time(NULL); - server.rdb_child_pid = childpid; - server.rdb_child_type = RDB_CHILD_TYPE_DISK; + g_pserver->rdb_save_time_start = time(NULL); + g_pserver->rdb_child_pid = childpid; + g_pserver->rdb_child_type = RDB_CHILD_TYPE_DISK; updateDictResizePolicy(); return C_OK; } @@ -1401,7 +1405,7 @@ robj *rdbLoadCheckModuleValue(rio *rdb, char *modulename) { /* Load a Redis object of the specified type from the specified file. * On success a newly allocated object is returned, otherwise NULL. 
*/ -robj *rdbLoadObject(int rdbtype, rio *rdb, robj *key) { +robj *rdbLoadObject(int rdbtype, rio *rdb, robj *key, uint64_t mvcc_tstamp) { robj *o = NULL, *ele, *dec; uint64_t len; unsigned int i; @@ -1415,8 +1419,8 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, robj *key) { if ((len = rdbLoadLen(rdb,NULL)) == RDB_LENERR) return NULL; o = createQuicklistObject(); - quicklistSetOptions((quicklist*)ptrFromObj(o), server.list_max_ziplist_size, - server.list_compress_depth); + quicklistSetOptions((quicklist*)ptrFromObj(o), g_pserver->list_max_ziplist_size, + g_pserver->list_compress_depth); /* Load every single element of the list */ while(len--) { @@ -1432,7 +1436,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, robj *key) { if ((len = rdbLoadLen(rdb,NULL)) == RDB_LENERR) return NULL; /* Use a regular set when there are too many entries. */ - if (len > server.set_max_intset_entries) { + if (len > g_pserver->set_max_intset_entries) { o = createSetObject(); /* It's faster to expand the dict to the right size asap in order * to avoid rehashing */ @@ -1504,8 +1508,8 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, robj *key) { } /* Convert *after* loading, since sorted sets are not stored ordered. */ - if (zsetLength(o) <= server.zset_max_ziplist_entries && - maxelelen <= server.zset_max_ziplist_value) + if (zsetLength(o) <= g_pserver->zset_max_ziplist_entries && + maxelelen <= g_pserver->zset_max_ziplist_value) zsetConvert(o,OBJ_ENCODING_ZIPLIST); } else if (rdbtype == RDB_TYPE_HASH) { uint64_t len; @@ -1518,7 +1522,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, robj *key) { o = createHashObject(); /* Too many entries? Use a hash table. 
*/ - if (len > server.hash_max_ziplist_entries) + if (len > g_pserver->hash_max_ziplist_entries) hashTypeConvert(o, OBJ_ENCODING_HT); /* Load every field and value into the ziplist */ @@ -1537,8 +1541,8 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, robj *key) { sdslen(value), ZIPLIST_TAIL); /* Convert to hash table if size threshold is exceeded */ - if (sdslen(field) > server.hash_max_ziplist_value || - sdslen(value) > server.hash_max_ziplist_value) + if (sdslen(field) > g_pserver->hash_max_ziplist_value || + sdslen(value) > g_pserver->hash_max_ziplist_value) { sdsfree(field); sdsfree(value); @@ -1573,8 +1577,8 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, robj *key) { } else if (rdbtype == RDB_TYPE_LIST_QUICKLIST) { if ((len = rdbLoadLen(rdb,NULL)) == RDB_LENERR) return NULL; o = createQuicklistObject(); - quicklistSetOptions((quicklist*)ptrFromObj(o), server.list_max_ziplist_size, - server.list_compress_depth); + quicklistSetOptions((quicklist*)ptrFromObj(o), g_pserver->list_max_ziplist_size, + g_pserver->list_compress_depth); while (len--) { unsigned char *zl = (unsigned char*) @@ -1622,8 +1626,8 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, robj *key) { o->type = OBJ_HASH; o->encoding = OBJ_ENCODING_ZIPLIST; - if (hashTypeLength(o) > server.hash_max_ziplist_entries || - maxlen > server.hash_max_ziplist_value) + if (hashTypeLength(o) > g_pserver->hash_max_ziplist_entries || + maxlen > g_pserver->hash_max_ziplist_value) { hashTypeConvert(o, OBJ_ENCODING_HT); } @@ -1637,19 +1641,19 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, robj *key) { case RDB_TYPE_SET_INTSET: o->type = OBJ_SET; o->encoding = OBJ_ENCODING_INTSET; - if (intsetLen((intset*)ptrFromObj(o)) > server.set_max_intset_entries) + if (intsetLen((intset*)ptrFromObj(o)) > g_pserver->set_max_intset_entries) setTypeConvert(o,OBJ_ENCODING_HT); break; case RDB_TYPE_ZSET_ZIPLIST: o->type = OBJ_ZSET; o->encoding = OBJ_ENCODING_ZIPLIST; - if (zsetLength(o) > server.zset_max_ziplist_entries) + if (zsetLength(o) > 
g_pserver->zset_max_ziplist_entries) zsetConvert(o,OBJ_ENCODING_SKIPLIST); break; case RDB_TYPE_HASH_ZIPLIST: o->type = OBJ_HASH; o->encoding = OBJ_ENCODING_ZIPLIST; - if (hashTypeLength(o) > server.hash_max_ziplist_entries) + if (hashTypeLength(o) > g_pserver->hash_max_ziplist_entries) hashTypeConvert(o, OBJ_ENCODING_HT); break; default: @@ -1816,6 +1820,8 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, robj *key) { } else { rdbExitReportCorruptRDB("Unknown RDB encoding type %d",rdbtype); } + + o->mvcc_tstamp = mvcc_tstamp; return o; } @@ -1825,35 +1831,35 @@ void startLoading(FILE *fp) { struct stat sb; /* Load the DB */ - server.loading = 1; - server.loading_start_time = time(NULL); - server.loading_loaded_bytes = 0; + g_pserver->loading = 1; + g_pserver->loading_start_time = time(NULL); + g_pserver->loading_loaded_bytes = 0; if (fstat(fileno(fp), &sb) == -1) { - server.loading_total_bytes = 0; + g_pserver->loading_total_bytes = 0; } else { - server.loading_total_bytes = sb.st_size; + g_pserver->loading_total_bytes = sb.st_size; } } /* Refresh the loading progress info */ void loadingProgress(off_t pos) { - server.loading_loaded_bytes = pos; - if (server.stat_peak_memory < zmalloc_used_memory()) - server.stat_peak_memory = zmalloc_used_memory(); + g_pserver->loading_loaded_bytes = pos; + if (g_pserver->stat_peak_memory < zmalloc_used_memory()) + g_pserver->stat_peak_memory = zmalloc_used_memory(); } /* Loading finished */ void stopLoading(void) { - server.loading = 0; + g_pserver->loading = 0; } /* Track loading progress in order to serve client's from time to time and if needed calculate rdb checksum */ void rdbLoadProgressCallback(rio *r, const void *buf, size_t len) { - if (server.rdb_checksum) + if (g_pserver->rdb_checksum) rioGenericUpdateChecksum(r, buf, len); - if (server.loading_process_events_interval_bytes && - (r->processed_bytes + len)/server.loading_process_events_interval_bytes > r->processed_bytes/server.loading_process_events_interval_bytes) + if 
(g_pserver->loading_process_events_interval_bytes && + (r->processed_bytes + len)/g_pserver->loading_process_events_interval_bytes > r->processed_bytes/g_pserver->loading_process_events_interval_bytes) { /* The DB can take some non trivial amount of time to load. Update * our cached time since it is used to create and update the last @@ -1861,7 +1867,7 @@ void rdbLoadProgressCallback(rio *r, const void *buf, size_t len) { updateCachedTime(); listIter li; listNode *ln; - listRewind(server.masters, &li); + listRewind(g_pserver->masters, &li); while ((ln = listNext(&li))) { struct redisMaster *mi = (struct redisMaster*)listNodeValue(ln); @@ -1869,7 +1875,7 @@ void rdbLoadProgressCallback(rio *r, const void *buf, size_t len) { replicationSendNewlineToMaster(mi); } loadingProgress(r->processed_bytes); - processEventsWhileBlocked(serverTL - server.rgthreadvar); + processEventsWhileBlocked(serverTL - g_pserver->rgthreadvar); } } @@ -1878,14 +1884,15 @@ void rdbLoadProgressCallback(rio *r, const void *buf, size_t len) { int rdbLoadRio(rio *rdb, rdbSaveInfo *rsi, int loading_aof) { uint64_t dbid; int type, rdbver; - redisDb *db = server.db+0; + redisDb *db = g_pserver->db+0; char buf[1024]; /* Key-specific attributes, set by opcodes before the key type. */ long long lru_idle = -1, lfu_freq = -1, expiretime = -1, now = mstime(); - long long lru_clock = LRU_CLOCK(); + long long lru_clock = 0; + uint64_t mvcc_tstamp = OBJ_MVCC_INVALID; rdb->update_cksum = rdbLoadProgressCallback; - rdb->max_processing_chunk = server.loading_process_events_interval_bytes; + rdb->max_processing_chunk = g_pserver->loading_process_events_interval_bytes; if (rioRead(rdb,buf,9) == 0) goto eoferr; buf[9] = '\0'; if (memcmp(buf,"REDIS",5) != 0) { @@ -1940,14 +1947,14 @@ int rdbLoadRio(rio *rdb, rdbSaveInfo *rsi, int loading_aof) { } else if (type == RDB_OPCODE_SELECTDB) { /* SELECTDB: Select the specified database. 
*/ if ((dbid = rdbLoadLen(rdb,NULL)) == RDB_LENERR) goto eoferr; - if (dbid >= (unsigned)server.dbnum) { + if (dbid >= (unsigned)cserver.dbnum) { serverLog(LL_WARNING, "FATAL: Data file was created with a Redis " "server configured to handle more than %d " - "databases. Exiting\n", server.dbnum); + "databases. Exiting\n", cserver.dbnum); exit(1); } - db = server.db+dbid; + db = g_pserver->db+dbid; continue; /* Read next opcode. */ } else if (type == RDB_OPCODE_RESIZEDB) { /* RESIZEDB: Hint about the size of the keys in the currently @@ -1988,7 +1995,7 @@ int rdbLoadRio(rio *rdb, rdbSaveInfo *rsi, int loading_aof) { if (rsi) rsi->repl_offset = strtoll(szFromObj(auxval),NULL,10); } else if (!strcasecmp(szFromObj(auxkey),"lua")) { /* Load the script back in memory. */ - if (luaCreateFunction(NULL,server.lua,auxval) == NULL) { + if (luaCreateFunction(NULL,g_pserver->lua,auxval) == NULL) { rdbExitReportCorruptRDB( "Can't load Lua script from RDB file! " "BODY: %s", ptrFromObj(auxval)); @@ -2010,6 +2017,9 @@ int rdbLoadRio(rio *rdb, rdbSaveInfo *rsi, int loading_aof) { if (haspreamble) serverLog(LL_NOTICE,"RDB has an AOF tail"); } else if (!strcasecmp(szFromObj(auxkey),"redis-bits")) { /* Just ignored. */ + } else if (!strcasecmp(szFromObj(auxkey),"mvcc-tstamp")) { + static_assert(sizeof(unsigned long long) == sizeof(uint64_t), "Ensure long long is 64-bits"); + mvcc_tstamp = strtoull(szFromObj(auxval), nullptr, 10); } else { /* We ignore fields we don't understand, as by AUX field * contract. */ @@ -2053,13 +2063,13 @@ int rdbLoadRio(rio *rdb, rdbSaveInfo *rsi, int loading_aof) { /* Read key */ if ((key = rdbLoadStringObject(rdb)) == NULL) goto eoferr; /* Read value */ - if ((val = rdbLoadObject(type,rdb,key)) == NULL) goto eoferr; + if ((val = rdbLoadObject(type,rdb,key, mvcc_tstamp)) == NULL) goto eoferr; /* Check if the key already expired. This function is used when loading * an RDB file from disk, either at startup, or when an RDB was * received from the master. 
In the latter case, the master is * responsible for key expiry. If we would expire keys here, the * snapshot taken by the master may not be reflected on the slave. */ - if (listLength(server.masters) == 0 && !loading_aof && expiretime != -1 && expiretime < now) { + if (listLength(g_pserver->masters) == 0 && !loading_aof && expiretime != -1 && expiretime < now) { decrRefCount(key); decrRefCount(val); } else { @@ -2096,7 +2106,7 @@ int rdbLoadRio(rio *rdb, rdbSaveInfo *rsi, int loading_aof) { uint64_t cksum, expected = rdb->cksum; if (rioRead(rdb,&cksum,8) == 0) goto eoferr; - if (server.rdb_checksum) { + if (g_pserver->rdb_checksum) { memrev64ifbe(&cksum); if (cksum == 0) { serverLog(LL_WARNING,"RDB file was saved with checksum disabled: no check performed."); @@ -2118,11 +2128,11 @@ int rdbLoadFile(char *filename, rdbSaveInfo *rsi); int rdbLoad(rdbSaveInfo *rsi) { int err = C_ERR; - if (server.rdb_filename != NULL) - err = rdbLoadFile(server.rdb_filename, rsi); + if (g_pserver->rdb_filename != NULL) + err = rdbLoadFile(g_pserver->rdb_filename, rsi); - if ((err == C_ERR) && server.rdb_s3bucketpath != NULL) - err = rdbLoadS3(server.rdb_s3bucketpath, rsi); + if ((err == C_ERR) && g_pserver->rdb_s3bucketpath != NULL) + err = rdbLoadS3(g_pserver->rdb_s3bucketpath, rsi); return err; } @@ -2154,30 +2164,30 @@ void backgroundSaveDoneHandlerDisk(int exitcode, int bysignal) { if (!bysignal && exitcode == 0) { serverLog(LL_NOTICE, "Background saving terminated with success"); - server.dirty = server.dirty - server.dirty_before_bgsave; - server.lastsave = time(NULL); - server.lastbgsave_status = C_OK; + g_pserver->dirty = g_pserver->dirty - g_pserver->dirty_before_bgsave; + g_pserver->lastsave = time(NULL); + g_pserver->lastbgsave_status = C_OK; } else if (!bysignal && exitcode != 0) { serverLog(LL_WARNING, "Background saving error"); - server.lastbgsave_status = C_ERR; + g_pserver->lastbgsave_status = C_ERR; } else { mstime_t latency; serverLog(LL_WARNING, "Background saving 
terminated by signal %d", bysignal); latencyStartMonitor(latency); - rdbRemoveTempFile(server.rdb_child_pid); + rdbRemoveTempFile(g_pserver->rdb_child_pid); latencyEndMonitor(latency); latencyAddSampleIfNeeded("rdb-unlink-temp-file",latency); /* SIGUSR1 is whitelisted, so we have a way to kill a child without * tirggering an error condition. */ if (bysignal != SIGUSR1) - server.lastbgsave_status = C_ERR; + g_pserver->lastbgsave_status = C_ERR; } - server.rdb_child_pid = -1; - server.rdb_child_type = RDB_CHILD_TYPE_NONE; - server.rdb_save_time_last = time(NULL)-server.rdb_save_time_start; - server.rdb_save_time_start = -1; + g_pserver->rdb_child_pid = -1; + g_pserver->rdb_child_type = RDB_CHILD_TYPE_NONE; + g_pserver->rdb_save_time_last = time(NULL)-g_pserver->rdb_save_time_start; + g_pserver->rdb_save_time_start = -1; /* Possibly there are slaves waiting for a BGSAVE in order to be served * (the first stage of SYNC is a bulk transfer of dump.rdb) */ updateSlavesWaitingBgsave((!bysignal && exitcode == 0) ? C_OK : C_ERR, RDB_CHILD_TYPE_DISK); @@ -2199,9 +2209,9 @@ void backgroundSaveDoneHandlerSocket(int exitcode, int bysignal) { serverLog(LL_WARNING, "Background transfer terminated by signal %d", bysignal); } - server.rdb_child_pid = -1; - server.rdb_child_type = RDB_CHILD_TYPE_NONE; - server.rdb_save_time_start = -1; + g_pserver->rdb_child_pid = -1; + g_pserver->rdb_child_type = RDB_CHILD_TYPE_NONE; + g_pserver->rdb_save_time_start = -1; /* If the child returns an OK exit code, read the set of slave client * IDs and the associated status code. 
We'll terminate all the slaves @@ -2215,7 +2225,7 @@ void backgroundSaveDoneHandlerSocket(int exitcode, int bysignal) { if (!bysignal && exitcode == 0) { int readlen = sizeof(uint64_t); - if (read(server.rdb_pipe_read_result_from_child, ok_slaves, readlen) == + if (read(g_pserver->rdb_pipe_read_result_from_child, ok_slaves, readlen) == readlen) { readlen = ok_slaves[0]*sizeof(uint64_t)*2; @@ -2224,7 +2234,7 @@ void backgroundSaveDoneHandlerSocket(int exitcode, int bysignal) { * uint64_t element in the array. */ ok_slaves = (uint64_t*)zrealloc(ok_slaves,sizeof(uint64_t)+readlen, MALLOC_LOCAL); if (readlen && - read(server.rdb_pipe_read_result_from_child, ok_slaves+1, + read(g_pserver->rdb_pipe_read_result_from_child, ok_slaves+1, readlen) != readlen) { ok_slaves[0] = 0; @@ -2232,15 +2242,15 @@ void backgroundSaveDoneHandlerSocket(int exitcode, int bysignal) { } } - close(server.rdb_pipe_read_result_from_child); - close(server.rdb_pipe_write_result_to_parent); + close(g_pserver->rdb_pipe_read_result_from_child); + close(g_pserver->rdb_pipe_write_result_to_parent); /* We can continue the replication process with all the slaves that * correctly received the full payload. Others are terminated. */ listNode *ln; listIter li; - listRewind(server.slaves,&li); + listRewind(g_pserver->slaves,&li); while((ln = listNext(&li))) { client *slave = (client*)ln->value; @@ -2281,7 +2291,7 @@ void backgroundSaveDoneHandlerSocket(int exitcode, int bysignal) { /* When a background RDB saving/transfer terminates, call the right handler. */ void backgroundSaveDoneHandler(int exitcode, int bysignal) { - switch(server.rdb_child_type) { + switch(g_pserver->rdb_child_type) { case RDB_CHILD_TYPE_DISK: backgroundSaveDoneHandlerDisk(exitcode,bysignal); break; @@ -2298,8 +2308,8 @@ void backgroundSaveDoneHandler(int exitcode, int bysignal) { * the child did not exit for an error, but because we wanted), and performs * the cleanup needed. 
*/ void killRDBChild(void) { - kill(server.rdb_child_pid,SIGUSR1); - rdbRemoveTempFile(server.rdb_child_pid); + kill(g_pserver->rdb_child_pid,SIGUSR1); + rdbRemoveTempFile(g_pserver->rdb_child_pid); closeChildInfoPipe(); updateDictResizePolicy(); } @@ -2317,25 +2327,25 @@ int rdbSaveToSlavesSockets(rdbSaveInfo *rsi) { long long start; int pipefds[2]; - if (server.aof_child_pid != -1 || server.rdb_child_pid != -1) return C_ERR; + if (g_pserver->aof_child_pid != -1 || g_pserver->rdb_child_pid != -1) return C_ERR; /* Before to fork, create a pipe that will be used in order to * send back to the parent the IDs of the slaves that successfully * received all the writes. */ if (pipe(pipefds) == -1) return C_ERR; - server.rdb_pipe_read_result_from_child = pipefds[0]; - server.rdb_pipe_write_result_to_parent = pipefds[1]; + g_pserver->rdb_pipe_read_result_from_child = pipefds[0]; + g_pserver->rdb_pipe_write_result_to_parent = pipefds[1]; /* Collect the file descriptors of the slaves we want to transfer * the RDB to, which are i WAIT_BGSAVE_START state. */ - fds = (int*)zmalloc(sizeof(int)*listLength(server.slaves), MALLOC_LOCAL); + fds = (int*)zmalloc(sizeof(int)*listLength(g_pserver->slaves), MALLOC_LOCAL); /* We also allocate an array of corresponding client IDs. This will * be useful for the child process in order to build the report * (sent via unix pipe) that will be sent to the parent. */ - clientids = (uint64_t*)zmalloc(sizeof(uint64_t)*listLength(server.slaves), MALLOC_LOCAL); + clientids = (uint64_t*)zmalloc(sizeof(uint64_t)*listLength(g_pserver->slaves), MALLOC_LOCAL); numfds = 0; - listRewind(server.slaves,&li); + listRewind(g_pserver->slaves,&li); while((ln = listNext(&li))) { client *slave = (client*)ln->value; @@ -2347,7 +2357,7 @@ int rdbSaveToSlavesSockets(rdbSaveInfo *rsi) { * We'll restore it when the children returns (since duped socket * will share the O_NONBLOCK attribute with the parent). 
*/ anetBlock(NULL,slave->fd); - anetSendTimeout(NULL,slave->fd,server.repl_timeout*1000); + anetSendTimeout(NULL,slave->fd,g_pserver->repl_timeout*1000); } } @@ -2378,7 +2388,7 @@ int rdbSaveToSlavesSockets(rdbSaveInfo *rsi) { private_dirty/(1024*1024)); } - server.child_info_data.cow_size = private_dirty; + g_pserver->child_info_data.cow_size = private_dirty; sendChildInfo(CHILD_INFO_TYPE_RDB); /* If we are returning OK, at least one slave was served @@ -2413,7 +2423,7 @@ int rdbSaveToSlavesSockets(rdbSaveInfo *rsi) { * process with all the childre that were waiting. */ msglen = sizeof(uint64_t)*(1+2*numfds); if (*len == 0 || - write(server.rdb_pipe_write_result_to_parent,msg,msglen) + write(g_pserver->rdb_pipe_write_result_to_parent,msg,msglen) != msglen) { retval = C_ERR; @@ -2432,7 +2442,7 @@ int rdbSaveToSlavesSockets(rdbSaveInfo *rsi) { /* Undo the state change. The caller will perform cleanup on * all the slaves in BGSAVE_START state, but an early call to * replicationSetupSlaveForFullResync() turned it into BGSAVE_END */ - listRewind(server.slaves,&li); + listRewind(g_pserver->slaves,&li); while((ln = listNext(&li))) { client *slave = (client*)ln->value; int j; @@ -2448,15 +2458,15 @@ int rdbSaveToSlavesSockets(rdbSaveInfo *rsi) { close(pipefds[1]); closeChildInfoPipe(); } else { - server.stat_fork_time = ustime()-start; - server.stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / server.stat_fork_time / (1024*1024*1024); /* GB per second. */ - latencyAddSampleIfNeeded("fork",server.stat_fork_time/1000); + g_pserver->stat_fork_time = ustime()-start; + g_pserver->stat_fork_rate = (double) zmalloc_used_memory() * 1000000 / g_pserver->stat_fork_time / (1024*1024*1024); /* GB per second. 
*/ + latencyAddSampleIfNeeded("fork",g_pserver->stat_fork_time/1000); serverLog(LL_NOTICE,"Background RDB transfer started by pid %d", childpid); - server.rdb_save_time_start = time(NULL); - server.rdb_child_pid = childpid; - server.rdb_child_type = RDB_CHILD_TYPE_SOCKET; + g_pserver->rdb_save_time_start = time(NULL); + g_pserver->rdb_child_pid = childpid; + g_pserver->rdb_child_type = RDB_CHILD_TYPE_SOCKET; updateDictResizePolicy(); } zfree(clientids); @@ -2467,7 +2477,7 @@ int rdbSaveToSlavesSockets(rdbSaveInfo *rsi) { } void saveCommand(client *c) { - if (server.rdb_child_pid != -1) { + if (g_pserver->rdb_child_pid != -1) { addReplyError(c,"Background save already in progress"); return; } @@ -2498,11 +2508,11 @@ void bgsaveCommand(client *c) { rdbSaveInfo rsi, *rsiptr; rsiptr = rdbPopulateSaveInfo(&rsi); - if (server.rdb_child_pid != -1) { + if (g_pserver->rdb_child_pid != -1) { addReplyError(c,"Background save already in progress"); - } else if (server.aof_child_pid != -1) { + } else if (g_pserver->aof_child_pid != -1) { if (schedule) { - server.rdb_bgsave_scheduled = 1; + g_pserver->rdb_bgsave_scheduled = 1; addReplyStatus(c,"Background saving scheduled"); } else { addReplyError(c, @@ -2537,22 +2547,22 @@ rdbSaveInfo *rdbPopulateSaveInfo(rdbSaveInfo *rsi) { * connects to us, the NULL repl_backlog will trigger a full * synchronization, at the same time we will use a new replid and clear * replid2. */ - if (!listLength(server.masters) && server.repl_backlog) { - /* Note that when server.slaveseldb is -1, it means that this master + if (!listLength(g_pserver->masters) && g_pserver->repl_backlog) { + /* Note that when g_pserver->slaveseldb is -1, it means that this master * didn't apply any write commands after a full synchronization. * So we can let repl_stream_db be 0, this allows a restarted slave * to reload replication ID/offset, it's safe because the next write * command must generate a SELECT statement. */ - rsi->repl_stream_db = server.slaveseldb == -1 ? 
0 : server.slaveseldb; + rsi->repl_stream_db = g_pserver->slaveseldb == -1 ? 0 : g_pserver->slaveseldb; return rsi; } - if (listLength(server.masters) > 1) + if (listLength(g_pserver->masters) > 1) { // BUGBUG, warn user about this incomplete implementation serverLog(LL_WARNING, "Warning: Only backing up first master's information in RDB"); } - struct redisMaster *miFirst = (redisMaster*)(listLength(server.masters) ? listNodeValue(listFirst(server.masters)) : NULL); + struct redisMaster *miFirst = (redisMaster*)(listLength(g_pserver->masters) ? listNodeValue(listFirst(g_pserver->masters)) : NULL); /* If the instance is a slave we need a connected master * in order to fetch the currently selected DB. */ diff --git a/src/rdb.h b/src/rdb.h index 18cd3f3d4..45cfa475a 100644 --- a/src/rdb.h +++ b/src/rdb.h @@ -146,7 +146,7 @@ int rdbSaveS3(char *path, rdbSaveInfo *rsi); int rdbLoadS3(char *path, rdbSaveInfo *rsi); ssize_t rdbSaveObject(rio *rdb, robj_roptr o, robj *key); size_t rdbSavedObjectLen(robj *o); -robj *rdbLoadObject(int type, rio *rdb, robj *key); +robj *rdbLoadObject(int type, rio *rdb, robj *key, uint64_t mvcc_tstamp); void backgroundSaveDoneHandler(int exitcode, int bysignal); int rdbSaveKeyValuePair(rio *rdb, robj *key, robj *val, long long expiretime); robj *rdbLoadStringObject(rio *rdb); diff --git a/src/redis-check-rdb.cpp b/src/redis-check-rdb.cpp index a5570095f..a1194799e 100644 --- a/src/redis-check-rdb.cpp +++ b/src/redis-check-rdb.cpp @@ -285,7 +285,7 @@ int redis_check_rdb(const char *rdbfilename, FILE *fp) { rdbstate.keys++; /* Read value */ rdbstate.doing = RDB_CHECK_DOING_READ_OBJECT_VALUE; - if ((val = rdbLoadObject(type,&rdb,key)) == NULL) goto eoferr; + if ((val = rdbLoadObject(type,&rdb,key,OBJ_MVCC_INVALID)) == NULL) goto eoferr; /* Check if the key already expired. 
*/ if (expiretime != -1 && expiretime < now) rdbstate.already_expired++; @@ -297,7 +297,7 @@ int redis_check_rdb(const char *rdbfilename, FILE *fp) { expiretime = -1; } /* Verify the checksum if RDB version is >= 5 */ - if (rdbver >= 5 && server.rdb_checksum) { + if (rdbver >= 5 && g_pserver->rdb_checksum) { uint64_t cksum, expected = rdb.cksum; rdbstate.doing = RDB_CHECK_DOING_CHECK_SUM; @@ -349,7 +349,7 @@ int redis_check_rdb_main(int argc, const char **argv, FILE *fp) { * an already initialized Redis instance, check if we really need to. */ if (shared.integers[0] == NULL) createSharedObjects(); - server.loading_process_events_interval_bytes = 0; + g_pserver->loading_process_events_interval_bytes = 0; rdbCheckMode = 1; rdbCheckInfo("Checking RDB file %s", argv[1]); rdbCheckSetupSignals(); diff --git a/src/redis-cli.c b/src/redis-cli.c index 40c8501fe..ab3de2e73 100644 --- a/src/redis-cli.c +++ b/src/redis-cli.c @@ -282,7 +282,7 @@ static int helpEntriesLen; static sds cliVersion(void) { sds version; - version = sdscatprintf(sdsempty(), "%s", REDIS_VERSION); + version = sdscatprintf(sdsempty(), "%s", KEYDB_REAL_VERSION); /* Add git commit and working tree status when available */ if (strtoll(redisGitSHA1(),NULL,16)) { diff --git a/src/release.c b/src/release.c index 4e59c7474..a5a334f30 100644 --- a/src/release.c +++ b/src/release.c @@ -46,7 +46,7 @@ char *redisGitDirty(void) { } uint64_t redisBuildId(void) { - char *buildid = REDIS_VERSION REDIS_BUILD_ID REDIS_GIT_DIRTY REDIS_GIT_SHA1; + char *buildid = KEYDB_REAL_VERSION REDIS_BUILD_ID REDIS_GIT_DIRTY REDIS_GIT_SHA1; return crc64(0,(unsigned char*)buildid,strlen(buildid)); } diff --git a/src/replication.cpp b/src/replication.cpp index 35a460bac..eef679ea7 100644 --- a/src/replication.cpp +++ b/src/replication.cpp @@ -103,7 +103,7 @@ static bool FMasterHost(client *c) { listIter li; listNode *ln; - listRewind(server.masters, &li); + listRewind(g_pserver->masters, &li); while ((ln = listNext(&li))) { redisMaster 
*mi = (redisMaster*)listNodeValue(ln); @@ -117,7 +117,7 @@ static bool FAnyDisconnectedMasters() { listIter li; listNode *ln; - listRewind(server.masters, &li); + listRewind(g_pserver->masters, &li); while ((ln = listNext(&li))) { redisMaster *mi = (redisMaster*)listNodeValue(ln); @@ -130,41 +130,41 @@ static bool FAnyDisconnectedMasters() /* ---------------------------------- MASTER -------------------------------- */ void createReplicationBacklog(void) { - serverAssert(server.repl_backlog == NULL); - server.repl_backlog = (char*)zmalloc(server.repl_backlog_size, MALLOC_LOCAL); - server.repl_backlog_histlen = 0; - server.repl_backlog_idx = 0; + serverAssert(g_pserver->repl_backlog == NULL); + g_pserver->repl_backlog = (char*)zmalloc(g_pserver->repl_backlog_size, MALLOC_LOCAL); + g_pserver->repl_backlog_histlen = 0; + g_pserver->repl_backlog_idx = 0; /* We don't have any data inside our buffer, but virtually the first * byte we have is the next byte that will be generated for the * replication stream. */ - server.repl_backlog_off = server.master_repl_offset+1; + g_pserver->repl_backlog_off = g_pserver->master_repl_offset+1; } /* This function is called when the user modifies the replication backlog * size at runtime. It is up to the function to both update the - * server.repl_backlog_size and to resize the buffer and setup it so that + * g_pserver->repl_backlog_size and to resize the buffer and setup it so that * it contains the same data as the previous one (possibly less data, but * the most recent bytes, or the same data and more free space in case the * buffer is enlarged). 
*/ void resizeReplicationBacklog(long long newsize) { if (newsize < CONFIG_REPL_BACKLOG_MIN_SIZE) newsize = CONFIG_REPL_BACKLOG_MIN_SIZE; - if (server.repl_backlog_size == newsize) return; + if (g_pserver->repl_backlog_size == newsize) return; - server.repl_backlog_size = newsize; - if (server.repl_backlog != NULL) { + g_pserver->repl_backlog_size = newsize; + if (g_pserver->repl_backlog != NULL) { /* What we actually do is to flush the old buffer and realloc a new * empty one. It will refill with new data incrementally. * The reason is that copying a few gigabytes adds latency and even * worse often we need to alloc additional space before freeing the * old buffer. */ - zfree(server.repl_backlog); - server.repl_backlog = (char*)zmalloc(server.repl_backlog_size, MALLOC_LOCAL); - server.repl_backlog_histlen = 0; - server.repl_backlog_idx = 0; + zfree(g_pserver->repl_backlog); + g_pserver->repl_backlog = (char*)zmalloc(g_pserver->repl_backlog_size, MALLOC_LOCAL); + g_pserver->repl_backlog_histlen = 0; + g_pserver->repl_backlog_idx = 0; /* Next byte we have is... the next since the buffer is empty. */ - server.repl_backlog_off = server.master_repl_offset+1; + g_pserver->repl_backlog_off = g_pserver->master_repl_offset+1; } } @@ -172,44 +172,44 @@ void freeReplicationBacklog(void) { serverAssert(GlobalLocksAcquired()); listIter li; listNode *ln; - listRewind(server.slaves, &li); + listRewind(g_pserver->slaves, &li); while ((ln = listNext(&li))) { - // server.slaves should be empty, or filled with clients pending close + // g_pserver->slaves should be empty, or filled with clients pending close client *c = (client*)listNodeValue(ln); serverAssert(c->flags & CLIENT_CLOSE_ASAP || FMasterHost(c)); } - zfree(server.repl_backlog); - server.repl_backlog = NULL; + zfree(g_pserver->repl_backlog); + g_pserver->repl_backlog = NULL; } /* Add data to the replication backlog. 
* This function also increments the global replication offset stored at - * server.master_repl_offset, because there is no case where we want to feed + * g_pserver->master_repl_offset, because there is no case where we want to feed * the backlog without incrementing the offset. */ void feedReplicationBacklog(const void *ptr, size_t len) { serverAssert(GlobalLocksAcquired()); const unsigned char *p = (const unsigned char*)ptr; - server.master_repl_offset += len; + g_pserver->master_repl_offset += len; /* This is a circular buffer, so write as much data we can at every * iteration and rewind the "idx" index if we reach the limit. */ while(len) { - size_t thislen = server.repl_backlog_size - server.repl_backlog_idx; + size_t thislen = g_pserver->repl_backlog_size - g_pserver->repl_backlog_idx; if (thislen > len) thislen = len; - memcpy(server.repl_backlog+server.repl_backlog_idx,p,thislen); - server.repl_backlog_idx += thislen; - if (server.repl_backlog_idx == server.repl_backlog_size) - server.repl_backlog_idx = 0; + memcpy(g_pserver->repl_backlog+g_pserver->repl_backlog_idx,p,thislen); + g_pserver->repl_backlog_idx += thislen; + if (g_pserver->repl_backlog_idx == g_pserver->repl_backlog_size) + g_pserver->repl_backlog_idx = 0; len -= thislen; p += thislen; - server.repl_backlog_histlen += thislen; + g_pserver->repl_backlog_histlen += thislen; } - if (server.repl_backlog_histlen > server.repl_backlog_size) - server.repl_backlog_histlen = server.repl_backlog_size; + if (g_pserver->repl_backlog_histlen > g_pserver->repl_backlog_size) + g_pserver->repl_backlog_histlen = g_pserver->repl_backlog_size; /* Set the offset of the first byte we have in the backlog. 
*/ - server.repl_backlog_off = server.master_repl_offset - - server.repl_backlog_histlen + 1; + g_pserver->repl_backlog_off = g_pserver->master_repl_offset - + g_pserver->repl_backlog_histlen + 1; } /* Wrapper for feedReplicationBacklog() that takes Redis string objects @@ -235,7 +235,7 @@ void replicationFeedSlave(client *slave, int dictid, robj **argv, int argc) std::unique_locklock)> lock(slave->lock); /* Send SELECT command to every slave if needed. */ - if (server.slaveseldb != dictid) { + if (g_pserver->slaveseldb != dictid) { robj *selectcmd; /* For a few DBs we have pre-computed SELECT command. */ @@ -252,7 +252,7 @@ void replicationFeedSlave(client *slave, int dictid, robj **argv, int argc) } /* Add the SELECT command into the backlog. */ - if (server.repl_backlog) feedReplicationBacklogWithObject(selectcmd); + if (g_pserver->repl_backlog) feedReplicationBacklogWithObject(selectcmd); /* Send it to slaves */ addReply(slave,selectcmd); @@ -260,7 +260,7 @@ void replicationFeedSlave(client *slave, int dictid, robj **argv, int argc) if (dictid < 0 || dictid >= PROTO_SHARED_SELECT_CMDS) decrRefCount(selectcmd); } - server.slaveseldb = dictid; + g_pserver->slaveseldb = dictid; /* Feed slaves that are waiting for the initial SYNC (so these commands * are queued in the output buffer until the initial SYNC completes), @@ -291,21 +291,21 @@ void replicationFeedSlaves(list *slaves, int dictid, robj **argv, int argc) { * propagate *identical* replication stream. In this way this slave can * advertise the same replication ID as the master (since it shares the * master replication history and has the same backlog and offsets). */ - if (!server.fActiveReplica && listLength(server.masters)) return; + if (!g_pserver->fActiveReplica && listLength(g_pserver->masters)) return; /* If there aren't slaves, and there is no backlog buffer to populate, * we can return ASAP. 
*/ - if (server.repl_backlog == NULL && listLength(slaves) == 0) return; + if (g_pserver->repl_backlog == NULL && listLength(slaves) == 0) return; /* We can't have slaves attached and no backlog. */ - serverAssert(!(listLength(slaves) != 0 && server.repl_backlog == NULL)); + serverAssert(!(listLength(slaves) != 0 && g_pserver->repl_backlog == NULL)); - client *fake = createClient(-1, serverTL - server.rgthreadvar); + client *fake = createClient(-1, serverTL - g_pserver->rgthreadvar); fake->flags |= CLIENT_FORCE_REPLY; replicationFeedSlave(fake, dictid, argv, argc); // Note: updates the repl log, keep above the repl update code below /* Write the command to the replication backlog if any. */ - if (server.repl_backlog) { + if (g_pserver->repl_backlog) { char aux[LONG_STR_SIZE+3]; /* Add the multi bulk reply length. */ @@ -339,13 +339,13 @@ void replicationFeedSlaves(list *slaves, int dictid, robj **argv, int argc) { cchbuf += reply->used; } - bool fSendRaw = !server.fActiveReplica || (argc >= 1 && lookupCommand((sds)ptrFromObj(argv[0])) == server.rreplayCommand); + bool fSendRaw = !g_pserver->fActiveReplica; serverAssert(argc > 0); serverAssert(cchbuf > 0); char uuid[40] = {'\0'}; - uuid_unparse(server.uuid, uuid); + uuid_unparse(cserver.uuid, uuid); char proto[1024]; int cchProto = snprintf(proto, sizeof(proto), "*3\r\n$7\r\nRREPLAY\r\n$%d\r\n%s\r\n$%lld\r\n", (int)strlen(uuid), uuid, cchbuf); cchProto = std::min((int)sizeof(proto), cchProto); @@ -357,7 +357,7 @@ void replicationFeedSlaves(list *slaves, int dictid, robj **argv, int argc) { /* Don't feed slaves that are still waiting for BGSAVE to start */ if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_START) continue; - if (server.current_client && FSameHost(server.current_client, slave)) continue; + if (serverTL->current_client && FSameHost(serverTL->current_client, slave)) continue; std::unique_locklock)> lock(slave->lock); if (!fSendRaw) @@ -394,7 +394,7 @@ void replicationFeedSlavesFromMasterStream(list 
*slaves, char *buf, size_t bufle printf("\n"); } - if (server.repl_backlog) feedReplicationBacklog(buf,buflen); + if (g_pserver->repl_backlog) feedReplicationBacklog(buf,buflen); listRewind(slaves,&li); while((ln = listNext(&li))) { @@ -427,7 +427,7 @@ void replicationFeedMonitors(client *c, list *monitors, int dictid, robj **argv, if (c->flags & CLIENT_LUA) { cmdrepr = sdscatprintf(cmdrepr,"[%d lua] ",dictid); } else if (c->flags & CLIENT_UNIX_SOCKET) { - cmdrepr = sdscatprintf(cmdrepr,"[%d unix:%s] ",dictid,server.unixsocket); + cmdrepr = sdscatprintf(cmdrepr,"[%d unix:%s] ",dictid,g_pserver->unixsocket); } else { cmdrepr = sdscatprintf(cmdrepr,"[%d %s] ",dictid,getClientPeerId(c)); } @@ -461,49 +461,49 @@ long long addReplyReplicationBacklog(client *c, long long offset) { serverLog(LL_DEBUG, "[PSYNC] Replica request offset: %lld", offset); - if (server.repl_backlog_histlen == 0) { + if (g_pserver->repl_backlog_histlen == 0) { serverLog(LL_DEBUG, "[PSYNC] Backlog history len is zero"); return 0; } serverLog(LL_DEBUG, "[PSYNC] Backlog size: %lld", - server.repl_backlog_size); + g_pserver->repl_backlog_size); serverLog(LL_DEBUG, "[PSYNC] First byte: %lld", - server.repl_backlog_off); + g_pserver->repl_backlog_off); serverLog(LL_DEBUG, "[PSYNC] History len: %lld", - server.repl_backlog_histlen); + g_pserver->repl_backlog_histlen); serverLog(LL_DEBUG, "[PSYNC] Current index: %lld", - server.repl_backlog_idx); + g_pserver->repl_backlog_idx); /* Compute the amount of bytes we need to discard. */ - skip = offset - server.repl_backlog_off; + skip = offset - g_pserver->repl_backlog_off; serverLog(LL_DEBUG, "[PSYNC] Skipping: %lld", skip); /* Point j to the oldest byte, that is actually our - * server.repl_backlog_off byte. */ - j = (server.repl_backlog_idx + - (server.repl_backlog_size-server.repl_backlog_histlen)) % - server.repl_backlog_size; + * g_pserver->repl_backlog_off byte. 
*/ + j = (g_pserver->repl_backlog_idx + + (g_pserver->repl_backlog_size-g_pserver->repl_backlog_histlen)) % + g_pserver->repl_backlog_size; serverLog(LL_DEBUG, "[PSYNC] Index of first byte: %lld", j); /* Discard the amount of data to seek to the specified 'offset'. */ - j = (j + skip) % server.repl_backlog_size; + j = (j + skip) % g_pserver->repl_backlog_size; /* Feed slave with data. Since it is a circular buffer we have to * split the reply in two parts if we are cross-boundary. */ - len = server.repl_backlog_histlen - skip; + len = g_pserver->repl_backlog_histlen - skip; serverLog(LL_DEBUG, "[PSYNC] Reply total length: %lld", len); while(len) { long long thislen = - ((server.repl_backlog_size - j) < len) ? - (server.repl_backlog_size - j) : len; + ((g_pserver->repl_backlog_size - j) < len) ? + (g_pserver->repl_backlog_size - j) : len; serverLog(LL_DEBUG, "[PSYNC] addReply() length: %lld", thislen); - addReplySds(c,sdsnewlen(server.repl_backlog + j, thislen)); + addReplySds(c,sdsnewlen(g_pserver->repl_backlog + j, thislen)); len -= thislen; j = 0; } - return server.repl_backlog_histlen - skip; + return g_pserver->repl_backlog_histlen - skip; } /* Return the offset to provide as reply to the PSYNC command received @@ -511,7 +511,7 @@ long long addReplyReplicationBacklog(client *c, long long offset) { * the BGSAVE process started and before executing any other command * from clients. */ long long getPsyncInitialOffset(void) { - return server.master_repl_offset; + return g_pserver->master_repl_offset; } /* Send a FULLRESYNC reply in the specific case of a full resynchronization, @@ -539,13 +539,13 @@ int replicationSetupSlaveForFullResync(client *slave, long long offset) { /* We are going to accumulate the incremental changes for this * slave as well. Set slaveseldb to -1 in order to force to re-emit * a SELECT statement in the replication stream. 
*/ - server.slaveseldb = -1; + g_pserver->slaveseldb = -1; /* Don't send this reply to slaves that approached us with * the old SYNC command. */ if (!(slave->flags & CLIENT_PRE_PSYNC)) { buflen = snprintf(buf,sizeof(buf),"+FULLRESYNC %s %lld\r\n", - server.replid,offset); + g_pserver->replid,offset); if (write(slave->fd,buf,buflen) != buflen) { freeClientAsync(slave); return C_ERR; @@ -578,23 +578,23 @@ int masterTryPartialResynchronization(client *c) { * * Note that there are two potentially valid replication IDs: the ID1 * and the ID2. The ID2 however is only valid up to a specific offset. */ - if (strcasecmp(master_replid, server.replid) && - (strcasecmp(master_replid, server.replid2) || - psync_offset > server.second_replid_offset)) + if (strcasecmp(master_replid, g_pserver->replid) && + (strcasecmp(master_replid, g_pserver->replid2) || + psync_offset > g_pserver->second_replid_offset)) { /* Run id "?" is used by slaves that want to force a full resync. */ if (master_replid[0] != '?') { - if (strcasecmp(master_replid, server.replid) && - strcasecmp(master_replid, server.replid2)) + if (strcasecmp(master_replid, g_pserver->replid) && + strcasecmp(master_replid, g_pserver->replid2)) { serverLog(LL_NOTICE,"Partial resynchronization not accepted: " "Replication ID mismatch (Replica asked for '%s', my " "replication IDs are '%s' and '%s')", - master_replid, server.replid, server.replid2); + master_replid, g_pserver->replid, g_pserver->replid2); } else { serverLog(LL_NOTICE,"Partial resynchronization not accepted: " "Requested offset for second ID was %lld, but I can reply " - "up to %lld", psync_offset, server.second_replid_offset); + "up to %lld", psync_offset, g_pserver->second_replid_offset); } } else { serverLog(LL_NOTICE,"Full resync requested by replica %s", @@ -604,13 +604,13 @@ int masterTryPartialResynchronization(client *c) { } /* We still have the data our slave is asking for? 
*/ - if (!server.repl_backlog || - psync_offset < server.repl_backlog_off || - psync_offset > (server.repl_backlog_off + server.repl_backlog_histlen)) + if (!g_pserver->repl_backlog || + psync_offset < g_pserver->repl_backlog_off || + psync_offset > (g_pserver->repl_backlog_off + g_pserver->repl_backlog_histlen)) { serverLog(LL_NOTICE, "Unable to partial resync with replica %s for lack of backlog (Replica request was: %lld).", replicationGetSlaveName(c), psync_offset); - if (psync_offset > server.master_repl_offset) { + if (psync_offset > g_pserver->master_repl_offset) { serverLog(LL_WARNING, "Warning: replica %s tried to PSYNC with an offset that is greater than the master replication offset.", replicationGetSlaveName(c)); } @@ -623,14 +623,14 @@ int masterTryPartialResynchronization(client *c) { * 3) Send the backlog data (from the offset to the end) to the slave. */ c->flags |= CLIENT_SLAVE; c->replstate = SLAVE_STATE_ONLINE; - c->repl_ack_time = server.unixtime; + c->repl_ack_time = g_pserver->unixtime; c->repl_put_online_on_ack = 0; - listAddNodeTail(server.slaves,c); + listAddNodeTail(g_pserver->slaves,c); /* We can't use the connection buffers since they are used to accumulate * new commands at this stage. But we are sure the socket send buffer is * empty so this write will never fail actually. */ if (c->slave_capa & SLAVE_CAPA_PSYNC2) { - buflen = snprintf(buf,sizeof(buf),"+CONTINUE %s\r\n", server.replid); + buflen = snprintf(buf,sizeof(buf),"+CONTINUE %s\r\n", g_pserver->replid); } else { buflen = snprintf(buf,sizeof(buf),"+CONTINUE\r\n"); } @@ -646,7 +646,7 @@ int masterTryPartialResynchronization(client *c) { "Partial resynchronization request from %s accepted. 
Sending %lld bytes of backlog starting from offset %lld.", replicationGetSlaveName(c), psync_len, psync_offset); - /* Note that we don't need to set the selected DB at server.slaveseldb + /* Note that we don't need to set the selected DB at g_pserver->slaveseldb * to -1 to force the master to emit SELECT, since the slave already * has this state from the previous connection with the master. */ @@ -682,7 +682,7 @@ int masterTryPartialResynchronization(client *c) { int startBgsaveForReplication(int mincapa) { serverAssert(GlobalLocksAcquired()); int retval; - int socket_target = server.repl_diskless_sync && (mincapa & SLAVE_CAPA_EOF); + int socket_target = g_pserver->repl_diskless_sync && (mincapa & SLAVE_CAPA_EOF); listIter li; listNode *ln; @@ -708,7 +708,7 @@ int startBgsaveForReplication(int mincapa) { * an error about what happened, close the connection ASAP. */ if (retval == C_ERR) { serverLog(LL_WARNING,"BGSAVE for replication failed"); - listRewind(server.slaves,&li); + listRewind(g_pserver->slaves,&li); while((ln = listNext(&li))) { client *slave = (client*)ln->value; std::unique_lock<decltype(slave->lock)> lock(slave->lock); @@ -716,7 +716,7 @@ int startBgsaveForReplication(int mincapa) { if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_START) { slave->replstate = REPL_STATE_NONE; slave->flags &= ~CLIENT_SLAVE; - listDelNode(server.slaves,ln); + listDelNode(g_pserver->slaves,ln); addReplyError(slave, "BGSAVE failed, replication can't continue"); slave->flags |= CLIENT_CLOSE_AFTER_REPLY; @@ -728,7 +728,7 @@ /* If the target is socket, rdbSaveToSlavesSockets() already setup * the salves for a full resync. 
Otherwise for disk target do it now.*/ if (!socket_target) { - listRewind(server.slaves,&li); + listRewind(g_pserver->slaves,&li); while((ln = listNext(&li))) { client *slave = (client*)ln->value; std::unique_lock<decltype(slave->lock)> lock(slave->lock); @@ -753,7 +753,7 @@ void syncCommand(client *c) { /* Refuse SYNC requests if we are a slave but the link with our master * is not ok... */ - if (!server.fActiveReplica) { + if (!g_pserver->fActiveReplica) { if (FAnyDisconnectedMasters()) { addReplySds(c,sdsnew("-NOMASTERLINK Can't SYNC while not connected with my master\r\n")); return; @@ -783,7 +783,7 @@ void syncCommand(client *c) { * if the connection with the master is lost. */ if (!strcasecmp((const char*)ptrFromObj(c->argv[0]),"psync")) { if (masterTryPartialResynchronization(c) == C_OK) { - server.stat_sync_partial_ok++; + g_pserver->stat_sync_partial_ok++; return; /* No full resync needed, return. */ } else { char *master_replid = (char*)ptrFromObj(c->argv[1]); @@ -792,7 +792,7 @@ void syncCommand(client *c) { * replid is not "?", as this is used by slaves to force a full * resync on purpose when they are not albe to partially * resync. */ - if (master_replid[0] != '?') server.stat_sync_partial_err++; + if (master_replid[0] != '?') g_pserver->stat_sync_partial_err++; } } else { /* If a slave uses SYNC, we are dealing with an old implementation @@ -802,19 +802,19 @@ void syncCommand(client *c) { } /* Full resynchronization. */ - server.stat_sync_full++; + g_pserver->stat_sync_full++; /* Setup the slave as one waiting for BGSAVE to start. The following code * paths will change the state if we handle the slave differently. */ c->replstate = SLAVE_STATE_WAIT_BGSAVE_START; - if (server.repl_disable_tcp_nodelay) + if (g_pserver->repl_disable_tcp_nodelay) anetDisableTcpNoDelay(NULL, c->fd); /* Non critical if it fails. 
*/ c->repldbfd = -1; c->flags |= CLIENT_SLAVE; - listAddNodeTail(server.slaves,c); + listAddNodeTail(g_pserver->slaves,c); /* Create the replication backlog if needed. */ - if (listLength(server.slaves) == 1 && server.repl_backlog == NULL) { + if (listLength(g_pserver->slaves) == 1 && g_pserver->repl_backlog == NULL) { /* When we create the backlog from scratch, we always use a new * replication ID and clear the ID2, since there is no valid * past history. */ @@ -824,8 +824,8 @@ void syncCommand(client *c) { } /* CASE 1: BGSAVE is in progress, with disk target. */ - if (server.rdb_child_pid != -1 && - server.rdb_child_type == RDB_CHILD_TYPE_DISK) + if (g_pserver->rdb_child_pid != -1 && + g_pserver->rdb_child_type == RDB_CHILD_TYPE_DISK) { /* Ok a background save is in progress. Let's check if it is a good * one for replication, i.e. if there is another slave that is @@ -834,7 +834,7 @@ void syncCommand(client *c) { listNode *ln; listIter li; - listRewind(server.slaves,&li); + listRewind(g_pserver->slaves,&li); while((ln = listNext(&li))) { slave = (client*)ln->value; if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_END) break; @@ -855,8 +855,8 @@ void syncCommand(client *c) { } /* CASE 2: BGSAVE is in progress, with socket target. */ - } else if (server.rdb_child_pid != -1 && - server.rdb_child_type == RDB_CHILD_TYPE_SOCKET) + } else if (g_pserver->rdb_child_pid != -1 && + g_pserver->rdb_child_type == RDB_CHILD_TYPE_SOCKET) { /* There is an RDB child process but it is writing directly to * children sockets. We need to wait for the next BGSAVE @@ -865,17 +865,17 @@ void syncCommand(client *c) { /* CASE 3: There is no BGSAVE is progress. */ } else { - if (server.repl_diskless_sync && (c->slave_capa & SLAVE_CAPA_EOF)) { + if (g_pserver->repl_diskless_sync && (c->slave_capa & SLAVE_CAPA_EOF)) { /* Diskless replication RDB child is created inside * replicationCron() since we want to delay its start a * few seconds to wait for more slaves to arrive. 
*/ - if (server.repl_diskless_sync_delay) + if (g_pserver->repl_diskless_sync_delay) serverLog(LL_NOTICE,"Delay next BGSAVE for diskless SYNC"); } else { /* Target is disk (or the slave is not capable of supporting * diskless replication) and we don't have a BGSAVE in progress, * let's start one. */ - if (server.aof_child_pid == -1) { + if (g_pserver->aof_child_pid == -1) { startBgsaveForReplication(c->slave_capa); } else { serverLog(LL_NOTICE, @@ -903,7 +903,7 @@ void processReplconfUuid(client *c, robj *arg) char szServerUUID[36 + 2]; // 1 for the '+', another for '\0' szServerUUID[0] = '+'; - uuid_unparse(server.uuid, szServerUUID+1); + uuid_unparse(cserver.uuid, szServerUUID+1); addReplyProto(c, szServerUUID, 37); addReplyProto(c, "\r\n", 2); return; @@ -970,7 +970,7 @@ void replconfCommand(client *c) { return; if (offset > c->repl_ack_off) c->repl_ack_off = offset; - c->repl_ack_time = server.unixtime; + c->repl_ack_time = g_pserver->unixtime; /* If this was a diskless replication, we need to really put * the slave online when the first ACK is received (which * confirms slave is online and ready to get more data). */ @@ -983,7 +983,7 @@ void replconfCommand(client *c) { * to the slave. */ listIter li; listNode *ln; - listRewind(server.masters, &li); + listRewind(g_pserver->masters, &li); while ((ln = listNext(&li))) { replicationSendAck((redisMaster*)listNodeValue(ln)); @@ -1017,9 +1017,9 @@ void replconfCommand(client *c) { void putSlaveOnline(client *slave) { slave->replstate = SLAVE_STATE_ONLINE; slave->repl_put_online_on_ack = 0; - slave->repl_ack_time = server.unixtime; /* Prevent false timeout. */ + slave->repl_ack_time = g_pserver->unixtime; /* Prevent false timeout. 
*/ AssertCorrectThread(slave); - if (aeCreateFileEvent(server.rgthreadvar[slave->iel].el, slave->fd, AE_WRITABLE|AE_WRITE_THREADSAFE, + if (aeCreateFileEvent(g_pserver->rgthreadvar[slave->iel].el, slave->fd, AE_WRITABLE|AE_WRITE_THREADSAFE, sendReplyToClient, slave) == AE_ERR) { serverLog(LL_WARNING,"Unable to register writable event for replica bulk transfer: %s", strerror(errno)); freeClient(slave); @@ -1050,7 +1050,7 @@ void sendBulkToSlave(aeEventLoop *el, int fd, void *privdata, int mask) { freeClient(slave); return; } - server.stat_net_output_bytes += nwritten; + g_pserver->stat_net_output_bytes += nwritten; sdsrange(slave->replpreamble,nwritten,-1); if (sdslen(slave->replpreamble) == 0) { sdsfree(slave->replpreamble); @@ -1079,7 +1079,7 @@ void sendBulkToSlave(aeEventLoop *el, int fd, void *privdata, int mask) { return; } slave->repldboff += nwritten; - server.stat_net_output_bytes += nwritten; + g_pserver->stat_net_output_bytes += nwritten; if (slave->repldboff == slave->repldbsize) { close(slave->repldbfd); slave->repldbfd = -1; @@ -1110,7 +1110,7 @@ void updateSlavesWaitingBgsave(int bgsaveerr, int type) int mincapa = -1; serverAssert(GlobalLocksAcquired()); - listRewind(server.slaves,&li); + listRewind(g_pserver->slaves,&li); while((ln = listNext(&li))) { client *slave = (client*)ln->value; @@ -1137,7 +1137,7 @@ void updateSlavesWaitingBgsave(int bgsaveerr, int type) * is technically online now. */ slave->replstate = SLAVE_STATE_ONLINE; slave->repl_put_online_on_ack = 1; - slave->repl_ack_time = server.unixtime; /* Timeout otherwise. */ + slave->repl_ack_time = g_pserver->unixtime; /* Timeout otherwise. */ } else { if (bgsaveerr != C_OK) { if (FCorrectThread(slave)) @@ -1147,7 +1147,7 @@ void updateSlavesWaitingBgsave(int bgsaveerr, int type) serverLog(LL_WARNING,"SYNC failed. 
BGSAVE child returned an error"); continue; } - if ((slave->repldbfd = open(server.rdb_filename,O_RDONLY)) == -1 || + if ((slave->repldbfd = open(g_pserver->rdb_filename,O_RDONLY)) == -1 || redis_fstat(slave->repldbfd,&buf) == -1) { if (FCorrectThread(slave)) freeClient(slave); @@ -1164,16 +1164,16 @@ void updateSlavesWaitingBgsave(int bgsaveerr, int type) if (FCorrectThread(slave)) { - aeDeleteFileEvent(server.rgthreadvar[slave->iel].el,slave->fd,AE_WRITABLE); - if (aeCreateFileEvent(server.rgthreadvar[slave->iel].el, slave->fd, AE_WRITABLE, sendBulkToSlave, slave) == AE_ERR) { + aeDeleteFileEvent(g_pserver->rgthreadvar[slave->iel].el,slave->fd,AE_WRITABLE); + if (aeCreateFileEvent(g_pserver->rgthreadvar[slave->iel].el, slave->fd, AE_WRITABLE, sendBulkToSlave, slave) == AE_ERR) { freeClient(slave); } } else { - aePostFunction(server.rgthreadvar[slave->iel].el, [slave]{ - aeDeleteFileEvent(server.rgthreadvar[slave->iel].el,slave->fd,AE_WRITABLE); - if (aeCreateFileEvent(server.rgthreadvar[slave->iel].el, slave->fd, AE_WRITABLE, sendBulkToSlave, slave) == AE_ERR) { + aePostFunction(g_pserver->rgthreadvar[slave->iel].el, [slave]{ + aeDeleteFileEvent(g_pserver->rgthreadvar[slave->iel].el,slave->fd,AE_WRITABLE); + if (aeCreateFileEvent(g_pserver->rgthreadvar[slave->iel].el, slave->fd, AE_WRITABLE, sendBulkToSlave, slave) == AE_ERR) { freeClient(slave); } }); @@ -1191,17 +1191,17 @@ void updateSlavesWaitingBgsave(int bgsaveerr, int type) * slaves, so the command should be called when something happens that * alters the current story of the dataset. */ void changeReplicationId(void) { - getRandomHexChars(server.replid,CONFIG_RUN_ID_SIZE); - server.replid[CONFIG_RUN_ID_SIZE] = '\0'; + getRandomHexChars(g_pserver->replid,CONFIG_RUN_ID_SIZE); + g_pserver->replid[CONFIG_RUN_ID_SIZE] = '\0'; } /* Clear (invalidate) the secondary replication ID. This happens, for * example, after a full resynchronization, when we start a new replication * history. 
*/ void clearReplicationId2(void) { - memset(server.replid2,'0',sizeof(server.replid)); - server.replid2[CONFIG_RUN_ID_SIZE] = '\0'; - server.second_replid_offset = -1; + memset(g_pserver->replid2,'0',sizeof(g_pserver->replid)); + g_pserver->replid2[CONFIG_RUN_ID_SIZE] = '\0'; + g_pserver->second_replid_offset = -1; } /* Use the current replication ID / offset as secondary replication @@ -1210,7 +1210,7 @@ void clearReplicationId2(void) { * so that it can serve PSYNC requests performed using the master * replication ID. */ void shiftReplicationId(void) { - memcpy(server.replid2,server.replid,sizeof(server.replid)); + memcpy(g_pserver->replid2,g_pserver->replid,sizeof(g_pserver->replid)); /* We set the second replid offset to the master offset + 1, since * the slave will ask for the first byte it has not yet received, so * we need to add one to the offset: for example if, as a slave, we are @@ -1218,9 +1218,9 @@ void shiftReplicationId(void) { * are turned into a master, we can accept a PSYNC request with offset * 51, since the slave asking has the same history up to the 50th * byte, and is asking for the new bytes starting at offset 51. */ - server.second_replid_offset = server.master_repl_offset+1; + g_pserver->second_replid_offset = g_pserver->master_repl_offset+1; changeReplicationId(); - serverLog(LL_WARNING,"Setting secondary replication ID to %s, valid up to offset: %lld. New replication ID is %s", server.replid2, server.second_replid_offset, server.replid); + serverLog(LL_WARNING,"Setting secondary replication ID to %s, valid up to offset: %lld. 
New replication ID is %s", g_pserver->replid2, g_pserver->second_replid_offset, g_pserver->replid); } /* ----------------------------------- SLAVE -------------------------------- */ @@ -1256,7 +1256,7 @@ void replicationEmptyDbCallback(void *privdata) { UNUSED(privdata); listIter li; listNode *ln; - listRewind(server.masters, &li); + listRewind(g_pserver->masters, &li); while ((ln = listNext(&li))) { replicationSendNewlineToMaster((redisMaster*)listNodeValue(ln)); @@ -1265,10 +1265,10 @@ void replicationEmptyDbCallback(void *privdata) { /* Once we have a link with the master and the synchroniziation was * performed, this function materializes the master client we store - * at server.master, starting from the specified file descriptor. */ + * at g_pserver->master, starting from the specified file descriptor. */ void replicationCreateMasterClient(redisMaster *mi, int fd, int dbid) { serverAssert(mi->master == nullptr); - mi->master = createClient(fd, serverTL - server.rgthreadvar); + mi->master = createClient(fd, serverTL - g_pserver->rgthreadvar); mi->master->flags |= CLIENT_MASTER; mi->master->authenticated = 1; mi->master->reploff = mi->master_initial_offset; @@ -1317,7 +1317,7 @@ void readSyncBulkPayload(aeEventLoop *el, int fd, void *privdata, int mask) { UNUSED(el); UNUSED(mask); // Should we update our database, or create from scratch? - int fUpdate = server.fActiveReplica || server.enable_multimaster; + int fUpdate = g_pserver->fActiveReplica || g_pserver->enable_multimaster; redisMaster *mi = (redisMaster*)privdata; serverAssert(GlobalLocksAcquired()); @@ -1335,7 +1335,7 @@ void readSyncBulkPayload(aeEventLoop *el, int fd, void *privdata, int mask) { /* If repl_transfer_size == -1 we still have to read the bulk length * from the master reply. 
*/ if (mi->repl_transfer_size == -1) { - if (syncReadLine(fd,buf,1024,server.repl_syncio_timeout*1000) == -1) { + if (syncReadLine(fd,buf,1024,g_pserver->repl_syncio_timeout*1000) == -1) { serverLog(LL_WARNING, "I/O error reading bulk count from MASTER: %s", strerror(errno)); @@ -1351,7 +1351,7 @@ void readSyncBulkPayload(aeEventLoop *el, int fd, void *privdata, int mask) { /* At this stage just a newline works as a PING in order to take * the connection live. So we refresh our last interaction * timestamp. */ - mi->repl_transfer_lastio = server.unixtime; + mi->repl_transfer_lastio = g_pserver->unixtime; return; } else if (buf[0] != '$') { serverLog(LL_WARNING,"Bad protocol from MASTER, the first byte is not '$' (we received '%s'), are you sure the host and port are right?", buf); @@ -1402,7 +1402,7 @@ void readSyncBulkPayload(aeEventLoop *el, int fd, void *privdata, int mask) { cancelReplicationHandshake(mi); return; } - server.stat_net_input_bytes += nread; + g_pserver->stat_net_input_bytes += nread; if (usemark) { /* Update the last bytes array, and check if it matches our delimiter.*/ @@ -1416,7 +1416,7 @@ void readSyncBulkPayload(aeEventLoop *el, int fd, void *privdata, int mask) { if (memcmp(lastbytes,eofmark,CONFIG_RUN_ID_SIZE) == 0) eof_reached = 1; } - mi->repl_transfer_lastio = server.unixtime; + mi->repl_transfer_lastio = g_pserver->unixtime; if ((nwritten = write(mi->repl_transfer_fd,buf,nread)) != nread) { serverLog(LL_WARNING,"Write error or short write writing to the DB dump file needed for MASTER <-> REPLICA synchronization: %s", (nwritten == -1) ? 
strerror(errno) : "short write"); @@ -1454,22 +1454,22 @@ void readSyncBulkPayload(aeEventLoop *el, int fd, void *privdata, int mask) { } if (eof_reached) { - int aof_is_enabled = server.aof_state != AOF_OFF; + int aof_is_enabled = g_pserver->aof_state != AOF_OFF; /* Ensure background save doesn't overwrite synced data */ - if (server.rdb_child_pid != -1) { + if (g_pserver->rdb_child_pid != -1) { serverLog(LL_NOTICE, "Replica is about to load the RDB file received from the " "master, but there is a pending RDB child running. " "Killing process %ld and removing its temp file to avoid " "any race", - (long) server.rdb_child_pid); + (long) g_pserver->rdb_child_pid); killRDBChild(); } - if (rename(mi->repl_transfer_tmpfile,server.rdb_filename) == -1) { + if (rename(mi->repl_transfer_tmpfile,g_pserver->rdb_filename) == -1) { serverLog(LL_WARNING,"Failed trying to rename the temp DB into %s in MASTER <-> REPLICA synchronization: %s", - server.rdb_filename, strerror(errno)); + g_pserver->rdb_filename, strerror(errno)); cancelReplicationHandshake(mi); return; } @@ -1482,7 +1482,7 @@ void readSyncBulkPayload(aeEventLoop *el, int fd, void *privdata, int mask) { signalFlushedDb(-1); emptyDb( -1, - server.repl_slave_lazy_flush ? EMPTYDB_ASYNC : EMPTYDB_NO_FLAGS, + g_pserver->repl_slave_lazy_flush ? EMPTYDB_ASYNC : EMPTYDB_NO_FLAGS, replicationEmptyDbCallback); } @@ -1510,14 +1510,14 @@ void readSyncBulkPayload(aeEventLoop *el, int fd, void *privdata, int mask) { /* After a full resynchroniziation we use the replication ID and * offset of the master. The secondary ID / offset are cleared since * we are starting a new history. */ - memcpy(server.replid,mi->master->replid,sizeof(server.replid)); - server.master_repl_offset = mi->master->reploff; + memcpy(g_pserver->replid,mi->master->replid,sizeof(g_pserver->replid)); + g_pserver->master_repl_offset = mi->master->reploff; clearReplicationId2(); /* Let's create the replication backlog if needed. 
Slaves need to * accumulate the backlog regardless of the fact they have sub-slaves * or not, in order to behave correctly if they are promoted to * masters after a failover. */ - if (server.repl_backlog == NULL) createReplicationBacklog(); + if (g_pserver->repl_backlog == NULL) createReplicationBacklog(); serverLog(LL_NOTICE, "MASTER <-> REPLICA sync: Finished with success"); /* Restart the AOF subsystem now that we finished the sync. This @@ -1568,8 +1568,8 @@ char *sendSynchronousCommand(redisMaster *mi, int flags, int fd, ...) { cmd = sdscatsds(cmd,cmdargs); sdsfree(cmdargs); - /* Transfer command to the server. */ - if (syncWrite(fd,cmd,sdslen(cmd),server.repl_syncio_timeout*1000) + /* Transfer command to the server. */ + if (syncWrite(fd,cmd,sdslen(cmd),g_pserver->repl_syncio_timeout*1000) == -1) { sdsfree(cmd); @@ -1579,17 +1579,17 @@ char *sendSynchronousCommand(redisMaster *mi, int flags, int fd, ...) { sdsfree(cmd); } - /* Read the reply from the server. */ + /* Read the reply from the server. */ if (flags & SYNC_CMD_READ) { char buf[256]; - if (syncReadLine(fd,buf,sizeof(buf),server.repl_syncio_timeout*1000) + if (syncReadLine(fd,buf,sizeof(buf),g_pserver->repl_syncio_timeout*1000) == -1) { return sdscatprintf(sdsempty(),"-Reading from master: %s", strerror(errno)); } - mi->repl_transfer_lastio = server.unixtime; + mi->repl_transfer_lastio = g_pserver->unixtime; return sdsnew(buf); } return NULL; @@ -1607,7 +1607,7 @@ char *sendSynchronousCommand(redisMaster *mi, int flags, int fd, ...) { * 1) We pass the function an already connected socket "fd". * 2) This function does not close the file descriptor "fd". However in case * of successful partial resynchronization, the function will reuse - 'fd' as file descriptor of the server.master client structure. + 'fd' as file descriptor of the g_pserver->master client structure. 
* * The function is split in two halves: if read_reply is 0, the function * writes the PSYNC command on the socket, and a new function call is @@ -1638,8 +1638,8 @@ char *sendSynchronousCommand(redisMaster *mi, int flags, int fd, ...) { * * 1) As a side effect of the function call the function removes the readable * event handler from "fd", unless the return value is PSYNC_WAIT_REPLY. - * 2) server.master_initial_offset is set to the right value according - * to the master reply. This will be used to populate the 'server.master' + * 2) g_pserver->master_initial_offset is set to the right value according + * to the master reply. This will be used to populate the 'g_pserver->master' * structure replication offset. */ @@ -1660,10 +1660,10 @@ int slaveTryPartialResynchronization(redisMaster *mi, aeEventLoop *el, int fd, i * master run_id and offset as not valid. Later if we'll be able to do * a FULL resync using the PSYNC command we'll set the offset at the * right value, so that this information will be propagated to the - * client structure representing the master into server.master. */ + * client structure representing the master into g_pserver->master. */ mi->master_initial_offset = -1; - if (mi->cached_master && !server.fActiveReplica) { + if (mi->cached_master && !g_pserver->fActiveReplica) { psync_replid = mi->cached_master->replid; snprintf(psync_offset,sizeof(psync_offset),"%lld", mi->cached_master->reploff+1); serverLog(LL_NOTICE,"Trying a partial resynchronization (request %s:%s).", psync_replid, psync_offset); @@ -1751,17 +1751,17 @@ int slaveTryPartialResynchronization(redisMaster *mi, aeEventLoop *el, int fd, i serverLog(LL_WARNING,"Master replication ID changed to %s",sznew); /* Set the old ID as our ID2, up to the current offset+1. 
*/ - memcpy(server.replid2,mi->cached_master->replid, - sizeof(server.replid2)); - server.second_replid_offset = server.master_repl_offset+1; + memcpy(g_pserver->replid2,mi->cached_master->replid, + sizeof(g_pserver->replid2)); + g_pserver->second_replid_offset = g_pserver->master_repl_offset+1; /* Update the cached master ID and our own primary ID to the * new one. */ - memcpy(server.replid,sznew,sizeof(server.replid)); - memcpy(mi->cached_master->replid,sznew,sizeof(server.replid)); + memcpy(g_pserver->replid,sznew,sizeof(g_pserver->replid)); + memcpy(mi->cached_master->replid,sznew,sizeof(g_pserver->replid)); /* Disconnect all the sub-slaves: they need to be notified. */ - if (!server.fActiveReplica) + if (!g_pserver->fActiveReplica) disconnectSlaves(); } } @@ -1773,7 +1773,7 @@ int slaveTryPartialResynchronization(redisMaster *mi, aeEventLoop *el, int fd, i /* If this instance was restarted and we read the metadata to * PSYNC from the persistence file, our replication backlog could * be still not initialized. Create it. */ - if (server.repl_backlog == NULL) createReplicationBacklog(); + if (g_pserver->repl_backlog == NULL) createReplicationBacklog(); return PSYNC_CONTINUE; } @@ -1910,7 +1910,7 @@ void syncWithMaster(aeEventLoop *el, int fd, void *privdata, int mask) { if (mi->repl_state == REPL_STATE_SEND_UUID) { char szUUID[37] = {0}; memset(mi->master_uuid, 0, UUID_BINARY_LEN); - uuid_unparse((unsigned char*)server.uuid, szUUID); + uuid_unparse((unsigned char*)cserver.uuid, szUUID); err = sendSynchronousCommand(mi, SYNC_CMD_WRITE,fd,"REPLCONF","uuid",szUUID,NULL); if (err) goto write_error; mi->repl_state = REPL_STATE_RECEIVE_UUID; @@ -1940,8 +1940,8 @@ void syncWithMaster(aeEventLoop *el, int fd, void *privdata, int mask) { /* Set the slave port, so that Master's INFO command can list the * slave listening port correctly. */ if (mi->repl_state == REPL_STATE_SEND_PORT) { - sds port = sdsfromlonglong(server.slave_announce_port ? 
- server.slave_announce_port : server.port); + sds port = sdsfromlonglong(g_pserver->slave_announce_port ? + g_pserver->slave_announce_port : g_pserver->port); err = sendSynchronousCommand(mi, SYNC_CMD_WRITE,fd,"REPLCONF", "listening-port",port, NULL); sdsfree(port); @@ -1966,7 +1966,7 @@ void syncWithMaster(aeEventLoop *el, int fd, void *privdata, int mask) { /* Skip REPLCONF ip-address if there is no slave-announce-ip option set. */ if (mi->repl_state == REPL_STATE_SEND_IP && - server.slave_announce_ip == NULL) + g_pserver->slave_announce_ip == NULL) { mi->repl_state = REPL_STATE_SEND_CAPA; } @@ -1975,7 +1975,7 @@ void syncWithMaster(aeEventLoop *el, int fd, void *privdata, int mask) { * slave IP address port correctly in case of port forwarding or NAT. */ if (mi->repl_state == REPL_STATE_SEND_IP) { err = sendSynchronousCommand(mi, SYNC_CMD_WRITE,fd,"REPLCONF", - "ip-address",server.slave_announce_ip, NULL); + "ip-address",g_pserver->slave_announce_ip, NULL); if (err) goto write_error; sdsfree(err); mi->repl_state = REPL_STATE_RECEIVE_IP; @@ -2066,14 +2066,14 @@ void syncWithMaster(aeEventLoop *el, int fd, void *privdata, int mask) { * as well, if we have any sub-slaves. The master may transfer us an * entirely different data set and we have no way to incrementally feed * our slaves after that. */ - if (!server.fActiveReplica) + if (!g_pserver->fActiveReplica) { disconnectSlavesExcept(mi->master_uuid); /* Force our slaves to resync with us as well. */ freeReplicationBacklog(); /* Don't allow our chained slaves to PSYNC. */ } else { - if (listLength(server.slaves)) + if (listLength(g_pserver->slaves)) { changeReplicationId(); clearReplicationId2(); @@ -2085,11 +2085,11 @@ void syncWithMaster(aeEventLoop *el, int fd, void *privdata, int mask) { } /* Fall back to SYNC if needed. 
Otherwise psync_result == PSYNC_FULLRESYNC - * and the server.master_replid and master_initial_offset are + * and the g_pserver->master_replid and master_initial_offset are * already populated. */ if (psync_result == PSYNC_NOT_SUPPORTED) { serverLog(LL_NOTICE,"Retrying with SYNC..."); - if (syncWrite(fd,"SYNC\r\n",6,server.repl_syncio_timeout*1000) == -1) { + if (syncWrite(fd,"SYNC\r\n",6,g_pserver->repl_syncio_timeout*1000) == -1) { serverLog(LL_WARNING,"I/O error writing to MASTER: %s", strerror(errno)); goto error; @@ -2099,7 +2099,7 @@ void syncWithMaster(aeEventLoop *el, int fd, void *privdata, int mask) { /* Prepare a suitable temp file for bulk transfer */ while(maxtries--) { snprintf(tmpfile,256, - "temp-%d.%ld.rdb",(int)server.unixtime,(long int)getpid()); + "temp-%d.%ld.rdb",(int)g_pserver->unixtime,(long int)getpid()); dfd = open(tmpfile,O_CREAT|O_WRONLY|O_EXCL,0644); if (dfd != -1) break; sleep(1); @@ -2124,7 +2124,7 @@ void syncWithMaster(aeEventLoop *el, int fd, void *privdata, int mask) { mi->repl_transfer_read = 0; mi->repl_transfer_last_fsync_off = 0; mi->repl_transfer_fd = dfd; - mi->repl_transfer_lastio = server.unixtime; + mi->repl_transfer_lastio = g_pserver->unixtime; mi->repl_transfer_tmpfile = zstrdup(tmpfile); return; @@ -2153,7 +2153,7 @@ int connectWithMaster(redisMaster *mi) { return C_ERR; } - if (aeCreateFileEvent(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el,fd,AE_READABLE|AE_WRITABLE,syncWithMaster,mi) == + if (aeCreateFileEvent(g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].el,fd,AE_READABLE|AE_WRITABLE,syncWithMaster,mi) == AE_ERR) { close(fd); @@ -2161,7 +2161,7 @@ int connectWithMaster(redisMaster *mi) { return C_ERR; } - mi->repl_transfer_lastio = server.unixtime; + mi->repl_transfer_lastio = g_pserver->unixtime; mi->repl_transfer_s = fd; mi->repl_state = REPL_STATE_CONNECTING; return C_OK; @@ -2174,7 +2174,7 @@ int connectWithMaster(redisMaster *mi) { void undoConnectWithMaster(redisMaster *mi) { int fd = mi->repl_transfer_s; - 
aeDeleteFileEvent(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el,fd,AE_READABLE|AE_WRITABLE); + aeDeleteFileEvent(g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].el,fd,AE_READABLE|AE_WRITABLE); close(fd); mi->repl_transfer_s = -1; } @@ -2195,7 +2195,7 @@ void replicationAbortSyncTransfer(redisMaster *mi) { * the initial bulk transfer. * * If there was a replication handshake in progress 1 is returned and - * the replication state (server.repl_state) set to REPL_STATE_CONNECT. + * the replication state (g_pserver->repl_state) set to REPL_STATE_CONNECT. * * Otherwise zero is returned and no operation is perforemd at all. */ int cancelReplicationHandshake(redisMaster *mi) { @@ -2218,7 +2218,7 @@ struct redisMaster *replicationAddMaster(char *ip, int port) { // pre-reqs: We must not already have a replica in the list with the same tuple listIter li; listNode *ln; - listRewind(server.masters, &li); + listRewind(g_pserver->masters, &li); while ((ln = listNext(&li))) { redisMaster *miCheck = (redisMaster*)listNodeValue(ln); @@ -2226,17 +2226,17 @@ struct redisMaster *replicationAddMaster(char *ip, int port) { } // Pre-req satisfied, lets continue - int was_master = listLength(server.masters) == 0; + int was_master = listLength(g_pserver->masters) == 0; redisMaster *mi = nullptr; - if (!server.enable_multimaster && listLength(server.masters)) { - serverAssert(listLength(server.masters) == 1); - mi = (redisMaster*)listNodeValue(listFirst(server.masters)); + if (!g_pserver->enable_multimaster && listLength(g_pserver->masters)) { + serverAssert(listLength(g_pserver->masters) == 1); + mi = (redisMaster*)listNodeValue(listFirst(g_pserver->masters)); } else { mi = (redisMaster*)zcalloc(sizeof(redisMaster), MALLOC_LOCAL); initMasterInfo(mi); - listAddNodeTail(server.masters, mi); + listAddNodeTail(g_pserver->masters, mi); } sdsfree(mi->masterhost); @@ -2252,7 +2252,7 @@ struct redisMaster *replicationAddMaster(char *ip, int port) { /* Force our slaves to resync with us as well. 
They may hopefully be able * to partially resync with us, but we can notify the replid change. */ - if (!server.fActiveReplica) + if (!g_pserver->fActiveReplica) disconnectSlaves(); cancelReplicationHandshake(mi); /* Before destroying our master state, create a cached master using @@ -2292,7 +2292,7 @@ void replicationUnsetMaster(redisMaster *mi) { * of the replication ID change (see shiftReplicationId() call). However * the slaves will be able to partially resync with us, so it will be * a very fast reconnection. */ - if (!server.fActiveReplica) + if (!g_pserver->fActiveReplica) disconnectSlaves(); mi->repl_state = REPL_STATE_NONE; @@ -2300,17 +2300,17 @@ void replicationUnsetMaster(redisMaster *mi) { * with a SELECT statement. This is forced after a full resync, but * with PSYNC version 2, there is no need for full resync after a * master switch. */ - server.slaveseldb = -1; + g_pserver->slaveseldb = -1; /* Once we turn from slave to master, we consider the starting time without * slaves (that is used to count the replication backlog time to live) as * starting from now. Otherwise the backlog will be freed after a * failover if slaves do not connect immediately. */ - server.repl_no_slaves_since = server.unixtime; + g_pserver->repl_no_slaves_since = g_pserver->unixtime; - listNode *ln = listSearchKey(server.masters, mi); + listNode *ln = listSearchKey(g_pserver->masters, mi); serverAssert(ln != nullptr); - listDelNode(server.masters, ln); + listDelNode(g_pserver->masters, ln); freeMasterInfo(mi); } @@ -2319,7 +2319,7 @@ void replicationUnsetMaster(redisMaster *mi) { void replicationHandleMasterDisconnection(redisMaster *mi) { mi->master = NULL; mi->repl_state = REPL_STATE_CONNECT; - mi->repl_down_since = server.unixtime; + mi->repl_down_since = g_pserver->unixtime; /* We lost connection with our master, don't disconnect slaves yet, * maybe we'll be able to PSYNC with our master later. 
We'll disconnect * the slaves only if we'll have to do a full resync with our master. */ @@ -2328,7 +2328,7 @@ void replicationHandleMasterDisconnection(redisMaster *mi) { void replicaofCommand(client *c) { /* SLAVEOF is not allowed in cluster mode as replication is automatically * configured using the current address of the master node. */ - if (server.cluster_enabled) { + if (g_pserver->cluster_enabled) { addReplyError(c,"REPLICAOF not allowed in cluster mode."); return; } @@ -2337,10 +2337,10 @@ void replicaofCommand(client *c) { * into a master. Otherwise the new master address is set. */ if (!strcasecmp((const char*)ptrFromObj(c->argv[1]),"no") && !strcasecmp((const char*)ptrFromObj(c->argv[2]),"one")) { - if (listLength(server.masters)) { - while (listLength(server.masters)) + if (listLength(g_pserver->masters)) { + while (listLength(g_pserver->masters)) { - replicationUnsetMaster((redisMaster*)listNodeValue(listFirst(server.masters))); + replicationUnsetMaster((redisMaster*)listNodeValue(listFirst(g_pserver->masters))); } sds client = catClientInfoString(sdsempty(),c); serverLog(LL_NOTICE,"MASTER MODE enabled (user request from '%s')", @@ -2365,7 +2365,7 @@ void replicaofCommand(client *c) { /* Check if we are already attached to the specified slave */ listIter li; listNode *ln; - listRewind(server.masters, &li); + listRewind(g_pserver->masters, &li); while ((ln = listNext(&li))) { redisMaster *mi = (redisMaster*)listNodeValue(ln); @@ -2394,7 +2394,7 @@ void replicaofCommand(client *c) { * (master or slave) and additional information related to replication * in an easy to process format. 
*/ void roleCommand(client *c) { - if (listLength(server.masters) == 0) { + if (listLength(g_pserver->masters) == 0) { listIter li; listNode *ln; void *mbcount; @@ -2402,9 +2402,9 @@ void roleCommand(client *c) { addReplyArrayLen(c,3); addReplyBulkCBuffer(c,"master",6); - addReplyLongLong(c,server.master_repl_offset); + addReplyLongLong(c,g_pserver->master_repl_offset); mbcount = addReplyDeferredLen(c); - listRewind(server.slaves,&li); + listRewind(g_pserver->slaves,&li); while((ln = listNext(&li))) { client *slave = (client*)ln->value; char ip[NET_IP_STR_LEN], *slaveip = slave->slave_ip; @@ -2425,7 +2425,7 @@ void roleCommand(client *c) { } else { listIter li; listNode *ln; - listRewind(server.masters, &li); + listRewind(g_pserver->masters, &li); while ((ln = listNext(&li))) { @@ -2474,7 +2474,7 @@ void replicationSendAck(redisMaster *mi) /* In order to implement partial synchronization we need to be able to cache * our master's client structure after a transient disconnection. - * It is cached into server.cached_master and flushed away using the following + * It is cached into g_pserver->cached_master and flushed away using the following * functions. */ /* This function is called by freeClient() in order to cache the master @@ -2514,7 +2514,7 @@ void replicationCacheMaster(redisMaster *mi, client *c) { c->bufpos = 0; resetClient(c); - /* Save the master. Server.master will be set to null later by + /* Save the master. g_pserver->master will be set to null later by * replicationHandleMasterDisconnection(). */ mi->cached_master = mi->master; @@ -2526,7 +2526,7 @@ void replicationCacheMaster(redisMaster *mi, client *c) { /* Caching the master happens instead of the actual freeClient() call, * so make sure to adjust the replication state. This function will - * also set server.master to NULL. */ + * also set g_pserver->master to NULL. 
*/ replicationHandleMasterDisconnection(mi); } @@ -2542,12 +2542,12 @@ void replicationCacheMaster(redisMaster *mi, client *c) { void replicationCacheMasterUsingMyself(redisMaster *mi) { /* The master client we create can be set to any DBID, because * the new master will start its replication stream with SELECT. */ - mi->master_initial_offset = server.master_repl_offset; + mi->master_initial_offset = g_pserver->master_repl_offset; replicationCreateMasterClient(mi, -1,-1); std::lock_guardmaster->lock)> lock(mi->master->lock); /* Use our own ID / offset. */ - memcpy(mi->master->replid, server.replid, sizeof(server.replid)); + memcpy(mi->master->replid, g_pserver->replid, sizeof(g_pserver->replid)); /* Set as cached master. */ unlinkClient(mi->master); @@ -2582,17 +2582,17 @@ void replicationResurrectCachedMaster(redisMaster *mi, int newfd) { mi->master->fd = newfd; mi->master->flags &= ~(CLIENT_CLOSE_AFTER_REPLY|CLIENT_CLOSE_ASAP); mi->master->authenticated = 1; - mi->master->lastinteraction = server.unixtime; + mi->master->lastinteraction = g_pserver->unixtime; mi->repl_state = REPL_STATE_CONNECTED; mi->repl_down_since = 0; /* Normally changing the thread of a client is a BIG NONO, but this client was unlinked so its OK here */ - mi->master->iel = serverTL - server.rgthreadvar; // martial to this thread + mi->master->iel = serverTL - g_pserver->rgthreadvar; // martial to this thread /* Re-add to the list of clients. */ linkClient(mi->master); - if (aeCreateFileEvent(server.rgthreadvar[mi->master->iel].el, newfd, AE_READABLE|AE_READ_THREADSAFE, + if (aeCreateFileEvent(g_pserver->rgthreadvar[mi->master->iel].el, newfd, AE_READABLE|AE_READ_THREADSAFE, readQueryFromClient, mi->master)) { serverLog(LL_WARNING,"Error resurrecting the cached master, impossible to add the readable handler: %s", strerror(errno)); freeClientAsync(mi->master); /* Close ASAP. 
*/ @@ -2601,7 +2601,7 @@ void replicationResurrectCachedMaster(redisMaster *mi, int newfd) { /* We may also need to install the write handler as well if there is * pending data in the write buffers. */ if (clientHasPendingReplies(mi->master)) { - if (aeCreateFileEvent(server.rgthreadvar[mi->master->iel].el, newfd, AE_WRITABLE|AE_WRITE_THREADSAFE, + if (aeCreateFileEvent(g_pserver->rgthreadvar[mi->master->iel].el, newfd, AE_WRITABLE|AE_WRITE_THREADSAFE, sendReplyToClient, mi->master)) { serverLog(LL_WARNING,"Error resurrecting the cached master, impossible to add the writable handler: %s", strerror(errno)); freeClientAsync(mi->master); /* Close ASAP. */ @@ -2619,18 +2619,18 @@ void refreshGoodSlavesCount(void) { listNode *ln; int good = 0; - if (!server.repl_min_slaves_to_write || - !server.repl_min_slaves_max_lag) return; + if (!g_pserver->repl_min_slaves_to_write || + !g_pserver->repl_min_slaves_max_lag) return; - listRewind(server.slaves,&li); + listRewind(g_pserver->slaves,&li); while((ln = listNext(&li))) { client *slave = (client*)ln->value; - time_t lag = server.unixtime - slave->repl_ack_time; + time_t lag = g_pserver->unixtime - slave->repl_ack_time; if (slave->replstate == SLAVE_STATE_ONLINE && - lag <= server.repl_min_slaves_max_lag) good++; + lag <= g_pserver->repl_min_slaves_max_lag) good++; } - server.repl_good_slaves_count = good; + g_pserver->repl_good_slaves_count = good; } /* ----------------------- REPLICATION SCRIPT CACHE -------------------------- @@ -2666,9 +2666,9 @@ void refreshGoodSlavesCount(void) { /* Initialize the script cache, only called at startup. 
*/ void replicationScriptCacheInit(void) { - server.repl_scriptcache_size = 10000; - server.repl_scriptcache_dict = dictCreate(&replScriptCacheDictType,NULL); - server.repl_scriptcache_fifo = listCreate(); + g_pserver->repl_scriptcache_size = 10000; + g_pserver->repl_scriptcache_dict = dictCreate(&replScriptCacheDictType,NULL); + g_pserver->repl_scriptcache_fifo = listCreate(); } /* Empty the script cache. Should be called every time we are no longer sure @@ -2683,9 +2683,9 @@ void replicationScriptCacheInit(void) { * to reclaim otherwise unused memory. */ void replicationScriptCacheFlush(void) { - dictEmpty(server.repl_scriptcache_dict,NULL); - listRelease(server.repl_scriptcache_fifo); - server.repl_scriptcache_fifo = listCreate(); + dictEmpty(g_pserver->repl_scriptcache_dict,NULL); + listRelease(g_pserver->repl_scriptcache_fifo); + g_pserver->repl_scriptcache_fifo = listCreate(); } /* Add an entry into the script cache, if we reach max number of entries the @@ -2695,26 +2695,26 @@ void replicationScriptCacheAdd(sds sha1) { sds key = sdsdup(sha1); /* Evict oldest. */ - if (listLength(server.repl_scriptcache_fifo) == server.repl_scriptcache_size) + if (listLength(g_pserver->repl_scriptcache_fifo) == g_pserver->repl_scriptcache_size) { - listNode *ln = listLast(server.repl_scriptcache_fifo); + listNode *ln = listLast(g_pserver->repl_scriptcache_fifo); sds oldest = (sds)listNodeValue(ln); - retval = dictDelete(server.repl_scriptcache_dict,oldest); + retval = dictDelete(g_pserver->repl_scriptcache_dict,oldest); serverAssert(retval == DICT_OK); - listDelNode(server.repl_scriptcache_fifo,ln); + listDelNode(g_pserver->repl_scriptcache_fifo,ln); } /* Add current. 
*/ - retval = dictAdd(server.repl_scriptcache_dict,key,NULL); - listAddNodeHead(server.repl_scriptcache_fifo,key); + retval = dictAdd(g_pserver->repl_scriptcache_dict,key,NULL); + listAddNodeHead(g_pserver->repl_scriptcache_fifo,key); serverAssert(retval == DICT_OK); } /* Returns non-zero if the specified entry exists inside the cache, that is, * if all the slaves are aware of this script SHA1. */ int replicationScriptCacheExists(sds sha1) { - return dictFind(server.repl_scriptcache_dict,sha1) != NULL; + return dictFind(g_pserver->repl_scriptcache_dict,sha1) != NULL; } /* ----------------------- SYNCHRONOUS REPLICATION -------------------------- @@ -2748,7 +2748,7 @@ int replicationScriptCacheExists(sds sha1) { * we "group" all the clients that want to wait for synchronouns replication * in a given event loop iteration, and send a single GETACK for them all. */ void replicationRequestAckFromSlaves(void) { - server.get_ack_from_slaves = 1; + g_pserver->get_ack_from_slaves = 1; } /* Return the number of slaves that already acknowledged the specified @@ -2758,7 +2758,7 @@ int replicationCountAcksByOffset(long long offset) { listNode *ln; int count = 0; - listRewind(server.slaves,&li); + listRewind(g_pserver->slaves,&li); while((ln = listNext(&li))) { client *slave = (client*)ln->value; @@ -2775,7 +2775,7 @@ void waitCommand(client *c) { long numreplicas, ackreplicas; long long offset = c->woff; - if (listLength(server.masters)) { + if (listLength(g_pserver->masters)) { addReplyError(c,"WAIT cannot be used with replica instances. 
Please also note that since Redis 4.0 if a replica is configured to be writable (which is not the default) writes to replicas are just local and are not propagated."); return; } @@ -2798,7 +2798,7 @@ void waitCommand(client *c) { c->bpop.timeout = timeout; c->bpop.reploffset = offset; c->bpop.numreplicas = numreplicas; - listAddNodeTail(server.clients_waiting_acks,c); + listAddNodeTail(g_pserver->clients_waiting_acks,c); blockClient(c,BLOCKED_WAIT); /* Make sure that the server will send an ACK request to all the slaves @@ -2811,9 +2811,9 @@ void waitCommand(client *c) { * waiting for replica acks. Never call it directly, call unblockClient() * instead. */ void unblockClientWaitingReplicas(client *c) { - listNode *ln = listSearchKey(server.clients_waiting_acks,c); + listNode *ln = listSearchKey(g_pserver->clients_waiting_acks,c); serverAssert(ln != NULL); - listDelNode(server.clients_waiting_acks,ln); + listDelNode(g_pserver->clients_waiting_acks,ln); } /* Check if there are clients blocked in WAIT that can be unblocked since @@ -2825,7 +2825,7 @@ void processClientsWaitingReplicas(void) { listIter li; listNode *ln; - listRewind(server.clients_waiting_acks,&li); + listRewind(g_pserver->clients_waiting_acks,&li); while((ln = listNext(&li))) { client *c = (client*)ln->value; fastlock_lock(&c->lock); @@ -2881,7 +2881,7 @@ void replicationCron(void) { serverAssert(GlobalLocksAcquired()); listIter liMaster; listNode *lnMaster; - listRewind(server.masters, &liMaster); + listRewind(g_pserver->masters, &liMaster); while ((lnMaster = listNext(&liMaster))) { redisMaster *mi = (redisMaster*)listNodeValue(lnMaster); @@ -2894,7 +2894,7 @@ void replicationCron(void) { if (mi->masterhost && (mi->repl_state == REPL_STATE_CONNECTING || slaveIsInHandshakeState(mi)) && - (time(NULL)-mi->repl_transfer_lastio) > server.repl_timeout) + (time(NULL)-mi->repl_transfer_lastio) > g_pserver->repl_timeout) { serverLog(LL_WARNING,"Timeout connecting to the MASTER..."); 
cancelReplicationHandshake(mi); @@ -2902,7 +2902,7 @@ void replicationCron(void) { /* Bulk transfer I/O timeout? */ if (mi->masterhost && mi->repl_state == REPL_STATE_TRANSFER && - (time(NULL)-mi->repl_transfer_lastio) > server.repl_timeout) + (time(NULL)-mi->repl_transfer_lastio) > g_pserver->repl_timeout) { serverLog(LL_WARNING,"Timeout receiving bulk data from MASTER... If the problem persists try to set the 'repl-timeout' parameter in redis.conf to a larger value."); cancelReplicationHandshake(mi); @@ -2910,7 +2910,7 @@ void replicationCron(void) { /* Timed out master when we are an already connected slave? */ if (mi->masterhost && mi->repl_state == REPL_STATE_CONNECTED && - (time(NULL)-mi->master->lastinteraction) > server.repl_timeout) + (time(NULL)-mi->master->lastinteraction) > g_pserver->repl_timeout) { serverLog(LL_WARNING,"MASTER timeout: no data nor PING received..."); if (FCorrectThread(mi->master)) @@ -2945,11 +2945,11 @@ void replicationCron(void) { robj *ping_argv[1]; /* First, send PING according to ping_slave_period. */ - if ((replication_cron_loops % server.repl_ping_slave_period) == 0 && - listLength(server.slaves)) + if ((replication_cron_loops % g_pserver->repl_ping_slave_period) == 0 && + listLength(g_pserver->slaves)) { ping_argv[0] = createStringObject("PING",4); - replicationFeedSlaves(server.slaves, server.slaveseldb, + replicationFeedSlaves(g_pserver->slaves, g_pserver->slaveseldb, ping_argv, 1); decrRefCount(ping_argv[0]); } @@ -2968,14 +2968,14 @@ void replicationCron(void) { * last interaction timer preventing a timeout. In this case we ignore the * ping period and refresh the connection once per second since certain * timeouts are set at a few seconds (example: PSYNC response). 
*/ - listRewind(server.slaves,&li); + listRewind(g_pserver->slaves,&li); while((ln = listNext(&li))) { client *slave = (client*)ln->value; int is_presync = (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_START || (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_END && - server.rdb_child_type != RDB_CHILD_TYPE_SOCKET)); + g_pserver->rdb_child_type != RDB_CHILD_TYPE_SOCKET)); if (is_presync) { if (write(slave->fd, "\n", 1) == -1) { @@ -2985,17 +2985,17 @@ void replicationCron(void) { } /* Disconnect timedout slaves. */ - if (listLength(server.slaves)) { + if (listLength(g_pserver->slaves)) { listIter li; listNode *ln; - listRewind(server.slaves,&li); + listRewind(g_pserver->slaves,&li); while((ln = listNext(&li))) { client *slave = (client*)ln->value; if (slave->replstate != SLAVE_STATE_ONLINE) continue; if (slave->flags & CLIENT_PRE_PSYNC) continue; - if ((server.unixtime - slave->repl_ack_time) > server.repl_timeout) + if ((g_pserver->unixtime - slave->repl_ack_time) > g_pserver->repl_timeout) { serverLog(LL_WARNING, "Disconnecting timedout replica: %s", replicationGetSlaveName(slave)); @@ -3013,12 +3013,12 @@ void replicationCron(void) { * without sub-slaves attached should still accumulate data into the * backlog, in order to reply to PSYNC queries if they are turned into * masters after a failover. */ - if (listLength(server.slaves) == 0 && server.repl_backlog_time_limit && - server.repl_backlog && listLength(server.masters) == 0) + if (listLength(g_pserver->slaves) == 0 && g_pserver->repl_backlog_time_limit && + g_pserver->repl_backlog && listLength(g_pserver->masters) == 0) { - time_t idle = server.unixtime - server.repl_no_slaves_since; + time_t idle = g_pserver->unixtime - g_pserver->repl_no_slaves_since; - if (idle > server.repl_backlog_time_limit) { + if (idle > g_pserver->repl_backlog_time_limit) { /* When we free the backlog, we always use a new * replication ID and clear the ID2. 
This is needed * because when there is no backlog, the master_repl_offset @@ -3040,16 +3040,16 @@ void replicationCron(void) { serverLog(LL_NOTICE, "Replication backlog freed after %d seconds " "without connected replicas.", - (int) server.repl_backlog_time_limit); + (int) g_pserver->repl_backlog_time_limit); } } /* If AOF is disabled and we no longer have attached slaves, we can * free our Replication Script Cache as there is no need to propagate * EVALSHA at all. */ - if (listLength(server.slaves) == 0 && - server.aof_state == AOF_OFF && - listLength(server.repl_scriptcache_fifo) != 0) + if (listLength(g_pserver->slaves) == 0 && + g_pserver->aof_state == AOF_OFF && + listLength(g_pserver->repl_scriptcache_fifo) != 0) { replicationScriptCacheFlush(); } @@ -3060,18 +3060,18 @@ void replicationCron(void) { * In case of diskless replication, we make sure to wait the specified * number of seconds (according to configuration) so that other slaves * have the time to arrive before we start streaming. */ - if (server.rdb_child_pid == -1 && server.aof_child_pid == -1) { + if (g_pserver->rdb_child_pid == -1 && g_pserver->aof_child_pid == -1) { time_t idle, max_idle = 0; int slaves_waiting = 0; int mincapa = -1; listNode *ln; listIter li; - listRewind(server.slaves,&li); + listRewind(g_pserver->slaves,&li); while((ln = listNext(&li))) { client *slave = (client*)ln->value; if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_START) { - idle = server.unixtime - slave->lastinteraction; + idle = g_pserver->unixtime - slave->lastinteraction; if (idle > max_idle) max_idle = idle; slaves_waiting++; mincapa = (mincapa == -1) ? slave->slave_capa : @@ -3080,8 +3080,8 @@ void replicationCron(void) { } if (slaves_waiting && - (!server.repl_diskless_sync || - max_idle > server.repl_diskless_sync_delay)) + (!g_pserver->repl_diskless_sync || + max_idle > g_pserver->repl_diskless_sync_delay)) { /* Start the BGSAVE. 
The called function may start a * BGSAVE with socket target or disk target depending on the @@ -3099,7 +3099,7 @@ int FBrokenLinkToMaster() { listIter li; listNode *ln; - listRewind(server.masters, &li); + listRewind(g_pserver->masters, &li); while ((ln = listNext(&li))) { @@ -3117,7 +3117,7 @@ int FActiveMaster(client *c) listIter li; listNode *ln; - listRewind(server.masters, &li); + listRewind(g_pserver->masters, &li); while ((ln = listNext(&li))) { redisMaster *mi = (redisMaster*)listNodeValue(ln); @@ -3131,7 +3131,7 @@ redisMaster *MasterInfoFromClient(client *c) { listIter li; listNode *ln; - listRewind(server.masters, &li); + listRewind(g_pserver->masters, &li); while ((ln = listNext(&li))) { redisMaster *mi = (redisMaster*)listNodeValue(ln); @@ -3141,8 +3141,54 @@ redisMaster *MasterInfoFromClient(client *c) return nullptr; } +#define REPLAY_MAX_NESTING 64 +class ReplicaNestState +{ +public: + bool FPush() + { + if (m_cnesting == REPLAY_MAX_NESTING) { + m_fCancelled = true; + return false; // overflow + } + + if (m_cnesting == 0) + m_fCancelled = false; + ++m_cnesting; + return true; + } + + void Pop() + { + --m_cnesting; + } + + void Cancel() + { + m_fCancelled = true; + } + + bool FCancelled() const + { + return m_fCancelled; + } + + bool FFirst() const + { + return m_cnesting == 1; + } + +private: + int m_cnesting = 0; + bool m_fCancelled = false; +}; + void replicaReplayCommand(client *c) { + static thread_local ReplicaNestState *s_pstate = nullptr; + if (s_pstate == nullptr) + s_pstate = new (MALLOC_LOCAL) ReplicaNestState; + // the replay command contains two arguments: // 1: The UUID of the source // 2: The raw command buffer to be replayed @@ -3150,13 +3196,15 @@ void replicaReplayCommand(client *c) if (!(c->flags & CLIENT_MASTER)) { addReplyError(c, "Command must be sent from a master"); + s_pstate->Cancel(); return; } /* First Validate Arguments */ - if (c->argc != 3) + if (c->argc < 3) { addReplyError(c, "Invalid number of arguments"); + 
s_pstate->Cancel(); return; } @@ -3165,21 +3213,27 @@ void replicaReplayCommand(client *c) || uuid_parse((sds)ptrFromObj(c->argv[1]), uuid) != 0) { addReplyError(c, "Expected UUID arg1"); + s_pstate->Cancel(); return; } if (c->argv[2]->type != OBJ_STRING) { addReplyError(c, "Expected command buffer arg2"); + s_pstate->Cancel(); return; } - if (FSameUuidNoNil(uuid, server.uuid)) + if (FSameUuidNoNil(uuid, cserver.uuid)) { addReply(c, shared.ok); + s_pstate->Cancel(); return; // Our own commands have come back to us. Ignore them. } + if (!s_pstate->FPush()) + return; + // OK We've recieved a command lets execute client *cFake = createClient(-1, c->iel); cFake->lock.lock(); @@ -3193,7 +3247,10 @@ void replicaReplayCommand(client *c) freeClient(cFake); // call() will not propogate this for us, so we do so here - alsoPropagate(server.rreplayCommand,c->db->id,c->argv,c->argc,PROPAGATE_AOF|PROPAGATE_REPL); + if (!s_pstate->FCancelled() && s_pstate->FFirst()) + alsoPropagate(cserver.rreplayCommand,c->db->id,c->argv,c->argc,PROPAGATE_AOF|PROPAGATE_REPL); + + s_pstate->Pop(); return; } @@ -3202,16 +3259,16 @@ void updateMasterAuth() listIter li; listNode *ln; - listRewind(server.masters, &li); + listRewind(g_pserver->masters, &li); while ((ln = listNext(&li))) { redisMaster *mi = (redisMaster*)listNodeValue(ln); zfree(mi->masterauth); mi->masterauth = nullptr; zfree(mi->masteruser); mi->masteruser = nullptr; - if (server.default_masterauth) - mi->masterauth = zstrdup(server.default_masterauth); - if (server.default_masteruser) - mi->masteruser = zstrdup(server.default_masteruser); + if (cserver.default_masterauth) + mi->masterauth = zstrdup(cserver.default_masterauth); + if (cserver.default_masteruser) + mi->masteruser = zstrdup(cserver.default_masteruser); } } \ No newline at end of file diff --git a/src/scripting.cpp b/src/scripting.cpp index ad504250e..15f41745e 100644 --- a/src/scripting.cpp +++ b/src/scripting.cpp @@ -191,7 +191,7 @@ char 
*redisProtocolToLuaType_MultiBulk(lua_State *lua, char *reply, int atype) { int j = 0; string2ll(reply+1,p-reply-1,&mbulklen); - if (server.lua_caller->resp == 2 || atype == '*') { + if (g_pserver->lua_caller->resp == 2 || atype == '*') { p += 2; if (mbulklen == -1) { lua_pushboolean(lua,0); @@ -203,7 +203,7 @@ char *redisProtocolToLuaType_MultiBulk(lua_State *lua, char *reply, int atype) { p = redisProtocolToLuaType(lua,p); lua_settable(lua,-3); } - } else if (server.lua_caller->resp == 3) { + } else if (g_pserver->lua_caller->resp == 3) { /* Here we handle only Set and Map replies in RESP3 mode, since arrays * follow the above RESP2 code path. */ p += 2; @@ -371,14 +371,14 @@ int luaRedisGenericCommand(lua_State *lua, int raise_error) { int acl_retval = 0; int call_flags = CMD_CALL_SLOWLOG | CMD_CALL_STATS; struct redisCommand *cmd; - client *c = server.lua_client; + client *c = g_pserver->lua_client; sds reply; // Ensure our client is on the right thread serverAssert(!(c->flags & CLIENT_PENDING_WRITE)); serverAssert(!(c->flags & CLIENT_UNBLOCKED)); serverAssert(GlobalLocksAcquired()); - c->iel = serverTL - server.rgthreadvar; + c->iel = serverTL - g_pserver->rgthreadvar; /* Cached across calls. */ static robj **argv = NULL; @@ -388,7 +388,7 @@ int luaRedisGenericCommand(lua_State *lua, int raise_error) { static int inuse = 0; /* Recursive calls detection. 
*/ /* Reflect MULTI state */ - if (server.lua_multi_emitted || (server.lua_caller->flags & CLIENT_MULTI)) { + if (g_pserver->lua_multi_emitted || (g_pserver->lua_caller->flags & CLIENT_MULTI)) { c->flags |= CLIENT_MULTI; } else { c->flags &= ~CLIENT_MULTI; @@ -472,7 +472,7 @@ int luaRedisGenericCommand(lua_State *lua, int raise_error) { /* Setup our fake client for command execution */ c->argv = argv; c->argc = argc; - c->puser = server.lua_caller->puser; + c->puser = g_pserver->lua_caller->puser; /* Process module hooks */ moduleCallCommandFilters(c); @@ -533,13 +533,13 @@ int luaRedisGenericCommand(lua_State *lua, int raise_error) { * of this script. */ if (cmd->flags & CMD_WRITE) { int deny_write_type = writeCommandsDeniedByDiskError(); - if (server.lua_random_dirty && !server.lua_replicate_commands) { + if (g_pserver->lua_random_dirty && !g_pserver->lua_replicate_commands) { luaPushError(lua, "Write commands not allowed after non deterministic commands. Call redis.replicate_commands() at the start of your script in order to switch to single commands replication mode."); goto cleanup; - } else if (listLength(server.masters) && server.repl_slave_ro && - !server.loading && - !(server.lua_caller->flags & CLIENT_MASTER)) + } else if (listLength(g_pserver->masters) && g_pserver->repl_slave_ro && + !g_pserver->loading && + !(g_pserver->lua_caller->flags & CLIENT_MASTER)) { luaPushError(lua, (char*)ptrFromObj(shared.roslaveerr)); goto cleanup; @@ -549,7 +549,7 @@ int luaRedisGenericCommand(lua_State *lua, int raise_error) { } else { sds aof_write_err = sdscatfmt(sdsempty(), "-MISCONF Errors writing to the AOF file: %s\r\n", - strerror(server.aof_last_write_errno)); + strerror(g_pserver->aof_last_write_errno)); luaPushError(lua, aof_write_err); sdsfree(aof_write_err); } @@ -561,10 +561,10 @@ int luaRedisGenericCommand(lua_State *lua, int raise_error) { * could enlarge the memory usage are not allowed, but only if this is the * first write in the context of this script, 
otherwise we can't stop * in the middle. */ - if (server.maxmemory && /* Maxmemory is actually enabled. */ - !server.loading && /* Don't care about mem if loading. */ - !listLength(server.masters) && /* Slave must execute the script. */ - server.lua_write_dirty == 0 && /* Script had no side effects so far. */ + if (g_pserver->maxmemory && /* Maxmemory is actually enabled. */ + !g_pserver->loading && /* Don't care about mem if loading. */ + !listLength(g_pserver->masters) && /* Slave must execute the script. */ + g_pserver->lua_write_dirty == 0 && /* Script had no side effects so far. */ (cmd->flags & CMD_DENYOOM)) { if (getMaxmemoryState(NULL,NULL,NULL,NULL) != C_OK) { @@ -573,20 +573,20 @@ int luaRedisGenericCommand(lua_State *lua, int raise_error) { } } - if (cmd->flags & CMD_RANDOM) server.lua_random_dirty = 1; - if (cmd->flags & CMD_WRITE) server.lua_write_dirty = 1; + if (cmd->flags & CMD_RANDOM) g_pserver->lua_random_dirty = 1; + if (cmd->flags & CMD_WRITE) g_pserver->lua_write_dirty = 1; /* If this is a Redis Cluster node, we need to make sure Lua is not * trying to access non-local keys, with the exception of commands * received from our master or when loading the AOF back in memory. */ - if (server.cluster_enabled && !server.loading && - !(server.lua_caller->flags & CLIENT_MASTER)) + if (g_pserver->cluster_enabled && !g_pserver->loading && + !(g_pserver->lua_caller->flags & CLIENT_MASTER)) { /* Duplicate relevant flags in the lua client. 
*/ c->flags &= ~(CLIENT_READONLY|CLIENT_ASKING); - c->flags |= server.lua_caller->flags & (CLIENT_READONLY|CLIENT_ASKING); + c->flags |= g_pserver->lua_caller->flags & (CLIENT_READONLY|CLIENT_ASKING); if (getNodeByQuery(c,c->cmd,c->argv,c->argc,NULL,NULL) != - server.cluster->myself) + g_pserver->cluster->myself) { luaPushError(lua, "Lua script attempted to access a non local key in a " @@ -598,22 +598,22 @@ int luaRedisGenericCommand(lua_State *lua, int raise_error) { /* If we are using single commands replication, we need to wrap what * we propagate into a MULTI/EXEC block, so that it will be atomic like * a Lua script in the context of AOF and slaves. */ - if (server.lua_replicate_commands && - !server.lua_multi_emitted && - !(server.lua_caller->flags & CLIENT_MULTI) && - server.lua_write_dirty && - server.lua_repl != PROPAGATE_NONE) + if (g_pserver->lua_replicate_commands && + !g_pserver->lua_multi_emitted && + !(g_pserver->lua_caller->flags & CLIENT_MULTI) && + g_pserver->lua_write_dirty && + g_pserver->lua_repl != PROPAGATE_NONE) { - execCommandPropagateMulti(server.lua_caller); - server.lua_multi_emitted = 1; + execCommandPropagateMulti(g_pserver->lua_caller); + g_pserver->lua_multi_emitted = 1; } /* Run the command */ - if (server.lua_replicate_commands) { + if (g_pserver->lua_replicate_commands) { /* Set flags according to redis.set_repl() settings. */ - if (server.lua_repl & PROPAGATE_AOF) + if (g_pserver->lua_repl & PROPAGATE_AOF) call_flags |= CMD_CALL_PROPAGATE_AOF; - if (server.lua_repl & PROPAGATE_REPL) + if (g_pserver->lua_repl & PROPAGATE_REPL) call_flags |= CMD_CALL_PROPAGATE_REPL; } call(c,call_flags); @@ -648,7 +648,7 @@ int luaRedisGenericCommand(lua_State *lua, int raise_error) { /* Sort the output array if needed, assuming it is a non-null multi bulk * reply as expected. 
*/ if ((cmd->flags & CMD_SORT_FOR_SCRIPT) && - (server.lua_replicate_commands == 0) && + (g_pserver->lua_replicate_commands == 0) && (reply[0] == '*' && reply[1] != '-')) { luaSortArray(lua); } @@ -764,10 +764,10 @@ int luaRedisStatusReplyCommand(lua_State *lua) { * already started to write, returns false and stick to whole scripts * replication, which is our default. */ int luaRedisReplicateCommandsCommand(lua_State *lua) { - if (server.lua_write_dirty) { + if (g_pserver->lua_write_dirty) { lua_pushboolean(lua,0); } else { - server.lua_replicate_commands = 1; + g_pserver->lua_replicate_commands = 1; /* When we switch to single commands replication, we can provide * different math.random() sequences at every call, which is what * the user normally expects. */ @@ -817,7 +817,7 @@ int luaRedisSetReplCommand(lua_State *lua) { int argc = lua_gettop(lua); int flags; - if (server.lua_replicate_commands == 0) { + if (g_pserver->lua_replicate_commands == 0) { lua_pushstring(lua, "You can set the replication behavior only after turning on single commands replication with redis.replicate_commands()."); return lua_error(lua); } else if (argc != 1) { @@ -830,7 +830,7 @@ int luaRedisSetReplCommand(lua_State *lua) { lua_pushstring(lua, "Invalid replication flags. Use REPL_AOF, REPL_REPLICA, REPL_ALL or REPL_NONE."); return lua_error(lua); } - server.lua_repl = flags; + g_pserver->lua_repl = flags; return 0; } @@ -966,9 +966,9 @@ void scriptingInit(int setup) { lua_State *lua = lua_open(); if (setup) { - server.lua_client = NULL; - server.lua_caller = NULL; - server.lua_timedout = 0; + g_pserver->lua_client = NULL; + g_pserver->lua_caller = NULL; + g_pserver->lua_timedout = 0; ldbInit(); } @@ -978,8 +978,8 @@ void scriptingInit(int setup) { /* Initialize a dictionary we use to map SHAs to scripts. * This is useful for replication, as we need to replicate EVALSHA * as EVAL, so we need to remember the associated script. 
*/ - server.lua_scripts = dictCreate(&shaScriptObjectDictType,NULL); - server.lua_scripts_mem = 0; + g_pserver->lua_scripts = dictCreate(&shaScriptObjectDictType,NULL); + g_pserver->lua_scripts_mem = 0; /* Register the redis commands table and fields */ lua_newtable(lua); @@ -1121,9 +1121,9 @@ void scriptingInit(int setup) { * inside the Lua interpreter. * Note: there is no need to create it again when this function is called * by scriptingReset(). */ - if (server.lua_client == NULL) { - server.lua_client = createClient(-1, IDX_EVENT_LOOP_MAIN); - server.lua_client->flags |= CLIENT_LUA; + if (g_pserver->lua_client == NULL) { + g_pserver->lua_client = createClient(-1, IDX_EVENT_LOOP_MAIN); + g_pserver->lua_client->flags |= CLIENT_LUA; } /* Lua beginners often don't use "local", this is likely to introduce @@ -1131,15 +1131,15 @@ void scriptingInit(int setup) { * to global variables. */ scriptingEnableGlobalsProtection(lua); - server.lua = lua; + g_pserver->lua = lua; } /* Release resources related to Lua scripting. * This function is used in order to reset the scripting environment. */ void scriptingRelease(void) { - dictRelease(server.lua_scripts); - server.lua_scripts_mem = 0; - lua_close(server.lua); + dictRelease(g_pserver->lua_scripts); + g_pserver->lua_scripts_mem = 0; + lua_close(g_pserver->lua); } void scriptingReset(void) { @@ -1233,7 +1233,7 @@ sds luaCreateFunction(client *c, lua_State *lua, robj *body) { sha1hex(funcname+2,(char*)ptrFromObj(body),sdslen((sds)ptrFromObj(body))); sds sha = sdsnewlen(funcname+2,40); - if ((de = dictFind(server.lua_scripts,sha)) != NULL) { + if ((de = dictFind(g_pserver->lua_scripts,sha)) != NULL) { sdsfree(sha); return (sds)dictGetKey(de); } @@ -1271,33 +1271,33 @@ sds luaCreateFunction(client *c, lua_State *lua, robj *body) { /* We also save a SHA1 -> Original script map in a dictionary * so that we can replicate / write in the AOF all the * EVALSHA commands as EVAL using the original script. 
*/ - int retval = dictAdd(server.lua_scripts,sha,body); - serverAssertWithInfo(c ? c : server.lua_client,NULL,retval == DICT_OK); - server.lua_scripts_mem += sdsZmallocSize(sha) + getStringObjectSdsUsedMemory(body); + int retval = dictAdd(g_pserver->lua_scripts,sha,body); + serverAssertWithInfo(c ? c : g_pserver->lua_client,NULL,retval == DICT_OK); + g_pserver->lua_scripts_mem += sdsZmallocSize(sha) + getStringObjectSdsUsedMemory(body); incrRefCount(body); return sha; } /* This is the Lua script "count" hook that we use to detect scripts timeout. */ void luaMaskCountHook(lua_State *lua, lua_Debug *ar) { - long long elapsed = mstime() - server.lua_time_start; + long long elapsed = mstime() - g_pserver->lua_time_start; UNUSED(ar); UNUSED(lua); /* Set the timeout condition if not already set and the maximum * execution time was reached. */ - if (elapsed >= server.lua_time_limit && server.lua_timedout == 0) { + if (elapsed >= g_pserver->lua_time_limit && g_pserver->lua_timedout == 0) { serverLog(LL_WARNING,"Lua slow script detected: still in execution after %lld milliseconds. You can try killing the script using the SCRIPT KILL command.",elapsed); - server.lua_timedout = 1; + g_pserver->lua_timedout = 1; /* Once the script timeouts we reenter the event loop to permit others * to call SCRIPT KILL or SHUTDOWN NOSAVE if needed. For this reason * we need to mask the client executing the script from the event loop. * If we don't do that the client may disconnect and could no longer be * here when the EVAL command will return. 
*/ - protectClient(server.lua_caller); + protectClient(g_pserver->lua_caller); } - if (server.lua_timedout) processEventsWhileBlocked(serverTL - server.rgthreadvar); - if (server.lua_kill) { + if (g_pserver->lua_timedout) processEventsWhileBlocked(serverTL - g_pserver->rgthreadvar); + if (g_pserver->lua_kill) { serverLog(LL_WARNING,"Lua script killed by user with SCRIPT KILL."); lua_pushstring(lua,"Script killed by user with SCRIPT KILL..."); lua_error(lua); @@ -1305,10 +1305,10 @@ void luaMaskCountHook(lua_State *lua, lua_Debug *ar) { } void evalGenericCommand(client *c, int evalsha) { - lua_State *lua = server.lua; + lua_State *lua = g_pserver->lua; char funcname[43]; long long numkeys; - long long initial_server_dirty = server.dirty; + long long initial_server_dirty = g_pserver->dirty; int delhook = 0, err; /* When we replicate whole scripts, we want the same PRNG sequence at @@ -1323,11 +1323,11 @@ void evalGenericCommand(client *c, int evalsha) { * * Thanks to this flag we'll raise an error every time a write command * is called after a random command was used. 
*/ - server.lua_random_dirty = 0; - server.lua_write_dirty = 0; - server.lua_replicate_commands = server.lua_always_replicate_commands; - server.lua_multi_emitted = 0; - server.lua_repl = PROPAGATE_AOF|PROPAGATE_REPL; + g_pserver->lua_random_dirty = 0; + g_pserver->lua_write_dirty = 0; + g_pserver->lua_replicate_commands = g_pserver->lua_always_replicate_commands; + g_pserver->lua_multi_emitted = 0; + g_pserver->lua_repl = PROPAGATE_AOF|PROPAGATE_REPL; /* Get the number of arguments that are keys */ if (getLongLongFromObjectOrReply(c,c->argv[2],&numkeys,NULL) != C_OK) @@ -1393,7 +1393,7 @@ void evalGenericCommand(client *c, int evalsha) { luaSetGlobalArray(lua,"ARGV",c->argv+3+numkeys,c->argc-3-numkeys); /* Select the right DB in the context of the Lua client */ - selectDb(server.lua_client,c->db->id); + selectDb(g_pserver->lua_client,c->db->id); /* Set a hook in order to be able to stop the script execution if it * is running for too much time. @@ -1402,14 +1402,14 @@ void evalGenericCommand(client *c, int evalsha) { * * If we are debugging, we set instead a "line" hook so that the * debugger is call-back at every line executed by the script. */ - server.lua_caller = c; - server.lua_time_start = mstime(); - server.lua_kill = 0; - if (server.lua_time_limit > 0 && ldb.active == 0) { + g_pserver->lua_caller = c; + g_pserver->lua_time_start = mstime(); + g_pserver->lua_kill = 0; + if (g_pserver->lua_time_limit > 0 && ldb.active == 0) { lua_sethook(lua,luaMaskCountHook,LUA_MASKCOUNT,100000); delhook = 1; } else if (ldb.active) { - lua_sethook(server.lua,luaLdbLineHook,LUA_MASKLINE|LUA_MASKCOUNT,100000); + lua_sethook(g_pserver->lua,luaLdbLineHook,LUA_MASKLINE|LUA_MASKCOUNT,100000); delhook = 1; } @@ -1420,14 +1420,14 @@ void evalGenericCommand(client *c, int evalsha) { /* Perform some cleanup that we need to do both on error and success. 
*/ if (delhook) lua_sethook(lua,NULL,0,0); /* Disable hook */ - if (server.lua_timedout) { - server.lua_timedout = 0; + if (g_pserver->lua_timedout) { + g_pserver->lua_timedout = 0; /* Restore the client that was protected when the script timeout * was detected. */ unprotectClient(c); listIter li; listNode *ln; - listRewind(server.masters, &li); + listRewind(g_pserver->masters, &li); while ((ln = listNext(&li))) { struct redisMaster *mi = (struct redisMaster*)listNodeValue(ln); @@ -1435,7 +1435,7 @@ void evalGenericCommand(client *c, int evalsha) { queueClientForReprocessing(mi->master); } } - server.lua_caller = NULL; + g_pserver->lua_caller = NULL; /* Call the Lua garbage collector from time to time to avoid a * full cycle performed by Lua, which adds too latency. @@ -1467,12 +1467,12 @@ void evalGenericCommand(client *c, int evalsha) { /* If we are using single commands replication, emit EXEC if there * was at least a write. */ - if (server.lua_replicate_commands) { + if (g_pserver->lua_replicate_commands) { preventCommandPropagation(c); - if (server.lua_multi_emitted) { + if (g_pserver->lua_multi_emitted) { robj *propargv[1]; propargv[0] = createStringObject("EXEC",4); - alsoPropagate(server.execCommand,c->db->id,propargv,1, + alsoPropagate(cserver.execCommand,c->db->id,propargv,1, PROPAGATE_AOF|PROPAGATE_REPL); decrRefCount(propargv[0]); } @@ -1488,12 +1488,12 @@ void evalGenericCommand(client *c, int evalsha) { * For repliation, everytime a new slave attaches to the master, we need to * flush our cache of scripts that can be replicated as EVALSHA, while * for AOF we need to do so every time we rewrite the AOF file. */ - if (evalsha && !server.lua_replicate_commands) { + if (evalsha && !g_pserver->lua_replicate_commands) { if (!replicationScriptCacheExists((sds)ptrFromObj(c->argv[1]))) { /* This script is not in our script cache, replicate it as * EVAL, then add it into the script cache, as from now on * slaves and AOF know about it. 
*/ - robj *script = (robj*)dictFetchValue(server.lua_scripts,ptrFromObj(c->argv[1])); + robj *script = (robj*)dictFetchValue(g_pserver->lua_scripts,ptrFromObj(c->argv[1])); replicationScriptCacheAdd((sds)ptrFromObj(c->argv[1])); serverAssertWithInfo(c,NULL,script != NULL); @@ -1502,7 +1502,7 @@ void evalGenericCommand(client *c, int evalsha) { * just to replicate it as SCRIPT LOAD, otherwise we risk running * an aborted script on slaves (that may then produce results there) * or just running a CPU costly read-only script on the slaves. */ - if (server.dirty == initial_server_dirty) { + if (g_pserver->dirty == initial_server_dirty) { rewriteClientCommandVector(c,3, resetRefCount(createStringObject("SCRIPT",6)), resetRefCount(createStringObject("LOAD",4)), @@ -1556,31 +1556,31 @@ NULL scriptingReset(); addReply(c,shared.ok); replicationScriptCacheFlush(); - server.dirty++; /* Propagating this command is a good idea. */ + g_pserver->dirty++; /* Propagating this command is a good idea. */ } else if (c->argc >= 2 && !strcasecmp((const char*)ptrFromObj(c->argv[1]),"exists")) { int j; addReplyArrayLen(c, c->argc-2); for (j = 2; j < c->argc; j++) { - if (dictFind(server.lua_scripts,ptrFromObj(c->argv[j]))) + if (dictFind(g_pserver->lua_scripts,ptrFromObj(c->argv[j]))) addReply(c,shared.cone); else addReply(c,shared.czero); } } else if (c->argc == 3 && !strcasecmp((const char*)ptrFromObj(c->argv[1]),"load")) { - sds sha = luaCreateFunction(c,server.lua,c->argv[2]); + sds sha = luaCreateFunction(c,g_pserver->lua,c->argv[2]); if (sha == NULL) return; /* The error was sent by luaCreateFunction(). 
*/ addReplyBulkCBuffer(c,sha,40); forceCommandPropagation(c,PROPAGATE_REPL|PROPAGATE_AOF); } else if (c->argc == 2 && !strcasecmp((const char*)ptrFromObj(c->argv[1]),"kill")) { - if (server.lua_caller == NULL) { + if (g_pserver->lua_caller == NULL) { addReplySds(c,sdsnew("-NOTBUSY No scripts in execution right now.\r\n")); - } else if (server.lua_caller->flags & CLIENT_MASTER) { + } else if (g_pserver->lua_caller->flags & CLIENT_MASTER) { addReplySds(c,sdsnew("-UNKILLABLE The busy script was sent by a master instance in the context of replication and cannot be killed.\r\n")); - } else if (server.lua_write_dirty) { + } else if (g_pserver->lua_write_dirty) { addReplySds(c,sdsnew("-UNKILLABLE Sorry the script already executed write commands against the dataset. You can either wait the script termination or kill the server in a hard way using the SHUTDOWN NOSAVE command.\r\n")); } else { - server.lua_kill = 1; + g_pserver->lua_kill = 1; addReply(c,shared.ok); } } else if (c->argc == 3 && !strcasecmp((const char*)ptrFromObj(c->argv[1]),"debug")) { @@ -2289,7 +2289,7 @@ void ldbEval(lua_State *lua, sds *argv, int argc) { * implementation, with ldb.step enabled, so as a side effect the Redis command * and its reply are logged. */ void ldbRedis(lua_State *lua, sds *argv, int argc) { - int j, saved_rc = server.lua_replicate_commands; + int j, saved_rc = g_pserver->lua_replicate_commands; lua_getglobal(lua,"redis"); lua_pushstring(lua,"call"); @@ -2297,10 +2297,10 @@ void ldbRedis(lua_State *lua, sds *argv, int argc) { for (j = 1; j < argc; j++) lua_pushlstring(lua,argv[j],sdslen(argv[j])); ldb.step = 1; /* Force redis.call() to log. */ - server.lua_replicate_commands = 1; + g_pserver->lua_replicate_commands = 1; lua_pcall(lua,argc-1,1,0); /* Stack: redis, result */ ldb.step = 0; /* Disable logging. */ - server.lua_replicate_commands = saved_rc; + g_pserver->lua_replicate_commands = saved_rc; lua_pop(lua,2); /* Discard the result and clean the stack. 
*/ } @@ -2474,9 +2474,9 @@ void luaLdbLineHook(lua_State *lua, lua_Debug *ar) { /* Check if a timeout occurred. */ if (ar->event == LUA_HOOKCOUNT && ldb.step == 0 && bp == 0) { - mstime_t elapsed = mstime() - server.lua_time_start; - mstime_t timelimit = server.lua_time_limit ? - server.lua_time_limit : 5000; + mstime_t elapsed = mstime() - g_pserver->lua_time_start; + mstime_t timelimit = g_pserver->lua_time_limit ? + g_pserver->lua_time_limit : 5000; if (elapsed >= timelimit) { timeout = 1; ldb.step = 1; @@ -2504,7 +2504,7 @@ void luaLdbLineHook(lua_State *lua, lua_Debug *ar) { lua_pushstring(lua, "timeout during Lua debugging with client closing connection"); lua_error(lua); } - server.lua_time_start = mstime(); + g_pserver->lua_time_start = mstime(); } } diff --git a/src/sentinel.cpp b/src/sentinel.cpp index 2d0c2a481..521a2c62c 100644 --- a/src/sentinel.cpp +++ b/src/sentinel.cpp @@ -460,8 +460,8 @@ struct redisCommand sentinelcmds[] = { /* This function overwrites a few normal Redis config default with Sentinel * specific defaults. */ void initSentinelConfig(void) { - server.port = REDIS_SENTINEL_PORT; - server.protected_mode = 0; /* Sentinel must be exposed. */ + g_pserver->port = REDIS_SENTINEL_PORT; + g_pserver->protected_mode = 0; /* Sentinel must be exposed. */ } /* Perform the Sentinel mode initialization. */ @@ -470,12 +470,12 @@ void initSentinel(void) { /* Remove usual Redis commands from the command table, then just add * the SENTINEL command. 
*/ - dictEmpty(server.commands,NULL); + dictEmpty(g_pserver->commands,NULL); for (j = 0; j < sizeof(sentinelcmds)/sizeof(sentinelcmds[0]); j++) { int retval; struct redisCommand *cmd = sentinelcmds+j; - retval = dictAdd(server.commands, sdsnew(cmd->name), cmd); + retval = dictAdd(g_pserver->commands, sdsnew(cmd->name), cmd); serverAssert(retval == DICT_OK); /* Translate the command string flags description into an actual @@ -504,14 +504,14 @@ void initSentinel(void) { void sentinelIsRunning(void) { int j; - if (server.configfile == NULL) { + if (cserver.configfile == NULL) { serverLog(LL_WARNING, "Sentinel started without a config file. Exiting..."); exit(1); - } else if (access(server.configfile,W_OK) == -1) { + } else if (access(cserver.configfile,W_OK) == -1) { serverLog(LL_WARNING, "Sentinel config file %s is not writable: %s. Exiting...", - server.configfile,strerror(errno)); + cserver.configfile,strerror(errno)); exit(1); } @@ -641,7 +641,7 @@ void sentinelEvent(int level, const char *type, sentinelRedisInstance *ri, } /* Log the message if the log level allows it to be logged. */ - if (level >= server.verbosity) + if (level >= cserver.verbosity) serverLog(level,"%s %s",type,msg); /* Publish the message via Pub/Sub if it's not a debugging one. */ @@ -1926,15 +1926,15 @@ void rewriteConfigSentinelOption(struct rewriteConfigState *state) { * On failure the function logs a warning on the Redis log. 
*/ void sentinelFlushConfig(void) { int fd = -1; - int saved_hz = server.hz; + int saved_hz = g_pserver->hz; int rewrite_status; - server.hz = CONFIG_DEFAULT_HZ; - rewrite_status = rewriteConfig(server.configfile); - server.hz = saved_hz; + g_pserver->hz = CONFIG_DEFAULT_HZ; + rewrite_status = rewriteConfig(cserver.configfile); + g_pserver->hz = saved_hz; if (rewrite_status == -1) goto werr; - if ((fd = open(server.configfile,O_RDONLY)) == -1) goto werr; + if ((fd = open(cserver.configfile,O_RDONLY)) == -1) goto werr; if (fsync(fd) == -1) goto werr; if (close(fd) == EOF) goto werr; return; @@ -2018,7 +2018,7 @@ void sentinelReconnectInstance(sentinelRedisInstance *ri) { link->pending_commands = 0; link->cc_conn_time = mstime(); link->cc->data = link; - redisAeAttach(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el,link->cc); + redisAeAttach(g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].el,link->cc); redisAsyncSetConnectCallback(link->cc, sentinelLinkEstablishedCallback); redisAsyncSetDisconnectCallback(link->cc, @@ -2042,7 +2042,7 @@ void sentinelReconnectInstance(sentinelRedisInstance *ri) { link->pc_conn_time = mstime(); link->pc->data = link; - redisAeAttach(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el,link->pc); + redisAeAttach(g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].el,link->pc); redisAsyncSetConnectCallback(link->pc, sentinelLinkEstablishedCallback); redisAsyncSetDisconnectCallback(link->pc, @@ -2585,7 +2585,7 @@ int sentinelSendHello(sentinelRedisInstance *ri) { announce_ip = ip; } announce_port = sentinel.announce_port ? - sentinel.announce_port : server.port; + sentinel.announce_port : g_pserver->port; /* Format and send the Hello message. */ snprintf(payload,sizeof(payload), @@ -4521,6 +4521,6 @@ void sentinelTimer(void) { * exactly continue to stay synchronized asking to be voted at the * same time again and again (resulting in nobody likely winning the * election because of split brain voting). 
*/ - server.hz = CONFIG_DEFAULT_HZ + rand() % CONFIG_DEFAULT_HZ; + g_pserver->hz = CONFIG_DEFAULT_HZ + rand() % CONFIG_DEFAULT_HZ; } diff --git a/src/server.cpp b/src/server.cpp index de128018d..7a308fea3 100644 --- a/src/server.cpp +++ b/src/server.cpp @@ -73,7 +73,11 @@ double R_Zero, R_PosInf, R_NegInf, R_Nan; /*================================= Globals ================================= */ /* Global vars */ +namespace GlobalHidden { struct redisServer server; /* Server global state */ +} +redisServer *g_pserver = &GlobalHidden::server; +struct redisServerConst cserver; __thread struct redisServerThreadVars *serverTL = NULL; // thread local server vars volatile unsigned long lru_clock; /* Server global current LRU time. */ @@ -1001,7 +1005,7 @@ struct redisCommand redisCommandTable[] = { "admin no-script ok-loading ok-stale", 0,NULL,0,0,0,0,0,0}, - {"rreplay",replicaReplayCommand,3, + {"rreplay",replicaReplayCommand,-3, "read-only fast noprop", 0,NULL,0,0,0,0,0,0} }; @@ -1020,12 +1024,12 @@ void serverLogRaw(int level, const char *msg) { FILE *fp; char buf[64]; int rawmode = (level & LL_RAW); - int log_to_stdout = server.logfile[0] == '\0'; + int log_to_stdout = g_pserver->logfile[0] == '\0'; level &= 0xff; /* clear flags */ - if (level < server.verbosity) return; + if (level < cserver.verbosity) return; - fp = log_to_stdout ? stdout : fopen(server.logfile,"a"); + fp = log_to_stdout ? stdout : fopen(g_pserver->logfile,"a"); if (!fp) return; if (rawmode) { @@ -1038,15 +1042,15 @@ void serverLogRaw(int level, const char *msg) { gettimeofday(&tv,NULL); struct tm tm; - nolocks_localtime(&tm,tv.tv_sec,server.timezone,server.daylight_active); + nolocks_localtime(&tm,tv.tv_sec,g_pserver->timezone,g_pserver->daylight_active); off = strftime(buf,sizeof(buf),"%d %b %Y %H:%M:%S.",&tm); snprintf(buf+off,sizeof(buf)-off,"%03d",(int)tv.tv_usec/1000); - if (server.sentinel_mode) { + if (g_pserver->sentinel_mode) { role_char = 'X'; /* Sentinel. 
*/ - } else if (pid != server.pid) { + } else if (pid != cserver.pid) { role_char = 'C'; /* RDB / AOF writing child. */ } else { - role_char = (listLength(server.masters) ? 'S':'M'); /* Slave or Master. */ + role_char = (listLength(g_pserver->masters) ? 'S':'M'); /* Slave or Master. */ } fprintf(fp,"%d:%c %s %c %s\n", (int)getpid(),role_char, buf,c[level],msg); @@ -1054,7 +1058,7 @@ void serverLogRaw(int level, const char *msg) { fflush(fp); if (!log_to_stdout) fclose(fp); - if (server.syslog_enabled) syslog(syslogLevelMap[level], "%s", msg); + if (g_pserver->syslog_enabled) syslog(syslogLevelMap[level], "%s", msg); } /* Like serverLogRaw() but with printf-alike support. This is the function that @@ -1064,7 +1068,7 @@ void serverLog(int level, const char *fmt, ...) { va_list ap; char msg[LOG_MAX_LEN]; - if ((level&0xff) < server.verbosity) return; + if ((level&0xff) < cserver.verbosity) return; va_start(ap, fmt); vsnprintf(msg, sizeof(msg), fmt, ap); @@ -1081,13 +1085,13 @@ void serverLog(int level, const char *fmt, ...) { * where we need printf-alike features are served by serverLog(). */ void serverLogFromHandler(int level, const char *msg) { int fd; - int log_to_stdout = server.logfile[0] == '\0'; + int log_to_stdout = g_pserver->logfile[0] == '\0'; char buf[64]; - if ((level&0xff) < server.verbosity || (log_to_stdout && server.daemonize)) + if ((level&0xff) < cserver.verbosity || (log_to_stdout && cserver.daemonize)) return; fd = log_to_stdout ? STDOUT_FILENO : - open(server.logfile, O_APPEND|O_CREAT|O_WRONLY, 0644); + open(g_pserver->logfile, O_APPEND|O_CREAT|O_WRONLY, 0644); if (fd == -1) return; ll2string(buf,sizeof(buf),getpid()); if (write(fd,buf,strlen(buf)) == -1) goto err; @@ -1298,7 +1302,7 @@ dictType dbDictType = { dictObjectDestructor /* val destructor */ }; -/* server.lua_scripts sha (as sds string) -> scripts (as robj) cache. */ +/* g_pserver->lua_scripts sha (as sds string) -> scripts (as robj) cache. 
*/ dictType shaScriptObjectDictType = { dictSdsCaseHash, /* hash function */ NULL, /* key dup */ @@ -1395,7 +1399,7 @@ dictType migrateCacheDictType = { NULL /* val destructor */ }; -/* Replication cached script dict (server.repl_scriptcache_dict). +/* Replication cached script dict (g_pserver->repl_scriptcache_dict). * Keys are sds SHA1 strings, while values are not used at all in the current * implementation. */ dictType replScriptCacheDictType = { @@ -1419,10 +1423,10 @@ int htNeedsResize(dict *dict) { /* If the percentage of used slots in the HT reaches HASHTABLE_MIN_FILL * we resize the hash table to save memory */ void tryResizeHashTables(int dbid) { - if (htNeedsResize(server.db[dbid].pdict)) - dictResize(server.db[dbid].pdict); - if (htNeedsResize(server.db[dbid].expires)) - dictResize(server.db[dbid].expires); + if (htNeedsResize(g_pserver->db[dbid].pdict)) + dictResize(g_pserver->db[dbid].pdict); + if (htNeedsResize(g_pserver->db[dbid].expires)) + dictResize(g_pserver->db[dbid].expires); } /* Our hash table implementation performs rehashing incrementally while @@ -1434,13 +1438,13 @@ void tryResizeHashTables(int dbid) { * is returned. */ int incrementallyRehash(int dbid) { /* Keys dictionary */ - if (dictIsRehashing(server.db[dbid].pdict)) { - dictRehashMilliseconds(server.db[dbid].pdict,1); + if (dictIsRehashing(g_pserver->db[dbid].pdict)) { + dictRehashMilliseconds(g_pserver->db[dbid].pdict,1); return 1; /* already used our millisecond for this loop... */ } /* Expires */ - if (dictIsRehashing(server.db[dbid].expires)) { - dictRehashMilliseconds(server.db[dbid].expires,1); + if (dictIsRehashing(g_pserver->db[dbid].expires)) { + dictRehashMilliseconds(g_pserver->db[dbid].expires,1); return 1; /* already used our millisecond for this loop... */ } return 0; @@ -1453,7 +1457,7 @@ int incrementallyRehash(int dbid) { * for dict.c to resize the hash tables accordingly to the fact we have o not * running childs. 
*/ void updateDictResizePolicy(void) { - if (server.rdb_child_pid == -1 && server.aof_child_pid == -1) + if (g_pserver->rdb_child_pid == -1 && g_pserver->aof_child_pid == -1) dictEnableResize(); else dictDisableResize(); @@ -1463,19 +1467,19 @@ void updateDictResizePolicy(void) { /* Add a sample to the operations per second array of samples. */ void trackInstantaneousMetric(int metric, long long current_reading) { - long long t = mstime() - server.inst_metric[metric].last_sample_time; + long long t = mstime() - g_pserver->inst_metric[metric].last_sample_time; long long ops = current_reading - - server.inst_metric[metric].last_sample_count; + g_pserver->inst_metric[metric].last_sample_count; long long ops_sec; ops_sec = t > 0 ? (ops*1000/t) : 0; - server.inst_metric[metric].samples[server.inst_metric[metric].idx] = + g_pserver->inst_metric[metric].samples[g_pserver->inst_metric[metric].idx] = ops_sec; - server.inst_metric[metric].idx++; - server.inst_metric[metric].idx %= STATS_METRIC_SAMPLES; - server.inst_metric[metric].last_sample_time = mstime(); - server.inst_metric[metric].last_sample_count = current_reading; + g_pserver->inst_metric[metric].idx++; + g_pserver->inst_metric[metric].idx %= STATS_METRIC_SAMPLES; + g_pserver->inst_metric[metric].last_sample_time = mstime(); + g_pserver->inst_metric[metric].last_sample_count = current_reading; } /* Return the mean of all the samples. 
*/ @@ -1484,7 +1488,7 @@ long long getInstantaneousMetric(int metric) { long long sum = 0; for (j = 0; j < STATS_METRIC_SAMPLES; j++) - sum += server.inst_metric[metric].samples[j]; + sum += g_pserver->inst_metric[metric].samples[j]; return sum / STATS_METRIC_SAMPLES; } @@ -1495,12 +1499,12 @@ long long getInstantaneousMetric(int metric) { int clientsCronHandleTimeout(client *c, mstime_t now_ms) { time_t now = now_ms/1000; - if (server.maxidletime && + if (cserver.maxidletime && !(c->flags & CLIENT_SLAVE) && /* no timeout for slaves */ !(c->flags & CLIENT_MASTER) && /* no timeout for masters */ !(c->flags & CLIENT_BLOCKED) && /* no timeout for BLPOP */ !(c->flags & CLIENT_PUBSUB) && /* no timeout for Pub/Sub clients */ - (now - c->lastinteraction > server.maxidletime)) + (now - c->lastinteraction > cserver.maxidletime)) { serverLog(LL_VERBOSE,"Closing idle client"); freeClient(c); @@ -1508,15 +1512,15 @@ int clientsCronHandleTimeout(client *c, mstime_t now_ms) { } else if (c->flags & CLIENT_BLOCKED) { /* Blocked OPS timeout is handled with milliseconds resolution. * However note that the actual resolution is limited by - * server.hz. */ + * g_pserver->hz. */ if (c->bpop.timeout != 0 && c->bpop.timeout < now_ms) { /* Handle blocking operation specific timeout. */ replyToBlockedClientTimedOut(c); unblockClient(c); - } else if (server.cluster_enabled) { + } else if (g_pserver->cluster_enabled) { /* Cluster: handle unblock & redirect of clients blocked - * into keys no longer served by this server. 
*/ + * into keys no longer served by this g_pserver-> */ if (clusterRedirectBlockedClientIfNeeded(c)) unblockClient(c); } @@ -1531,7 +1535,7 @@ int clientsCronHandleTimeout(client *c, mstime_t now_ms) { int clientsCronResizeQueryBuffer(client *c) { AssertCorrectThread(c); size_t querybuf_size = sdsAllocSize(c->querybuf); - time_t idletime = server.unixtime - c->lastinteraction; + time_t idletime = g_pserver->unixtime - c->lastinteraction; /* There are two conditions to resize the query buffer: * 1) Query buffer is > BIG_ARG and too big for latest peak. @@ -1588,7 +1592,7 @@ size_t ClientsPeakMemOutput[CLIENTS_PEAK_MEM_USAGE_SLOTS]; int clientsCronTrackExpansiveClients(client *c) { size_t in_usage = sdsAllocSize(c->querybuf); size_t out_usage = getClientOutputBufferMemoryUsage(c); - int i = server.unixtime % CLIENTS_PEAK_MEM_USAGE_SLOTS; + int i = g_pserver->unixtime % CLIENTS_PEAK_MEM_USAGE_SLOTS; int zeroidx = (i+1) % CLIENTS_PEAK_MEM_USAGE_SLOTS; /* Always zero the next sample, so that when we switch to that second, we'll @@ -1631,22 +1635,22 @@ void getExpansiveClientsInfo(size_t *in_usage, size_t *out_usage) { * * The function makes some effort to process all the clients every second, even * if this cannot be strictly guaranteed, since serverCron() may be called with - * an actual frequency lower than server.hz in case of latency events like slow + * an actual frequency lower than g_pserver->hz in case of latency events like slow * commands. * * It is very important for this function, and the functions it calls, to be * very fast: sometimes Redis has tens of hundreds of connected clients, and the - * default server.hz value is 10, so sometimes here we need to process thousands + * default g_pserver->hz value is 10, so sometimes here we need to process thousands * of clients per second, turning this function into a source of latency. 
*/ #define CLIENTS_CRON_MIN_ITERATIONS 5 void clientsCron(int iel) { - /* Try to process at least numclients/server.hz of clients + /* Try to process at least numclients/g_pserver->hz of clients * per call. Since normally (if there are no big latency events) this - * function is called server.hz times per second, in the average case we + * function is called g_pserver->hz times per second, in the average case we * process all the clients in 1 second. */ - int numclients = listLength(server.clients); - int iterations = numclients/server.hz; + int numclients = listLength(g_pserver->clients); + int iterations = numclients/g_pserver->hz; mstime_t now = mstime(); /* Process at least a few clients while we are at it, even if we need @@ -1656,15 +1660,15 @@ void clientsCron(int iel) { iterations = (numclients < CLIENTS_CRON_MIN_ITERATIONS) ? numclients : CLIENTS_CRON_MIN_ITERATIONS; - while(listLength(server.clients) && iterations--) { + while(listLength(g_pserver->clients) && iterations--) { client *c; listNode *head; /* Rotate the list, take the current head, process. * This way if the client must be removed from the list it's the * first element and we don't incur into O(N) computation. */ - listRotate(server.clients); - head = (listNode*)listFirst(server.clients); + listRotate(g_pserver->clients); + head = (listNode*)listFirst(g_pserver->clients); c = (client*)listNodeValue(head); if (c->iel == iel) { @@ -1687,20 +1691,20 @@ void clientsCron(int iel) { void databasesCron(void) { /* Expire keys by random sampling. Not required for slaves * as master will synthesize DELs for us. */ - if (server.active_expire_enabled && listLength(server.masters) == 0) { + if (g_pserver->active_expire_enabled && listLength(g_pserver->masters) == 0) { activeExpireCycle(ACTIVE_EXPIRE_CYCLE_SLOW); - } else if (listLength(server.masters)) { + } else if (listLength(g_pserver->masters)) { expireSlaveKeys(); } /* Defrag keys gradually. 
*/ - if (server.active_defrag_enabled) + if (cserver.active_defrag_enabled) activeDefragCycle(); /* Perform hash tables rehashing if needed, but only if there are no * other processes saving the DB on disk. Otherwise rehashing is bad * as will cause a lot of copy-on-write of memory pages. */ - if (server.rdb_child_pid == -1 && server.aof_child_pid == -1) { + if (g_pserver->rdb_child_pid == -1 && g_pserver->aof_child_pid == -1) { /* We use global counters so if we stop the computation at a given * DB we'll be able to start from the successive in the next * cron loop iteration. */ @@ -1710,16 +1714,16 @@ void databasesCron(void) { int j; /* Don't test more DBs than we have. */ - if (dbs_per_call > server.dbnum) dbs_per_call = server.dbnum; + if (dbs_per_call > cserver.dbnum) dbs_per_call = cserver.dbnum; /* Resize */ for (j = 0; j < dbs_per_call; j++) { - tryResizeHashTables(resize_db % server.dbnum); + tryResizeHashTables(resize_db % cserver.dbnum); resize_db++; } /* Rehash */ - if (server.activerehashing) { + if (g_pserver->activerehashing) { for (j = 0; j < dbs_per_call; j++) { int work_done = incrementallyRehash(rehash_db); if (work_done) { @@ -1729,7 +1733,7 @@ void databasesCron(void) { } else { /* If this db didn't need rehash, we'll try the next one. */ rehash_db++; - rehash_db %= server.dbnum; + rehash_db %= cserver.dbnum; } } } @@ -1742,19 +1746,19 @@ void databasesCron(void) { * a lot faster than calling time(NULL) */ void updateCachedTime(void) { time_t unixtime = time(NULL); - atomicSet(server.unixtime,unixtime); - server.mstime = mstime(); + atomicSet(g_pserver->unixtime,unixtime); + g_pserver->mstime = mstime(); /* To get information about daylight saving time, we need to call localtime_r * and cache the result. However calling localtime_r in this context is safe * since we will never fork() while here, in the main thread. The logging * function will call a thread safe version of localtime that has no locks. 
*/ struct tm tm; - localtime_r(&server.unixtime,&tm); - server.daylight_active = tm.tm_isdst; + localtime_r(&g_pserver->unixtime,&tm); + g_pserver->daylight_active = tm.tm_isdst; } -/* This is our timer interrupt, called server.hz times per second. +/* This is our timer interrupt, called g_pserver->hz times per second. * Here is where we do a number of things that need to be done asynchronously. * For instance: * @@ -1768,7 +1772,7 @@ void updateCachedTime(void) { * - Replication reconnection. * - Many more... * - * Everything directly called here will be called server.hz times per second, + * Everything directly called here will be called g_pserver->hz times per second, * so in order to throttle execution of things we want to do less frequently * a macro is used: run_with_period(milliseconds) { .... } */ @@ -1783,32 +1787,32 @@ int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) { /* Software watchdog: deliver the SIGALRM that will reach the signal * handler if we don't return here fast enough. */ - if (server.watchdog_period) watchdogScheduleSignal(server.watchdog_period); + if (g_pserver->watchdog_period) watchdogScheduleSignal(g_pserver->watchdog_period); /* Update the time cache. */ updateCachedTime(); - server.hz = server.config_hz; - /* Adapt the server.hz value to the number of configured clients. If we have + g_pserver->hz = g_pserver->config_hz; + /* Adapt the g_pserver->hz value to the number of configured clients. If we have * many clients, we want to call serverCron() with an higher frequency. 
*/ - if (server.dynamic_hz) { - while (listLength(server.clients) / server.hz > + if (g_pserver->dynamic_hz) { + while (listLength(g_pserver->clients) / g_pserver->hz > MAX_CLIENTS_PER_CLOCK_TICK) { - server.hz *= 2; - if (server.hz > CONFIG_MAX_HZ) { - server.hz = CONFIG_MAX_HZ; + g_pserver->hz *= 2; + if (g_pserver->hz > CONFIG_MAX_HZ) { + g_pserver->hz = CONFIG_MAX_HZ; break; } } } run_with_period(100) { - trackInstantaneousMetric(STATS_METRIC_COMMAND,server.stat_numcommands); + trackInstantaneousMetric(STATS_METRIC_COMMAND,g_pserver->stat_numcommands); trackInstantaneousMetric(STATS_METRIC_NET_INPUT, - server.stat_net_input_bytes); + g_pserver->stat_net_input_bytes); trackInstantaneousMetric(STATS_METRIC_NET_OUTPUT, - server.stat_net_output_bytes); + g_pserver->stat_net_output_bytes); } /* We have just LRU_BITS bits per object for LRU information. @@ -1823,70 +1827,70 @@ int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) { * Note that you can change the resolution altering the * LRU_CLOCK_RESOLUTION define. */ unsigned long lruclock = getLRUClock(); - atomicSet(server.lruclock,lruclock); + atomicSet(g_pserver->lruclock,lruclock); /* Record the max memory used since the server was started. */ - if (zmalloc_used_memory() > server.stat_peak_memory) - server.stat_peak_memory = zmalloc_used_memory(); + if (zmalloc_used_memory() > g_pserver->stat_peak_memory) + g_pserver->stat_peak_memory = zmalloc_used_memory(); run_with_period(100) { /* Sample the RSS and other metrics here since this is a relatively slow call. 
* We must sample the zmalloc_used at the same time we take the rss, otherwise * the frag ratio calculate may be off (ratio of two samples at different times) */ - server.cron_malloc_stats.process_rss = zmalloc_get_rss(); - server.cron_malloc_stats.zmalloc_used = zmalloc_used_memory(); + g_pserver->cron_malloc_stats.process_rss = zmalloc_get_rss(); + g_pserver->cron_malloc_stats.zmalloc_used = zmalloc_used_memory(); /* Sampling the allcator info can be slow too. * The fragmentation ratio it'll show is potentically more accurate * it excludes other RSS pages such as: shared libraries, LUA and other non-zmalloc * allocations, and allocator reserved pages that can be pursed (all not actual frag) */ - zmalloc_get_allocator_info(&server.cron_malloc_stats.allocator_allocated, - &server.cron_malloc_stats.allocator_active, - &server.cron_malloc_stats.allocator_resident); + zmalloc_get_allocator_info(&g_pserver->cron_malloc_stats.allocator_allocated, + &g_pserver->cron_malloc_stats.allocator_active, + &g_pserver->cron_malloc_stats.allocator_resident); /* in case the allocator isn't providing these stats, fake them so that * fragmention info still shows some (inaccurate metrics) */ - if (!server.cron_malloc_stats.allocator_resident) { + if (!g_pserver->cron_malloc_stats.allocator_resident) { /* LUA memory isn't part of zmalloc_used, but it is part of the process RSS, * so we must desuct it in order to be able to calculate correct * "allocator fragmentation" ratio */ - size_t lua_memory = lua_gc(server.lua,LUA_GCCOUNT,0)*1024LL; - server.cron_malloc_stats.allocator_resident = server.cron_malloc_stats.process_rss - lua_memory; + size_t lua_memory = lua_gc(g_pserver->lua,LUA_GCCOUNT,0)*1024LL; + g_pserver->cron_malloc_stats.allocator_resident = g_pserver->cron_malloc_stats.process_rss - lua_memory; } - if (!server.cron_malloc_stats.allocator_active) - server.cron_malloc_stats.allocator_active = server.cron_malloc_stats.allocator_resident; - if 
(!server.cron_malloc_stats.allocator_allocated) - server.cron_malloc_stats.allocator_allocated = server.cron_malloc_stats.zmalloc_used; + if (!g_pserver->cron_malloc_stats.allocator_active) + g_pserver->cron_malloc_stats.allocator_active = g_pserver->cron_malloc_stats.allocator_resident; + if (!g_pserver->cron_malloc_stats.allocator_allocated) + g_pserver->cron_malloc_stats.allocator_allocated = g_pserver->cron_malloc_stats.zmalloc_used; } /* We received a SIGTERM, shutting down here in a safe way, as it is * not ok doing so inside the signal handler. */ - if (server.shutdown_asap) { + if (g_pserver->shutdown_asap) { if (prepareForShutdown(SHUTDOWN_NOFLAGS) == C_OK) exit(0); serverLog(LL_WARNING,"SIGTERM received but errors trying to shut down the server, check the logs for more information"); - server.shutdown_asap = 0; + g_pserver->shutdown_asap = 0; } /* Show some info about non-empty databases */ run_with_period(5000) { - for (j = 0; j < server.dbnum; j++) { + for (j = 0; j < cserver.dbnum; j++) { long long size, used, vkeys; - size = dictSlots(server.db[j].pdict); - used = dictSize(server.db[j].pdict); - vkeys = dictSize(server.db[j].expires); + size = dictSlots(g_pserver->db[j].pdict); + used = dictSize(g_pserver->db[j].pdict); + vkeys = dictSize(g_pserver->db[j].expires); if (used || vkeys) { serverLog(LL_VERBOSE,"DB %d: %lld keys (%lld volatile) in %lld slots HT.",j,used,vkeys,size); - /* dictPrintStats(server.dict); */ + /* dictPrintStats(g_pserver->dict); */ } } } /* Show information about connected clients */ - if (!server.sentinel_mode) { + if (!g_pserver->sentinel_mode) { run_with_period(5000) { serverLog(LL_VERBOSE, "%lu clients connected (%lu replicas), %zu bytes in use", - listLength(server.clients)-listLength(server.slaves), - listLength(server.slaves), + listLength(g_pserver->clients)-listLength(g_pserver->slaves), + listLength(g_pserver->slaves), zmalloc_used_memory()); } } @@ -1899,14 +1903,14 @@ int serverCron(struct aeEventLoop *eventLoop, 
long long id, void *clientData) { /* Start a scheduled AOF rewrite if this was requested by the user while * a BGSAVE was in progress. */ - if (server.rdb_child_pid == -1 && server.aof_child_pid == -1 && - server.aof_rewrite_scheduled) + if (g_pserver->rdb_child_pid == -1 && g_pserver->aof_child_pid == -1 && + g_pserver->aof_rewrite_scheduled) { rewriteAppendOnlyFileBackground(); } /* Check if a background saving or AOF rewrite in progress terminated. */ - if (server.rdb_child_pid != -1 || server.aof_child_pid != -1 || + if (g_pserver->rdb_child_pid != -1 || g_pserver->aof_child_pid != -1 || ldbPendingChildren()) { int statloc; @@ -1922,12 +1926,12 @@ int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) { serverLog(LL_WARNING,"wait3() returned an error: %s. " "rdb_child_pid = %d, aof_child_pid = %d", strerror(errno), - (int) server.rdb_child_pid, - (int) server.aof_child_pid); - } else if (pid == server.rdb_child_pid) { + (int) g_pserver->rdb_child_pid, + (int) g_pserver->aof_child_pid); + } else if (pid == g_pserver->rdb_child_pid) { backgroundSaveDoneHandler(exitcode,bysignal); if (!bysignal && exitcode == 0) receiveChildInfo(); - } else if (pid == server.aof_child_pid) { + } else if (pid == g_pserver->aof_child_pid) { backgroundRewriteDoneHandler(exitcode,bysignal); if (!bysignal && exitcode == 0) receiveChildInfo(); } else { @@ -1943,18 +1947,18 @@ int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) { } else { /* If there is not a background saving/rewrite in progress check if * we have to save/rewrite now. */ - for (j = 0; j < server.saveparamslen; j++) { - struct saveparam *sp = server.saveparams+j; + for (j = 0; j < g_pserver->saveparamslen; j++) { + struct saveparam *sp = g_pserver->saveparams+j; /* Save if we reached the given amount of changes, * the given amount of seconds, and if the latest bgsave was * successful or if, in case of an error, at least * CONFIG_BGSAVE_RETRY_DELAY seconds already elapsed. 
*/ - if (server.dirty >= sp->changes && - server.unixtime-server.lastsave > sp->seconds && - (server.unixtime-server.lastbgsave_try > + if (g_pserver->dirty >= sp->changes && + g_pserver->unixtime-g_pserver->lastsave > sp->seconds && + (g_pserver->unixtime-g_pserver->lastbgsave_try > CONFIG_BGSAVE_RETRY_DELAY || - server.lastbgsave_status == C_OK)) + g_pserver->lastbgsave_status == C_OK)) { serverLog(LL_NOTICE,"%d changes in %d seconds. Saving...", sp->changes, (int)sp->seconds); @@ -1966,16 +1970,16 @@ int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) { } /* Trigger an AOF rewrite if needed. */ - if (server.aof_state == AOF_ON && - server.rdb_child_pid == -1 && - server.aof_child_pid == -1 && - server.aof_rewrite_perc && - server.aof_current_size > server.aof_rewrite_min_size) + if (g_pserver->aof_state == AOF_ON && + g_pserver->rdb_child_pid == -1 && + g_pserver->aof_child_pid == -1 && + g_pserver->aof_rewrite_perc && + g_pserver->aof_current_size > g_pserver->aof_rewrite_min_size) { - long long base = server.aof_rewrite_base_size ? - server.aof_rewrite_base_size : 1; - long long growth = (server.aof_current_size*100/base) - 100; - if (growth >= server.aof_rewrite_perc) { + long long base = g_pserver->aof_rewrite_base_size ? + g_pserver->aof_rewrite_base_size : 1; + long long growth = (g_pserver->aof_current_size*100/base) - 100; + if (growth >= g_pserver->aof_rewrite_perc) { serverLog(LL_NOTICE,"Starting automatic rewriting of AOF on %lld%% growth",growth); rewriteAppendOnlyFileBackground(); } @@ -1985,14 +1989,14 @@ int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) { /* AOF postponed flush: Try at every cron cycle if the slow fsync * completed. 
*/ - if (server.aof_flush_postponed_start) flushAppendOnlyFile(0); + if (g_pserver->aof_flush_postponed_start) flushAppendOnlyFile(0); /* AOF write errors: in this case we have a buffer to flush as well and * clear the AOF error in case of success to make the DB writable again, * however to try every second is enough in case of 'hz' is set to * an higher frequency. */ run_with_period(1000) { - if (server.aof_last_write_status == C_ERR) + if (g_pserver->aof_last_write_status == C_ERR) flushAppendOnlyFile(0); } @@ -2008,11 +2012,11 @@ int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) { /* Run the Redis Cluster cron. */ run_with_period(100) { - if (server.cluster_enabled) clusterCron(); + if (g_pserver->cluster_enabled) clusterCron(); } /* Run the Sentinel timer if we are in sentinel mode. */ - if (server.sentinel_mode) sentinelTimer(); + if (g_pserver->sentinel_mode) sentinelTimer(); /* Cleanup expired MIGRATE cached sockets. */ run_with_period(1000) { @@ -2026,19 +2030,19 @@ int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) { * Note: this code must be after the replicationCron() call above so * make sure when refactoring this file to keep this order. This is useful * because we want to give priority to RDB savings for replication. 
*/ - if (server.rdb_child_pid == -1 && server.aof_child_pid == -1 && - server.rdb_bgsave_scheduled && - (server.unixtime-server.lastbgsave_try > CONFIG_BGSAVE_RETRY_DELAY || - server.lastbgsave_status == C_OK)) + if (g_pserver->rdb_child_pid == -1 && g_pserver->aof_child_pid == -1 && + g_pserver->rdb_bgsave_scheduled && + (g_pserver->unixtime-g_pserver->lastbgsave_try > CONFIG_BGSAVE_RETRY_DELAY || + g_pserver->lastbgsave_status == C_OK)) { rdbSaveInfo rsi, *rsiptr; rsiptr = rdbPopulateSaveInfo(&rsi); if (rdbSaveBackground(rsiptr) == C_OK) - server.rdb_bgsave_scheduled = 0; + g_pserver->rdb_bgsave_scheduled = 0; } - server.cronloops++; - return 1000/server.hz; + g_pserver->cronloops++; + return 1000/g_pserver->hz; } // serverCron for worker threads other than the main thread @@ -2057,7 +2061,7 @@ int serverCronLite(struct aeEventLoop *eventLoop, long long id, void *clientData freeClientsInAsyncFreeQueue(iel); aeReleaseLock(); - return 1000/server.hz; + return 1000/g_pserver->hz; } /* This function gets called every time Redis is entering the @@ -2070,39 +2074,39 @@ void beforeSleep(struct aeEventLoop *eventLoop) { * may change the state of Redis Cluster (from ok to fail or vice versa), * so it's a good idea to call it before serving the unblocked clients * later in this function. */ - if (server.cluster_enabled) clusterBeforeSleep(); + if (g_pserver->cluster_enabled) clusterBeforeSleep(); /* Run a fast expire cycle (the called function will return * ASAP if a fast cycle is not needed). */ - if (server.active_expire_enabled && listLength(server.masters) == 0) + if (g_pserver->active_expire_enabled && listLength(g_pserver->masters) == 0) activeExpireCycle(ACTIVE_EXPIRE_CYCLE_FAST); /* Send all the slaves an ACK request if at least one client blocked * during the previous event loop iteration. 
*/ - if (server.get_ack_from_slaves) { + if (g_pserver->get_ack_from_slaves) { robj *argv[3]; argv[0] = createStringObject("REPLCONF",8); argv[1] = createStringObject("GETACK",6); argv[2] = createStringObject("*",1); /* Not used argument. */ - replicationFeedSlaves(server.slaves, server.slaveseldb, argv, 3); + replicationFeedSlaves(g_pserver->slaves, g_pserver->slaveseldb, argv, 3); decrRefCount(argv[0]); decrRefCount(argv[1]); decrRefCount(argv[2]); - server.get_ack_from_slaves = 0; + g_pserver->get_ack_from_slaves = 0; } /* Unblock all the clients blocked for synchronous replication * in WAIT. */ - if (listLength(server.clients_waiting_acks)) + if (listLength(g_pserver->clients_waiting_acks)) processClientsWaitingReplicas(); /* Check if there are clients unblocked by modules that implement * blocking commands. */ - moduleHandleBlockedClients(); + moduleHandleBlockedClients(ielFromEventLoop(eventLoop)); /* Try to process pending commands for clients that were just unblocked. */ - if (listLength(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].unblocked_clients)) + if (listLength(g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].unblocked_clients)) { processUnblockedClients(IDX_EVENT_LOOP_MAIN); } @@ -2127,9 +2131,13 @@ void beforeSleepLite(struct aeEventLoop *eventLoop) /* Try to process pending commands for clients that were just unblocked. */ aeAcquireLock(); - if (listLength(server.rgthreadvar[iel].unblocked_clients)) { + if (listLength(g_pserver->rgthreadvar[iel].unblocked_clients)) { processUnblockedClients(iel); } + + /* Check if there are clients unblocked by modules that implement + * blocking commands. */ + moduleHandleBlockedClients(ielFromEventLoop(eventLoop)); aeReleaseLock(); /* Handle writes with pending output buffers. 
*/ @@ -2256,13 +2264,13 @@ void createSharedObjects(void) { void initMasterInfo(redisMaster *master) { - if (server.default_masterauth) - master->masterauth = zstrdup(server.default_masterauth); + if (cserver.default_masterauth) + master->masterauth = zstrdup(cserver.default_masterauth); else master->masterauth = NULL; - if (server.default_masteruser) - master->masteruser = zstrdup(server.default_masteruser); + if (cserver.default_masteruser) + master->masteruser = zstrdup(cserver.default_masteruser); else master->masteruser = NULL; @@ -2279,127 +2287,126 @@ void initMasterInfo(redisMaster *master) void initServerConfig(void) { int j; - serverAssert(pthread_mutex_init(&server.next_client_id_mutex,NULL) == 0); - serverAssert(pthread_mutex_init(&server.lruclock_mutex,NULL) == 0); - serverAssert(pthread_mutex_init(&server.unixtime_mutex,NULL) == 0); + serverAssert(pthread_mutex_init(&g_pserver->next_client_id_mutex,NULL) == 0); + serverAssert(pthread_mutex_init(&g_pserver->lruclock_mutex,NULL) == 0); + serverAssert(pthread_mutex_init(&g_pserver->unixtime_mutex,NULL) == 0); updateCachedTime(); - getRandomHexChars(server.runid,CONFIG_RUN_ID_SIZE); - server.runid[CONFIG_RUN_ID_SIZE] = '\0'; + getRandomHexChars(g_pserver->runid,CONFIG_RUN_ID_SIZE); + g_pserver->runid[CONFIG_RUN_ID_SIZE] = '\0'; changeReplicationId(); clearReplicationId2(); - server.clients = listCreate(); - server.slaves = listCreate(); - server.monitors = listCreate(); - server.timezone = getTimeZone(); /* Initialized by tzset(). */ - server.configfile = NULL; - server.executable = NULL; - server.hz = server.config_hz = CONFIG_DEFAULT_HZ; - server.dynamic_hz = CONFIG_DEFAULT_DYNAMIC_HZ; - server.arch_bits = (sizeof(long) == 8) ? 
64 : 32; - server.port = CONFIG_DEFAULT_SERVER_PORT; - server.tcp_backlog = CONFIG_DEFAULT_TCP_BACKLOG; - server.bindaddr_count = 0; - server.unixsocket = NULL; - server.unixsocketperm = CONFIG_DEFAULT_UNIX_SOCKET_PERM; - server.sofd = -1; - server.protected_mode = CONFIG_DEFAULT_PROTECTED_MODE; - server.dbnum = CONFIG_DEFAULT_DBNUM; - server.verbosity = CONFIG_DEFAULT_VERBOSITY; - server.maxidletime = CONFIG_DEFAULT_CLIENT_TIMEOUT; - server.tcpkeepalive = CONFIG_DEFAULT_TCP_KEEPALIVE; - server.active_expire_enabled = 1; - server.active_defrag_enabled = CONFIG_DEFAULT_ACTIVE_DEFRAG; - server.active_defrag_ignore_bytes = CONFIG_DEFAULT_DEFRAG_IGNORE_BYTES; - server.active_defrag_threshold_lower = CONFIG_DEFAULT_DEFRAG_THRESHOLD_LOWER; - server.active_defrag_threshold_upper = CONFIG_DEFAULT_DEFRAG_THRESHOLD_UPPER; - server.active_defrag_cycle_min = CONFIG_DEFAULT_DEFRAG_CYCLE_MIN; - server.active_defrag_cycle_max = CONFIG_DEFAULT_DEFRAG_CYCLE_MAX; - server.active_defrag_max_scan_fields = CONFIG_DEFAULT_DEFRAG_MAX_SCAN_FIELDS; - server.proto_max_bulk_len = CONFIG_DEFAULT_PROTO_MAX_BULK_LEN; - server.client_max_querybuf_len = PROTO_MAX_QUERYBUF_LEN; - server.saveparams = NULL; - server.loading = 0; - server.logfile = zstrdup(CONFIG_DEFAULT_LOGFILE); - server.syslog_enabled = CONFIG_DEFAULT_SYSLOG_ENABLED; - server.syslog_ident = zstrdup(CONFIG_DEFAULT_SYSLOG_IDENT); - server.syslog_facility = LOG_LOCAL0; - server.daemonize = CONFIG_DEFAULT_DAEMONIZE; - server.supervised = 0; - server.supervised_mode = SUPERVISED_NONE; - server.aof_state = AOF_OFF; - server.aof_fsync = CONFIG_DEFAULT_AOF_FSYNC; - server.aof_no_fsync_on_rewrite = CONFIG_DEFAULT_AOF_NO_FSYNC_ON_REWRITE; - server.aof_rewrite_perc = AOF_REWRITE_PERC; - server.aof_rewrite_min_size = AOF_REWRITE_MIN_SIZE; - server.aof_rewrite_base_size = 0; - server.aof_rewrite_scheduled = 0; - server.aof_last_fsync = time(NULL); - server.aof_rewrite_time_last = -1; - server.aof_rewrite_time_start = -1; - 
server.aof_lastbgrewrite_status = C_OK; - server.aof_delayed_fsync = 0; - server.aof_fd = -1; - server.aof_selected_db = -1; /* Make sure the first time will not match */ - server.aof_flush_postponed_start = 0; - server.aof_rewrite_incremental_fsync = CONFIG_DEFAULT_AOF_REWRITE_INCREMENTAL_FSYNC; - server.rdb_save_incremental_fsync = CONFIG_DEFAULT_RDB_SAVE_INCREMENTAL_FSYNC; - server.aof_load_truncated = CONFIG_DEFAULT_AOF_LOAD_TRUNCATED; - server.aof_use_rdb_preamble = CONFIG_DEFAULT_AOF_USE_RDB_PREAMBLE; - server.pidfile = NULL; - server.rdb_filename = NULL; - server.rdb_s3bucketpath = NULL; - server.aof_filename = zstrdup(CONFIG_DEFAULT_AOF_FILENAME); - server.acl_filename = zstrdup(CONFIG_DEFAULT_ACL_FILENAME); - server.rdb_compression = CONFIG_DEFAULT_RDB_COMPRESSION; - server.rdb_checksum = CONFIG_DEFAULT_RDB_CHECKSUM; - server.stop_writes_on_bgsave_err = CONFIG_DEFAULT_STOP_WRITES_ON_BGSAVE_ERROR; - server.activerehashing = CONFIG_DEFAULT_ACTIVE_REHASHING; - server.active_defrag_running = 0; - server.notify_keyspace_events = 0; - server.maxclients = CONFIG_DEFAULT_MAX_CLIENTS; - server.blocked_clients = 0; - memset(server.blocked_clients_by_type,0, - sizeof(server.blocked_clients_by_type)); - server.maxmemory = CONFIG_DEFAULT_MAXMEMORY; - server.maxmemory_policy = CONFIG_DEFAULT_MAXMEMORY_POLICY; - server.maxmemory_samples = CONFIG_DEFAULT_MAXMEMORY_SAMPLES; - server.lfu_log_factor = CONFIG_DEFAULT_LFU_LOG_FACTOR; - server.lfu_decay_time = CONFIG_DEFAULT_LFU_DECAY_TIME; - server.hash_max_ziplist_entries = OBJ_HASH_MAX_ZIPLIST_ENTRIES; - server.hash_max_ziplist_value = OBJ_HASH_MAX_ZIPLIST_VALUE; - server.list_max_ziplist_size = OBJ_LIST_MAX_ZIPLIST_SIZE; - server.list_compress_depth = OBJ_LIST_COMPRESS_DEPTH; - server.set_max_intset_entries = OBJ_SET_MAX_INTSET_ENTRIES; - server.zset_max_ziplist_entries = OBJ_ZSET_MAX_ZIPLIST_ENTRIES; - server.zset_max_ziplist_value = OBJ_ZSET_MAX_ZIPLIST_VALUE; - server.hll_sparse_max_bytes = 
CONFIG_DEFAULT_HLL_SPARSE_MAX_BYTES; - server.stream_node_max_bytes = OBJ_STREAM_NODE_MAX_BYTES; - server.stream_node_max_entries = OBJ_STREAM_NODE_MAX_ENTRIES; - server.shutdown_asap = 0; - server.cluster_enabled = 0; - server.cluster_node_timeout = CLUSTER_DEFAULT_NODE_TIMEOUT; - server.cluster_migration_barrier = CLUSTER_DEFAULT_MIGRATION_BARRIER; - server.cluster_slave_validity_factor = CLUSTER_DEFAULT_SLAVE_VALIDITY; - server.cluster_require_full_coverage = CLUSTER_DEFAULT_REQUIRE_FULL_COVERAGE; - server.cluster_slave_no_failover = CLUSTER_DEFAULT_SLAVE_NO_FAILOVER; - server.cluster_configfile = zstrdup(CONFIG_DEFAULT_CLUSTER_CONFIG_FILE); - server.cluster_announce_ip = CONFIG_DEFAULT_CLUSTER_ANNOUNCE_IP; - server.cluster_announce_port = CONFIG_DEFAULT_CLUSTER_ANNOUNCE_PORT; - server.cluster_announce_bus_port = CONFIG_DEFAULT_CLUSTER_ANNOUNCE_BUS_PORT; - server.cluster_module_flags = CLUSTER_MODULE_FLAG_NONE; - server.migrate_cached_sockets = dictCreate(&migrateCacheDictType,NULL); - server.next_client_id = 1; /* Client IDs, start from 1 .*/ - server.loading_process_events_interval_bytes = (1024*1024*2); - server.lazyfree_lazy_eviction = CONFIG_DEFAULT_LAZYFREE_LAZY_EVICTION; - server.lazyfree_lazy_expire = CONFIG_DEFAULT_LAZYFREE_LAZY_EXPIRE; - server.lazyfree_lazy_server_del = CONFIG_DEFAULT_LAZYFREE_LAZY_SERVER_DEL; - server.always_show_logo = CONFIG_DEFAULT_ALWAYS_SHOW_LOGO; - server.lua_time_limit = LUA_SCRIPT_TIME_LIMIT; - server.fActiveReplica = CONFIG_DEFAULT_ACTIVE_REPLICA; + g_pserver->clients = listCreate(); + g_pserver->slaves = listCreate(); + g_pserver->monitors = listCreate(); + g_pserver->timezone = getTimeZone(); /* Initialized by tzset(). 
*/ + cserver.configfile = NULL; + cserver.executable = NULL; + g_pserver->hz = g_pserver->config_hz = CONFIG_DEFAULT_HZ; + g_pserver->dynamic_hz = CONFIG_DEFAULT_DYNAMIC_HZ; + g_pserver->port = CONFIG_DEFAULT_SERVER_PORT; + g_pserver->tcp_backlog = CONFIG_DEFAULT_TCP_BACKLOG; + g_pserver->bindaddr_count = 0; + g_pserver->unixsocket = NULL; + g_pserver->unixsocketperm = CONFIG_DEFAULT_UNIX_SOCKET_PERM; + g_pserver->sofd = -1; + g_pserver->protected_mode = CONFIG_DEFAULT_PROTECTED_MODE; + cserver.dbnum = CONFIG_DEFAULT_DBNUM; + cserver.verbosity = CONFIG_DEFAULT_VERBOSITY; + cserver.maxidletime = CONFIG_DEFAULT_CLIENT_TIMEOUT; + cserver.tcpkeepalive = CONFIG_DEFAULT_TCP_KEEPALIVE; + g_pserver->active_expire_enabled = 1; + cserver.active_defrag_enabled = CONFIG_DEFAULT_ACTIVE_DEFRAG; + cserver.active_defrag_ignore_bytes = CONFIG_DEFAULT_DEFRAG_IGNORE_BYTES; + cserver.active_defrag_threshold_lower = CONFIG_DEFAULT_DEFRAG_THRESHOLD_LOWER; + cserver.active_defrag_threshold_upper = CONFIG_DEFAULT_DEFRAG_THRESHOLD_UPPER; + cserver.active_defrag_cycle_min = CONFIG_DEFAULT_DEFRAG_CYCLE_MIN; + cserver.active_defrag_cycle_max = CONFIG_DEFAULT_DEFRAG_CYCLE_MAX; + cserver.active_defrag_max_scan_fields = CONFIG_DEFAULT_DEFRAG_MAX_SCAN_FIELDS; + g_pserver->proto_max_bulk_len = CONFIG_DEFAULT_PROTO_MAX_BULK_LEN; + cserver.client_max_querybuf_len = PROTO_MAX_QUERYBUF_LEN; + g_pserver->saveparams = NULL; + g_pserver->loading = 0; + g_pserver->logfile = zstrdup(CONFIG_DEFAULT_LOGFILE); + g_pserver->syslog_enabled = CONFIG_DEFAULT_SYSLOG_ENABLED; + g_pserver->syslog_ident = zstrdup(CONFIG_DEFAULT_SYSLOG_IDENT); + g_pserver->syslog_facility = LOG_LOCAL0; + cserver.daemonize = CONFIG_DEFAULT_DAEMONIZE; + cserver.supervised = 0; + cserver.supervised_mode = SUPERVISED_NONE; + g_pserver->aof_state = AOF_OFF; + g_pserver->aof_fsync = CONFIG_DEFAULT_AOF_FSYNC; + g_pserver->aof_no_fsync_on_rewrite = CONFIG_DEFAULT_AOF_NO_FSYNC_ON_REWRITE; + g_pserver->aof_rewrite_perc = AOF_REWRITE_PERC; + 
g_pserver->aof_rewrite_min_size = AOF_REWRITE_MIN_SIZE; + g_pserver->aof_rewrite_base_size = 0; + g_pserver->aof_rewrite_scheduled = 0; + g_pserver->aof_last_fsync = time(NULL); + g_pserver->aof_rewrite_time_last = -1; + g_pserver->aof_rewrite_time_start = -1; + g_pserver->aof_lastbgrewrite_status = C_OK; + g_pserver->aof_delayed_fsync = 0; + g_pserver->aof_fd = -1; + g_pserver->aof_selected_db = -1; /* Make sure the first time will not match */ + g_pserver->aof_flush_postponed_start = 0; + g_pserver->aof_rewrite_incremental_fsync = CONFIG_DEFAULT_AOF_REWRITE_INCREMENTAL_FSYNC; + g_pserver->rdb_save_incremental_fsync = CONFIG_DEFAULT_RDB_SAVE_INCREMENTAL_FSYNC; + g_pserver->aof_load_truncated = CONFIG_DEFAULT_AOF_LOAD_TRUNCATED; + g_pserver->aof_use_rdb_preamble = CONFIG_DEFAULT_AOF_USE_RDB_PREAMBLE; + cserver.pidfile = NULL; + g_pserver->rdb_filename = NULL; + g_pserver->rdb_s3bucketpath = NULL; + g_pserver->aof_filename = zstrdup(CONFIG_DEFAULT_AOF_FILENAME); + g_pserver->acl_filename = zstrdup(CONFIG_DEFAULT_ACL_FILENAME); + g_pserver->rdb_compression = CONFIG_DEFAULT_RDB_COMPRESSION; + g_pserver->rdb_checksum = CONFIG_DEFAULT_RDB_CHECKSUM; + g_pserver->stop_writes_on_bgsave_err = CONFIG_DEFAULT_STOP_WRITES_ON_BGSAVE_ERROR; + g_pserver->activerehashing = CONFIG_DEFAULT_ACTIVE_REHASHING; + g_pserver->active_defrag_running = 0; + g_pserver->notify_keyspace_events = 0; + g_pserver->maxclients = CONFIG_DEFAULT_MAX_CLIENTS; + g_pserver->blocked_clients = 0; + memset(g_pserver->blocked_clients_by_type,0, + sizeof(g_pserver->blocked_clients_by_type)); + g_pserver->maxmemory = CONFIG_DEFAULT_MAXMEMORY; + g_pserver->maxmemory_policy = CONFIG_DEFAULT_MAXMEMORY_POLICY; + g_pserver->maxmemory_samples = CONFIG_DEFAULT_MAXMEMORY_SAMPLES; + g_pserver->lfu_log_factor = CONFIG_DEFAULT_LFU_LOG_FACTOR; + g_pserver->lfu_decay_time = CONFIG_DEFAULT_LFU_DECAY_TIME; + g_pserver->hash_max_ziplist_entries = OBJ_HASH_MAX_ZIPLIST_ENTRIES; + g_pserver->hash_max_ziplist_value = 
OBJ_HASH_MAX_ZIPLIST_VALUE; + g_pserver->list_max_ziplist_size = OBJ_LIST_MAX_ZIPLIST_SIZE; + g_pserver->list_compress_depth = OBJ_LIST_COMPRESS_DEPTH; + g_pserver->set_max_intset_entries = OBJ_SET_MAX_INTSET_ENTRIES; + g_pserver->zset_max_ziplist_entries = OBJ_ZSET_MAX_ZIPLIST_ENTRIES; + g_pserver->zset_max_ziplist_value = OBJ_ZSET_MAX_ZIPLIST_VALUE; + g_pserver->hll_sparse_max_bytes = CONFIG_DEFAULT_HLL_SPARSE_MAX_BYTES; + g_pserver->stream_node_max_bytes = OBJ_STREAM_NODE_MAX_BYTES; + g_pserver->stream_node_max_entries = OBJ_STREAM_NODE_MAX_ENTRIES; + g_pserver->shutdown_asap = 0; + g_pserver->cluster_enabled = 0; + g_pserver->cluster_node_timeout = CLUSTER_DEFAULT_NODE_TIMEOUT; + g_pserver->cluster_migration_barrier = CLUSTER_DEFAULT_MIGRATION_BARRIER; + g_pserver->cluster_slave_validity_factor = CLUSTER_DEFAULT_SLAVE_VALIDITY; + g_pserver->cluster_require_full_coverage = CLUSTER_DEFAULT_REQUIRE_FULL_COVERAGE; + g_pserver->cluster_slave_no_failover = CLUSTER_DEFAULT_SLAVE_NO_FAILOVER; + g_pserver->cluster_configfile = zstrdup(CONFIG_DEFAULT_CLUSTER_CONFIG_FILE); + g_pserver->cluster_announce_ip = CONFIG_DEFAULT_CLUSTER_ANNOUNCE_IP; + g_pserver->cluster_announce_port = CONFIG_DEFAULT_CLUSTER_ANNOUNCE_PORT; + g_pserver->cluster_announce_bus_port = CONFIG_DEFAULT_CLUSTER_ANNOUNCE_BUS_PORT; + g_pserver->cluster_module_flags = CLUSTER_MODULE_FLAG_NONE; + g_pserver->migrate_cached_sockets = dictCreate(&migrateCacheDictType,NULL); + g_pserver->next_client_id = 1; /* Client IDs, start from 1 .*/ + g_pserver->loading_process_events_interval_bytes = (1024*1024*2); + g_pserver->lazyfree_lazy_eviction = CONFIG_DEFAULT_LAZYFREE_LAZY_EVICTION; + g_pserver->lazyfree_lazy_expire = CONFIG_DEFAULT_LAZYFREE_LAZY_EXPIRE; + g_pserver->lazyfree_lazy_server_del = CONFIG_DEFAULT_LAZYFREE_LAZY_SERVER_DEL; + g_pserver->always_show_logo = CONFIG_DEFAULT_ALWAYS_SHOW_LOGO; + g_pserver->lua_time_limit = LUA_SCRIPT_TIME_LIMIT; + g_pserver->fActiveReplica = CONFIG_DEFAULT_ACTIVE_REPLICA; 
unsigned int lruclock = getLRUClock(); - atomicSet(server.lruclock,lruclock); + atomicSet(g_pserver->lruclock,lruclock); resetServerSaveParams(); appendServerSaveParams(60*60,1); /* save after 1 hour and 1 change */ @@ -2407,37 +2414,37 @@ void initServerConfig(void) { appendServerSaveParams(60,10000); /* save after 1 minute and 10000 changes */ /* Replication related */ - server.masters = listCreate(); - server.enable_multimaster = CONFIG_DEFAULT_ENABLE_MULTIMASTER; - server.repl_syncio_timeout = CONFIG_REPL_SYNCIO_TIMEOUT; - server.repl_serve_stale_data = CONFIG_DEFAULT_SLAVE_SERVE_STALE_DATA; - server.repl_slave_ro = CONFIG_DEFAULT_SLAVE_READ_ONLY; - server.repl_slave_ignore_maxmemory = CONFIG_DEFAULT_SLAVE_IGNORE_MAXMEMORY; - server.repl_slave_lazy_flush = CONFIG_DEFAULT_SLAVE_LAZY_FLUSH; - server.repl_disable_tcp_nodelay = CONFIG_DEFAULT_REPL_DISABLE_TCP_NODELAY; - server.repl_diskless_sync = CONFIG_DEFAULT_REPL_DISKLESS_SYNC; - server.repl_diskless_sync_delay = CONFIG_DEFAULT_REPL_DISKLESS_SYNC_DELAY; - server.repl_ping_slave_period = CONFIG_DEFAULT_REPL_PING_SLAVE_PERIOD; - server.repl_timeout = CONFIG_DEFAULT_REPL_TIMEOUT; - server.repl_min_slaves_to_write = CONFIG_DEFAULT_MIN_SLAVES_TO_WRITE; - server.repl_min_slaves_max_lag = CONFIG_DEFAULT_MIN_SLAVES_MAX_LAG; - server.slave_priority = CONFIG_DEFAULT_SLAVE_PRIORITY; - server.slave_announce_ip = CONFIG_DEFAULT_SLAVE_ANNOUNCE_IP; - server.slave_announce_port = CONFIG_DEFAULT_SLAVE_ANNOUNCE_PORT; - server.master_repl_offset = 0; + g_pserver->masters = listCreate(); + g_pserver->enable_multimaster = CONFIG_DEFAULT_ENABLE_MULTIMASTER; + g_pserver->repl_syncio_timeout = CONFIG_REPL_SYNCIO_TIMEOUT; + g_pserver->repl_serve_stale_data = CONFIG_DEFAULT_SLAVE_SERVE_STALE_DATA; + g_pserver->repl_slave_ro = CONFIG_DEFAULT_SLAVE_READ_ONLY; + g_pserver->repl_slave_ignore_maxmemory = CONFIG_DEFAULT_SLAVE_IGNORE_MAXMEMORY; + g_pserver->repl_slave_lazy_flush = CONFIG_DEFAULT_SLAVE_LAZY_FLUSH; + 
g_pserver->repl_disable_tcp_nodelay = CONFIG_DEFAULT_REPL_DISABLE_TCP_NODELAY; + g_pserver->repl_diskless_sync = CONFIG_DEFAULT_REPL_DISKLESS_SYNC; + g_pserver->repl_diskless_sync_delay = CONFIG_DEFAULT_REPL_DISKLESS_SYNC_DELAY; + g_pserver->repl_ping_slave_period = CONFIG_DEFAULT_REPL_PING_SLAVE_PERIOD; + g_pserver->repl_timeout = CONFIG_DEFAULT_REPL_TIMEOUT; + g_pserver->repl_min_slaves_to_write = CONFIG_DEFAULT_MIN_SLAVES_TO_WRITE; + g_pserver->repl_min_slaves_max_lag = CONFIG_DEFAULT_MIN_SLAVES_MAX_LAG; + g_pserver->slave_priority = CONFIG_DEFAULT_SLAVE_PRIORITY; + g_pserver->slave_announce_ip = CONFIG_DEFAULT_SLAVE_ANNOUNCE_IP; + g_pserver->slave_announce_port = CONFIG_DEFAULT_SLAVE_ANNOUNCE_PORT; + g_pserver->master_repl_offset = 0; /* Replication partial resync backlog */ - server.repl_backlog = NULL; - server.repl_backlog_size = CONFIG_DEFAULT_REPL_BACKLOG_SIZE; - server.repl_backlog_histlen = 0; - server.repl_backlog_idx = 0; - server.repl_backlog_off = 0; - server.repl_backlog_time_limit = CONFIG_DEFAULT_REPL_BACKLOG_TIME_LIMIT; - server.repl_no_slaves_since = time(NULL); + g_pserver->repl_backlog = NULL; + g_pserver->repl_backlog_size = CONFIG_DEFAULT_REPL_BACKLOG_SIZE; + g_pserver->repl_backlog_histlen = 0; + g_pserver->repl_backlog_idx = 0; + g_pserver->repl_backlog_off = 0; + g_pserver->repl_backlog_time_limit = CONFIG_DEFAULT_REPL_BACKLOG_TIME_LIMIT; + g_pserver->repl_no_slaves_since = time(NULL); /* Client output buffer limits */ for (j = 0; j < CLIENT_TYPE_OBUF_COUNT; j++) - server.client_obuf_limits[j] = clientBufferLimitsDefaults[j]; + cserver.client_obuf_limits[j] = clientBufferLimitsDefaults[j]; /* Double constants initialization */ R_Zero = 0.0; @@ -2448,47 +2455,61 @@ void initServerConfig(void) { /* Command table -- we initiialize it here as it is part of the * initial configuration, since command names may be changed via * redis.conf using the rename-command directive. 
*/ - server.commands = dictCreate(&commandTableDictType,NULL); - server.orig_commands = dictCreate(&commandTableDictType,NULL); + g_pserver->commands = dictCreate(&commandTableDictType,NULL); + g_pserver->orig_commands = dictCreate(&commandTableDictType,NULL); populateCommandTable(); - server.delCommand = lookupCommandByCString("del"); - server.multiCommand = lookupCommandByCString("multi"); - server.lpushCommand = lookupCommandByCString("lpush"); - server.lpopCommand = lookupCommandByCString("lpop"); - server.rpopCommand = lookupCommandByCString("rpop"); - server.zpopminCommand = lookupCommandByCString("zpopmin"); - server.zpopmaxCommand = lookupCommandByCString("zpopmax"); - server.sremCommand = lookupCommandByCString("srem"); - server.execCommand = lookupCommandByCString("exec"); - server.expireCommand = lookupCommandByCString("expire"); - server.pexpireCommand = lookupCommandByCString("pexpire"); - server.xclaimCommand = lookupCommandByCString("xclaim"); - server.xgroupCommand = lookupCommandByCString("xgroup"); - server.rreplayCommand = lookupCommandByCString("rreplay"); + cserver.delCommand = lookupCommandByCString("del"); + cserver.multiCommand = lookupCommandByCString("multi"); + cserver.lpushCommand = lookupCommandByCString("lpush"); + cserver.lpopCommand = lookupCommandByCString("lpop"); + cserver.rpopCommand = lookupCommandByCString("rpop"); + cserver.zpopminCommand = lookupCommandByCString("zpopmin"); + cserver.zpopmaxCommand = lookupCommandByCString("zpopmax"); + cserver.sremCommand = lookupCommandByCString("srem"); + cserver.execCommand = lookupCommandByCString("exec"); + cserver.expireCommand = lookupCommandByCString("expire"); + cserver.pexpireCommand = lookupCommandByCString("pexpire"); + cserver.xclaimCommand = lookupCommandByCString("xclaim"); + cserver.xgroupCommand = lookupCommandByCString("xgroup"); + cserver.rreplayCommand = lookupCommandByCString("rreplay"); /* Slow log */ - server.slowlog_log_slower_than = 
CONFIG_DEFAULT_SLOWLOG_LOG_SLOWER_THAN; - server.slowlog_max_len = CONFIG_DEFAULT_SLOWLOG_MAX_LEN; + g_pserver->slowlog_log_slower_than = CONFIG_DEFAULT_SLOWLOG_LOG_SLOWER_THAN; + g_pserver->slowlog_max_len = CONFIG_DEFAULT_SLOWLOG_MAX_LEN; /* Latency monitor */ - server.latency_monitor_threshold = CONFIG_DEFAULT_LATENCY_MONITOR_THRESHOLD; + g_pserver->latency_monitor_threshold = CONFIG_DEFAULT_LATENCY_MONITOR_THRESHOLD; /* Debugging */ - server.assert_failed = ""; - server.assert_file = ""; - server.assert_line = 0; - server.bug_report_start = 0; - server.watchdog_period = 0; + g_pserver->assert_failed = ""; + g_pserver->assert_file = ""; + g_pserver->assert_line = 0; + g_pserver->bug_report_start = 0; + g_pserver->watchdog_period = 0; /* By default we want scripts to be always replicated by effects * (single commands executed by the script), and not by sending the * script to the slave / AOF. This is the new way starting from * Redis 5. However it is possible to revert it via redis.conf. */ - server.lua_always_replicate_commands = 1; + g_pserver->lua_always_replicate_commands = 1; /* Multithreading */ - server.cthreads = CONFIG_DEFAULT_THREADS; - server.fThreadAffinity = CONFIG_DEFAULT_THREAD_AFFINITY; + cserver.cthreads = CONFIG_DEFAULT_THREADS; + cserver.fThreadAffinity = CONFIG_DEFAULT_THREAD_AFFINITY; + + g_pserver->db = (redisDb*)zmalloc(sizeof(redisDb)*cserver.dbnum, MALLOC_LOCAL); + + /* Create the Redis databases, and initialize other internal state. 
*/ + for (int j = 0; j < cserver.dbnum; j++) { + g_pserver->db[j].pdict = dictCreate(&dbDictType,NULL); + g_pserver->db[j].expires = dictCreate(&keyptrDictType,NULL); + g_pserver->db[j].blocking_keys = dictCreate(&keylistDictType,NULL); + g_pserver->db[j].ready_keys = dictCreate(&objectKeyPointerValueDictType,NULL); + g_pserver->db[j].watched_keys = dictCreate(&keylistDictType,NULL); + g_pserver->db[j].id = j; + g_pserver->db[j].avg_ttl = 0; + g_pserver->db[j].defrag_later = listCreate(); + } } extern char **environ; @@ -2513,16 +2534,16 @@ int restartServer(int flags, mstime_t delay) { /* Check if we still have accesses to the executable that started this * server instance. */ - if (access(server.executable,X_OK) == -1) { + if (access(cserver.executable,X_OK) == -1) { serverLog(LL_WARNING,"Can't restart: this process has no " - "permissions to execute %s", server.executable); + "permissions to execute %s", cserver.executable); return C_ERR; } /* Config rewriting. */ if (flags & RESTART_SERVER_CONFIG_REWRITE && - server.configfile && - rewriteConfig(server.configfile) == -1) + cserver.configfile && + rewriteConfig(cserver.configfile) == -1) { serverLog(LL_WARNING,"Can't restart: configuration rewrite process " "failed"); @@ -2539,7 +2560,7 @@ int restartServer(int flags, mstime_t delay) { /* Close all file descriptors, with the exception of stdin, stdout, strerr * which are useful if we restart a Redis server which is not daemonized. */ - for (j = 3; j < (int)server.maxclients + 1024; j++) { + for (j = 3; j < (int)g_pserver->maxclients + 1024; j++) { /* Test the descriptor validity before closing it, otherwise * Valgrind issues a warning on close(). */ if (fcntl(j,F_GETFD) != -1) close(j); @@ -2547,9 +2568,9 @@ int restartServer(int flags, mstime_t delay) { /* Execute the server with the original command line. 
*/ if (delay) usleep(delay*1000); - zfree(server.exec_argv[0]); - server.exec_argv[0] = zstrdup(server.executable); - execve(server.executable,server.exec_argv,environ); + zfree(cserver.exec_argv[0]); + cserver.exec_argv[0] = zstrdup(cserver.executable); + execve(cserver.executable,cserver.exec_argv,environ); /* If an error occurred here, there is nothing we can do, but exit. */ _exit(1); @@ -2564,15 +2585,15 @@ int restartServer(int flags, mstime_t delay) { * * If it will not be possible to set the limit accordingly to the configured * max number of clients, the function will do the reverse setting - * server.maxclients to the value that we can actually handle. */ + * g_pserver->maxclients to the value that we can actually handle. */ void adjustOpenFilesLimit(void) { - rlim_t maxfiles = server.maxclients+CONFIG_MIN_RESERVED_FDS; + rlim_t maxfiles = g_pserver->maxclients+CONFIG_MIN_RESERVED_FDS; struct rlimit limit; if (getrlimit(RLIMIT_NOFILE,&limit) == -1) { serverLog(LL_WARNING,"Unable to obtain the current NOFILE limit (%s), assuming 1024 and setting the max clients configuration accordingly.", strerror(errno)); - server.maxclients = 1024-CONFIG_MIN_RESERVED_FDS; + g_pserver->maxclients = 1024-CONFIG_MIN_RESERVED_FDS; } else { rlim_t oldlimit = limit.rlim_cur; @@ -2604,8 +2625,8 @@ void adjustOpenFilesLimit(void) { if (bestlimit < oldlimit) bestlimit = oldlimit; if (bestlimit < maxfiles) { - unsigned int old_maxclients = server.maxclients; - server.maxclients = bestlimit-CONFIG_MIN_RESERVED_FDS; + unsigned int old_maxclients = g_pserver->maxclients; + g_pserver->maxclients = bestlimit-CONFIG_MIN_RESERVED_FDS; /* maxclients is unsigned so may overflow: in order * to check if maxclients is now logically less than 1 * we test indirectly via bestlimit. */ @@ -2629,7 +2650,7 @@ void adjustOpenFilesLimit(void) { "maxclients has been reduced to %d to compensate for " "low ulimit. 
" "If you need higher maxclients increase 'ulimit -n'.", - (unsigned long long) bestlimit, server.maxclients); + (unsigned long long) bestlimit, g_pserver->maxclients); } else { serverLog(LL_NOTICE,"Increased maximum number of open files " "to %llu (it was originally set to %llu).", @@ -2640,7 +2661,7 @@ void adjustOpenFilesLimit(void) { } } -/* Check that server.tcp_backlog can be actually enforced in Linux according +/* Check that g_pserver->tcp_backlog can be actually enforced in Linux according * to the value of /proc/sys/net/core/somaxconn, or warn about it. */ void checkTcpBacklogSettings(void) { #ifdef HAVE_PROC_SOMAXCONN @@ -2649,8 +2670,8 @@ void checkTcpBacklogSettings(void) { if (!fp) return; if (fgets(buf,sizeof(buf),fp) != NULL) { int somaxconn = atoi(buf); - if (somaxconn > 0 && somaxconn < server.tcp_backlog) { - serverLog(LL_WARNING,"WARNING: The TCP backlog setting of %d cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of %d.", server.tcp_backlog, somaxconn); + if (somaxconn > 0 && somaxconn < g_pserver->tcp_backlog) { + serverLog(LL_WARNING,"WARNING: The TCP backlog setting of %d cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of %d.", g_pserver->tcp_backlog, somaxconn); } } fclose(fp); @@ -2663,15 +2684,15 @@ void checkTcpBacklogSettings(void) { * The listening file descriptors are stored in the integer array 'fds' * and their number is set in '*count'. * - * The addresses to bind are specified in the global server.bindaddr array - * and their number is server.bindaddr_count. If the server configuration + * The addresses to bind are specified in the global g_pserver->bindaddr array + * and their number is g_pserver->bindaddr_count. If the server configuration * contains no specific addresses to bind, this function will try to * bind * (all addresses) for both the IPv4 and IPv6 protocols. * * On success the function returns C_OK. * * On error the function returns C_ERR. 
For the function to be on - * error, at least one of the server.bindaddr addresses was + * error, at least one of the g_pserver->bindaddr addresses was * impossible to bind, or no bind addresses were specified in the server * configuration but the function is not able to bind * for at least * one of the IPv4 or IPv6 protocols. */ @@ -2680,14 +2701,14 @@ int listenToPort(int port, int *fds, int *count, int fReusePort) { /* Force binding of 0.0.0.0 if no bind address is specified, always * entering the loop if j == 0. */ - if (server.bindaddr_count == 0) server.bindaddr[0] = NULL; - for (j = 0; j < server.bindaddr_count || j == 0; j++) { - if (server.bindaddr[j] == NULL) { + if (g_pserver->bindaddr_count == 0) g_pserver->bindaddr[0] = NULL; + for (j = 0; j < g_pserver->bindaddr_count || j == 0; j++) { + if (g_pserver->bindaddr[j] == NULL) { int unsupported = 0; /* Bind * for both IPv6 and IPv4, we enter here only if - * server.bindaddr_count == 0. */ - fds[*count] = anetTcp6Server(server.neterr,port,NULL, - server.tcp_backlog, fReusePort); + * g_pserver->bindaddr_count == 0. */ + fds[*count] = anetTcp6Server(g_pserver->neterr,port,NULL, + g_pserver->tcp_backlog, fReusePort); if (fds[*count] != ANET_ERR) { anetNonBlock(NULL,fds[*count]); (*count)++; @@ -2698,8 +2719,8 @@ int listenToPort(int port, int *fds, int *count, int fReusePort) { if (*count == 1 || unsupported) { /* Bind the IPv4 address as well. */ - fds[*count] = anetTcpServer(server.neterr,port,NULL, - server.tcp_backlog, fReusePort); + fds[*count] = anetTcpServer(g_pserver->neterr,port,NULL, + g_pserver->tcp_backlog, fReusePort); if (fds[*count] != ANET_ERR) { anetNonBlock(NULL,fds[*count]); (*count)++; @@ -2712,20 +2733,20 @@ int listenToPort(int port, int *fds, int *count, int fReusePort) { * otherwise fds[*count] will be ANET_ERR and we'll print an * error and return to the caller with an error. 
*/ if (*count + unsupported == 2) break; - } else if (strchr(server.bindaddr[j],':')) { + } else if (strchr(g_pserver->bindaddr[j],':')) { /* Bind IPv6 address. */ - fds[*count] = anetTcp6Server(server.neterr,port,server.bindaddr[j], - server.tcp_backlog, fReusePort); + fds[*count] = anetTcp6Server(g_pserver->neterr,port,g_pserver->bindaddr[j], + g_pserver->tcp_backlog, fReusePort); } else { /* Bind IPv4 address. */ - fds[*count] = anetTcpServer(server.neterr,port,server.bindaddr[j], - server.tcp_backlog, fReusePort); + fds[*count] = anetTcpServer(g_pserver->neterr,port,g_pserver->bindaddr[j], + g_pserver->tcp_backlog, fReusePort); } if (fds[*count] == ANET_ERR) { serverLog(LL_WARNING, "Could not create server TCP listening socket %s:%d: %s", - server.bindaddr[j] ? server.bindaddr[j] : "*", - port, server.neterr); + g_pserver->bindaddr[j] ? g_pserver->bindaddr[j] : "*", + port, g_pserver->neterr); if (errno == ENOPROTOOPT || errno == EPROTONOSUPPORT || errno == ESOCKTNOSUPPORT || errno == EPFNOSUPPORT || errno == EAFNOSUPPORT || errno == EADDRNOTAVAIL) @@ -2744,35 +2765,35 @@ int listenToPort(int port, int *fds, int *count, int fReusePort) { void resetServerStats(void) { int j; - server.stat_numcommands = 0; - server.stat_numconnections = 0; - server.stat_expiredkeys = 0; - server.stat_expired_stale_perc = 0; - server.stat_expired_time_cap_reached_count = 0; - server.stat_evictedkeys = 0; - server.stat_keyspace_misses = 0; - server.stat_keyspace_hits = 0; - server.stat_active_defrag_hits = 0; - server.stat_active_defrag_misses = 0; - server.stat_active_defrag_key_hits = 0; - server.stat_active_defrag_key_misses = 0; - server.stat_active_defrag_scanned = 0; - server.stat_fork_time = 0; - server.stat_fork_rate = 0; - server.stat_rejected_conn = 0; - server.stat_sync_full = 0; - server.stat_sync_partial_ok = 0; - server.stat_sync_partial_err = 0; + g_pserver->stat_numcommands = 0; + g_pserver->stat_numconnections = 0; + g_pserver->stat_expiredkeys = 0; + 
g_pserver->stat_expired_stale_perc = 0; + g_pserver->stat_expired_time_cap_reached_count = 0; + g_pserver->stat_evictedkeys = 0; + g_pserver->stat_keyspace_misses = 0; + g_pserver->stat_keyspace_hits = 0; + g_pserver->stat_active_defrag_hits = 0; + g_pserver->stat_active_defrag_misses = 0; + g_pserver->stat_active_defrag_key_hits = 0; + g_pserver->stat_active_defrag_key_misses = 0; + g_pserver->stat_active_defrag_scanned = 0; + g_pserver->stat_fork_time = 0; + g_pserver->stat_fork_rate = 0; + g_pserver->stat_rejected_conn = 0; + g_pserver->stat_sync_full = 0; + g_pserver->stat_sync_partial_ok = 0; + g_pserver->stat_sync_partial_err = 0; for (j = 0; j < STATS_METRIC_COUNT; j++) { - server.inst_metric[j].idx = 0; - server.inst_metric[j].last_sample_time = mstime(); - server.inst_metric[j].last_sample_count = 0; - memset(server.inst_metric[j].samples,0, - sizeof(server.inst_metric[j].samples)); - } - server.stat_net_input_bytes = 0; - server.stat_net_output_bytes = 0; - server.aof_delayed_fsync = 0; + g_pserver->inst_metric[j].idx = 0; + g_pserver->inst_metric[j].last_sample_time = mstime(); + g_pserver->inst_metric[j].last_sample_count = 0; + memset(g_pserver->inst_metric[j].samples,0, + sizeof(g_pserver->inst_metric[j].samples)); + } + g_pserver->stat_net_input_bytes = 0; + g_pserver->stat_net_output_bytes = 0; + g_pserver->aof_delayed_fsync = 0; } static void initNetworkingThread(int iel, int fReusePort) @@ -2780,54 +2801,54 @@ static void initNetworkingThread(int iel, int fReusePort) /* Open the TCP listening socket for the user commands. 
*/ if (fReusePort || (iel == IDX_EVENT_LOOP_MAIN)) { - if (server.port != 0 && - listenToPort(server.port,server.rgthreadvar[iel].ipfd,&server.rgthreadvar[iel].ipfd_count, fReusePort) == C_ERR) + if (g_pserver->port != 0 && + listenToPort(g_pserver->port,g_pserver->rgthreadvar[iel].ipfd,&g_pserver->rgthreadvar[iel].ipfd_count, fReusePort) == C_ERR) exit(1); } else { // We use the main threads file descriptors - memcpy(server.rgthreadvar[iel].ipfd, server.rgthreadvar[IDX_EVENT_LOOP_MAIN].ipfd, sizeof(int)*CONFIG_BINDADDR_MAX); - server.rgthreadvar[iel].ipfd_count = server.rgthreadvar[IDX_EVENT_LOOP_MAIN].ipfd_count; + memcpy(g_pserver->rgthreadvar[iel].ipfd, g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].ipfd, sizeof(int)*CONFIG_BINDADDR_MAX); + g_pserver->rgthreadvar[iel].ipfd_count = g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].ipfd_count; } /* Create an event handler for accepting new connections in TCP */ - for (int j = 0; j < server.rgthreadvar[iel].ipfd_count; j++) { - if (aeCreateFileEvent(server.rgthreadvar[iel].el, server.rgthreadvar[iel].ipfd[j], AE_READABLE|AE_READ_THREADSAFE, + for (int j = 0; j < g_pserver->rgthreadvar[iel].ipfd_count; j++) { + if (aeCreateFileEvent(g_pserver->rgthreadvar[iel].el, g_pserver->rgthreadvar[iel].ipfd[j], AE_READABLE|AE_READ_THREADSAFE, acceptTcpHandler,NULL) == AE_ERR) { serverPanic( - "Unrecoverable error creating server.ipfd file event."); + "Unrecoverable error creating g_pserver->ipfd file event."); } } } static void initNetworking(int fReusePort) { - int celListen = (fReusePort) ? server.cthreads : 1; + int celListen = (fReusePort) ? cserver.cthreads : 1; for (int iel = 0; iel < celListen; ++iel) initNetworkingThread(iel, fReusePort); /* Open the listening Unix domain socket. 
*/ - if (server.unixsocket != NULL) { - unlink(server.unixsocket); /* don't care if this fails */ - server.sofd = anetUnixServer(server.neterr,server.unixsocket, - server.unixsocketperm, server.tcp_backlog); - if (server.sofd == ANET_ERR) { - serverLog(LL_WARNING, "Opening Unix socket: %s", server.neterr); + if (g_pserver->unixsocket != NULL) { + unlink(g_pserver->unixsocket); /* don't care if this fails */ + g_pserver->sofd = anetUnixServer(g_pserver->neterr,g_pserver->unixsocket, + g_pserver->unixsocketperm, g_pserver->tcp_backlog); + if (g_pserver->sofd == ANET_ERR) { + serverLog(LL_WARNING, "Opening Unix socket: %s", g_pserver->neterr); exit(1); } - anetNonBlock(NULL,server.sofd); + anetNonBlock(NULL,g_pserver->sofd); } /* Abort if there are no listening sockets at all. */ - if (server.rgthreadvar[IDX_EVENT_LOOP_MAIN].ipfd_count == 0 && server.sofd < 0) { + if (g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].ipfd_count == 0 && g_pserver->sofd < 0) { serverLog(LL_WARNING, "Configured to not listen anywhere, exiting."); exit(1); } - if (server.sofd > 0 && aeCreateFileEvent(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el,server.sofd,AE_READABLE|AE_READ_THREADSAFE, - acceptUnixHandler,NULL) == AE_ERR) serverPanic("Unrecoverable error creating server.sofd file event."); + if (g_pserver->sofd > 0 && aeCreateFileEvent(g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].el,g_pserver->sofd,AE_READABLE|AE_READ_THREADSAFE, + acceptUnixHandler,NULL) == AE_ERR) serverPanic("Unrecoverable error creating g_pserver->sofd file event."); } static void initServerThread(struct redisServerThreadVars *pvar, int fMain) @@ -2837,7 +2858,8 @@ static void initServerThread(struct redisServerThreadVars *pvar, int fMain) pvar->clients_pending_asyncwrite = listCreate(); pvar->ipfd_count = 0; pvar->cclients = 0; - pvar->el = aeCreateEventLoop(server.maxclients+CONFIG_FDSET_INCR); + pvar->el = aeCreateEventLoop(g_pserver->maxclients+CONFIG_FDSET_INCR); + pvar->current_client = nullptr; if (pvar->el == NULL) { 
serverLog(LL_WARNING, "Failed creating the event loop. Error message: '%s'", @@ -2854,6 +2876,37 @@ static void initServerThread(struct redisServerThreadVars *pvar, int fMain) exit(1); } } + + if (pipe(pvar->module_blocked_pipe) == -1) { + serverLog(LL_WARNING, + "Can't create the pipe for module blocking commands: %s", + strerror(errno)); + exit(1); + } + + /* Make the pipe non blocking. This is just a best effort aware mechanism + * and we do not want to block not in the read nor in the write half. */ + anetNonBlock(NULL,pvar->module_blocked_pipe[0]); + anetNonBlock(NULL,pvar->module_blocked_pipe[1]); + + /* Register a readable event for the pipe used to awake the event loop + * when a blocked client in a module needs attention. */ + if (aeCreateFileEvent(pvar->el, pvar->module_blocked_pipe[0], AE_READABLE, + moduleBlockedClientPipeReadable,NULL) == AE_ERR) { + serverPanic( + "Error registering the readable event for the module " + "blocked clients subsystem."); + } + + + /* Register a readable event for the pipe used to awake the event loop + * when a blocked client in a module needs attention. 
*/ + if (aeCreateFileEvent(pvar->el, pvar->module_blocked_pipe[0], AE_READABLE, + moduleBlockedClientPipeReadable,NULL) == AE_ERR) { + serverPanic( + "Error registering the readable event for the module " + "blocked clients subsystem."); + } } void initServer(void) { @@ -2861,109 +2914,78 @@ void initServer(void) { signal(SIGPIPE, SIG_IGN); setupSignalHandlers(); - fastlock_init(&server.flock); + fastlock_init(&g_pserver->flock); - if (server.syslog_enabled) { - openlog(server.syslog_ident, LOG_PID | LOG_NDELAY | LOG_NOWAIT, - server.syslog_facility); + if (g_pserver->syslog_enabled) { + openlog(g_pserver->syslog_ident, LOG_PID | LOG_NDELAY | LOG_NOWAIT, + g_pserver->syslog_facility); } - server.hz = server.config_hz; - server.pid = getpid(); - server.current_client = NULL; - server.clients_index = raxNew(); - server.clients_to_close = listCreate(); - server.slaveseldb = -1; /* Force to emit the first SELECT command. */ - server.ready_keys = listCreate(); - server.clients_waiting_acks = listCreate(); - server.get_ack_from_slaves = 0; - server.clients_paused = 0; - server.system_memory_size = zmalloc_get_memory_size(); + g_pserver->hz = g_pserver->config_hz; + cserver.pid = getpid(); + g_pserver->clients_index = raxNew(); + g_pserver->clients_to_close = listCreate(); + g_pserver->slaveseldb = -1; /* Force to emit the first SELECT command. */ + g_pserver->ready_keys = listCreate(); + g_pserver->clients_waiting_acks = listCreate(); + g_pserver->get_ack_from_slaves = 0; + g_pserver->clients_paused = 0; + cserver.system_memory_size = zmalloc_get_memory_size(); createSharedObjects(); adjustOpenFilesLimit(); - server.db = (redisDb*)zmalloc(sizeof(redisDb)*server.dbnum, MALLOC_LOCAL); - - /* Create the Redis databases, and initialize other internal state. 
*/ - for (int j = 0; j < server.dbnum; j++) { - server.db[j].pdict = dictCreate(&dbDictType,NULL); - server.db[j].expires = dictCreate(&keyptrDictType,NULL); - server.db[j].blocking_keys = dictCreate(&keylistDictType,NULL); - server.db[j].ready_keys = dictCreate(&objectKeyPointerValueDictType,NULL); - server.db[j].watched_keys = dictCreate(&keylistDictType,NULL); - server.db[j].id = j; - server.db[j].avg_ttl = 0; - server.db[j].defrag_later = listCreate(); - } evictionPoolAlloc(); /* Initialize the LRU keys pool. */ - server.pubsub_channels = dictCreate(&keylistDictType,NULL); - server.pubsub_patterns = listCreate(); - listSetFreeMethod(server.pubsub_patterns,freePubsubPattern); - listSetMatchMethod(server.pubsub_patterns,listMatchPubsubPattern); - server.cronloops = 0; - server.rdb_child_pid = -1; - server.aof_child_pid = -1; - server.rdb_child_type = RDB_CHILD_TYPE_NONE; - server.rdb_bgsave_scheduled = 0; - server.child_info_pipe[0] = -1; - server.child_info_pipe[1] = -1; - server.child_info_data.magic = 0; + g_pserver->pubsub_channels = dictCreate(&keylistDictType,NULL); + g_pserver->pubsub_patterns = listCreate(); + listSetFreeMethod(g_pserver->pubsub_patterns,freePubsubPattern); + listSetMatchMethod(g_pserver->pubsub_patterns,listMatchPubsubPattern); + g_pserver->cronloops = 0; + g_pserver->rdb_child_pid = -1; + g_pserver->aof_child_pid = -1; + g_pserver->rdb_child_type = RDB_CHILD_TYPE_NONE; + g_pserver->rdb_bgsave_scheduled = 0; + g_pserver->child_info_pipe[0] = -1; + g_pserver->child_info_pipe[1] = -1; + g_pserver->child_info_data.magic = 0; aofRewriteBufferReset(); - server.aof_buf = sdsempty(); - server.lastsave = time(NULL); /* At startup we consider the DB saved. */ - server.lastbgsave_try = 0; /* At startup we never tried to BGSAVE. */ - server.rdb_save_time_last = -1; - server.rdb_save_time_start = -1; - server.dirty = 0; + g_pserver->aof_buf = sdsempty(); + g_pserver->lastsave = time(NULL); /* At startup we consider the DB saved. 
*/ + g_pserver->lastbgsave_try = 0; /* At startup we never tried to BGSAVE. */ + g_pserver->rdb_save_time_last = -1; + g_pserver->rdb_save_time_start = -1; + g_pserver->dirty = 0; resetServerStats(); /* A few stats we don't want to reset: server startup time, and peak mem. */ - server.stat_starttime = time(NULL); - server.stat_peak_memory = 0; - server.stat_rdb_cow_bytes = 0; - server.stat_aof_cow_bytes = 0; - server.cron_malloc_stats.zmalloc_used = 0; - server.cron_malloc_stats.process_rss = 0; - server.cron_malloc_stats.allocator_allocated = 0; - server.cron_malloc_stats.allocator_active = 0; - server.cron_malloc_stats.allocator_resident = 0; - server.lastbgsave_status = C_OK; - server.aof_last_write_status = C_OK; - server.aof_last_write_errno = 0; - server.repl_good_slaves_count = 0; + cserver.stat_starttime = time(NULL); + g_pserver->stat_peak_memory = 0; + g_pserver->stat_rdb_cow_bytes = 0; + g_pserver->stat_aof_cow_bytes = 0; + g_pserver->cron_malloc_stats.zmalloc_used = 0; + g_pserver->cron_malloc_stats.process_rss = 0; + g_pserver->cron_malloc_stats.allocator_allocated = 0; + g_pserver->cron_malloc_stats.allocator_active = 0; + g_pserver->cron_malloc_stats.allocator_resident = 0; + g_pserver->lastbgsave_status = C_OK; + g_pserver->aof_last_write_status = C_OK; + g_pserver->aof_last_write_errno = 0; + g_pserver->repl_good_slaves_count = 0; + + g_pserver->mvcc_tstamp = 0; /* Create the timer callback, this is our way to process many background * operations incrementally, like clients timeout, eviction of unaccessed * expired keys and so forth. */ - if (aeCreateTimeEvent(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el, 1, serverCron, NULL, NULL) == AE_ERR) { + if (aeCreateTimeEvent(g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].el, 1, serverCron, NULL, NULL) == AE_ERR) { serverPanic("Can't create event loop timers."); exit(1); } - /* Register a readable event for the pipe used to awake the event loop - * when a blocked client in a module needs attention. 
*/ - if (aeCreateFileEvent(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el, server.module_blocked_pipe[0], AE_READABLE, - moduleBlockedClientPipeReadable,NULL) == AE_ERR) { - serverPanic( - "Error registering the readable event for the module " - "blocked clients subsystem."); - } - - - /* Register a readable event for the pipe used to awake the event loop - * when a blocked client in a module needs attention. */ - if (aeCreateFileEvent(server.rgthreadvar[IDX_EVENT_LOOP_MAIN].el, server.module_blocked_pipe[0], AE_READABLE, - moduleBlockedClientPipeReadable,NULL) == AE_ERR) { - serverPanic( - "Error registering the readable event for the module " - "blocked clients subsystem."); - } - /* Open the AOF file if needed. */ - if (server.aof_state == AOF_ON) { - server.aof_fd = open(server.aof_filename, + if (g_pserver->aof_state == AOF_ON) { + g_pserver->aof_fd = open(g_pserver->aof_filename, O_WRONLY|O_APPEND|O_CREAT,0644); - if (server.aof_fd == -1) { + if (g_pserver->aof_fd == -1) { serverLog(LL_WARNING, "Can't open the append-only file: %s", strerror(errno)); exit(1); @@ -2974,23 +2996,23 @@ void initServer(void) { * no explicit limit in the user provided configuration we set a limit * at 3 GB using maxmemory with 'noeviction' policy'. This avoids * useless crashes of the Redis instance for out of memory. */ - if (server.arch_bits == 32 && server.maxmemory == 0) { + if (sizeof(void*) == 4 && g_pserver->maxmemory == 0) { serverLog(LL_WARNING,"Warning: 32 bit instance detected but no memory limit set. 
Setting 3 GB maxmemory limit with 'noeviction' policy now."); - server.maxmemory = 3072LL*(1024*1024); /* 3 GB */ - server.maxmemory_policy = MAXMEMORY_NO_EVICTION; + g_pserver->maxmemory = 3072LL*(1024*1024); /* 3 GB */ + g_pserver->maxmemory_policy = MAXMEMORY_NO_EVICTION; } /* Generate UUID */ - static_assert(sizeof(uuid_t) == sizeof(server.uuid), "UUIDs are standardized at 16-bytes"); - uuid_generate((unsigned char*)server.uuid); + static_assert(sizeof(uuid_t) == sizeof(cserver.uuid), "UUIDs are standardized at 16-bytes"); + uuid_generate((unsigned char*)cserver.uuid); - if (server.cluster_enabled) clusterInit(); + if (g_pserver->cluster_enabled) clusterInit(); replicationScriptCacheInit(); scriptingInit(1); slowlogInit(); latencyMonitorInit(); bioInit(); - server.initial_memory_usage = zmalloc_used_memory(); + g_pserver->initial_memory_usage = zmalloc_used_memory(); } /* Parse the flags string description 'strflags' and set them to the @@ -3070,10 +3092,10 @@ void populateCommandTable(void) { serverPanic("Unsupported command flag"); c->id = ACLGetCommandID(c->name); /* Assign the ID used for ACL. */ - retval1 = dictAdd(server.commands, sdsnew(c->name), c); + retval1 = dictAdd(g_pserver->commands, sdsnew(c->name), c); /* Populate an additional dictionary that will be unaffected * by rename-command statements in redis.conf. 
*/ - retval2 = dictAdd(server.orig_commands, sdsnew(c->name), c); + retval2 = dictAdd(g_pserver->orig_commands, sdsnew(c->name), c); serverAssert(retval1 == DICT_OK && retval2 == DICT_OK); } } @@ -3083,7 +3105,7 @@ void resetCommandTableStats(void) { dictEntry *de; dictIterator *di; - di = dictGetSafeIterator(server.commands); + di = dictGetSafeIterator(g_pserver->commands); while((de = dictNext(di)) != NULL) { c = (struct redisCommand *) dictGetVal(de); c->microseconds = 0; @@ -3133,14 +3155,14 @@ void redisOpArrayFree(redisOpArray *oa) { /* ====================== Commands lookup and execution ===================== */ struct redisCommand *lookupCommand(sds name) { - return (struct redisCommand*)dictFetchValue(server.commands, name); + return (struct redisCommand*)dictFetchValue(g_pserver->commands, name); } struct redisCommand *lookupCommandByCString(const char *s) { struct redisCommand *cmd; sds name = sdsnew(s); - cmd = (struct redisCommand*)dictFetchValue(server.commands, name); + cmd = (struct redisCommand*)dictFetchValue(g_pserver->commands, name); sdsfree(name); return cmd; } @@ -3153,9 +3175,9 @@ struct redisCommand *lookupCommandByCString(const char *s) { * rewriteClientCommandVector() in order to set client->cmd pointer * correctly even if the command was renamed. 
*/ struct redisCommand *lookupCommandOrOriginal(sds name) { - struct redisCommand *cmd = (struct redisCommand*)dictFetchValue(server.commands, name); + struct redisCommand *cmd = (struct redisCommand*)dictFetchValue(g_pserver->commands, name); - if (!cmd) cmd = (struct redisCommand*)dictFetchValue(server.orig_commands,name); + if (!cmd) cmd = (struct redisCommand*)dictFetchValue(g_pserver->orig_commands,name); return cmd; } @@ -3174,10 +3196,10 @@ void propagate(struct redisCommand *cmd, int dbid, robj **argv, int argc, int flags) { serverAssert(GlobalLocksAcquired()); - if (server.aof_state != AOF_OFF && flags & PROPAGATE_AOF) + if (g_pserver->aof_state != AOF_OFF && flags & PROPAGATE_AOF) feedAppendOnlyFile(cmd,dbid,argv,argc); if (flags & PROPAGATE_REPL) - replicationFeedSlaves(server.slaves,dbid,argv,argc); + replicationFeedSlaves(g_pserver->slaves,dbid,argv,argc); } /* Used inside commands to schedule the propagation of additional commands @@ -3198,14 +3220,14 @@ void alsoPropagate(struct redisCommand *cmd, int dbid, robj **argv, int argc, robj **argvcopy; int j; - if (server.loading) return; /* No propagation during loading. */ + if (g_pserver->loading) return; /* No propagation during loading. */ argvcopy = (robj**)zmalloc(sizeof(robj*)*argc, MALLOC_LOCAL); for (j = 0; j < argc; j++) { argvcopy[j] = argv[j]; incrRefCount(argv[j]); } - redisOpArrayAppend(&server.also_propagate,cmd,dbid,argvcopy,argc,target); + redisOpArrayAppend(&g_pserver->also_propagate,cmd,dbid,argvcopy,argc,target); } /* It is possible to call the function forceCommandPropagation() inside a @@ -3278,40 +3300,40 @@ void call(client *c, int flags) { /* Sent the command to clients in MONITOR mode, only if the commands are * not generated from reading an AOF. 
*/ - if (listLength(server.monitors) && - !server.loading && + if (listLength(g_pserver->monitors) && + !g_pserver->loading && !(c->cmd->flags & (CMD_SKIP_MONITOR|CMD_ADMIN))) { - replicationFeedMonitors(c,server.monitors,c->db->id,c->argv,c->argc); + replicationFeedMonitors(c,g_pserver->monitors,c->db->id,c->argv,c->argc); } /* Initialization: clear the flags that must be set by the command on * demand, and initialize the array for additional commands propagation. */ c->flags &= ~(CLIENT_FORCE_AOF|CLIENT_FORCE_REPL|CLIENT_PREVENT_PROP); - redisOpArray prev_also_propagate = server.also_propagate; - redisOpArrayInit(&server.also_propagate); + redisOpArray prev_also_propagate = g_pserver->also_propagate; + redisOpArrayInit(&g_pserver->also_propagate); /* Call the command. */ - dirty = server.dirty; + dirty = g_pserver->dirty; start = ustime(); c->cmd->proc(c); duration = ustime()-start; - dirty = server.dirty-dirty; + dirty = g_pserver->dirty-dirty; if (dirty < 0) dirty = 0; /* When EVAL is called loading the AOF we don't want commands called * from Lua to go into the slowlog or to populate statistics. */ - if (server.loading && c->flags & CLIENT_LUA) + if (g_pserver->loading && c->flags & CLIENT_LUA) flags &= ~(CMD_CALL_SLOWLOG | CMD_CALL_STATS); /* If the caller is Lua, we want to force the EVAL caller to propagate * the script if the command flag or client flag are forcing the * propagation. 
*/ - if (c->flags & CLIENT_LUA && server.lua_caller) { + if (c->flags & CLIENT_LUA && g_pserver->lua_caller) { if (c->flags & CLIENT_FORCE_REPL) - server.lua_caller->flags |= CLIENT_FORCE_REPL; + g_pserver->lua_caller->flags |= CLIENT_FORCE_REPL; if (c->flags & CLIENT_FORCE_AOF) - server.lua_caller->flags |= CLIENT_FORCE_AOF; + g_pserver->lua_caller->flags |= CLIENT_FORCE_AOF; } /* Log the command into the Slow log if needed, and populate the @@ -3374,13 +3396,13 @@ void call(client *c, int flags) { /* Handle the alsoPropagate() API to handle commands that want to propagate * multiple separated commands. Note that alsoPropagate() is not affected * by CLIENT_PREVENT_PROP flag. */ - if (server.also_propagate.numops) { + if (g_pserver->also_propagate.numops) { int j; redisOp *rop; if (flags & CMD_CALL_PROPAGATE) { - for (j = 0; j < server.also_propagate.numops; j++) { - rop = &server.also_propagate.ops[j]; + for (j = 0; j < g_pserver->also_propagate.numops; j++) { + rop = &g_pserver->also_propagate.ops[j]; int target = rop->target; /* Whatever the command wish is, we honor the call() flags. */ if (!(flags&CMD_CALL_PROPAGATE_AOF)) target &= ~PROPAGATE_AOF; @@ -3389,13 +3411,13 @@ void call(client *c, int flags) { propagate(rop->cmd,rop->dbid,rop->argv,rop->argc,target); } } - redisOpArrayFree(&server.also_propagate); + redisOpArrayFree(&g_pserver->also_propagate); } ProcessPendingAsyncWrites(); - server.also_propagate = prev_also_propagate; - server.stat_numcommands++; + g_pserver->also_propagate = prev_also_propagate; + g_pserver->stat_numcommands++; } /* If this function gets called we already read a whole @@ -3422,6 +3444,7 @@ int processCommand(client *c, int callFlags) { AssertCorrectThread(c); serverAssert(GlobalLocksAcquired()); + incrementMvccTstamp(); /* Now lookup the command and check ASAP about trivial error conditions * such as wrong arity, bad command name and so forth. 
*/ @@ -3477,10 +3500,10 @@ int processCommand(client *c, int callFlags) { * However we don't perform the redirection if: * 1) The sender of this command is our master. * 2) The command has no key arguments. */ - if (server.cluster_enabled && + if (g_pserver->cluster_enabled && !(c->flags & CLIENT_MASTER) && !(c->flags & CLIENT_LUA && - server.lua_caller->flags & CLIENT_MASTER) && + g_pserver->lua_caller->flags & CLIENT_MASTER) && !(c->cmd->getkeys_proc == NULL && c->cmd->firstkey == 0 && c->cmd->proc != execCommand)) { @@ -3488,7 +3511,7 @@ int processCommand(client *c, int callFlags) { int error_code; clusterNode *n = getNodeByQuery(c,c->cmd,c->argv,c->argc, &hashslot,&error_code); - if (n == NULL || n != server.cluster->myself) { + if (n == NULL || n != g_pserver->cluster->myself) { if (c->cmd->proc == execCommand) { discardTransaction(c); } else { @@ -3505,11 +3528,11 @@ int processCommand(client *c, int callFlags) { * the event loop since there is a busy Lua script running in timeout * condition, to avoid mixing the propagation of scripts with the * propagation of DELs due to eviction. */ - if (server.maxmemory && !server.lua_timedout) { + if (g_pserver->maxmemory && !g_pserver->lua_timedout) { int out_of_memory = freeMemoryIfNeededAndSafe() == C_ERR; /* freeMemoryIfNeeded may flush slave output buffers. This may result * into a slave, that may be the active client, to be freed. */ - if (server.current_client == NULL) return C_ERR; + if (serverTL->current_client == NULL) return C_ERR; /* It was impossible to free enough memory, and the command the client * is trying to execute is denied during OOM conditions or the client @@ -3527,7 +3550,7 @@ int processCommand(client *c, int callFlags) { * and if this is a master instance. 
*/ int deny_write_type = writeCommandsDeniedByDiskError(); if (deny_write_type != DISK_ERROR_TYPE_NONE && - listLength(server.masters) == 0 && + listLength(g_pserver->masters) == 0 && (c->cmd->flags & CMD_WRITE || c->cmd->proc == pingCommand)) { @@ -3538,17 +3561,17 @@ int processCommand(client *c, int callFlags) { addReplySds(c, sdscatprintf(sdsempty(), "-MISCONF Errors writing to the AOF file: %s\r\n", - strerror(server.aof_last_write_errno))); + strerror(g_pserver->aof_last_write_errno))); return C_OK; } /* Don't accept write commands if there are not enough good slaves and * user configured the min-slaves-to-write option. */ - if (listLength(server.masters) == 0 && - server.repl_min_slaves_to_write && - server.repl_min_slaves_max_lag && + if (listLength(g_pserver->masters) == 0 && + g_pserver->repl_min_slaves_to_write && + g_pserver->repl_min_slaves_max_lag && c->cmd->flags & CMD_WRITE && - server.repl_good_slaves_count < server.repl_min_slaves_to_write) + g_pserver->repl_good_slaves_count < g_pserver->repl_min_slaves_to_write) { flagTransaction(c); addReply(c, shared.noreplicaserr); @@ -3557,7 +3580,7 @@ int processCommand(client *c, int callFlags) { /* Don't accept write commands if this is a read only slave. But * accept write commands if this is our master. */ - if (listLength(server.masters) && server.repl_slave_ro && + if (listLength(g_pserver->masters) && g_pserver->repl_slave_ro && !(c->flags & CLIENT_MASTER) && c->cmd->flags & CMD_WRITE) { @@ -3581,7 +3604,7 @@ int processCommand(client *c, int callFlags) { * when slave-serve-stale-data is no and we are a slave with a broken * link with master. */ if (FBrokenLinkToMaster() && - server.repl_serve_stale_data == 0 && + g_pserver->repl_serve_stale_data == 0 && !(c->cmd->flags & CMD_STALE)) { flagTransaction(c); @@ -3591,13 +3614,13 @@ int processCommand(client *c, int callFlags) { /* Loading DB? Return an error if the command has not the * CMD_LOADING flag. 
*/ - if (server.loading && !(c->cmd->flags & CMD_LOADING)) { + if (g_pserver->loading && !(c->cmd->flags & CMD_LOADING)) { addReply(c, shared.loadingerr); return C_OK; } /* Lua script too slow? Only allow a limited number of commands. */ - if (server.lua_timedout && + if (g_pserver->lua_timedout && c->cmd->proc != authCommand && c->cmd->proc != helloCommand && c->cmd->proc != replconfCommand && @@ -3622,8 +3645,8 @@ int processCommand(client *c, int callFlags) { addReply(c,shared.queued); } else { call(c,callFlags); - c->woff = server.master_repl_offset; - if (listLength(server.ready_keys)) + c->woff = g_pserver->master_repl_offset; + if (listLength(g_pserver->ready_keys)) handleClientsBlockedOnKeys(); } return C_OK; @@ -3636,17 +3659,17 @@ int processCommand(client *c, int callFlags) { void closeListeningSockets(int unlink_unix_socket) { int j; - for (int iel = 0; iel < server.cthreads; ++iel) + for (int iel = 0; iel < cserver.cthreads; ++iel) { - for (j = 0; j < server.rgthreadvar[iel].ipfd_count; j++) - close(server.rgthreadvar[iel].ipfd[j]); + for (j = 0; j < g_pserver->rgthreadvar[iel].ipfd_count; j++) + close(g_pserver->rgthreadvar[iel].ipfd[j]); } - if (server.sofd != -1) close(server.sofd); - if (server.cluster_enabled) - for (j = 0; j < server.cfd_count; j++) close(server.cfd[j]); - if (unlink_unix_socket && server.unixsocket) { + if (g_pserver->sofd != -1) close(g_pserver->sofd); + if (g_pserver->cluster_enabled) + for (j = 0; j < g_pserver->cfd_count; j++) close(g_pserver->cfd[j]); + if (unlink_unix_socket && g_pserver->unixsocket) { serverLog(LL_NOTICE,"Removing the unix socket file."); - unlink(server.unixsocket); /* don't care if this fails */ + unlink(g_pserver->unixsocket); /* don't care if this fails */ } } @@ -3662,18 +3685,18 @@ int prepareForShutdown(int flags) { /* Kill the saving child if there is a background saving in progress. 
We want to avoid race conditions, for instance our saving child may overwrite the synchronous saving did by SHUTDOWN. */ - if (server.rdb_child_pid != -1) { + if (g_pserver->rdb_child_pid != -1) { serverLog(LL_WARNING,"There is a child saving an .rdb. Killing it!"); killRDBChild(); } - if (server.aof_state != AOF_OFF) { + if (g_pserver->aof_state != AOF_OFF) { /* Kill the AOF saving child as the AOF we already have may be longer * but contains the full dataset anyway. */ - if (server.aof_child_pid != -1) { + if (g_pserver->aof_child_pid != -1) { /* If we have AOF enabled but haven't written the AOF yet, don't * shutdown or else the dataset will be lost. */ - if (server.aof_state == AOF_WAIT_REWRITE) { + if (g_pserver->aof_state == AOF_WAIT_REWRITE) { serverLog(LL_WARNING, "Writing initial AOF, can't exit."); return C_ERR; } @@ -3684,11 +3707,11 @@ int prepareForShutdown(int flags) { /* Append only file: flush buffers and fsync() the AOF at exit */ serverLog(LL_NOTICE,"Calling fsync() on the AOF file."); flushAppendOnlyFile(1); - redis_fsync(server.aof_fd); + redis_fsync(g_pserver->aof_fd); } /* Create a new RDB file before exiting. */ - if ((server.saveparamslen > 0 && !nosave) || save) { + if ((g_pserver->saveparamslen > 0 && !nosave) || save) { serverLog(LL_NOTICE,"Saving the final RDB snapshot before exiting."); /* Snapshotting. Perform a SYNC SAVE and exit */ rdbSaveInfo rsi, *rsiptr; @@ -3705,9 +3728,9 @@ int prepareForShutdown(int flags) { } /* Remove the pid file if possible and needed. */ - if (server.daemonize || server.pidfile) { + if (cserver.daemonize || cserver.pidfile) { serverLog(LL_NOTICE,"Removing the pid file."); - unlink(server.pidfile); + unlink(cserver.pidfile); } /* Best effort flush of slave output buffers, so that we hopefully @@ -3717,7 +3740,7 @@ int prepareForShutdown(int flags) { /* Close the listening sockets. Apparently this allows faster restarts. 
*/ closeListeningSockets(1); serverLog(LL_WARNING,"%s is now ready to exit, bye bye...", - server.sentinel_mode ? "Sentinel" : "KeyDB"); + g_pserver->sentinel_mode ? "Sentinel" : "KeyDB"); return C_OK; } @@ -3735,13 +3758,13 @@ int prepareForShutdown(int flags) { * DISK_ERROR_TYPE_RDB: Don't accept writes: RDB errors. */ int writeCommandsDeniedByDiskError(void) { - if (server.stop_writes_on_bgsave_err && - server.saveparamslen > 0 && - server.lastbgsave_status == C_ERR) + if (g_pserver->stop_writes_on_bgsave_err && + g_pserver->saveparamslen > 0 && + g_pserver->lastbgsave_status == C_ERR) { return DISK_ERROR_TYPE_RDB; - } else if (server.aof_state != AOF_OFF && - server.aof_last_write_status == C_ERR) + } else if (g_pserver->aof_state != AOF_OFF && + g_pserver->aof_last_write_status == C_ERR) { return DISK_ERROR_TYPE_AOF; } else { @@ -3847,15 +3870,15 @@ void commandCommand(client *c) { if (c->argc == 2 && !strcasecmp((const char*)ptrFromObj(c->argv[1]),"help")) { const char *help[] = { "(no subcommand) -- Return details about all Redis commands.", -"COUNT -- Return the total number of commands in this Redis server.", +"COUNT -- Return the total number of commands in this Redis g_pserver->", "GETKEYS -- Return the keys from a full Redis command.", "INFO [command-name ...] 
-- Return details about multiple Redis commands.", NULL }; addReplyHelp(c, help); } else if (c->argc == 1) { - addReplyArrayLen(c, dictSize(server.commands)); - di = dictGetIterator(server.commands); + addReplyArrayLen(c, dictSize(g_pserver->commands)); + di = dictGetIterator(g_pserver->commands); while ((de = dictNext(di)) != NULL) { addReplyCommand(c, (redisCommand*)dictGetVal(de)); } @@ -3864,10 +3887,10 @@ NULL int i; addReplyArrayLen(c, c->argc-2); for (i = 2; i < c->argc; i++) { - addReplyCommand(c, (redisCommand*)dictFetchValue(server.commands, ptrFromObj(c->argv[i]))); + addReplyCommand(c, (redisCommand*)dictFetchValue(g_pserver->commands, ptrFromObj(c->argv[i]))); } } else if (!strcasecmp((const char*)ptrFromObj(c->argv[1]), "count") && c->argc == 2) { - addReplyLongLong(c, dictSize(server.commands)); + addReplyLongLong(c, dictSize(g_pserver->commands)); } else if (!strcasecmp((const char*)ptrFromObj(c->argv[1]),"getkeys") && c->argc >= 3) { struct redisCommand *cmd = (redisCommand*)lookupCommand((sds)ptrFromObj(c->argv[2])); int *keys, numkeys, j; @@ -3932,7 +3955,7 @@ void bytesToHuman(char *s, unsigned long long n) { * on memory corruption problems. 
*/ sds genRedisInfoString(const char *section) { sds info = sdsempty(); - time_t uptime = server.unixtime-server.stat_starttime; + time_t uptime = g_pserver->unixtime-cserver.stat_starttime; int j; struct rusage self_ru, c_ru; int allsections = 0, defsections = 0; @@ -3951,8 +3974,8 @@ sds genRedisInfoString(const char *section) { static struct utsname name; const char *mode; - if (server.cluster_enabled) mode = "cluster"; - else if (server.sentinel_mode) mode = "sentinel"; + if (g_pserver->cluster_enabled) mode = "cluster"; + else if (g_pserver->sentinel_mode) mode = "sentinel"; else mode = "standalone"; if (sections++) info = sdscat(info,"\r\n"); @@ -3964,7 +3987,7 @@ sds genRedisInfoString(const char *section) { } unsigned int lruclock; - atomicGet(server.lruclock,lruclock); + atomicGet(g_pserver->lruclock,lruclock); info = sdscatprintf(info, "# Server\r\n" "redis_version:%s\r\n" @@ -3987,13 +4010,13 @@ sds genRedisInfoString(const char *section) { "lru_clock:%ld\r\n" "executable:%s\r\n" "config_file:%s\r\n", - REDIS_VERSION, + KEYDB_SET_VERSION, redisGitSHA1(), strtol(redisGitDirty(),NULL,10) > 0, (unsigned long long) redisBuildId(), mode, name.sysname, name.release, name.machine, - server.arch_bits, + (int)sizeof(void*)*8, aeGetApiName(), REDIS_ATOMIC_API, #ifdef __GNUC__ @@ -4002,15 +4025,15 @@ sds genRedisInfoString(const char *section) { 0,0,0, #endif (long) getpid(), - server.runid, - server.port, + g_pserver->runid, + g_pserver->port, (intmax_t)uptime, (intmax_t)(uptime/(3600*24)), - server.hz, - server.config_hz, + g_pserver->hz, + g_pserver->config_hz, (unsigned long) lruclock, - server.executable ? server.executable : "", - server.configfile ? server.configfile : ""); + cserver.executable ? cserver.executable : "", + cserver.configfile ? 
cserver.configfile : ""); } /* Clients */ @@ -4024,14 +4047,14 @@ sds genRedisInfoString(const char *section) { "client_recent_max_input_buffer:%zu\r\n" "client_recent_max_output_buffer:%zu\r\n" "blocked_clients:%d\r\n", - listLength(server.clients)-listLength(server.slaves), + listLength(g_pserver->clients)-listLength(g_pserver->slaves), maxin, maxout, - server.blocked_clients); - for (int ithread = 0; ithread < server.cthreads; ++ithread) + g_pserver->blocked_clients); + for (int ithread = 0; ithread < cserver.cthreads; ++ithread) { info = sdscatprintf(info, "thread_%d_clients:%d\r\n", - ithread, server.rgthreadvar[ithread].cclients); + ithread, g_pserver->rgthreadvar[ithread].cclients); } } @@ -4045,25 +4068,25 @@ sds genRedisInfoString(const char *section) { char used_memory_rss_hmem[64]; char maxmemory_hmem[64]; size_t zmalloc_used = zmalloc_used_memory(); - size_t total_system_mem = server.system_memory_size; + size_t total_system_mem = cserver.system_memory_size; const char *evict_policy = evictPolicyToString(); - long long memory_lua = (long long)lua_gc(server.lua,LUA_GCCOUNT,0)*1024; + long long memory_lua = (long long)lua_gc(g_pserver->lua,LUA_GCCOUNT,0)*1024; struct redisMemOverhead *mh = getMemoryOverheadData(); /* Peak memory is updated from time to time by serverCron() so it * may happen that the instantaneous value is slightly bigger than * the peak value. This may confuse users, so we update the peak * if found smaller than the current memory usage. 
*/ - if (zmalloc_used > server.stat_peak_memory) - server.stat_peak_memory = zmalloc_used; + if (zmalloc_used > g_pserver->stat_peak_memory) + g_pserver->stat_peak_memory = zmalloc_used; bytesToHuman(hmem,zmalloc_used); - bytesToHuman(peak_hmem,server.stat_peak_memory); + bytesToHuman(peak_hmem,g_pserver->stat_peak_memory); bytesToHuman(total_system_hmem,total_system_mem); bytesToHuman(used_memory_lua_hmem,memory_lua); bytesToHuman(used_memory_scripts_hmem,mh->lua_caches); - bytesToHuman(used_memory_rss_hmem,server.cron_malloc_stats.process_rss); - bytesToHuman(maxmemory_hmem,server.maxmemory); + bytesToHuman(used_memory_rss_hmem,g_pserver->cron_malloc_stats.process_rss); + bytesToHuman(maxmemory_hmem,g_pserver->maxmemory); if (sections++) info = sdscat(info,"\r\n"); info = sdscatprintf(info, @@ -4110,26 +4133,26 @@ sds genRedisInfoString(const char *section) { "lazyfree_pending_objects:%zu\r\n", zmalloc_used, hmem, - server.cron_malloc_stats.process_rss, + g_pserver->cron_malloc_stats.process_rss, used_memory_rss_hmem, - server.stat_peak_memory, + g_pserver->stat_peak_memory, peak_hmem, mh->peak_perc, mh->overhead_total, mh->startup_allocated, mh->dataset, mh->dataset_perc, - server.cron_malloc_stats.allocator_allocated, - server.cron_malloc_stats.allocator_active, - server.cron_malloc_stats.allocator_resident, + g_pserver->cron_malloc_stats.allocator_allocated, + g_pserver->cron_malloc_stats.allocator_active, + g_pserver->cron_malloc_stats.allocator_resident, (unsigned long)total_system_mem, total_system_hmem, memory_lua, used_memory_lua_hmem, (long long) mh->lua_caches, used_memory_scripts_hmem, - dictSize(server.lua_scripts), - server.maxmemory, + dictSize(g_pserver->lua_scripts), + g_pserver->maxmemory, maxmemory_hmem, evict_policy, mh->allocator_frag, @@ -4146,7 +4169,7 @@ sds genRedisInfoString(const char *section) { mh->clients_normal, mh->aof_buffer, ZMALLOC_LIB, - server.active_defrag_running, + g_pserver->active_defrag_running, 
lazyfreeGetPendingObjectsCount() ); freeMemoryOverheadData(mh); @@ -4173,26 +4196,26 @@ sds genRedisInfoString(const char *section) { "aof_last_bgrewrite_status:%s\r\n" "aof_last_write_status:%s\r\n" "aof_last_cow_size:%zu\r\n", - server.loading, - server.dirty, - server.rdb_child_pid != -1, - (intmax_t)server.lastsave, - (server.lastbgsave_status == C_OK) ? "ok" : "err", - (intmax_t)server.rdb_save_time_last, - (intmax_t)((server.rdb_child_pid == -1) ? - -1 : time(NULL)-server.rdb_save_time_start), - server.stat_rdb_cow_bytes, - server.aof_state != AOF_OFF, - server.aof_child_pid != -1, - server.aof_rewrite_scheduled, - (intmax_t)server.aof_rewrite_time_last, - (intmax_t)((server.aof_child_pid == -1) ? - -1 : time(NULL)-server.aof_rewrite_time_start), - (server.aof_lastbgrewrite_status == C_OK) ? "ok" : "err", - (server.aof_last_write_status == C_OK) ? "ok" : "err", - server.stat_aof_cow_bytes); - - if (server.aof_state != AOF_OFF) { + g_pserver->loading, + g_pserver->dirty, + g_pserver->rdb_child_pid != -1, + (intmax_t)g_pserver->lastsave, + (g_pserver->lastbgsave_status == C_OK) ? "ok" : "err", + (intmax_t)g_pserver->rdb_save_time_last, + (intmax_t)((g_pserver->rdb_child_pid == -1) ? + -1 : time(NULL)-g_pserver->rdb_save_time_start), + g_pserver->stat_rdb_cow_bytes, + g_pserver->aof_state != AOF_OFF, + g_pserver->aof_child_pid != -1, + g_pserver->aof_rewrite_scheduled, + (intmax_t)g_pserver->aof_rewrite_time_last, + (intmax_t)((g_pserver->aof_child_pid == -1) ? + -1 : time(NULL)-g_pserver->aof_rewrite_time_start), + (g_pserver->aof_lastbgrewrite_status == C_OK) ? "ok" : "err", + (g_pserver->aof_last_write_status == C_OK) ? 
"ok" : "err", + g_pserver->stat_aof_cow_bytes); + + if (g_pserver->aof_state != AOF_OFF) { info = sdscatprintf(info, "aof_current_size:%lld\r\n" "aof_base_size:%lld\r\n" @@ -4201,30 +4224,30 @@ sds genRedisInfoString(const char *section) { "aof_rewrite_buffer_length:%lu\r\n" "aof_pending_bio_fsync:%llu\r\n" "aof_delayed_fsync:%lu\r\n", - (long long) server.aof_current_size, - (long long) server.aof_rewrite_base_size, - server.aof_rewrite_scheduled, - sdslen(server.aof_buf), + (long long) g_pserver->aof_current_size, + (long long) g_pserver->aof_rewrite_base_size, + g_pserver->aof_rewrite_scheduled, + sdslen(g_pserver->aof_buf), aofRewriteBufferSize(), bioPendingJobsOfType(BIO_AOF_FSYNC), - server.aof_delayed_fsync); + g_pserver->aof_delayed_fsync); } - if (server.loading) { + if (g_pserver->loading) { double perc; time_t eta, elapsed; - off_t remaining_bytes = server.loading_total_bytes- - server.loading_loaded_bytes; + off_t remaining_bytes = g_pserver->loading_total_bytes- + g_pserver->loading_loaded_bytes; - perc = ((double)server.loading_loaded_bytes / - (server.loading_total_bytes+1)) * 100; + perc = ((double)g_pserver->loading_loaded_bytes / + (g_pserver->loading_total_bytes+1)) * 100; - elapsed = time(NULL)-server.loading_start_time; + elapsed = time(NULL)-g_pserver->loading_start_time; if (elapsed == 0) { eta = 1; /* A fake 1 second figure if we don't have enough info */ } else { - eta = (elapsed*remaining_bytes)/(server.loading_loaded_bytes+1); + eta = (elapsed*remaining_bytes)/(g_pserver->loading_loaded_bytes+1); } info = sdscatprintf(info, @@ -4233,9 +4256,9 @@ sds genRedisInfoString(const char *section) { "loading_loaded_bytes:%llu\r\n" "loading_loaded_perc:%.2f\r\n" "loading_eta_seconds:%jd\r\n", - (intmax_t) server.loading_start_time, - (unsigned long long) server.loading_total_bytes, - (unsigned long long) server.loading_loaded_bytes, + (intmax_t) g_pserver->loading_start_time, + (unsigned long long) g_pserver->loading_total_bytes, + (unsigned long 
long) g_pserver->loading_loaded_bytes, perc, (intmax_t)eta ); @@ -4273,32 +4296,32 @@ sds genRedisInfoString(const char *section) { "active_defrag_misses:%lld\r\n" "active_defrag_key_hits:%lld\r\n" "active_defrag_key_misses:%lld\r\n", - server.stat_numconnections, - server.stat_numcommands, + g_pserver->stat_numconnections, + g_pserver->stat_numcommands, getInstantaneousMetric(STATS_METRIC_COMMAND), - server.stat_net_input_bytes, - server.stat_net_output_bytes, + g_pserver->stat_net_input_bytes, + g_pserver->stat_net_output_bytes, (float)getInstantaneousMetric(STATS_METRIC_NET_INPUT)/1024, (float)getInstantaneousMetric(STATS_METRIC_NET_OUTPUT)/1024, - server.stat_rejected_conn, - server.stat_sync_full, - server.stat_sync_partial_ok, - server.stat_sync_partial_err, - server.stat_expiredkeys, - server.stat_expired_stale_perc*100, - server.stat_expired_time_cap_reached_count, - server.stat_evictedkeys, - server.stat_keyspace_hits, - server.stat_keyspace_misses, - dictSize(server.pubsub_channels), - listLength(server.pubsub_patterns), - server.stat_fork_time, - dictSize(server.migrate_cached_sockets), + g_pserver->stat_rejected_conn, + g_pserver->stat_sync_full, + g_pserver->stat_sync_partial_ok, + g_pserver->stat_sync_partial_err, + g_pserver->stat_expiredkeys, + g_pserver->stat_expired_stale_perc*100, + g_pserver->stat_expired_time_cap_reached_count, + g_pserver->stat_evictedkeys, + g_pserver->stat_keyspace_hits, + g_pserver->stat_keyspace_misses, + dictSize(g_pserver->pubsub_channels), + listLength(g_pserver->pubsub_patterns), + g_pserver->stat_fork_time, + dictSize(g_pserver->migrate_cached_sockets), getSlaveKeyWithExpireCount(), - server.stat_active_defrag_hits, - server.stat_active_defrag_misses, - server.stat_active_defrag_key_hits, - server.stat_active_defrag_key_misses); + g_pserver->stat_active_defrag_hits, + g_pserver->stat_active_defrag_misses, + g_pserver->stat_active_defrag_key_hits, + g_pserver->stat_active_defrag_key_misses); } /* Replication */ @@ 
-4307,11 +4330,11 @@ sds genRedisInfoString(const char *section) { info = sdscatprintf(info, "# Replication\r\n" "role:%s\r\n", - listLength(server.masters) == 0 ? "master" : "slave"); - if (listLength(server.masters)) { + listLength(g_pserver->masters) == 0 ? "master" : "slave"); + if (listLength(g_pserver->masters)) { listIter li; listNode *ln; - listRewind(server.masters, &li); + listRewind(g_pserver->masters, &li); int cmasters = 0; while ((ln = listNext(&li))) @@ -4338,7 +4361,7 @@ sds genRedisInfoString(const char *section) { (mi->repl_state == REPL_STATE_CONNECTED) ? "up" : "down", mi->master ? - ((int)(server.unixtime-mi->master->lastinteraction)) : -1, + ((int)(g_pserver->unixtime-mi->master->lastinteraction)) : -1, mi->repl_state == REPL_STATE_TRANSFER, slave_repl_offset ); @@ -4349,42 +4372,42 @@ sds genRedisInfoString(const char *section) { "master_sync_last_io_seconds_ago:%d\r\n" , (long long) (mi->repl_transfer_size - mi->repl_transfer_read), - (int)(server.unixtime-mi->repl_transfer_lastio) + (int)(g_pserver->unixtime-mi->repl_transfer_lastio) ); } if (mi->repl_state != REPL_STATE_CONNECTED) { info = sdscatprintf(info, "master_link_down_since_seconds:%jd\r\n", - (intmax_t)server.unixtime-mi->repl_down_since); + (intmax_t)g_pserver->unixtime-mi->repl_down_since); } } info = sdscatprintf(info, "slave_priority:%d\r\n" "slave_read_only:%d\r\n", - server.slave_priority, - server.repl_slave_ro); + g_pserver->slave_priority, + g_pserver->repl_slave_ro); } info = sdscatprintf(info, "connected_slaves:%lu\r\n", - listLength(server.slaves)); + listLength(g_pserver->slaves)); /* If min-slaves-to-write is active, write the number of slaves * currently considered 'good'. 
*/ - if (server.repl_min_slaves_to_write && - server.repl_min_slaves_max_lag) { + if (g_pserver->repl_min_slaves_to_write && + g_pserver->repl_min_slaves_max_lag) { info = sdscatprintf(info, "min_slaves_good_slaves:%d\r\n", - server.repl_good_slaves_count); + g_pserver->repl_good_slaves_count); } - if (listLength(server.slaves)) { + if (listLength(g_pserver->slaves)) { int slaveid = 0; listNode *ln; listIter li; - listRewind(server.slaves,&li); + listRewind(g_pserver->slaves,&li); while((ln = listNext(&li))) { client *slave = (client*)listNodeValue(ln); const char *state = NULL; @@ -4430,14 +4453,14 @@ sds genRedisInfoString(const char *section) { "repl_backlog_size:%lld\r\n" "repl_backlog_first_byte_offset:%lld\r\n" "repl_backlog_histlen:%lld\r\n", - server.replid, - server.replid2, - server.master_repl_offset, - server.second_replid_offset, - server.repl_backlog != NULL, - server.repl_backlog_size, - server.repl_backlog_off, - server.repl_backlog_histlen); + g_pserver->replid, + g_pserver->replid2, + g_pserver->master_repl_offset, + g_pserver->second_replid_offset, + g_pserver->repl_backlog != NULL, + g_pserver->repl_backlog_size, + g_pserver->repl_backlog_off, + g_pserver->repl_backlog_histlen); } /* CPU */ @@ -4455,7 +4478,7 @@ sds genRedisInfoString(const char *section) { (long)self_ru.ru_utime.tv_sec, (long)self_ru.ru_utime.tv_usec, (long)c_ru.ru_stime.tv_sec, (long)c_ru.ru_stime.tv_usec, (long)c_ru.ru_utime.tv_sec, (long)c_ru.ru_utime.tv_usec, - server.cthreads, + cserver.cthreads, fastlock_getlongwaitcount()); } @@ -4467,7 +4490,7 @@ sds genRedisInfoString(const char *section) { struct redisCommand *c; dictEntry *de; dictIterator *di; - di = dictGetSafeIterator(server.commands); + di = dictGetSafeIterator(g_pserver->commands); while((de = dictNext(di)) != NULL) { c = (struct redisCommand *) dictGetVal(de); if (!c->calls) continue; @@ -4485,22 +4508,22 @@ sds genRedisInfoString(const char *section) { info = sdscatprintf(info, "# Cluster\r\n" 
"cluster_enabled:%d\r\n", - server.cluster_enabled); + g_pserver->cluster_enabled); } /* Key space */ if (allsections || defsections || !strcasecmp(section,"keyspace")) { if (sections++) info = sdscat(info,"\r\n"); info = sdscatprintf(info, "# Keyspace\r\n"); - for (j = 0; j < server.dbnum; j++) { + for (j = 0; j < cserver.dbnum; j++) { long long keys, vkeys; - keys = dictSize(server.db[j].pdict); - vkeys = dictSize(server.db[j].expires); + keys = dictSize(g_pserver->db[j].pdict); + vkeys = dictSize(g_pserver->db[j].expires); if (keys || vkeys) { info = sdscatprintf(info, "db%d:keys=%lld,expires=%lld,avg_ttl=%lld\r\n", - j, keys, vkeys, server.db[j].avg_ttl); + j, keys, vkeys, g_pserver->db[j].avg_ttl); } } } @@ -4523,7 +4546,7 @@ void monitorCommand(client *c) { if (c->flags & CLIENT_SLAVE) return; c->flags |= (CLIENT_SLAVE|CLIENT_MONITOR); - listAddNodeTail(server.monitors,c); + listAddNodeTail(g_pserver->monitors,c); addReply(c,shared.ok); } @@ -4557,10 +4580,10 @@ void linuxMemoryWarnings(void) { void createPidFile(void) { /* If pidfile requested, but no pidfile defined, use * default pidfile path */ - if (!server.pidfile) server.pidfile = zstrdup(CONFIG_DEFAULT_PID_FILE); + if (!cserver.pidfile) cserver.pidfile = zstrdup(CONFIG_DEFAULT_PID_FILE); /* Try to write the pid file in a best-effort way. 
*/ - FILE *fp = fopen(server.pidfile,"w"); + FILE *fp = fopen(cserver.pidfile,"w"); if (fp) { fprintf(fp,"%d\n",(int)getpid()); fclose(fp); @@ -4586,7 +4609,7 @@ void daemonize(void) { void version(void) { printf("Redis server v=%s sha=%s:%d malloc=%s bits=%d build=%llx\n", - REDIS_VERSION, + KEYDB_REAL_VERSION, redisGitSHA1(), atoi(redisGitDirty()) > 0, ZMALLOC_LIB, @@ -4617,30 +4640,30 @@ void redisAsciiArt(void) { char *buf = (char*)zmalloc(1024*16, MALLOC_LOCAL); const char *mode; - if (server.cluster_enabled) mode = "cluster"; - else if (server.sentinel_mode) mode = "sentinel"; + if (g_pserver->cluster_enabled) mode = "cluster"; + else if (g_pserver->sentinel_mode) mode = "sentinel"; else mode = "standalone"; /* Show the ASCII logo if: log file is stdout AND stdout is a * tty AND syslog logging is disabled. Also show logo if the user * forced us to do so via redis.conf. */ - int show_logo = ((!server.syslog_enabled && - server.logfile[0] == '\0' && + int show_logo = ((!g_pserver->syslog_enabled && + g_pserver->logfile[0] == '\0' && isatty(fileno(stdout))) || - server.always_show_logo); + g_pserver->always_show_logo); if (!show_logo) { serverLog(LL_NOTICE, "Running mode=%s, port=%d.", - mode, server.port + mode, g_pserver->port ); } else { snprintf(buf,1024*16,ascii_logo, - REDIS_VERSION, + KEYDB_REAL_VERSION, redisGitSHA1(), strtol(redisGitDirty(),NULL,10) > 0, (sizeof(long) == 8) ? "64" : "32", - mode, server.port, + mode, g_pserver->port, (long) getpid() ); serverLogRaw(LL_NOTICE|LL_RAW,buf); @@ -4666,17 +4689,17 @@ static void sigShutdownHandler(int sig) { * If we receive the signal the second time, we interpret this as * the user really wanting to quit ASAP without waiting to persist * on disk. */ - if (server.shutdown_asap && sig == SIGINT) { + if (g_pserver->shutdown_asap && sig == SIGINT) { serverLogFromHandler(LL_WARNING, "You insist... exiting now."); rdbRemoveTempFile(getpid()); exit(1); /* Exit with an error since this was not a clean shutdown. 
*/ - } else if (server.loading) { + } else if (g_pserver->loading) { serverLogFromHandler(LL_WARNING, "Received shutdown signal during loading, exiting now."); exit(0); } serverLogFromHandler(LL_WARNING, msg); - server.shutdown_asap = 1; + g_pserver->shutdown_asap = 1; } void setupSignalHandlers(void) { @@ -4718,17 +4741,17 @@ int checkForSentinelMode(int argc, char **argv) { /* Function called at startup to load RDB or AOF file in memory. */ void loadDataFromDisk(void) { long long start = ustime(); - if (server.aof_state == AOF_ON) { - if (loadAppendOnlyFile(server.aof_filename) == C_OK) + if (g_pserver->aof_state == AOF_ON) { + if (loadAppendOnlyFile(g_pserver->aof_filename) == C_OK) serverLog(LL_NOTICE,"DB loaded from append only file: %.3f seconds",(float)(ustime()-start)/1000000); - } else if (server.rdb_filename != NULL || server.rdb_s3bucketpath != NULL) { + } else if (g_pserver->rdb_filename != NULL || g_pserver->rdb_s3bucketpath != NULL) { rdbSaveInfo rsi = RDB_SAVE_INFO_INIT; if (rdbLoad(&rsi) == C_OK) { serverLog(LL_NOTICE,"DB loaded from disk: %.3f seconds", (float)(ustime()-start)/1000000); /* Restore the replication ID / offset from the RDB file. */ - if ((listLength(server.masters) || (server.cluster_enabled && nodeIsSlave(server.cluster->myself)))&& + if ((listLength(g_pserver->masters) || (g_pserver->cluster_enabled && nodeIsSlave(g_pserver->cluster->myself)))&& rsi.repl_id_is_set && rsi.repl_offset != -1 && /* Note that older implementations may save a repl_stream_db @@ -4736,12 +4759,12 @@ void loadDataFromDisk(void) { * in function rdbPopulateSaveInfo. 
*/ rsi.repl_stream_db != -1) { - memcpy(server.replid,rsi.repl_id,sizeof(server.replid)); - server.master_repl_offset = rsi.repl_offset; + memcpy(g_pserver->replid,rsi.repl_id,sizeof(g_pserver->replid)); + g_pserver->master_repl_offset = rsi.repl_offset; listIter li; listNode *ln; - listRewind(server.masters, &li); + listRewind(g_pserver->masters, &li); while ((ln = listNext(&li))) { redisMaster *mi = (redisMaster*)listNodeValue(ln); @@ -4768,13 +4791,13 @@ void redisOutOfMemoryHandler(size_t allocation_size) { void redisSetProcTitle(const char *title) { #ifdef USE_SETPROCTITLE const char *server_mode = ""; - if (server.cluster_enabled) server_mode = " [cluster]"; - else if (server.sentinel_mode) server_mode = " [sentinel]"; + if (g_pserver->cluster_enabled) server_mode = " [cluster]"; + else if (g_pserver->sentinel_mode) server_mode = " [sentinel]"; setproctitle("%s %s:%d%s", title, - server.bindaddr_count ? server.bindaddr[0] : "*", - server.port, + g_pserver->bindaddr_count ? g_pserver->bindaddr[0] : "*", + g_pserver->port, server_mode); #else UNUSED(title); @@ -4878,17 +4901,31 @@ int redisIsSupervised(int mode) { uint64_t getMvccTstamp() { - return (server.mstime << 16); + return g_pserver->mvcc_tstamp; +} + +void incrementMvccTstamp() +{ + uint64_t msPrev = g_pserver->mvcc_tstamp >> 20; + if (msPrev >= (uint64_t)g_pserver->mstime) // we can be greater if the count overflows + { + atomicIncr(g_pserver->mvcc_tstamp, 1); + } + else + { + g_pserver->mvcc_tstamp = ((uint64_t)g_pserver->mstime) << 20; + } } void *workerThreadMain(void *parg) { int iel = (int)((int64_t)parg); serverLog(LOG_INFO, "Thread %d alive.", iel); - serverTL = server.rgthreadvar+iel; // set the TLS threadsafe global + serverTL = g_pserver->rgthreadvar+iel; // set the TLS threadsafe global + moduleAcquireGIL(true); // Normally afterSleep acquires this, but that won't be called on the first run int isMainThread = (iel == IDX_EVENT_LOOP_MAIN); - aeEventLoop *el = server.rgthreadvar[iel].el; + 
aeEventLoop *el = g_pserver->rgthreadvar[iel].el; aeSetBeforeSleepProc(el, isMainThread ? beforeSleep : beforeSleepLite, isMainThread ? 0 : AE_SLEEP_THREADSAFE); aeSetAfterSleepProc(el, afterSleep, AE_SLEEP_THREADSAFE); aeMain(el); @@ -4945,13 +4982,13 @@ int main(int argc, char **argv) { char hashseed[16]; getRandomHexChars(hashseed,sizeof(hashseed)); dictSetHashFunctionSeed((uint8_t*)hashseed); - server.sentinel_mode = checkForSentinelMode(argc,argv); + g_pserver->sentinel_mode = checkForSentinelMode(argc,argv); initServerConfig(); for (int iel = 0; iel < MAX_EVENT_LOOPS; ++iel) { - initServerThread(server.rgthreadvar+iel, iel == IDX_EVENT_LOOP_MAIN); + initServerThread(g_pserver->rgthreadvar+iel, iel == IDX_EVENT_LOOP_MAIN); } - serverTL = &server.rgthreadvar[IDX_EVENT_LOOP_MAIN]; + serverTL = &g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN]; aeAcquireLock(); // We own the lock on boot ACLInit(); /* The ACL subsystem must be initialized ASAP because the @@ -4960,15 +4997,15 @@ int main(int argc, char **argv) { /* Store the executable path and arguments in a safe place in order * to be able to restart the server later. */ - server.executable = getAbsolutePath(argv[0]); - server.exec_argv = (char**)zmalloc(sizeof(char*)*(argc+1), MALLOC_LOCAL); - server.exec_argv[argc] = NULL; - for (j = 0; j < argc; j++) server.exec_argv[j] = zstrdup(argv[j]); + cserver.executable = getAbsolutePath(argv[0]); + cserver.exec_argv = (char**)zmalloc(sizeof(char*)*(argc+1), MALLOC_LOCAL); + cserver.exec_argv[argc] = NULL; + for (j = 0; j < argc; j++) cserver.exec_argv[j] = zstrdup(argv[j]); /* We need to init sentinel right now as parsing the configuration file * in sentinel mode will have the effect of populating the sentinel * data structures with master nodes to monitor. */ - if (server.sentinel_mode) { + if (g_pserver->sentinel_mode) { initSentinelConfig(); initSentinel(); } @@ -5005,11 +5042,11 @@ int main(int argc, char **argv) { /* First argument is the config file name? 
*/ if (argv[j][0] != '-' || argv[j][1] != '-') { configfile = argv[j]; - server.configfile = getAbsolutePath(configfile); - /* Replace the config file in server.exec_argv with + cserver.configfile = getAbsolutePath(configfile); + /* Replace the config file in g_pserver->exec_argv with * its absolute path. */ - zfree(server.exec_argv[j]); - server.exec_argv[j] = zstrdup(server.configfile); + zfree(cserver.exec_argv[j]); + cserver.exec_argv[j] = zstrdup(cserver.configfile); j++; } @@ -5035,7 +5072,7 @@ int main(int argc, char **argv) { } j++; } - if (server.sentinel_mode && configfile && *configfile == '-') { + if (g_pserver->sentinel_mode && configfile && *configfile == '-') { serverLog(LL_WARNING, "Sentinel config from STDIN not allowed."); serverLog(LL_WARNING, @@ -5050,37 +5087,37 @@ int main(int argc, char **argv) { serverLog(LL_WARNING, "oO0OoO0OoO0Oo KeyDB is starting oO0OoO0OoO0Oo"); serverLog(LL_WARNING, "KeyDB version=%s, bits=%d, commit=%s, modified=%d, pid=%d, just started", - REDIS_VERSION, + KEYDB_REAL_VERSION, (sizeof(long) == 8) ? 64 : 32, redisGitSHA1(), strtol(redisGitDirty(),NULL,10) > 0, (int)getpid()); if (argc == 1) { - serverLog(LL_WARNING, "WARNING: no config file specified, using the default config. In order to specify a config file use %s /path/to/%s.conf", argv[0], server.sentinel_mode ? "sentinel" : "redis"); + serverLog(LL_WARNING, "WARNING: no config file specified, using the default config. In order to specify a config file use %s /path/to/%s.conf", argv[0], g_pserver->sentinel_mode ? "sentinel" : "redis"); } else { serverLog(LL_WARNING, "Configuration loaded"); } - if (server.cthreads > (int)std::thread::hardware_concurrency()) { + if (cserver.cthreads > (int)std::thread::hardware_concurrency()) { serverLog(LL_WARNING, "WARNING: server-threads is greater than this machine's core count. 
Truncating to %u threads", std::thread::hardware_concurrency()); - server.cthreads = (int)std::thread::hardware_concurrency(); - server.cthreads = std::max(server.cthreads, 1); // in case of any weird sign overflows + cserver.cthreads = (int)std::thread::hardware_concurrency(); + cserver.cthreads = std::max(cserver.cthreads, 1); // in case of any weird sign overflows } - server.supervised = redisIsSupervised(server.supervised_mode); - int background = server.daemonize && !server.supervised; + cserver.supervised = redisIsSupervised(cserver.supervised_mode); + int background = cserver.daemonize && !cserver.supervised; if (background) daemonize(); initServer(); - initNetworking(server.cthreads > 1 /* fReusePort */); + initNetworking(cserver.cthreads > 1 /* fReusePort */); - if (background || server.pidfile) createPidFile(); + if (background || cserver.pidfile) createPidFile(); redisSetProcTitle(argv[0]); redisAsciiArt(); checkTcpBacklogSettings(); - if (!server.sentinel_mode) { + if (!g_pserver->sentinel_mode) { /* Things not needed when running in Sentinel mode. 
*/ serverLog(LL_WARNING,"Server initialized"); #ifdef __linux__ @@ -5089,7 +5126,7 @@ int main(int argc, char **argv) { moduleLoadFromQueue(); ACLLoadUsersAtStartup(); loadDataFromDisk(); - if (server.cluster_enabled) { + if (g_pserver->cluster_enabled) { if (verifyClusterConfigWithData() == C_ERR) { serverLog(LL_WARNING, "You can't have keys in a DB different than DB 0 when in " @@ -5097,39 +5134,40 @@ int main(int argc, char **argv) { exit(1); } } - if (server.rgthreadvar[IDX_EVENT_LOOP_MAIN].ipfd_count > 0) + if (g_pserver->rgthreadvar[IDX_EVENT_LOOP_MAIN].ipfd_count > 0) serverLog(LL_NOTICE,"Ready to accept connections"); - if (server.sofd > 0) - serverLog(LL_NOTICE,"The server is now ready to accept connections at %s", server.unixsocket); + if (g_pserver->sofd > 0) + serverLog(LL_NOTICE,"The server is now ready to accept connections at %s", g_pserver->unixsocket); } else { sentinelIsRunning(); } - if (server.rdb_filename == nullptr) + if (g_pserver->rdb_filename == nullptr) { - if (server.rdb_s3bucketpath == nullptr) - server.rdb_filename = zstrdup(CONFIG_DEFAULT_RDB_FILENAME); + if (g_pserver->rdb_s3bucketpath == nullptr) + g_pserver->rdb_filename = zstrdup(CONFIG_DEFAULT_RDB_FILENAME); else - server.repl_diskless_sync = TRUE; + g_pserver->repl_diskless_sync = TRUE; } - if (server.cthreads > 4) { - serverLog(LL_WARNING, "Warning: server-threads is set to %d. This is above the maximum recommend value of 4, please ensure you've verified this is actually faster on your machine.", server.cthreads); + if (cserver.cthreads > 4) { + serverLog(LL_WARNING, "Warning: server-threads is set to %d. This is above the maximum recommend value of 4, please ensure you've verified this is actually faster on your machine.", cserver.cthreads); } /* Warning the user about suspicious maxmemory setting. */ - if (server.maxmemory > 0 && server.maxmemory < 1024*1024) { - serverLog(LL_WARNING,"WARNING: You specified a maxmemory value that is less than 1MB (current value is %llu bytes). 
Are you sure this is what you really want?", server.maxmemory); + if (g_pserver->maxmemory > 0 && g_pserver->maxmemory < 1024*1024) { + serverLog(LL_WARNING,"WARNING: You specified a maxmemory value that is less than 1MB (current value is %llu bytes). Are you sure this is what you really want?", g_pserver->maxmemory); } aeReleaseLock(); //Finally we can dump the lock + moduleReleaseGIL(true); - serverAssert(server.cthreads > 0 && server.cthreads <= MAX_EVENT_LOOPS); + serverAssert(cserver.cthreads > 0 && cserver.cthreads <= MAX_EVENT_LOOPS); pthread_t rgthread[MAX_EVENT_LOOPS]; - for (int iel = 0; iel < server.cthreads; ++iel) + for (int iel = 0; iel < cserver.cthreads; ++iel) { pthread_create(rgthread + iel, NULL, workerThreadMain, (void*)((int64_t)iel)); - if (server.fThreadAffinity) + if (cserver.fThreadAffinity) { #ifdef __linux__ cpu_set_t cpuset; diff --git a/src/server.h b/src/server.h index 653563b09..e1db21e47 100644 --- a/src/server.h +++ b/src/server.h @@ -262,7 +262,7 @@ class robj_roptr #define LIMIT_PENDING_QUERYBUF (4*1024*1024) /* 4mb */ /* When configuring the server eventloop, we setup it so that the total number - * of file descriptors we can handle are server.maxclients + RESERVED_FDS + + * of file descriptors we can handle are g_pserver->maxclients + RESERVED_FDS + * a few more to stay safe. Since RESERVED_FDS defaults to 32, we add 96 * in order to make sure of not over provisioning more than 128 fds. */ #define CONFIG_FDSET_INCR (CONFIG_MIN_RESERVED_FDS+96) @@ -329,7 +329,7 @@ class robj_roptr #define CLIENT_DIRTY_CAS (1<<5) /* Watched keys modified. EXEC will fail. */ #define CLIENT_CLOSE_AFTER_REPLY (1<<6) /* Close after writing entire reply. 
*/ #define CLIENT_UNBLOCKED (1<<7) /* This client was unblocked and is stored in - server.unblocked_clients */ + g_pserver->unblocked_clients */ #define CLIENT_LUA (1<<8) /* This is a non connected client used by Lua */ #define CLIENT_ASKING (1<<9) /* Client issued the ASKING command */ #define CLIENT_CLOSE_ASAP (1<<10)/* Close this client ASAP */ @@ -379,7 +379,7 @@ class robj_roptr buffer configuration. Just the first three: normal, slave, pubsub. */ -/* Slave replication state. Used in server.repl_state for slaves to remember +/* Slave replication state. Used in g_pserver->repl_state for slaves to remember * what to do next. */ #define REPL_STATE_NONE 0 /* No active replication */ #define REPL_STATE_CONNECT 1 /* Must connect to master */ @@ -544,12 +544,12 @@ class robj_roptr #define NOTIFY_ALL (NOTIFY_GENERIC | NOTIFY_STRING | NOTIFY_LIST | NOTIFY_SET | NOTIFY_HASH | NOTIFY_ZSET | NOTIFY_EXPIRED | NOTIFY_EVICTED | NOTIFY_STREAM | NOTIFY_KEY_MISS) /* A flag */ /* Get the first bind addr or NULL */ -#define NET_FIRST_BIND_ADDR (server.bindaddr_count ? server.bindaddr[0] : NULL) +#define NET_FIRST_BIND_ADDR (g_pserver->bindaddr_count ? g_pserver->bindaddr[0] : NULL) /* Using the following macro you can run code inside serverCron() with the * specified period, specified in milliseconds. - * The actual resolution depends on server.hz. */ -#define run_with_period(_ms_) if ((_ms_ <= 1000/server.hz) || !(server.cronloops%((_ms_)/(1000/server.hz)))) + * The actual resolution depends on g_pserver->hz. 
*/ +#define run_with_period(_ms_) if ((_ms_ <= 1000/g_pserver->hz) || !(g_pserver->cronloops%((_ms_)/(1000/g_pserver->hz)))) /* We can print the stacktrace, so our assert is defined this way: */ #define serverAssertWithInfo(_c,_o,_e) ((_e)?(void)0 : (_serverAssertWithInfo(_c,_o,#_e,__FILE__,__LINE__),_exit(1))) @@ -709,10 +709,8 @@ typedef struct redisObject { unsigned lru:LRU_BITS; /* LRU time (relative to global lru_clock) or * LFU data (least significant 8 bits frequency * and most significant 16 bits access time). */ -#ifdef ENABLE_MVCC - uint64_t mvcc_tstamp; -#endif mutable int refcount; + uint64_t mvcc_tstamp; void *m_ptr; } robj; @@ -829,17 +827,17 @@ typedef struct blockingState { handled in module.c. */ } blockingState; -/* The following structure represents a node in the server.ready_keys list, +/* The following structure represents a node in the g_pserver->ready_keys list, * where we accumulate all the keys that had clients blocked with a blocking * operation such as B[LR]POP, but received new data in the context of the * last executed command. * * After the execution of every command or script, we run this list to check * if as a result we should serve data to clients blocked, unblocking them. - * Note that server.ready_keys will not have duplicates as there dictionary + * Note that g_pserver->ready_keys will not have duplicates as there dictionary * also called ready_keys in every structure representing a Redis database, * where we make sure to remember if a given key was already added in the - * server.ready_keys list. */ + * g_pserver->ready_keys list. */ typedef struct readyList { redisDb *db; robj *key; @@ -1099,7 +1097,7 @@ struct redisMemOverhead { * top-level master. */ typedef struct rdbSaveInfo { /* Used saving and loading. */ - int repl_stream_db; /* DB to select in server.master client. */ + int repl_stream_db; /* DB to select in g_pserver->master client. */ /* Used only loading. */ int repl_id_is_set; /* True if repl_id field is set. 
*/ @@ -1146,6 +1144,10 @@ struct redisServerThreadVars { list *unblocked_clients; /* list of clients to unblock before next loop NOT THREADSAFE */ list *clients_pending_asyncwrite; int cclients; + client *current_client; /* Current client */ + int module_blocked_pipe[2]; /* Pipe used to awake the event loop if a + client blocked on a module command needs + to be processed. */ struct fastlock lockPendingWrite; }; @@ -1173,15 +1175,56 @@ struct redisMaster { time_t repl_down_since; /* Unix time at which link with master went down */ unsigned char master_uuid[UUID_BINARY_LEN]; /* Used during sync with master, this is our master's UUID */ - /* After we've connected with our master use the UUID in server.master */ + /* After we've connected with our master use the UUID in g_pserver->master */ }; -struct redisServer { - /* General */ +// Const vars are not changed after worker threads are launched +struct redisServerConst { pid_t pid; /* Main process pid. */ + time_t stat_starttime; /* Server start time */ char *configfile; /* Absolute config file path, or NULL */ char *executable; /* Absolute executable file path. */ char **exec_argv; /* Executable argv vector (copy). */ + + int cthreads; /* Number of main worker threads */ + int fThreadAffinity; /* Should we pin threads to cores? */ + char *pidfile; /* PID file path */ + + /* Fast pointers to often looked up command */ + struct redisCommand *delCommand, *multiCommand, *lpushCommand, + *lpopCommand, *rpopCommand, *zpopminCommand, + *zpopmaxCommand, *sremCommand, *execCommand, + *expireCommand, *pexpireCommand, *xclaimCommand, + *xgroupCommand, *rreplayCommand; + + /* Configuration */ + char *default_masteruser; /* AUTH with this user and masterauth with master */ + char *default_masterauth; /* AUTH with this password with master */ + int verbosity; /* Loglevel in redis.conf */ + int maxidletime; /* Client timeout in seconds */ + int tcpkeepalive; /* Set SO_KEEPALIVE if non-zero. 
*/ + int active_defrag_enabled; + size_t active_defrag_ignore_bytes; /* minimum amount of fragmentation waste to start active defrag */ + int active_defrag_threshold_lower; /* minimum percentage of fragmentation to start active defrag */ + int active_defrag_threshold_upper; /* maximum percentage of fragmentation at which we use maximum effort */ + int active_defrag_cycle_min; /* minimal effort for defrag in CPU percentage */ + int active_defrag_cycle_max; /* maximal effort for defrag in CPU percentage */ + unsigned long active_defrag_max_scan_fields; /* maximum number of fields of set/hash/zset/list to process from within the main dict scan */ + size_t client_max_querybuf_len; /* Limit for client query buffer length */ + int dbnum; /* Total number of configured DBs */ + int supervised; /* 1 if supervised, 0 otherwise. */ + int supervised_mode; /* See SUPERVISED_* */ + int daemonize; /* True if running as a daemon */ + clientBufferLimitsConfig client_obuf_limits[CLIENT_TYPE_OBUF_COUNT]; + + /* System hardware info */ + size_t system_memory_size; /* Total memory in system as reported by OS */ + + unsigned char uuid[UUID_BINARY_LEN]; /* This server's UUID - populated on boot */ +}; + +struct redisServer { + /* General */ int dynamic_hz; /* Change hz value depending on # of clients. */ int config_hz; /* Configured HZ value. May be different than the actual 'hz' field value if dynamic-hz @@ -1191,16 +1234,12 @@ struct redisServer { dict *commands; /* Command table */ dict *orig_commands; /* Command table before command renaming. */ - int cthreads; /* Number of main worker threads */ - int fThreadAffinity; /* Should we pin threads to cores? 
*/ struct redisServerThreadVars rgthreadvar[MAX_EVENT_LOOPS]; unsigned int lruclock; /* Clock for LRU eviction */ int shutdown_asap; /* SHUTDOWN needed ASAP */ int activerehashing; /* Incremental rehash in serverCron() */ int active_defrag_running; /* Active defragmentation running (holds current scan aggressiveness) */ - char *pidfile; /* PID file path */ - int arch_bits; /* 32 or 64 depending on sizeof(long) */ int cronloops; /* Number of times the cron function run */ char runid[CONFIG_RUN_ID_SIZE+1]; /* ID always different at every exec. */ int sentinel_mode; /* True if this instance is a Sentinel. */ @@ -1211,14 +1250,11 @@ struct redisServer { dict *sharedapi; /* Like moduleapi but containing the APIs that modules share with each other. */ list *loadmodule_queue; /* List of modules to load at startup. */ - int module_blocked_pipe[2]; /* Pipe used to awake the event loop if a - client blocked on a module command needs - to be processed. */ /* Networking */ int port; /* TCP listening port */ int tcp_backlog; /* TCP listen() backlog */ char *bindaddr[CONFIG_BINDADDR_MAX]; /* Addresses we should bind to */ - int bindaddr_count; /* Number of addresses in server.bindaddr[] */ + int bindaddr_count; /* Number of addresses in g_pserver->bindaddr[] */ char *unixsocket; /* UNIX socket path */ mode_t unixsocketperm; /* UNIX socket permission */ int sofd; /* Unix socket file descriptor */ @@ -1227,7 +1263,6 @@ struct redisServer { list *clients; /* List of active clients */ list *clients_to_close; /* Clients to close asynchronously */ list *slaves, *monitors; /* List of slaves and MONITORs */ - client *current_client; /* Current client, only used on crash report */ rax *clients_index; /* Active clients dictionary by client ID. 
*/ int clients_paused; /* True if clients are currently paused */ mstime_t clients_pause_end_time; /* Time when we undo clients_paused */ @@ -1241,14 +1276,10 @@ struct redisServer { off_t loading_loaded_bytes; time_t loading_start_time; off_t loading_process_events_interval_bytes; - /* Fast pointers to often looked up command */ - struct redisCommand *delCommand, *multiCommand, *lpushCommand, - *lpopCommand, *rpopCommand, *zpopminCommand, - *zpopmaxCommand, *sremCommand, *execCommand, - *expireCommand, *pexpireCommand, *xclaimCommand, - *xgroupCommand, *rreplayCommand; + + int active_expire_enabled; /* Can be disabled for testing purposes. */ + /* Fields used only for stats */ - time_t stat_starttime; /* Server start time */ long long stat_numcommands; /* Number of processed commands */ long long stat_numconnections; /* Number of connections received */ long long stat_expiredkeys; /* Number of expired keys */ @@ -1286,26 +1317,7 @@ struct redisServer { long long samples[STATS_METRIC_SAMPLES]; int idx; } inst_metric[STATS_METRIC_COUNT]; - /* Configuration */ - char *default_masteruser; /* AUTH with this user and masterauth with master */ - char *default_masterauth; /* AUTH with this password with master */ - int verbosity; /* Loglevel in redis.conf */ - int maxidletime; /* Client timeout in seconds */ - int tcpkeepalive; /* Set SO_KEEPALIVE if non-zero. */ - int active_expire_enabled; /* Can be disabled for testing purposes. 
*/ - int active_defrag_enabled; - size_t active_defrag_ignore_bytes; /* minimum amount of fragmentation waste to start active defrag */ - int active_defrag_threshold_lower; /* minimum percentage of fragmentation to start active defrag */ - int active_defrag_threshold_upper; /* maximum percentage of fragmentation at which we use maximum effort */ - int active_defrag_cycle_min; /* minimal effort for defrag in CPU percentage */ - int active_defrag_cycle_max; /* maximal effort for defrag in CPU percentage */ - unsigned long active_defrag_max_scan_fields; /* maximum number of fields of set/hash/zset/list to process from within the main dict scan */ - size_t client_max_querybuf_len; /* Limit for client query buffer length */ - int dbnum; /* Total number of configured DBs */ - int supervised; /* 1 if supervised, 0 otherwise. */ - int supervised_mode; /* See SUPERVISED_* */ - int daemonize; /* True if running as a daemon */ - clientBufferLimitsConfig client_obuf_limits[CLIENT_TYPE_OBUF_COUNT]; + /* AOF persistence */ int aof_state; /* AOF_(ON|OFF|WAIT_REWRITE) */ int aof_fsync; /* Kind of fsync() policy */ @@ -1395,7 +1407,7 @@ struct redisServer { time_t repl_backlog_time_limit; /* Time without slaves after the backlog gets released. */ time_t repl_no_slaves_since; /* We have no slaves since that time. - Only valid if server.slaves len is 0. */ + Only valid if g_pserver->slaves len is 0. */ int repl_min_slaves_to_write; /* Min number of slaves to write. */ int repl_min_slaves_max_lag; /* Max lag of slaves to write. */ int repl_good_slaves_count; /* Number of slaves with lag <= max_lag. */ @@ -1513,8 +1525,6 @@ struct redisServer { int assert_line; int bug_report_start; /* True if bug report header was already logged. */ int watchdog_period; /* Software watchdog period in ms. 
0 = off */ - /* System hardware info */ - size_t system_memory_size; /* Total memory in system as reported by OS */ /* Mutexes used to protect atomic variables when atomic builtins are * not available. */ @@ -1523,9 +1533,13 @@ struct redisServer { pthread_mutex_t unixtime_mutex; int fActiveReplica; /* Can this replica also be a master? */ - unsigned char uuid[UUID_BINARY_LEN]; /* This server's UUID - populated on boot */ struct fastlock flock; + + // Format: + // Lower 20 bits: a counter incrementing for each command executed in the same millisecond + // Upper 44 bits: mstime (least significant 44-bits) enough for ~500 years before rollover from date of addition + uint64_t mvcc_tstamp; }; typedef struct pubsubPattern { @@ -1619,7 +1633,9 @@ typedef struct { * Extern declarations *----------------------------------------------------------------------------*/ -extern struct redisServer server; +//extern struct redisServer server; +extern redisServer *g_pserver; +extern struct redisServerConst cserver; extern __thread struct redisServerThreadVars *serverTL; // thread local server vars extern struct sharedObjectsStruct shared; extern dictType objectKeyPointerValueDictType; @@ -1649,7 +1665,7 @@ moduleType *moduleTypeLookupModuleByID(uint64_t id); void moduleTypeNameByID(char *name, uint64_t moduleid); void moduleFreeContext(struct RedisModuleCtx *ctx); void unblockClientFromModule(client *c); -void moduleHandleBlockedClients(void); +void moduleHandleBlockedClients(int iel); void moduleBlockedClientTimedOut(client *c); void moduleBlockedClientPipeReadable(aeEventLoop *el, int fd, void *privdata, int mask); size_t moduleCount(void); @@ -2136,6 +2152,7 @@ void objectSetLRUOrLFU(robj *val, long long lfu_freq, long long lru_idle, long long lru_clock); #define LOOKUP_NONE 0 #define LOOKUP_NOTOUCH (1<<0) +#define LOOKUP_UPDATEMVCC (1<<1) void dbAdd(redisDb *db, robj *key, robj *val); void dbOverwrite(redisDb *db, robj *key, robj *val); int dbMerge(redisDb *db, robj *key, 
robj *val, int fReplace); @@ -2447,14 +2464,17 @@ struct redisMaster *MasterInfoFromClient(client *c); /* MVCC */ uint64_t getMvccTstamp(); - -#if defined(__GNUC__) -#ifndef __cplusplus -void *calloc(size_t count, size_t size) __attribute__ ((deprecated)); -void free(void *ptr) __attribute__ ((deprecated)); -void *malloc(size_t size) __attribute__ ((deprecated)); -void *realloc(void *ptr, size_t size) __attribute__ ((deprecated)); -#endif +void incrementMvccTstamp(); + +#if defined(__GNUC__) && !defined(NO_DEPRECATE_FREE) + [[deprecated]] +void *calloc(size_t count, size_t size); + [[deprecated]] +void free(void *ptr); + [[deprecated]] +void *malloc(size_t size); + [[deprecated]] +void *realloc(void *ptr, size_t size); #endif /* Debugging stuff */ @@ -2483,18 +2503,18 @@ static inline int GlobalLocksAcquired(void) // Used in asserts to verify all gl inline int ielFromEventLoop(const aeEventLoop *eventLoop) { int iel = 0; - for (; iel < server.cthreads; ++iel) + for (; iel < cserver.cthreads; ++iel) { - if (server.rgthreadvar[iel].el == eventLoop) + if (g_pserver->rgthreadvar[iel].el == eventLoop) break; } - serverAssert(iel < server.cthreads); + serverAssert(iel < cserver.cthreads); return iel; } inline int FCorrectThread(client *c) { - return (serverTL != NULL && (server.rgthreadvar[c->iel].el == serverTL->el)) + return (serverTL != NULL && (g_pserver->rgthreadvar[c->iel].el == serverTL->el)) || (c->iel == IDX_EVENT_LOOP_MAIN && moduleGILAcquiredByModule()) || (c->fd == -1); } diff --git a/src/slowlog.cpp b/src/slowlog.cpp index 6a2e0338a..4f338b341 100644 --- a/src/slowlog.cpp +++ b/src/slowlog.cpp @@ -87,7 +87,7 @@ slowlogEntry *slowlogCreateEntry(client *c, robj **argv, int argc, long long dur } se->time = time(NULL); se->duration = duration; - se->id = server.slowlog_entry_id++; + se->id = g_pserver->slowlog_entry_id++; se->peerid = sdsnew(getClientPeerId(c)); se->cname = c->name ? 
sdsnew(szFromObj(c->name)) : sdsempty(); return se; @@ -112,29 +112,29 @@ void slowlogFreeEntry(const void *septr) { /* Initialize the slow log. This function should be called a single time * at server startup. */ void slowlogInit(void) { - server.slowlog = listCreate(); - server.slowlog_entry_id = 0; - listSetFreeMethod(server.slowlog,slowlogFreeEntry); + g_pserver->slowlog = listCreate(); + g_pserver->slowlog_entry_id = 0; + listSetFreeMethod(g_pserver->slowlog,slowlogFreeEntry); } /* Push a new entry into the slow log. * This function will make sure to trim the slow log accordingly to the * configured max length. */ void slowlogPushEntryIfNeeded(client *c, robj **argv, int argc, long long duration) { - if (server.slowlog_log_slower_than < 0) return; /* Slowlog disabled */ - if (duration >= server.slowlog_log_slower_than) - listAddNodeHead(server.slowlog, + if (g_pserver->slowlog_log_slower_than < 0) return; /* Slowlog disabled */ + if (duration >= g_pserver->slowlog_log_slower_than) + listAddNodeHead(g_pserver->slowlog, slowlogCreateEntry(c,argv,argc,duration)); /* Remove old entries if needed. */ - while (listLength(server.slowlog) > server.slowlog_max_len) - listDelNode(server.slowlog,listLast(server.slowlog)); + while (listLength(g_pserver->slowlog) > g_pserver->slowlog_max_len) + listDelNode(g_pserver->slowlog,listLast(g_pserver->slowlog)); } /* Remove all the entries from the current slow log. */ void slowlogReset(void) { - while (listLength(server.slowlog) > 0) - listDelNode(server.slowlog,listLast(server.slowlog)); + while (listLength(g_pserver->slowlog) > 0) + listDelNode(g_pserver->slowlog,listLast(g_pserver->slowlog)); } /* The SLOWLOG command. 
Implements all the subcommands needed to handle the @@ -154,7 +154,7 @@ NULL slowlogReset(); addReply(c,shared.ok); } else if (c->argc == 2 && !strcasecmp(szFromObj(c->argv[1]),"len")) { - addReplyLongLong(c,listLength(server.slowlog)); + addReplyLongLong(c,listLength(g_pserver->slowlog)); } else if ((c->argc == 2 || c->argc == 3) && !strcasecmp(szFromObj(c->argv[1]),"get")) { @@ -168,7 +168,7 @@ NULL getLongFromObjectOrReply(c,c->argv[2],&count,NULL) != C_OK) return; - listRewind(server.slowlog,&li); + listRewind(g_pserver->slowlog,&li); totentries = addReplyDeferredLen(c); while(count-- && (ln = listNext(&li))) { int j; diff --git a/src/sort.cpp b/src/sort.cpp index c14f81261..6b517b25a 100644 --- a/src/sort.cpp +++ b/src/sort.cpp @@ -143,7 +143,7 @@ int sortCompare(const void *s1, const void *s2) { const redisSortObject *so1 = (redisSortObject*)s1, *so2 = (redisSortObject*)s2; int cmp; - if (!server.sort_alpha) { + if (!g_pserver->sort_alpha) { /* Numeric sorting. Here it's trivial as we precomputed scores */ if (so1->u.score > so2->u.score) { cmp = 1; @@ -157,7 +157,7 @@ int sortCompare(const void *s1, const void *s2) { } } else { /* Alphanumeric sorting */ - if (server.sort_bypattern) { + if (g_pserver->sort_bypattern) { if (!so1->u.cmpobj || !so2->u.cmpobj) { /* At least one compare object is NULL */ if (so1->u.cmpobj == so2->u.cmpobj) @@ -168,7 +168,7 @@ int sortCompare(const void *s1, const void *s2) { cmp = 1; } else { /* We have both the objects, compare them. */ - if (server.sort_store) { + if (g_pserver->sort_store) { cmp = compareStringObjects(so1->u.cmpobj,so2->u.cmpobj); } else { /* Here we can use strcoll() directly as we are sure that @@ -178,14 +178,14 @@ int sortCompare(const void *s1, const void *s2) { } } else { /* Compare elements directly. */ - if (server.sort_store) { + if (g_pserver->sort_store) { cmp = compareStringObjects(so1->obj,so2->obj); } else { cmp = collateStringObjects(so1->obj,so2->obj); } } } - return server.sort_desc ? 
-cmp : cmp; + return g_pserver->sort_desc ? -cmp : cmp; } /* The SORT command is the most complex command in Redis. Warning: this code @@ -239,7 +239,7 @@ void sortCommand(client *c) { } else { /* If BY is specified with a real patter, we can't accept * it in cluster mode. */ - if (server.cluster_enabled) { + if (g_pserver->cluster_enabled) { addReplyError(c,"BY option of SORT denied in Cluster mode."); syntax_error++; break; @@ -247,7 +247,7 @@ void sortCommand(client *c) { } j++; } else if (!strcasecmp(szFromObj(c->argv[j]),"get") && leftargs >= 1) { - if (server.cluster_enabled) { + if (g_pserver->cluster_enabled) { addReplyError(c,"GET option of SORT denied in Cluster mode."); syntax_error++; break; @@ -496,10 +496,10 @@ void sortCommand(client *c) { } } - server.sort_desc = desc; - server.sort_alpha = alpha; - server.sort_bypattern = sortby ? 1 : 0; - server.sort_store = storekey ? 1 : 0; + g_pserver->sort_desc = desc; + g_pserver->sort_alpha = alpha; + g_pserver->sort_bypattern = sortby ? 1 : 0; + g_pserver->sort_store = storekey ? 
1 : 0; if (sortby && (start != 0 || end != vectorlen-1)) pqsort(vector,vectorlen,sizeof(redisSortObject),sortCompare, start,end); else @@ -574,11 +574,11 @@ void sortCommand(client *c) { setKey(c->db,storekey,sobj); notifyKeyspaceEvent(NOTIFY_LIST,"sortstore",storekey, c->db->id); - server.dirty += outputlen; + g_pserver->dirty += outputlen; } else if (dbDelete(c->db,storekey)) { signalModifiedKey(c->db,storekey); notifyKeyspaceEvent(NOTIFY_GENERIC,"del",storekey,c->db->id); - server.dirty++; + g_pserver->dirty++; } decrRefCount(sobj); addReplyLongLong(c,outputlen); diff --git a/src/t_hash.cpp b/src/t_hash.cpp index f249ec049..a7d35a926 100644 --- a/src/t_hash.cpp +++ b/src/t_hash.cpp @@ -44,7 +44,7 @@ void hashTypeTryConversion(robj *o, robj **argv, int start, int end) { for (i = start; i <= end; i++) { if (sdsEncodedObject(argv[i]) && - sdslen(szFromObj(argv[i])) > server.hash_max_ziplist_value) + sdslen(szFromObj(argv[i])) > g_pserver->hash_max_ziplist_value) { hashTypeConvert(o, OBJ_ENCODING_HT); break; @@ -234,7 +234,7 @@ int hashTypeSet(robj *o, sds field, sds value, int flags) { o->m_ptr = zl; /* Check if the ziplist needs to be converted to a hash table */ - if (hashTypeLength(o) > server.hash_max_ziplist_entries) + if (hashTypeLength(o) > g_pserver->hash_max_ziplist_entries) hashTypeConvert(o, OBJ_ENCODING_HT); } else if (o->encoding == OBJ_ENCODING_HT) { dictEntry *de = dictFind((dict*)ptrFromObj(o),field); @@ -523,7 +523,7 @@ void hsetnxCommand(client *c) { addReply(c, shared.cone); signalModifiedKey(c->db,c->argv[1]); notifyKeyspaceEvent(NOTIFY_HASH,"hset",c->argv[1],c->db->id); - server.dirty++; + g_pserver->dirty++; } } @@ -553,7 +553,7 @@ void hsetCommand(client *c) { } signalModifiedKey(c->db,c->argv[1]); notifyKeyspaceEvent(NOTIFY_HASH,"hset",c->argv[1],c->db->id); - server.dirty++; + g_pserver->dirty++; } void hincrbyCommand(client *c) { @@ -588,7 +588,7 @@ void hincrbyCommand(client *c) { addReplyLongLong(c,value); 
signalModifiedKey(c->db,c->argv[1]); notifyKeyspaceEvent(NOTIFY_HASH,"hincrby",c->argv[1],c->db->id); - server.dirty++; + g_pserver->dirty++; } void hincrbyfloatCommand(client *c) { @@ -627,7 +627,7 @@ void hincrbyfloatCommand(client *c) { addReplyBulkCBuffer(c,buf,len); signalModifiedKey(c->db,c->argv[1]); notifyKeyspaceEvent(NOTIFY_HASH,"hincrbyfloat",c->argv[1],c->db->id); - server.dirty++; + g_pserver->dirty++; /* Always replicate HINCRBYFLOAT as an HSET command with the final value * in order to make sure that differences in float pricision or formatting @@ -726,7 +726,7 @@ void hdelCommand(client *c) { if (keyremoved) notifyKeyspaceEvent(NOTIFY_GENERIC,"del",c->argv[1], c->db->id); - server.dirty += deleted; + g_pserver->dirty += deleted; } addReplyLongLong(c,deleted); } diff --git a/src/t_list.cpp b/src/t_list.cpp index 4e92a9907..a65aea8ad 100644 --- a/src/t_list.cpp +++ b/src/t_list.cpp @@ -181,8 +181,8 @@ void listTypeConvert(robj *subject, int enc) { serverAssertWithInfo(NULL,subject,subject->encoding==OBJ_ENCODING_ZIPLIST); if (enc == OBJ_ENCODING_QUICKLIST) { - size_t zlen = server.list_max_ziplist_size; - int depth = server.list_compress_depth; + size_t zlen = g_pserver->list_max_ziplist_size; + int depth = g_pserver->list_compress_depth; subject->m_ptr = quicklistCreateFromZiplist(zlen, depth, (unsigned char*)ptrFromObj(subject)); subject->encoding = OBJ_ENCODING_QUICKLIST; } else { @@ -206,8 +206,8 @@ void pushGenericCommand(client *c, int where) { for (j = 2; j < c->argc; j++) { if (!lobj) { lobj = createQuicklistObject(); - quicklistSetOptions((quicklist*)ptrFromObj(lobj), server.list_max_ziplist_size, - server.list_compress_depth); + quicklistSetOptions((quicklist*)ptrFromObj(lobj), g_pserver->list_max_ziplist_size, + g_pserver->list_compress_depth); dbAdd(c->db,c->argv[1],lobj); } listTypePush(lobj,c->argv[j],where); @@ -220,7 +220,7 @@ void pushGenericCommand(client *c, int where) { signalModifiedKey(c->db,c->argv[1]); 
notifyKeyspaceEvent(NOTIFY_LIST,event,c->argv[1],c->db->id); } - server.dirty += pushed; + g_pserver->dirty += pushed; } void lpushCommand(client *c) { @@ -250,7 +250,7 @@ void pushxGenericCommand(client *c, int where) { signalModifiedKey(c->db,c->argv[1]); notifyKeyspaceEvent(NOTIFY_LIST,event,c->argv[1],c->db->id); } - server.dirty += pushed; + g_pserver->dirty += pushed; } void lpushxCommand(client *c) { @@ -295,7 +295,7 @@ void linsertCommand(client *c) { signalModifiedKey(c->db,c->argv[1]); notifyKeyspaceEvent(NOTIFY_LIST,"linsert", c->argv[1],c->db->id); - server.dirty++; + g_pserver->dirty++; } else { /* Notify client of a failed insert */ addReplyLongLong(c,-1); @@ -357,7 +357,7 @@ void lsetCommand(client *c) { addReply(c,shared.ok); signalModifiedKey(c->db,c->argv[1]); notifyKeyspaceEvent(NOTIFY_LIST,"lset",c->argv[1],c->db->id); - server.dirty++; + g_pserver->dirty++; } } else { serverPanic("Unknown list encoding"); @@ -383,7 +383,7 @@ void popGenericCommand(client *c, int where) { dbDelete(c->db,c->argv[1]); } signalModifiedKey(c->db,c->argv[1]); - server.dirty++; + g_pserver->dirty++; } } @@ -483,7 +483,7 @@ void ltrimCommand(client *c) { notifyKeyspaceEvent(NOTIFY_GENERIC,"del",c->argv[1],c->db->id); } signalModifiedKey(c->db,c->argv[1]); - server.dirty++; + g_pserver->dirty++; addReply(c,shared.ok); } @@ -511,7 +511,7 @@ void lremCommand(client *c) { while (listTypeNext(li,&entry)) { if (listTypeEqual(&entry,obj)) { listTypeDelete(li, &entry); - server.dirty++; + g_pserver->dirty++; removed++; if (toremove && removed == toremove) break; } @@ -551,8 +551,8 @@ static void rpoplpushHandlePush(client *c, robj *dstkey, robj *dstobj, robj *val /* Create the list if the key does not exist */ if (!dstobj) { dstobj = createQuicklistObject(); - quicklistSetOptions((quicklist*)ptrFromObj(dstobj), server.list_max_ziplist_size, - server.list_compress_depth); + quicklistSetOptions((quicklist*)ptrFromObj(dstobj), g_pserver->list_max_ziplist_size, + 
g_pserver->list_compress_depth); dbAdd(c->db,dstkey,dstobj); } signalModifiedKey(c->db,dstkey); @@ -595,7 +595,7 @@ void rpoplpushCommand(client *c) { } signalModifiedKey(c->db,touchedkey); decrRefCount(touchedkey); - server.dirty++; + g_pserver->dirty++; if (c->cmd->proc == brpoplpushCommand) { rewriteClientCommandVector(c,3,shared.rpoplpush,c->argv[1],c->argv[2]); } @@ -636,7 +636,7 @@ int serveClientBlockedOnList(client *receiver, robj *key, robj *dstkey, redisDb shared.rpop; argv[1] = key; propagate((where == LIST_HEAD) ? - server.lpopCommand : server.rpopCommand, + cserver.lpopCommand : cserver.rpopCommand, db->id,argv,2,PROPAGATE_AOF|PROPAGATE_REPL); /* BRPOP/BLPOP */ @@ -659,7 +659,7 @@ int serveClientBlockedOnList(client *receiver, robj *key, robj *dstkey, redisDb /* Propagate the RPOP operation. */ argv[0] = shared.rpop; argv[1] = key; - propagate(server.rpopCommand, + propagate(cserver.rpopCommand, db->id,argv,2, PROPAGATE_AOF| PROPAGATE_REPL); @@ -669,7 +669,7 @@ int serveClientBlockedOnList(client *receiver, robj *key, robj *dstkey, redisDb argv[0] = shared.lpush; argv[1] = dstkey; argv[2] = value; - propagate(server.lpushCommand, + propagate(cserver.lpushCommand, db->id,argv,3, PROPAGATE_AOF| PROPAGATE_REPL); @@ -721,7 +721,7 @@ void blockingPopGenericCommand(client *c, int where) { c->argv[j],c->db->id); } signalModifiedKey(c->db,c->argv[j]); - server.dirty++; + g_pserver->dirty++; /* Replicate it as an [LR]POP instead of B[LR]POP. */ rewriteClientCommandVector(c,2, diff --git a/src/t_set.cpp b/src/t_set.cpp index 9c281f6b6..0c97c8d79 100644 --- a/src/t_set.cpp +++ b/src/t_set.cpp @@ -66,7 +66,7 @@ int setTypeAdd(robj *subject, const char *value) { if (success) { /* Convert to regular set when the intset contains * too many entries. 
*/ - if (intsetLen((intset*)subject->m_ptr) > server.set_max_intset_entries) + if (intsetLen((intset*)subject->m_ptr) > g_pserver->set_max_intset_entries) setTypeConvert(subject,OBJ_ENCODING_HT); return 1; } @@ -288,7 +288,7 @@ void saddCommand(client *c) { signalModifiedKey(c->db,c->argv[1]); notifyKeyspaceEvent(NOTIFY_SET,"sadd",c->argv[1],c->db->id); } - server.dirty += added; + g_pserver->dirty += added; addReplyLongLong(c,added); } @@ -315,7 +315,7 @@ void sremCommand(client *c) { if (keyremoved) notifyKeyspaceEvent(NOTIFY_GENERIC,"del",c->argv[1], c->db->id); - server.dirty += deleted; + g_pserver->dirty += deleted; } addReplyLongLong(c,deleted); } @@ -365,11 +365,11 @@ void smoveCommand(client *c) { signalModifiedKey(c->db,c->argv[1]); signalModifiedKey(c->db,c->argv[2]); - server.dirty++; + g_pserver->dirty++; /* An extra key has changed when ele was successfully added to dstset */ if (setTypeAdd(dstset,szFromObj(ele))) { - server.dirty++; + g_pserver->dirty++; notifyKeyspaceEvent(NOTIFY_SET,"sadd",c->argv[2],c->db->id); } addReply(c,shared.cone); @@ -434,7 +434,7 @@ void spopWithCountCommand(client *c) { /* Generate an SPOP keyspace notification */ notifyKeyspaceEvent(NOTIFY_SET,"spop",c->argv[1],c->db->id); - server.dirty += count; + g_pserver->dirty += count; /* CASE 1: * The number of requested elements is greater than or equal to @@ -450,7 +450,7 @@ void spopWithCountCommand(client *c) { /* Propagate this command as an DEL operation */ rewriteClientCommandVector(c,2,shared.del,c->argv[1]); signalModifiedKey(c->db,c->argv[1]); - server.dirty++; + g_pserver->dirty++; return; } @@ -492,7 +492,7 @@ void spopWithCountCommand(client *c) { /* Replicate/AOF this command as an SREM operation */ propargv[2] = objele; - alsoPropagate(server.sremCommand,c->db->id,propargv,3, + alsoPropagate(cserver.sremCommand,c->db->id,propargv,3, PROPAGATE_AOF|PROPAGATE_REPL); decrRefCount(objele); } @@ -535,7 +535,7 @@ void spopWithCountCommand(client *c) { /* Replicate/AOF 
this command as an SREM operation */ propargv[2] = objele; - alsoPropagate(server.sremCommand,c->db->id,propargv,3, + alsoPropagate(cserver.sremCommand,c->db->id,propargv,3, PROPAGATE_AOF|PROPAGATE_REPL); decrRefCount(objele); } @@ -552,7 +552,7 @@ void spopWithCountCommand(client *c) { decrRefCount(propargv[0]); preventCommandPropagation(c); signalModifiedKey(c->db,c->argv[1]); - server.dirty++; + g_pserver->dirty++; } void spopCommand(client *c) { @@ -605,7 +605,7 @@ void spopCommand(client *c) { /* Set has been modified */ signalModifiedKey(c->db,c->argv[1]); - server.dirty++; + g_pserver->dirty++; } /* handle the "SRANDMEMBER key " variant. The normal version of the @@ -814,7 +814,7 @@ void sinterGenericCommand(client *c, robj **setkeys, if (dstkey) { if (dbDelete(c->db,dstkey)) { signalModifiedKey(c->db,dstkey); - server.dirty++; + g_pserver->dirty++; } addReply(c,shared.czero); } else { @@ -917,7 +917,7 @@ void sinterGenericCommand(client *c, robj **setkeys, dstkey,c->db->id); } signalModifiedKey(c->db,dstkey); - server.dirty++; + g_pserver->dirty++; } else { setDeferredSetLen(c,replylen,cardinality); } @@ -1072,7 +1072,7 @@ void sunionDiffGenericCommand(client *c, robj **setkeys, int setnum, sdsfree(ele); } setTypeReleaseIterator(si); - server.lazyfree_lazy_server_del ? freeObjAsync(dstset) : + g_pserver->lazyfree_lazy_server_del ? freeObjAsync(dstset) : decrRefCount(dstset); } else { /* If we have a target key where to store the resulting set @@ -1092,7 +1092,7 @@ void sunionDiffGenericCommand(client *c, robj **setkeys, int setnum, dstkey,c->db->id); } signalModifiedKey(c->db,dstkey); - server.dirty++; + g_pserver->dirty++; } zfree(sets); } diff --git a/src/t_stream.cpp b/src/t_stream.cpp index 994a7e109..d67dd3f39 100644 --- a/src/t_stream.cpp +++ b/src/t_stream.cpp @@ -241,18 +241,18 @@ int streamAppendItem(stream *s, robj **argv, int64_t numfields, streamID *added_ * if we need to switch to the next one. 
'lp' will be set to NULL if * the current node is full. */ if (lp != NULL) { - if (server.stream_node_max_bytes && - lp_bytes > server.stream_node_max_bytes) + if (g_pserver->stream_node_max_bytes && + lp_bytes > g_pserver->stream_node_max_bytes) { lp = NULL; - } else if (server.stream_node_max_entries) { + } else if (g_pserver->stream_node_max_entries) { int64_t count = lpGetInteger(lpFirst(lp)); - if (count > server.stream_node_max_entries) lp = NULL; + if (count > g_pserver->stream_node_max_entries) lp = NULL; } } int flags = STREAM_ITEM_FLAG_NONE; - if (lp == NULL || lp_bytes > server.stream_node_max_bytes) { + if (lp == NULL || lp_bytes > g_pserver->stream_node_max_bytes) { master_id = id; streamEncodeID(rax_key,&id); /* Create the listpack having the master entry ID and fields. */ @@ -819,7 +819,7 @@ void streamPropagateXCLAIM(client *c, robj *key, streamCG *group, robj *groupnam argv[11] = createStringObject("JUSTID",6); argv[12] = createStringObject("LASTID",6); argv[13] = createObjectFromStreamID(&group->last_id); - propagate(server.xclaimCommand,c->db->id,argv,14,PROPAGATE_AOF|PROPAGATE_REPL); + propagate(cserver.xclaimCommand,c->db->id,argv,14,PROPAGATE_AOF|PROPAGATE_REPL); decrRefCount(argv[0]); decrRefCount(argv[3]); decrRefCount(argv[4]); @@ -846,7 +846,7 @@ void streamPropagateGroupID(client *c, robj *key, streamCG *group, robj *groupna argv[2] = key; argv[3] = groupname; argv[4] = createObjectFromStreamID(&group->last_id); - propagate(server.xgroupCommand,c->db->id,argv,5,PROPAGATE_AOF|PROPAGATE_REPL); + propagate(cserver.xgroupCommand,c->db->id,argv,5,PROPAGATE_AOF|PROPAGATE_REPL); decrRefCount(argv[0]); decrRefCount(argv[1]); decrRefCount(argv[4]); @@ -1242,7 +1242,7 @@ void xaddCommand(client *c) { signalModifiedKey(c->db,c->argv[1]); notifyKeyspaceEvent(NOTIFY_STREAM,"xadd",c->argv[1],c->db->id); - server.dirty++; + g_pserver->dirty++; if (maxlen >= 0) { /* Notify xtrim event if needed. 
*/ @@ -1260,7 +1260,7 @@ void xaddCommand(client *c) { /* We need to signal to blocked clients that there is new data on this * stream. */ - if (server.blocked_clients_by_type[BLOCKED_STREAM]) + if (g_pserver->blocked_clients_by_type[BLOCKED_STREAM]) signalKeyAsReady(c->db, c->argv[1]); } @@ -1534,7 +1534,7 @@ void xreadCommand(client *c) { streamReplyWithRange(c,s,&start,NULL,count,0, groups ? groups[i] : NULL, consumer, flags, &spi); - if (groups) server.dirty++; + if (groups) g_pserver->dirty++; } } @@ -1798,7 +1798,7 @@ NULL streamCG *cg = streamCreateCG(s,grpname,sdslen(grpname),&id); if (cg) { addReply(c,shared.ok); - server.dirty++; + g_pserver->dirty++; notifyKeyspaceEvent(NOTIFY_STREAM,"xgroup-create", c->argv[2],c->db->id); } else { @@ -1814,14 +1814,14 @@ NULL } cg->last_id = id; addReply(c,shared.ok); - server.dirty++; + g_pserver->dirty++; notifyKeyspaceEvent(NOTIFY_STREAM,"xgroup-setid",c->argv[2],c->db->id); } else if (!strcasecmp(opt,"DESTROY") && c->argc == 4) { if (cg) { raxRemove(s->cgroups,(unsigned char*)grpname,sdslen(grpname),NULL); streamFreeCG(cg); addReply(c,shared.cone); - server.dirty++; + g_pserver->dirty++; notifyKeyspaceEvent(NOTIFY_STREAM,"xgroup-destroy", c->argv[2],c->db->id); } else { @@ -1832,7 +1832,7 @@ NULL * that were yet associated with such a consumer. 
*/ long long pending = streamDelConsumer(cg,szFromObj(c->argv[4])); addReplyLongLong(c,pending); - server.dirty++; + g_pserver->dirty++; notifyKeyspaceEvent(NOTIFY_STREAM,"xgroup-delconsumer", c->argv[2],c->db->id); } else if (!strcasecmp(opt,"HELP")) { @@ -1872,7 +1872,7 @@ void xsetidCommand(client *c) { } s->last_id = id; addReply(c,shared.ok); - server.dirty++; + g_pserver->dirty++; notifyKeyspaceEvent(NOTIFY_STREAM,"xsetid",c->argv[1],c->db->id); } @@ -1915,7 +1915,7 @@ void xackCommand(client *c) { raxRemove(nack->consumer->pel,buf,sizeof(buf),NULL); streamFreeNACK(nack); acknowledged++; - server.dirty++; + g_pserver->dirty++; } } addReplyLongLong(c,acknowledged); @@ -2309,12 +2309,12 @@ void xclaimCommand(client *c) { /* Propagate this change. */ streamPropagateXCLAIM(c,c->argv[1],group,c->argv[2],c->argv[j],nack); propagate_last_id = 0; /* Will be propagated by XCLAIM itself. */ - server.dirty++; + g_pserver->dirty++; } } if (propagate_last_id) { streamPropagateGroupID(c,c->argv[1],group,c->argv[2]); - server.dirty++; + g_pserver->dirty++; } setDeferredArrayLen(c,arraylenptr,arraylen); preventCommandPropagation(c); @@ -2352,7 +2352,7 @@ void xdelCommand(client *c) { if (deleted) { signalModifiedKey(c->db,c->argv[1]); notifyKeyspaceEvent(NOTIFY_STREAM,"xdel",c->argv[1],c->db->id); - server.dirty += deleted; + g_pserver->dirty += deleted; } addReplyLongLong(c,deleted); } @@ -2429,7 +2429,7 @@ void xtrimCommand(client *c) { if (deleted) { signalModifiedKey(c->db,c->argv[1]); notifyKeyspaceEvent(NOTIFY_STREAM,"xtrim",c->argv[1],c->db->id); - server.dirty += deleted; + g_pserver->dirty += deleted; if (approx_maxlen) streamRewriteApproxMaxlen(c,s,maxlen_arg_idx); } addReplyLongLong(c,deleted); diff --git a/src/t_string.cpp b/src/t_string.cpp index 5066e1ef1..aea74b48d 100644 --- a/src/t_string.cpp +++ b/src/t_string.cpp @@ -84,7 +84,7 @@ void setGenericCommand(client *c, int flags, robj *key, robj *val, robj *expire, return; } setKey(c->db,key,val); - 
server.dirty++; + g_pserver->dirty++; if (expire) setExpire(c,c->db,key,mstime()+milliseconds); notifyKeyspaceEvent(NOTIFY_STRING,"set",key,c->db->id); if (expire) notifyKeyspaceEvent(NOTIFY_GENERIC, @@ -178,7 +178,7 @@ void getsetCommand(client *c) { c->argv[2] = tryObjectEncoding(c->argv[2]); setKey(c->db,c->argv[1],c->argv[2]); notifyKeyspaceEvent(NOTIFY_STRING,"set",c->argv[1],c->db->id); - server.dirty++; + g_pserver->dirty++; } void setrangeCommand(client *c) { @@ -236,7 +236,7 @@ void setrangeCommand(client *c) { signalModifiedKey(c->db,c->argv[1]); notifyKeyspaceEvent(NOTIFY_STRING, "setrange",c->argv[1],c->db->id); - server.dirty++; + g_pserver->dirty++; } addReplyLongLong(c,sdslen((sds)ptrFromObj(o))); } @@ -325,7 +325,7 @@ void msetGenericCommand(client *c, int nx) { setKey(c->db,c->argv[j],c->argv[j+1]); notifyKeyspaceEvent(NOTIFY_STRING,"set",c->argv[j],c->db->id); } - server.dirty += (c->argc-1)/2; + g_pserver->dirty += (c->argc-1)/2; addReply(c, nx ? shared.cone : shared.ok); } @@ -369,7 +369,7 @@ void incrDecrCommand(client *c, long long incr) { } signalModifiedKey(c->db,c->argv[1]); notifyKeyspaceEvent(NOTIFY_STRING,"incrby",c->argv[1],c->db->id); - server.dirty++; + g_pserver->dirty++; addReply(c,shared.colon); addReply(c,newObj); addReply(c,shared.crlf); @@ -419,7 +419,7 @@ void incrbyfloatCommand(client *c) { dbAdd(c->db,c->argv[1],newObj); signalModifiedKey(c->db,c->argv[1]); notifyKeyspaceEvent(NOTIFY_STRING,"incrbyfloat",c->argv[1],c->db->id); - server.dirty++; + g_pserver->dirty++; addReplyBulk(c,newObj); /* Always replicate INCRBYFLOAT as a SET command with the final value @@ -460,7 +460,7 @@ void appendCommand(client *c) { } signalModifiedKey(c->db,c->argv[1]); notifyKeyspaceEvent(NOTIFY_STRING,"append",c->argv[1],c->db->id); - server.dirty++; + g_pserver->dirty++; addReplyLongLong(c,totlen); } diff --git a/src/t_zset.cpp b/src/t_zset.cpp index c222f7390..ec0c764dd 100644 --- a/src/t_zset.cpp +++ b/src/t_zset.cpp @@ -1243,8 +1243,8 @@ void 
zsetConvertToZiplistIfNeeded(robj *zobj, size_t maxelelen) { if (zobj->encoding == OBJ_ENCODING_ZIPLIST) return; zset *set = (zset*)zobj->m_ptr; - if (set->zsl->length <= server.zset_max_ziplist_entries && - maxelelen <= server.zset_max_ziplist_value) + if (set->zsl->length <= g_pserver->zset_max_ziplist_entries && + maxelelen <= g_pserver->zset_max_ziplist_value) zsetConvert(zobj,OBJ_ENCODING_ZIPLIST); } @@ -1357,9 +1357,9 @@ int zsetAdd(robj *zobj, double score, sds ele, int *flags, double *newscore) { /* Optimize: check if the element is too large or the list * becomes too long *before* executing zzlInsert. */ zobj->m_ptr = zzlInsert((unsigned char*)zobj->m_ptr,ele,score); - if (zzlLength((unsigned char*)zobj->m_ptr) > server.zset_max_ziplist_entries) + if (zzlLength((unsigned char*)zobj->m_ptr) > g_pserver->zset_max_ziplist_entries) zsetConvert(zobj,OBJ_ENCODING_SKIPLIST); - if (sdslen(ele) > server.zset_max_ziplist_value) + if (sdslen(ele) > g_pserver->zset_max_ziplist_value) zsetConvert(zobj,OBJ_ENCODING_SKIPLIST); if (newscore) *newscore = score; *flags |= ZADD_ADDED; @@ -1600,8 +1600,8 @@ void zaddGenericCommand(client *c, int flags) { zobj = lookupKeyWrite(c->db,key); if (zobj == NULL) { if (xx) goto reply_to_client; /* No key + XX option: nothing to do. */ - if (server.zset_max_ziplist_entries == 0 || - server.zset_max_ziplist_value < sdslen(szFromObj(c->argv[scoreidx+1]))) + if (g_pserver->zset_max_ziplist_entries == 0 || + g_pserver->zset_max_ziplist_value < sdslen(szFromObj(c->argv[scoreidx+1]))) { zobj = createZsetObject(); } else { @@ -1631,7 +1631,7 @@ void zaddGenericCommand(client *c, int flags) { if (!(retflags & ZADD_NOP)) processed++; score = newscore; } - server.dirty += (added+updated); + g_pserver->dirty += (added+updated); reply_to_client: if (incr) { /* ZINCRBY or INCR option. 
*/ @@ -1682,7 +1682,7 @@ void zremCommand(client *c) { if (keyremoved) notifyKeyspaceEvent(NOTIFY_GENERIC,"del",key,c->db->id); signalModifiedKey(c->db,key); - server.dirty += deleted; + g_pserver->dirty += deleted; } addReplyLongLong(c,deleted); } @@ -1784,7 +1784,7 @@ void zremrangeGenericCommand(client *c, int rangetype) { if (keyremoved) notifyKeyspaceEvent(NOTIFY_GENERIC,"del",key,c->db->id); } - server.dirty += deleted; + g_pserver->dirty += deleted; addReplyLongLong(c,deleted); cleanup: @@ -2387,14 +2387,14 @@ void zunionInterGenericCommand(client *c, robj *dstkey, int op) { notifyKeyspaceEvent(NOTIFY_ZSET, (op == SET_OP_UNION) ? "zunionstore" : "zinterstore", dstkey,c->db->id); - server.dirty++; + g_pserver->dirty++; } else { decrRefCount(dstobj); addReply(c,shared.czero); if (touched) { signalModifiedKey(c->db,dstkey); notifyKeyspaceEvent(NOTIFY_GENERIC,"del",dstkey,c->db->id); - server.dirty++; + g_pserver->dirty++; } } zfree(src); @@ -3211,7 +3211,7 @@ void genericZpopCommand(client *c, robj **keyv, int keyc, int where, int emitkey } serverAssertWithInfo(c,zobj,zsetDel(zobj,ele)); - server.dirty++; + g_pserver->dirty++; if (arraylen == 0) { /* Do this only for the first iteration. */ const char *events[2] = {"zpopmin","zpopmax"}; diff --git a/src/version.h b/src/version.h index b08b5d0f9..ad9d014f7 100644 --- a/src/version.h +++ b/src/version.h @@ -1 +1,2 @@ -#define REDIS_VERSION "0.9.4-r3" +#define KEYDB_REAL_VERSION "0.9.5" +extern const char *KEYDB_SET_VERSION; // Unlike real version, this can be overridden by the config diff --git a/src/zmalloc.h b/src/zmalloc.h index 3954c8736..6770a48a1 100644 --- a/src/zmalloc.h +++ b/src/zmalloc.h @@ -120,4 +120,8 @@ int zmalloc_test(int argc, char **argv); } #endif +#ifdef __cplusplus +#include "new.h" +#endif + #endif /* __ZMALLOC_H */