ngx_max_module = 0;
for (i = 0; ngx_modules[i]; i++) {
    ngx_modules[i]->index = ngx_max_module++;
}
for (i = 0; ngx_modules[i]; i++) {
    if (ngx_modules[i]->type != NGX_CORE_MODULE) {
        continue;
    }

    module = ngx_modules[i]->ctx;

    if (module->create_conf) {
        rv = module->create_conf(cycle);    /* call create_conf of each core module */
        if (rv == NULL) {
            ngx_destroy_pool(pool);
            return NULL;
        }
        cycle->conf_ctx[ngx_modules[i]->index] = rv;
    }
}
ngx_conf_param(&conf)
enum {
    parse_file = 0,
    parse_block,
    parse_param
} type;
rc = ngx_conf_read_token(cf);   /* read a token */

/*
 * ngx_conf_read_token() may return
 *
 *    NGX_ERROR             there is error
 *    NGX_OK                the token terminated by ";" was found
 *    NGX_CONF_BLOCK_START  the token terminated by "{" was found
 *    NGX_CONF_BLOCK_DONE   the "}" was found
 *    NGX_CONF_FILE_DONE    the configuration file is done
 */

rc = ngx_conf_handler(cf, rc);  /* process the token */
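These two calls sit inside the main loop of ngx_conf_parse. A simplified sketch of that loop (error handling and several cases are trimmed, and the exact code varies across nginx versions):

/* simplified sketch of the loop in ngx_conf_parse(), not the exact source */
for ( ;; ) {
    rc = ngx_conf_read_token(cf);       /* read the next token */

    if (rc == NGX_ERROR
        || rc == NGX_CONF_BLOCK_DONE
        || rc == NGX_CONF_FILE_DONE)
    {
        break;                          /* error, end of block, or end of file */
    }

    /* match the directive against every module's command table and
     * call the corresponding cmd->set() callback */
    rc = ngx_conf_handler(cf, rc);

    if (rc == NGX_ERROR) {
        break;
    }
}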
.............
cmd = ngx_modules[i]->commands;
.........
rv = cmd->set(cf, cmd, conf);
................
struct ngx_module_s {
    ngx_uint_t       ctx_index;
    ngx_uint_t       index;
    ............
    void            *ctx;
    ngx_command_t   *commands;
    ngx_uint_t       type;
    .............
};
ngx_module_t ngx_events_module = {
    NGX_MODULE_V1,
    &ngx_events_module_ctx,        /* module context */
    ngx_events_commands,           /* module directives */
    NGX_CORE_MODULE,               /* module type */
    ....................
};
struct ngx_command_s {
    ngx_str_t     name;
    ngx_uint_t    type;
    char       *(*set)(ngx_conf_t *cf, ngx_command_t *cmd, void *conf);
    ngx_uint_t    conf;
    ngx_uint_t    offset;
    void         *post;
};
static ngx_command_t ngx_events_commands[] = {

    { ngx_string("events"),
      NGX_MAIN_CONF|NGX_CONF_BLOCK|NGX_CONF_NOARGS,
      ngx_events_block,
      ........
};
/* point cmd at the module's command table */
cmd = ngx_modules[i]->commands;
/* callback that performs the detailed configuration for this module */
rv = cmd->set(cf, cmd, conf);
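Conceptually, ngx_conf_handler walks every module's command table, compares the directive name read from the configuration with cmd->name, checks the directive type, and only then fires the set() callback. A rough sketch, heavily simplified from the real function (type checks, argument-count checks and the computation of the conf pointer are omitted):

/* rough sketch of the directive dispatch inside ngx_conf_handler() */
ngx_str_t      *name;
ngx_command_t  *cmd;
ngx_uint_t      i;
char           *rv;

name = cf->args->elts;                    /* first token is the directive name */

for (i = 0; ngx_modules[i]; i++) {

    cmd = ngx_modules[i]->commands;
    if (cmd == NULL) {
        continue;
    }

    for ( /* void */ ; cmd->name.len; cmd++) {

        if (name->len != cmd->name.len
            || ngx_strcmp(name->data, cmd->name.data) != 0)
        {
            continue;                     /* not this directive */
        }

        /* ... verify module type, cf->cmd_type, argument count,
         *     and locate the conf struct this directive writes into ... */

        rv = cmd->set(cf, cmd, conf);

        /* ... handle rv ... */
    }
}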
static char *
ngx_events_block(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
{
    char                 *rv;
    void               ***ctx;
    ngx_uint_t            i;
    ngx_conf_t            pcf;
    ngx_event_module_t   *m;

    if (*(void **) conf) {
        return "is duplicate";
    }

    /* count the number of the event modules and set up their indices */
    /* initialize the ctx_index of every event module */
    ngx_event_max_module = 0;
    for (i = 0; ngx_modules[i]; i++) {
        if (ngx_modules[i]->type != NGX_EVENT_MODULE) {
            continue;
        }

        ngx_modules[i]->ctx_index = ngx_event_max_module++;
    }

    /* allocate the structures that will hold the configuration */
    ctx = ngx_pcalloc(cf->pool, sizeof(void *));
    if (ctx == NULL) {
        return NGX_CONF_ERROR;
    }

    *ctx = ngx_pcalloc(cf->pool, ngx_event_max_module * sizeof(void *));
    if (*ctx == NULL) {
        return NGX_CONF_ERROR;
    }

    *(void **) conf = ctx;

    /* call create_conf of every event module */
    for (i = 0; ngx_modules[i]; i++) {
        if (ngx_modules[i]->type != NGX_EVENT_MODULE) {
            continue;
        }

        m = ngx_modules[i]->ctx;

        if (m->create_conf) {
            (*ctx)[ngx_modules[i]->ctx_index] = m->create_conf(cf->cycle);
            if ((*ctx)[ngx_modules[i]->ctx_index] == NULL) {
                return NGX_CONF_ERROR;
            }
        }
    }

    pcf = *cf;
    cf->ctx = ctx;
    cf->module_type = NGX_EVENT_MODULE;
    cf->cmd_type = NGX_EVENT_CONF;

    /* parse the configuration inside the events block for all event modules */
    rv = ngx_conf_parse(cf, NULL);

    *cf = pcf;

    if (rv != NGX_CONF_OK)
        return rv;

    /* call init_conf of every event module */
    for (i = 0; ngx_modules[i]; i++) {
        if (ngx_modules[i]->type != NGX_EVENT_MODULE) {
            continue;
        }

        m = ngx_modules[i]->ctx;

        if (m->init_conf) {
            rv = m->init_conf(cf->cycle, (*ctx)[ngx_modules[i]->ctx_index]);
            if (rv != NGX_CONF_OK) {
                return rv;
            }
        }
    }

    return NGX_CONF_OK;
}
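After ngx_events_block has run, the configuration pointers end up nested roughly as follows (a sketch of the resulting layout, not code from the nginx source):

/*
 * cycle->conf_ctx[ngx_events_module.index]
 *        |
 *        v
 *       ctx              one pointer slot allocated with ngx_pcalloc()
 *        |
 *        v
 *     (*ctx)[0 .. ngx_event_max_module - 1]
 *                        per-event-module conf structs indexed by ctx_index;
 *                        for example (*ctx)[ngx_event_core_module.ctx_index]
 *                        points to an ngx_event_conf_t
 */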
static ngx_command_t ngx_event_core_commands[] = {

    /* size of the connection pool, i.e. the maximum number of connections
     * each worker process supports; it duplicates the "connections"
     * directive below */
    { ngx_string("worker_connections"),
      NGX_EVENT_CONF|NGX_CONF_TAKE1,
      ngx_event_connections,
      0,
      0,
      NULL },

    /* size of the connection pool, duplicate of the directive above */
    { ngx_string("connections"),
      NGX_EVENT_CONF|NGX_CONF_TAKE1,
      ngx_event_connections,
      0,
      0,
      NULL },

    /* selects which event module is used as the event-driven mechanism */
    { ngx_string("use"),
      NGX_EVENT_CONF|NGX_CONF_TAKE1,
      ngx_event_use,
      0,
      0,
      NULL },

    /* for the epoll event-driven mode: when a new-connection event arrives,
     * call accept() to take as many connections as possible */
    { ngx_string("multi_accept"),
      NGX_EVENT_CONF|NGX_CONF_FLAG,
      ngx_conf_set_flag_slot,
      0,
      offsetof(ngx_event_conf_t, multi_accept),
      NULL },

    /* whether to use the load-balancing (accept) mutex, enabled by default */
    { ngx_string("accept_mutex"),
      NGX_EVENT_CONF|NGX_CONF_FLAG,
      ngx_conf_set_flag_slot,
      0,
      offsetof(ngx_event_conf_t, accept_mutex),
      NULL },

    /* with the accept mutex enabled, wait accept_mutex_delay milliseconds
     * before trying to accept connections again */
    { ngx_string("accept_mutex_delay"),
      NGX_EVENT_CONF|NGX_CONF_TAKE1,
      ngx_conf_set_msec_slot,
      0,
      offsetof(ngx_event_conf_t, accept_mutex_delay),
      NULL },

    /* print debug-level logs for connections coming from the given IPs */
    { ngx_string("debug_connection"),
      NGX_EVENT_CONF|NGX_CONF_TAKE1,
      ngx_event_debug_connection,
      0,
      0,
      NULL },

      ngx_null_command
};
We can see that ngx_event_core_commands defines the configuration-related directives. The ngx_event_core_module selects which event mechanism is used and configures it accordingly, so it must be initialized before the other event modules to guarantee that they run correctly. The values parsed from these directives are stored in the ngx_event_conf_t structure:
typedef struct {
    /* size of the connection pool */
    ngx_uint_t    connections;
    /* index (ctx_index) of the selected event module among all event modules */
    ngx_uint_t    use;
    /* flag: if 1, accept as many connections as possible when a
     * new-connection event arrives */
    ngx_flag_t    multi_accept;
    /* flag: if 1, the load-balancing (accept) mutex is used */
    ngx_flag_t    accept_mutex;
    /* a worker that fails to grab the accept mutex waits for a while before
     * trying again; that delay is accept_mutex_delay */
    ngx_msec_t    accept_mutex_delay;
    /* name of the selected event module; it matches "use" */
    u_char       *name;
} ngx_event_conf_t;
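To see where these values come from, here is a minimal events block in nginx.conf (the values are only illustrative) that exercises the directives above:

events {
    worker_connections  1024;   # stored in ngx_event_conf_t.connections
    use                 epoll;  # resolved to the module's ctx_index (ngx_event_conf_t.use)
    multi_accept        on;     # ngx_event_conf_t.multi_accept
    accept_mutex        on;     # ngx_event_conf_t.accept_mutex
    accept_mutex_delay  500ms;  # ngx_event_conf_t.accept_mutex_delay
}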
The context (ctx) of every event module points to an ngx_event_module_t structure (that is, each event module must implement the ngx_event_module_t interface), which defines that module's operations in detail.
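For reference, the ngx_event_module_t interface is defined in src/event/ngx_event.h roughly as follows (the exact layout may differ slightly between nginx versions):

typedef struct {
    ngx_str_t              *name;          /* module name */

    void                 *(*create_conf)(ngx_cycle_t *cycle);           /* create the conf struct */
    char                 *(*init_conf)(ngx_cycle_t *cycle, void *conf); /* init the conf struct */

    ngx_event_actions_t     actions;       /* the ten event-handling callbacks
                                              (add, del, enable, disable, add_conn,
                                              del_conn, process_events, init, done, ...) */
} ngx_event_module_t;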
ngx_event_module_t ngx_event_core_module_ctx = {
    &event_core_name,
    ngx_event_core_create_conf,            /* create configuration */
    ngx_event_core_init_conf,              /* init configuration */

    { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
};
ngx_module_t ngx_event_core_module = {
    NGX_MODULE_V1,
    &ngx_event_core_module_ctx,            /* module context */
    ngx_event_core_commands,               /* module directives */
    NGX_EVENT_MODULE,                      /* module type */
    NULL,                                  /* init master */
    ngx_event_module_init,                 /* init module */
    ngx_event_process_init,                /* init process */
    NULL,                                  /* init thread */
    NULL,                                  /* exit thread */
    NULL,                                  /* exit process */
    NULL,                                  /* exit master */
    NGX_MODULE_V1_PADDING
};
/* call init_module of every module */
for (i = 0; ngx_modules[i]; i++) {
    if (ngx_modules[i]->init_module) {
        if (ngx_modules[i]->init_module(cycle) != NGX_OK) {
            /* fatal */
            exit(1);
        }
    }
}
//src/event/ngx_event.c
static ngx_int_t
ngx_event_module_init(ngx_cycle_t *cycle)
{
    void              ***cf;
    u_char              *shared;
    size_t               size, cl;
    ngx_shm_t            shm;
    ngx_time_t          *tp;
    ngx_core_conf_t     *ccf;
    ngx_event_conf_t    *ecf;

    /* check whether ngx_events_module has initialized its conf */
    cf = ngx_get_conf(cycle->conf_ctx, ngx_events_module);

    if (cf == NULL) {
        ngx_log_error(NGX_LOG_EMERG, cycle->log, 0,
                      "no \"events\" section in configuration");
        return NGX_ERROR;
    }

    /* get the configuration structure of ngx_event_core_module */
    ecf = (*cf)[ngx_event_core_module.ctx_index];

    /* report which event method is in use, e.g. the one selected by "use" */
    if (!ngx_test_config && ngx_process <= NGX_PROCESS_MASTER) {
        ngx_log_error(NGX_LOG_NOTICE, cycle->log, 0,
                      "using the \"%s\" event method", ecf->name);
    }

    /* get the configuration structure of ngx_core_module */
    ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx, ngx_core_module);

    /* take the timer_resolution parameter from ngx_core_module's conf */
    ngx_timer_resolution = ccf->timer_resolution;

#if !(NGX_WIN32)
    {
    ngx_int_t      limit;
    struct rlimit  rlmt;

    /* get the maximum number of files this process may open, see man getrlimit */
    if (getrlimit(RLIMIT_NOFILE, &rlmt) == -1) {
        ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                      "getrlimit(RLIMIT_NOFILE) failed, ignored");

    } else {
        /* warn if the ngx_event_core_module connection count exceeds the
         * current (soft) limit, and ngx_core_module either leaves the limit
         * unset or allows fewer connections than requested */
        if (ecf->connections > (ngx_uint_t) rlmt.rlim_cur
            && (ccf->rlimit_nofile == NGX_CONF_UNSET
                || ecf->connections > (ngx_uint_t) ccf->rlimit_nofile))
        {
            limit = (ccf->rlimit_nofile == NGX_CONF_UNSET) ?
                         (ngx_int_t) rlmt.rlim_cur : ccf->rlimit_nofile;

            ngx_log_error(NGX_LOG_WARN, cycle->log, 0,
                          "%ui worker_connections are more than "
                          "open file resource limit: %i",
                          ecf->connections, limit);
        }
    }
    }
#endif /* !(NGX_WIN32) */

    /* if the master process is off, return: without a master process nginx
     * runs in single-process mode, and the shared memory and locks created
     * below are not needed */
    if (ccf->master == 0) {
        return NGX_OK;
    }

    /* if the accept mutex already exists, do not create it again */
    if (ngx_accept_mutex_ptr) {
        return NGX_OK;
    }

    /* cl should be equal or bigger than cache line size */
    cl = 128;

    /* create a shared memory segment of this size; it is split evenly
     * into three parts */
    size = cl            /* ngx_accept_mutex */
           + cl          /* ngx_connection_counter */
           + cl;         /* ngx_temp_number */

    /* prepare the shared memory: size bytes, named "nginx_shared_zone" */
    shm.size = size;
    shm.name.len = sizeof("nginx_shared_zone");
    shm.name.data = (u_char *) "nginx_shared_zone";
    shm.log = cycle->log;

    /* allocate the shared memory; its start address is stored in shm.addr */
    if (ngx_shm_alloc(&shm) != NGX_OK) {
        return NGX_ERROR;
    }

    /* keep the start address */
    shared = shm.addr;

    /* the accept mutex takes the first cl-sized part of the shared memory */
    ngx_accept_mutex_ptr = (ngx_atomic_t *) shared;
    ngx_accept_mutex.spin = (ngx_uint_t) -1;

    /* create the accept mutex.
     *
     * Its implementation depends on whether atomic operations are available:
     * if they are, this piece of shared memory implements the mutex;
     * otherwise a file lock is used instead.
     *
     * The accept mutex avoids the thundering-herd problem and load-balances
     * the worker processes.
     */
    if (ngx_shmtx_create(&ngx_accept_mutex, shared, cycle->lock_file.data)
        != NGX_OK)
    {
        return NGX_ERROR;
    }

    /* the second cl-sized part of the shared memory */
    ngx_connection_counter = (ngx_atomic_t *) (shared + 1 * cl);

    (void) ngx_atomic_cmp_set(ngx_connection_counter, 0, 1);

    ngx_log_debug2(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "counter: %p, %d",
                   ngx_connection_counter, *ngx_connection_counter);

    /* the third cl-sized part of the shared memory */
    ngx_temp_number = (ngx_atomic_t *) (shared + 2 * cl);

    tp = ngx_timeofday();

    ngx_random_number = (tp->msec << 16) + ngx_pid;

    return NGX_OK;
}
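The shared segment prepared above is carved into three cache-line-sized slots; schematically (derived from the code above):

/*
 * nginx_shared_zone (size = 3 * cl, cl = 128):
 *
 *   shared + 0 * cl   ngx_accept_mutex        lock word of the accept mutex
 *   shared + 1 * cl   ngx_connection_counter  global connection counter
 *   shared + 2 * cl   ngx_temp_number         counter used for temporary file names
 */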
//src/event/ngx_event.c
static ngx_int_t
ngx_event_process_init(ngx_cycle_t *cycle)
{
    ngx_uint_t           m, i;
    ngx_event_t         *rev, *wev;
    ngx_listening_t     *ls;
    ngx_connection_t    *c, *next, *old;
    ngx_core_conf_t     *ccf;
    ngx_event_conf_t    *ecf;
    ngx_event_module_t  *module;

    /* as before, get the configuration structures of the relevant modules */
    ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx, ngx_core_module);
    ecf = ngx_event_get_conf(cycle->conf_ctx, ngx_event_core_module);

    /* the accept mutex is used only if the master process is on, there is
     * more than one worker process, and accept_mutex is enabled */
    if (ccf->master && ccf->worker_processes > 1 && ecf->accept_mutex) {
        ngx_use_accept_mutex = 1;       /* use the accept mutex */
        ngx_accept_mutex_held = 0;      /* whether this worker holds the mutex */
        ngx_accept_mutex_delay = ecf->accept_mutex_delay;
                                        /* how long to wait before competing
                                           again after failing to grab it */

    } else {
        ngx_use_accept_mutex = 0;
    }

#if (NGX_THREADS)
    /* threads are not covered here */
#endif

    /* initialize the timers; this creates a red-black tree that maintains
     * the timers, covered in detail later */
    if (ngx_event_timer_init(cycle->log) == NGX_ERROR) {
        return NGX_ERROR;
    }

    for (m = 0; ngx_modules[m]; m++) {
        /* as explained before, skip modules that are not NGX_EVENT_MODULE */
        if (ngx_modules[m]->type != NGX_EVENT_MODULE) {
            continue;
        }

        /* skip every module except the one selected by the "use" directive;
         * on Linux the default is epoll */
        if (ngx_modules[m]->ctx_index != ecf->use) {
            continue;
        }

        module = ngx_modules[m]->ctx;

        /* call the init function of the selected event module.
         *
         * nginx implements many event modules (epoll, poll, select, kqueue,
         * aio, ... located in src/event/modules), so it adds an abstraction
         * layer over them; this lets different systems use different event
         * models and makes it easy to add new ones. Our focus is epoll.
         *
         * For epoll this init callback is ngx_epoll_init. module->actions
         * wraps all of epoll's interface functions; nginx registers epoll
         * in the event abstraction layer through this actions structure.
         * Its type is ngx_event_actions_t, defined in src/event/ngx_event.h.
         * These details are covered in the next section.
         */
        if (module->actions.init(cycle, ngx_timer_resolution) != NGX_OK) {
            /* fatal */
            exit(2);
        }

        break;
    }

    /* some code omitted here */

    /* create the global ngx_connection_t array that holds all connections;
     * since this runs in each worker process, every worker has its own
     * connections array */
    cycle->connections =
        ngx_alloc(sizeof(ngx_connection_t) * cycle->connection_n, cycle->log);
    if (cycle->connections == NULL) {
        return NGX_ERROR;
    }

    c = cycle->connections;

    /* create an array of read events */
    cycle->read_events = ngx_alloc(sizeof(ngx_event_t) * cycle->connection_n,
                                   cycle->log);
    if (cycle->read_events == NULL) {
        return NGX_ERROR;
    }

    rev = cycle->read_events;
    for (i = 0; i < cycle->connection_n; i++) {
        rev[i].closed = 1;
        rev[i].instance = 1;
#if (NGX_THREADS)
        rev[i].lock = &c[i].lock;
        rev[i].own_lock = &c[i].lock;
#endif
    }

    /* create an array of write events */
    cycle->write_events = ngx_alloc(sizeof(ngx_event_t) * cycle->connection_n,
                                    cycle->log);
    if (cycle->write_events == NULL) {
        return NGX_ERROR;
    }

    wev = cycle->write_events;
    for (i = 0; i < cycle->connection_n; i++) {
        wev[i].closed = 1;
#if (NGX_THREADS)
        wev[i].lock = &c[i].lock;
        wev[i].own_lock = &c[i].lock;
#endif
    }

    i = cycle->connection_n;
    next = NULL;

    /* initialize the whole connections array */
    do {
        i--;

        c[i].data = next;
        c[i].read = &cycle->read_events[i];
        c[i].write = &cycle->write_events[i];
        c[i].fd = (ngx_socket_t) -1;

        next = &c[i];

#if (NGX_THREADS)
        c[i].lock = 0;
#endif
    } while (i);

    cycle->free_connections = next;
    cycle->free_connection_n = cycle->connection_n;

    /* for each listening socket */
    /* assign each listening socket a connection (a slot) from the array */
    ls = cycle->listening.elts;
    for (i = 0; i < cycle->listening.nelts; i++) {

        /* take a fresh connection slot */
        c = ngx_get_connection(ls[i].fd, cycle->log);

        if (c == NULL) {
            return NGX_ERROR;
        }

        c->log = &ls[i].log;

        c->listening = &ls[i];
        ls[i].connection = c;

        rev = c->read;

        rev->log = c->log;
        rev->accept = 1;    /* when the read event fires, call accept */

#if (NGX_HAVE_DEFERRED_ACCEPT)
        /* omitted */
#endif

        if (!(ngx_event_flags & NGX_USE_IOCP_EVENT)) {
            if (ls[i].previous) {

                /*
                 * delete the old accept events that were bound to
                 * the old cycle read events array
                 */

                old = ls[i].previous->connection;

                if (ngx_del_event(old->read, NGX_READ_EVENT, NGX_CLOSE_EVENT)
                    == NGX_ERROR)
                {
                    return NGX_ERROR;
                }

                old->fd = (ngx_socket_t) -1;
            }
        }

        /* register ngx_event_accept as the read-event handler of the
         * listening socket */
        rev->handler = ngx_event_accept;

        /* with accept_mutex the listening socket is not added to epoll yet;
         * it is added only after the worker grabs the accept mutex, which
         * avoids the thundering-herd problem */
        if (ngx_use_accept_mutex) {
            continue;
        }

        if (ngx_event_flags & NGX_USE_RTSIG_EVENT) {
            if (ngx_add_conn(c) == NGX_ERROR) {
                return NGX_ERROR;
            }

        } else {
            /* without the accept mutex, add the listening socket to epoll now */
            if (ngx_add_event(rev, NGX_READ_EVENT, 0) == NGX_ERROR) {
                return NGX_ERROR;
            }
        }

#endif  /* the matching "#if (NGX_WIN32) ... #else" branch is omitted above */

    }

    return NGX_OK;
}
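The deferred registration above pays off in the worker's event loop: before waiting for events, each worker competes for the accept mutex, and only the holder keeps the listening sockets in its epoll set. A rough sketch of that logic, as found in ngx_process_events_and_timers() in the nginx source as I recall it (names like ngx_trylock_accept_mutex, ngx_accept_disabled and NGX_POST_EVENTS come from there; the exact code differs by version and is simplified here):

/* simplified sketch of the accept-mutex handling in
 * ngx_process_events_and_timers(); flag and timer handling are trimmed */
if (ngx_use_accept_mutex) {

    if (ngx_accept_disabled > 0) {
        /* this worker is overloaded, let the others accept for a while */
        ngx_accept_disabled--;

    } else {
        /* try to grab the accept mutex; the winner puts the listening
         * sockets into its epoll set */
        if (ngx_trylock_accept_mutex(cycle) == NGX_ERROR) {
            return;
        }

        if (ngx_accept_mutex_held) {
            flags |= NGX_POST_EVENTS;   /* queue events, handle accepts first */

        } else {
            /* did not get the mutex: wake up after at most
             * accept_mutex_delay to compete again */
            if (timer == NGX_TIMER_INFINITE || timer > ngx_accept_mutex_delay) {
                timer = ngx_accept_mutex_delay;
            }
        }
    }
}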