GlusterFS源代码解析 —— GlusterFS 内存分配方式

时间:2022-09-23 07:25:06

原文地址:http://blog.csdn.net/wangyuling1234567890/article/details/24564891

GlusterFS 的内存分配主要有两种方式,一种是内存池分配,一种是普通内存分配。

不了解内存池使用的请查阅相关文档,这里不再解释。

内存池分配使用一个mem_pool对象来管理,看过内核代码的话对内存池的结构就不会陌生了。

内核代码中好多管理就是利用内核list链表来进行。

内存池结构如下:

struct mem_pool {
struct list_head list; // list of unused (cold) chunks in the pool
int hot_count; // number of chunks currently handed out
int cold_count; // number of chunks still available in the pool
gf_lock_t lock;
unsigned long padded_sizeof_type; // per-chunk size including the management header
void *pool; // address of the first chunk of the pool's slab
void *pool_end; // address just past the last chunk of the slab
int real_sizeof_type; // usable payload size of each chunk
uint64_t alloc_count; // total requests: pool hits + misses
uint64_t pool_misses; // requests the pool could not satisfy
int max_alloc;
int curr_stdalloc; // chunks obtained from the system on pool miss; decremented when released
int max_stdalloc;
char *name; // pool name
struct list_head global_list; // linkage on the global pool list THIS->ctx->mempool_list
};

看内存分配的代码时,如果了解了GlusterFS内存分配结构,就没有什么难度了。其内存分配结构如下:

内存池中每一个内存块结构:

GlusterFS源代码解析 —— GlusterFS 内存分配方式

普通内存分配结构(申请N字节内存时实际申请内存结构):

GlusterFS源代码解析 —— GlusterFS 内存分配方式

Mem-pool.h

/*
Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS. This file is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3 or
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/ #ifndef _MEM_POOL_H_
#define _MEM_POOL_H_ #include "list.h"
#include "locking.h"
#include "logging.h"
#include "mem-types.h"
#include <stdlib.h>
#include <inttypes.h>
#include <string.h>
#include <stdarg.h> /* Per-xlator memory accounting: one mem_acct_rec per allocation type. */ struct mem_acct {
uint32_t num_types; // number of entries in 'rec'
struct mem_acct_rec *rec; // per-type accounting records, indexed by type
}; struct mem_acct_rec {
size_t size; // bytes currently allocated for this type
size_t max_size; // high-water mark of 'size'
uint32_t num_allocs; // live allocations of this type
uint32_t total_allocs; // cumulative allocations of this type
uint32_t max_num_allocs; // high-water mark of 'num_allocs'
gf_lock_t lock; // protects this record's counters
}; /* Accounted allocation API: wrappers that record per-type usage in the
 * owning xlator when memory accounting is enabled (see GF_* macros below). */ void *
__gf_calloc (size_t cnt, size_t size, uint32_t type); void *
__gf_malloc (size_t size, uint32_t type); void *
__gf_realloc (void *ptr, size_t size); int
gf_vasprintf (char **string_ptr, const char *format, va_list arg); int
gf_asprintf (char **string_ptr, const char *format, ...); void
__gf_free (void *ptr); static inline
/* Unaccounted malloc: plain malloc plus an ALERT log on failure. */
void* __gf_default_malloc (size_t size)
{
void *ptr = NULL; ptr = malloc (size);
if (!ptr)
gf_log_nomem ("", GF_LOG_ALERT, size); return ptr;
} static inline
/* Unaccounted calloc: plain calloc plus an ALERT log on failure. */
void* __gf_default_calloc (int cnt, size_t size)
{
void *ptr = NULL; ptr = calloc (cnt, size); /* allocate cnt objects of 'size' bytes, zero-initialized */
if (!ptr)
gf_log_nomem ("", GF_LOG_ALERT, (cnt * size)); return ptr;
} static inline
/* Unaccounted realloc: plain realloc plus an ALERT log on failure. */
void* __gf_default_realloc (void *oldptr, size_t size)
{
void *ptr = NULL; ptr = realloc (oldptr, size);
if (!ptr)
gf_log_nomem ("", GF_LOG_ALERT, size); return ptr;
} #define MALLOC(size) __gf_default_malloc(size)
#define CALLOC(cnt,size) __gf_default_calloc(cnt,size)
#define REALLOC(ptr,size) __gf_default_realloc(ptr,size) /* 指针赋值0xeeeeeeee是什么意思 ?? ?? */
#define FREE(ptr) \
if (ptr != NULL) { \
free ((void *)ptr); \
ptr = (void *)0xeeeeeeee; \
} #define GF_CALLOC(nmemb, size, type) __gf_calloc (nmemb, size, type) #define GF_MALLOC(size, type) __gf_malloc (size, type) #define GF_REALLOC(ptr, size) __gf_realloc (ptr, size) #define GF_FREE(free_ptr) __gf_free (free_ptr) static inline
/* Duplicate at most 'len' bytes of 'src' into a freshly GF_CALLOC'd,
 * NUL-terminated buffer. Returns NULL on NULL input or allocation
 * failure; caller releases with GF_FREE. */
char *gf_strndup (const char *src, size_t len)
{
char *dup_str = NULL; if (!src) {
goto out;
} dup_str = GF_CALLOC (1, len + 1, gf_common_mt_strdup); /* +1 for the NUL (calloc zeroes it) */
if (!dup_str) {
goto out;
} memcpy (dup_str, src, len);
out:
return dup_str;
} static inline
/* Duplicate the NUL-terminated string 'src' via GF_CALLOC (accounted
 * allocation). Returns NULL on NULL input or allocation failure; caller
 * releases with GF_FREE. */
char * gf_strdup (const char *src)
{
        char   *dup_str = NULL;
        size_t  len = 0;

        /* Bug fix: tolerate NULL like sibling gf_strndup does, instead
         * of crashing inside strlen(). */
        if (!src)
                return NULL;

        len = strlen (src) + 1;

        dup_str = GF_CALLOC (1, len, gf_common_mt_strdup);
        if (!dup_str)
                return NULL;

        memcpy (dup_str, src, len);

        return dup_str;
}

static inline void *
/* Copy 'size' bytes from 'src' into a freshly GF_CALLOC'd buffer and
 * return it (NULL on allocation failure).
 * NOTE(review): the 'dst' parameter is never used in this body --
 * callers should not expect it to be filled in; confirm upstream. */
gf_memdup (const void *src, void *dst, size_t size)
{
void *dup_mem = NULL; dup_mem = GF_CALLOC(1, size, gf_common_mt_strdup);
if (!dup_mem)
goto out; memcpy (dup_mem, src, size); out:
return dup_mem;
} struct mem_pool {
struct list_head list; // list of unused (cold) chunks in the pool
int hot_count; // number of chunks currently handed out
int cold_count; // number of chunks still available
gf_lock_t lock;
unsigned long padded_sizeof_type; // per-chunk size including the management header
void *pool; // address of the first chunk in the slab
void *pool_end; // address just past the last chunk in the slab
int real_sizeof_type; // usable payload size per chunk
uint64_t alloc_count; // total requests (pool hits + misses)
uint64_t pool_misses; // requests the pool could not satisfy
int max_alloc; // high-water mark of hot_count
int curr_stdalloc; // chunks calloc'd from the system on pool miss; decremented on mem_put
int max_stdalloc; // high-water mark of curr_stdalloc
char *name; // pool name ("<xlator-name>:<type-name>")
struct list_head global_list; // linkage on the global list THIS->ctx->mempool_list
}; struct mem_pool *
mem_pool_new_fn (unsigned long sizeof_type, unsigned long count, char *name); #define mem_pool_new(type,count) mem_pool_new_fn (sizeof(type), count, #type) void mem_put (void *ptr);
void *mem_get (struct mem_pool *pool);
void *mem_get0 (struct mem_pool *pool); void mem_pool_destroy (struct mem_pool *pool); void gf_mem_acct_enable_set (void *ctx); #endif /* _MEM_POOL_H */

Mem-pool.c

/*
Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS. This file is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3 or
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/ #include "mem-pool.h"
#include "logging.h"
#include "xlator.h"
#include <stdlib.h>
#include <stdarg.h> /*
 * Layout of one mem-pool chunk:
 * {
 *   struct list_head;   // linkage on the pool's free list
 *   struct mem_pool*;   // back-pointer to the owning pool
 *   int in_use;         // 1 = handed out, 0 = free
 *   char mem_size[N];   // payload actually usable by the caller
 * }
 *
 * Every pool request for mem_size bytes therefore occupies one whole
 * structure of the shape above. */ #define GF_MEM_POOL_LIST_BOUNDARY (sizeof(struct list_head))
#define GF_MEM_POOL_PTR (sizeof(struct mem_pool*))
#define GF_MEM_POOL_PAD_BOUNDARY (GF_MEM_POOL_LIST_BOUNDARY + GF_MEM_POOL_PTR + sizeof(int))
#define mem_pool_chunkhead2ptr(head) ((head) + GF_MEM_POOL_PAD_BOUNDARY)
#define mem_pool_ptr2chunkhead(ptr) ((ptr) - GF_MEM_POOL_PAD_BOUNDARY)
#define is_mem_chunk_in_use(ptr) (*ptr == 1)
#define mem_pool_from_ptr(ptr) ((ptr) + GF_MEM_POOL_LIST_BOUNDARY) /*
 * Layout of a plain __gf_*alloc allocation:
 * {
 *   GF_MEM_HEADER_SIZE bytes: // 4 + sizeof (size_t) + sizeof (xlator_t *) + 4 + 8:
 *                             // type tag + payload size N + owning xlator_t* +
 *                             // GF_MEM_HEADER_MAGIC + 8 bytes of padding
 *   char mem_size[N];         // payload usable by the caller
 *   char mem_end[GF_MEM_TRAILER_SIZE]; // 8 bytes stamped with 0xBAADF00D marking the end
 * }
 *
 * Each request for mem_size bytes allocates one whole structure of the
 * shape above.
 */
#define GF_MEM_HEADER_SIZE (4 + sizeof (size_t) + sizeof (xlator_t *) + 4 + 8)
#define GF_MEM_TRAILER_SIZE 8 /* Magic values stamped into header/trailer so __gf_free / __gf_realloc
 * can detect corruption and stray pointers: CAFEBABE guards the header,
 * BAADF00D guards the end of the payload. */
#define GF_MEM_HEADER_MAGIC 0xCAFEBABE
#define GF_MEM_TRAILER_MAGIC 0xBAADF00D #define GLUSTERFS_ENV_MEM_ACCT_STR "GLUSTERFS_DISABLE_MEM_ACCT" void
/* Turn on per-type memory accounting in the given glusterfs_ctx_t. */
gf_mem_acct_enable_set (void *data)
{
glusterfs_ctx_t *ctx = NULL; ctx = data; GF_ASSERT (ctx); ctx->mem_acct_enable = 1; return;
} void
/* Record an allocation of 'size' bytes of 'type' in xl's accounting
 * table, then stamp the bookkeeping header (type, size, xl, magic,
 * padding) at *alloc_ptr and the trailer magic just past the payload.
 * On return *alloc_ptr has been advanced past the header to the
 * caller-visible payload. */
gf_mem_set_acct_info (xlator_t *xl, char **alloc_ptr,
size_t size, uint32_t type)
{
char *ptr = NULL; if (!alloc_ptr)
return; ptr = (char *) (*alloc_ptr); GF_ASSERT (xl != NULL); GF_ASSERT (xl->mem_acct.rec != NULL); GF_ASSERT (type <= xl->mem_acct.num_types); LOCK(&xl->mem_acct.rec[type].lock);
{
xl->mem_acct.rec[type].size += size;
xl->mem_acct.rec[type].num_allocs++;
xl->mem_acct.rec[type].total_allocs++;
xl->mem_acct.rec[type].max_size =
max (xl->mem_acct.rec[type].max_size,
xl->mem_acct.rec[type].size);
xl->mem_acct.rec[type].max_num_allocs =
max (xl->mem_acct.rec[type].max_num_allocs,
xl->mem_acct.rec[type].num_allocs);
}
UNLOCK(&xl->mem_acct.rec[type].lock); *(uint32_t *)(ptr) = type; /* header layout: [type][size][xl][magic][8B pad] */
ptr = ptr + 4;
memcpy (ptr, &size, sizeof(size_t));
ptr += sizeof (size_t);
memcpy (ptr, &xl, sizeof(xlator_t *));
ptr += sizeof (xlator_t *);
*(uint32_t *)(ptr) = GF_MEM_HEADER_MAGIC;
ptr = ptr + 4;
ptr = ptr + 8; // skip the 8 padding bytes that complete GF_MEM_HEADER_SIZE
*(uint32_t *) (ptr + size) = GF_MEM_TRAILER_MAGIC; *alloc_ptr = (void *)ptr; /* hand back a pointer to the payload */
return;
} void *
/* Accounted calloc: when memory accounting is enabled, allocates
 * nmemb*size payload bytes plus bookkeeping header and trailer, records
 * the allocation against THIS xlator under 'type', and returns a
 * pointer to the zeroed payload. Falls back to plain CALLOC when
 * accounting is off. Returns NULL on overflow or OOM; release with
 * GF_FREE. */
void *
__gf_calloc (size_t nmemb, size_t size, uint32_t type)
{
        size_t    tot_size = 0;
        size_t    req_size = 0;
        char     *ptr = NULL;
        xlator_t *xl = NULL;

        if (!THIS->ctx->mem_acct_enable)
                return CALLOC (nmemb, size);

        xl = THIS;

        /* Bug fix: nmemb * size could wrap silently, making the checked
         * calloc(1, tot_size) below allocate far too little. */
        if (nmemb != 0 && size > SIZE_MAX / nmemb)
                return NULL;

        req_size = nmemb * size;
        tot_size = req_size + GF_MEM_HEADER_SIZE + GF_MEM_TRAILER_SIZE;

        ptr = calloc (1, tot_size);
        if (!ptr) {
                gf_log_nomem ("", GF_LOG_ALERT, tot_size);
                return NULL;
        }

        gf_mem_set_acct_info (xl, &ptr, req_size, type);

        return (void *)ptr;
}

void *
/* Accounted malloc: like __gf_calloc but returns uninitialized memory.
 * Falls back to plain MALLOC when accounting is disabled; release with
 * GF_FREE. */
__gf_malloc (size_t size, uint32_t type)
{
size_t tot_size = 0;
char *ptr = NULL;
xlator_t *xl = NULL; if (!THIS->ctx->mem_acct_enable)
return MALLOC (size); xl = THIS; tot_size = size + GF_MEM_HEADER_SIZE + GF_MEM_TRAILER_SIZE; ptr = malloc (tot_size); /* payload + header + trailer */
if (!ptr) {
gf_log_nomem ("", GF_LOG_ALERT, tot_size);
return NULL;
}
gf_mem_set_acct_info (xl, &ptr, size, type); return (void *)ptr;
} void *
/* Accounted realloc: 'ptr' must have come from __gf_malloc/__gf_calloc.
 * Walks back through the header to verify the magic and recover the
 * owning xlator and type, reallocs the whole padded region, then
 * restamps the header/trailer via gf_mem_set_acct_info.
 * NOTE(review): on realloc failure this returns NULL after the old
 * block was already handed to realloc -- callers must not keep using
 * the old pointer. */
__gf_realloc (void *ptr, size_t size)
{
size_t tot_size = 0;
char *orig_ptr = NULL;
xlator_t *xl = NULL;
uint32_t type = 0; if (!THIS->ctx->mem_acct_enable)
return REALLOC (ptr, size); tot_size = size + GF_MEM_HEADER_SIZE + GF_MEM_TRAILER_SIZE; orig_ptr = (char *)ptr - 8 - 4; GF_ASSERT (*(uint32_t *)orig_ptr == GF_MEM_HEADER_MAGIC); orig_ptr = orig_ptr - sizeof(xlator_t *); /* back over padding+magic, then over the stored xlator_t* */
xl = *((xlator_t **)orig_ptr); orig_ptr = (char *)ptr - GF_MEM_HEADER_SIZE; /* back to the true allocation start */
type = *(uint32_t *)orig_ptr; ptr = realloc (orig_ptr, tot_size);
if (!ptr) {
gf_log_nomem ("", GF_LOG_ALERT, tot_size);
return NULL;
} gf_mem_set_acct_info (xl, (char **)&ptr, size, type); return (void *)ptr;
} /*
* 把參数列表解析到出參 string_ptr 中,并返回字符串长度
*
* 注意:自己主动为出參分配内存
*/
/*
 * vasprintf-style helper: measures the required length with a first
 * vsnprintf pass, allocates it via GF_MALLOC, then formats into the
 * buffer. On success stores the new string in *string_ptr (caller
 * frees with GF_FREE) and returns the formatted length; returns -1 on
 * bad arguments or allocation failure.
 */
int
gf_vasprintf (char **string_ptr, const char *format, va_list arg)
{
        va_list  arg_save;
        char    *str = NULL;
        int      size = 0;
        int      rv = 0;

        if (!string_ptr || !format)
                return -1;

        /* the measuring pass consumes 'arg', so keep a copy for the
         * real formatting pass */
        va_copy (arg_save, arg);

        size = vsnprintf (NULL, 0, format, arg);
        size++;
        str = GF_MALLOC (size, gf_common_mt_asprintf);
        if (str == NULL) {
                /* log is done in GF_MALLOC itself */
                va_end (arg_save); /* bug fix: every va_copy needs a matching va_end (C11 7.16.1) */
                return -1;
        }

        rv = vsnprintf (str, size, format, arg_save);
        va_end (arg_save); /* bug fix: was also leaked on the success path */

        *string_ptr = str;
        return (rv);
}

/*
* 为出參string_ptr分配内存,并把变參内容格式化存放在string_ptr中
*
* 返回出參长度
*/
/* Format the varargs into a freshly allocated string stored at
 * *string_ptr (caller frees with GF_FREE). Returns the formatted
 * length, or -1 on failure. */
int
gf_asprintf (char **string_ptr, const char *format, ...)
{
        int     len = 0;
        va_list ap;

        va_start (ap, format);
        len = gf_vasprintf (string_ptr, format, ap);
        va_end (ap);

        return len;
}

/*
*
*
*/
/* Accounted free: verifies the header/trailer magics, updates the
 * per-type accounting in the owning xlator, then frees the whole
 * padded region. With accounting disabled this is just FREE(). */
void
__gf_free (void *free_ptr)
{
size_t req_size = 0;
char *ptr = NULL;
uint32_t type = 0;
xlator_t *xl = NULL; if (!THIS->ctx->mem_acct_enable) {
FREE (free_ptr);
return;
} if (!free_ptr)
return; ptr = (char *)free_ptr - 8 - 4; /* step back over padding to the header magic */ //Possible corruption, assert here
GF_ASSERT (GF_MEM_HEADER_MAGIC == *(uint32_t *)ptr); *(uint32_t *)ptr = 0; /* clear the magic so a double free trips the assert above */ ptr = ptr - sizeof(xlator_t *); /* back to the stored owning xlator_t* */
memcpy (&xl, ptr, sizeof(xlator_t *)); //gf_free expects xl to be available
GF_ASSERT (xl != NULL); if (!xl->mem_acct.rec) {
ptr = (char *)free_ptr - GF_MEM_HEADER_SIZE;
goto free;
} ptr = ptr - sizeof(size_t); /* back to the stored payload size */
memcpy (&req_size, ptr, sizeof (size_t));
ptr = ptr - 4; /* back to the type tag == start of the real allocation */
type = *(uint32_t *)ptr; // This points to a memory overrun
GF_ASSERT (GF_MEM_TRAILER_MAGIC ==
*(uint32_t *)((char *)free_ptr + req_size)); *(uint32_t *) ((char *)free_ptr + req_size) = 0; LOCK (&xl->mem_acct.rec[type].lock); /* trailer cleared for the same double-free detection */
{
xl->mem_acct.rec[type].size -= req_size;
xl->mem_acct.rec[type].num_allocs--;
}
UNLOCK (&xl->mem_acct.rec[type].lock);
free:
FREE (ptr);
} /*
* 分配内存池,有count个块.
* 每一个块大小为sizeof_type(可用大小) + GF_MEM_POOL_PAD_BOUNDARY(存放管理信息)
* 内存块挂在mem_pool->pool链表上,内存池挂在: THIS->ctx->mempool_list 上
*
* 内存池名字: THIS->name:name
*/
/*
 * Create a pool of 'count' chunks, each with sizeof_type usable bytes
 * plus GF_MEM_POOL_PAD_BOUNDARY bytes of per-chunk management data
 * (list head, owning-pool pointer, in_use flag). All chunks start on
 * mem_pool->list; the pool itself is linked onto the global list
 * THIS->ctx->mempool_list. Pool name is "<xlator-name>:<name>".
 * Returns NULL on bad arguments or allocation failure.
 */
struct mem_pool *
mem_pool_new_fn (unsigned long sizeof_type,
                 unsigned long count, char *name)
{
        struct mem_pool  *mem_pool = NULL;
        unsigned long     padded_sizeof_type = 0;
        void             *pool = NULL;
        int               i = 0;
        int               ret = 0;
        struct list_head *list = NULL;
        glusterfs_ctx_t  *ctx = NULL;

        if (!sizeof_type || !count) {
                gf_log_callingfn ("mem-pool", GF_LOG_ERROR, "invalid argument");
                return NULL;
        }
        padded_sizeof_type = sizeof_type + GF_MEM_POOL_PAD_BOUNDARY;

        mem_pool = GF_CALLOC (sizeof (*mem_pool), 1, gf_common_mt_mem_pool);
        if (!mem_pool)
                return NULL;

        ret = gf_asprintf (&mem_pool->name, "%s:%s", THIS->name, name);
        if (ret < 0) {
                GF_FREE (mem_pool); /* bug fix: this early return used to leak mem_pool */
                return NULL;
        }

        if (!mem_pool->name) {
                GF_FREE (mem_pool);
                return NULL;
        }

        LOCK_INIT (&mem_pool->lock);
        INIT_LIST_HEAD (&mem_pool->list);
        INIT_LIST_HEAD (&mem_pool->global_list);

        mem_pool->padded_sizeof_type = padded_sizeof_type;
        mem_pool->cold_count = count;
        mem_pool->real_sizeof_type = sizeof_type;

        /* one contiguous slab holds all the chunks */
        pool = GF_CALLOC (count, padded_sizeof_type, gf_common_mt_long);
        if (!pool) {
                GF_FREE (mem_pool->name);
                GF_FREE (mem_pool);
                return NULL;
        }

        /* thread every chunk onto the pool's free list */
        for (i = 0; i < count; i++) {
                list = pool + (i * (padded_sizeof_type));
                INIT_LIST_HEAD (list);
                list_add_tail (list, &mem_pool->list);
        }

        mem_pool->pool = pool;
        mem_pool->pool_end = pool + (count * (padded_sizeof_type));

        /* add this pool to the global list */
        ctx = THIS->ctx;
        if (!ctx)
                goto out;

        list_add (&mem_pool->global_list, &ctx->mempool_list);

out:
        return mem_pool;
}

void*
/* mem_get() plus zeroing of the chunk's usable payload. */
mem_get0 (struct mem_pool *mem_pool)
{
void *ptr = NULL; if (!mem_pool) {
gf_log_callingfn ("mem-pool", GF_LOG_ERROR, "invalid argument");
return NULL;
} ptr = mem_get(mem_pool); if (ptr)
memset(ptr, 0, mem_pool->real_sizeof_type); return ptr;
} /*
* 从内存池获取一个内存块
*
* 假设内存池内存块用完,则从系统内存分配一块。并把该内存块归属在该mem_pool。但不增加链表
*
*/
/*
 * Grab one chunk from the pool.
 *
 * If the pool has run out of chunks, fall back to a plain heap
 * allocation of the same padded size; that chunk still records
 * mem_pool as its owner (so mem_put can tell the cases apart) but is
 * never linked on the pool's free list.
 */
void *
mem_get (struct mem_pool *mem_pool)
{
        struct list_head *list = NULL;
        void *ptr = NULL;
        int *in_use = NULL;
        struct mem_pool **pool_ptr = NULL;

        if (!mem_pool) {
                gf_log_callingfn ("mem-pool", GF_LOG_ERROR, "invalid argument");
                return NULL;
        }

        LOCK (&mem_pool->lock);
        {
                mem_pool->alloc_count++;
                if (mem_pool->cold_count) {
                        list = mem_pool->list.next;
                        list_del (list);

                        mem_pool->hot_count++;
                        mem_pool->cold_count--;

                        if (mem_pool->max_alloc < mem_pool->hot_count)
                                mem_pool->max_alloc = mem_pool->hot_count;

                        ptr = list;
                        in_use = (ptr + GF_MEM_POOL_LIST_BOUNDARY +
                                  GF_MEM_POOL_PTR);
                        *in_use = 1; /* mark the chunk as handed out */

                        /* bug fix: this goto had been swallowed into the
                         * trailing // comment above, so a pool hit fell
                         * through into the heap-fallback branch below */
                        goto fwd_addr_out;
                }

                /* This is a problem area. If we've run out of
                 * chunks in our slab above, we need to allocate
                 * enough memory to service this request.
                 * The problem is, these individual chunks will fail
                 * the first address range check in __is_member. Now, since
                 * we're not allocating a full second slab, we wont have
                 * enough info perform the range check in __is_member.
                 *
                 * I am working around this by performing a regular allocation
                 * , just the way the caller would've done when not using the
                 * mem-pool. That also means, we're not padding the size with
                 * the list_head structure because, this will not be added to
                 * the list of chunks that belong to the mem-pool allocated
                 * initially.
                 *
                 * This is the best we can do without adding functionality for
                 * managing multiple slabs. That does not interest us at present
                 * because it is too much work knowing that a better slab
                 * allocator is coming RSN.
                 */
                {
                        mem_pool->pool_misses++;
                        mem_pool->curr_stdalloc++;
                        if (mem_pool->max_stdalloc < mem_pool->curr_stdalloc)
                                mem_pool->max_stdalloc = mem_pool->curr_stdalloc;
                        ptr = GF_CALLOC (1, mem_pool->padded_sizeof_type,
                                         gf_common_mt_mem_pool);
                        gf_log_callingfn ("mem-pool", GF_LOG_DEBUG, "Mem pool is full. "
                                          "Callocing mem");
                }
                /* Memory coming from the heap need not be transformed from a
                 * chunkhead to a usable pointer since it is not coming from
                 * the pool.
                 */
        }
fwd_addr_out:
        pool_ptr = mem_pool_from_ptr (ptr);
        *pool_ptr = (struct mem_pool *)mem_pool;
        ptr = mem_pool_chunkhead2ptr (ptr);
        UNLOCK (&mem_pool->lock);

        return ptr;
} static int
/* Classify 'ptr' against 'pool': 1 = a chunk of this pool's slab,
 * 0 = outside the slab (i.e. a heap-fallback allocation), -1 = inside
 * the slab but not on a chunk boundary (corruption) or bad arguments. */
__is_member (struct mem_pool *pool, void *ptr)
{
if (!pool || !ptr) {
gf_log_callingfn ("mem-pool", GF_LOG_ERROR, "invalid argument");
return -1;
} if (ptr < pool->pool || ptr >= pool->pool_end)
return 0; if ((mem_pool_ptr2chunkhead (ptr) - pool->pool)
% pool->padded_sizeof_type)
return -1; return 1;
} /*
* 释放一个内存块
*
* 若该内存块是从mem_pool 中取得。则放回到mem_pool的list中。
* 假设是从系统内存分配,则直接释放,并把 mem_pool->curr_stdalloc 计数减一
*/
/* Return a chunk obtained from mem_get()/mem_get0() to its pool.
 * Pool-slab chunks go back on the free list; heap-fallback chunks are
 * GF_FREE'd and curr_stdalloc is decremented. */
void
mem_put (void *ptr)
{
struct list_head *list = NULL;
int *in_use = NULL;
void *head = NULL;
struct mem_pool **tmp = NULL;
struct mem_pool *pool = NULL; if (!ptr) {
gf_log_callingfn ("mem-pool", GF_LOG_ERROR, "invalid argument");
return;
} list = head = mem_pool_ptr2chunkhead (ptr); /* back up over the chunk header */
tmp = mem_pool_from_ptr (head); /* location of the stored owning-pool pointer */
if (!tmp) {
gf_log_callingfn ("mem-pool", GF_LOG_ERROR,
"ptr header is corrupted");
return;
} pool = *tmp;
if (!pool) {
gf_log_callingfn ("mem-pool", GF_LOG_ERROR,
"mem-pool ptr is NULL");
return;
}
LOCK (&pool->lock);
{ switch (__is_member (pool, ptr))
{
case 1: /* chunk belongs to the pool's slab: put it back on the free list */
in_use = (head + GF_MEM_POOL_LIST_BOUNDARY +
GF_MEM_POOL_PTR);
if (!is_mem_chunk_in_use(in_use)) {
gf_log_callingfn ("mem-pool", GF_LOG_CRITICAL,
"mem_put called on freed ptr %p of mem "
"pool %p", ptr, pool);
break;
}
pool->hot_count--;
pool->cold_count++;
*in_use = 0;
list_add (list, &pool->list);
break;
case -1:
/* For some reason, the address given is within
 * the address range of the mem-pool but does not align
 * with the expected start of a chunk that includes
 * the list headers also. Sounds like a problem in
 * layers of clouds up above us. ;)
 */
abort ();
break;
case 0:
/* The address is outside the range of the mem-pool. We
 * assume here that this address was allocated at a
 * point when the mem-pool was out of chunks in mem_get
 * or the programmer has made a mistake by calling the
 * wrong de-allocation interface. We do
 * not have enough info to distinguish between the two
 * situations.
 */
pool->curr_stdalloc--;
GF_FREE (list);
break;
default:
/* log error */
break;
}
}
UNLOCK (&pool->lock);
} /*
* 注销内存池
*
* 假设 pool->curr_stdalloc 不为0。则存在内存泄漏 !!!
*/
/* Tear down a pool: unlink it from the global list and free the name,
 * the chunk slab and the pool object itself.
 * NOTE(review): chunks still handed out, and any outstanding
 * heap-fallback chunks (curr_stdalloc != 0), are not reclaimed here --
 * outstanding ones at destroy time are leaked by the caller. */
void
mem_pool_destroy (struct mem_pool *pool)
{
if (!pool)
return; gf_log (THIS->name, GF_LOG_INFO, "size=%lu max=%d total=%"PRIu64,
pool->padded_sizeof_type, pool->max_alloc, pool->alloc_count); list_del (&pool->global_list); LOCK_DESTROY (&pool->lock);
GF_FREE (pool->name);
GF_FREE (pool->pool);
GF_FREE (pool); return;
}

GlusterFS源代码解析 —— GlusterFS 内存分配方式的更多相关文章

  1. GlusterFS源代码解析 —— GlusterFS 简单介绍

    原文地址:http://blog.csdn.net/wangyuling1234567890/article/details/24564185 -- -- 本系列博客源代码是基于GlusterFS 3 ...

  2. GlusterFS源代码解析 —— GlusterFS 日志

    Logging.c: /* Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com> This file is part ...

  3. c/c++内存分配方式(转)

    原文链接:http://blog.csdn.net/jing0611/article/details/4030237 1.内存分配方式 内存分配方式有三种: [1]从静态存储区域分配.内存在 程序编译 ...

  4. 内存分配方式,堆区,栈区,new/delete/malloc/free

    1.内存分配方式 内存分配方式有三种: [1]从静态存储区域分配.内存在程序编译的时候就已经分配好,这块内存在程序的整个运行期间都存在.例如全局变量,static变量. [2]在栈上创建.在执行函数时 ...

  5. [转载]C语言程序的内存分配方式

    "声明一个数组时,编译器将根据声明所指定的元素数量为数量为数组保留内存空间."其实就是编译器在编译的过程中,会加入几条汇编指令在程序里处理内存分配,并不是说编译时就分配了内存,不要 ...

  6. 转 C/C++内存分配方式与存储区

    C/C++内存分配方式与存储区 C/C++内存分配有三种方式:[1]从静态存储区域分配.内存在程序编译的时候就已经分配好,这块内存在程序的整个运行期间都存在.例如全局变量,static变量.[2]在栈 ...

  7. C和C++内存分配方式记录

    C. C++中内存分配方式可以分为三种: (1)从静态存储区域分配:内存在程序编译时就已经分配好,这块内存在程序的整个运行期间都存在.速度快.不容易出错,因为有系统会善后.例如全局变量,static变 ...

  8. C/C++ 内存分配方式,堆区,栈区,new/delete/malloc/free

    内存分配方式 内存分配方式有三种: [1] 从静态存储区域分配.内存在程序编译的时候就已经分配好,这块内存在程序的整个运行期间都存在.例如全局变量, static 变量. [2] 在栈上创建.在执行函 ...

  9. C++内存分配方式——(别人的博客)

    http://www.cnblogs.com/easonpan/archive/2012/04/26/2471153.html http://blog.csdn.net/chen825919148/a ...

随机推荐

  1. 工作中遇到的问题--实现CustomerSetting的实时更新

    首先在项目运行时就初始化CustomerSettings的值,采用@Bean,默认是singtone模式,只会加载一次. @Configuration@Order(3)@EnableWebMvcSec ...

  2. IIS 发布MVC 提示开启目录浏览

    修改应用池 托管管道模式为 集成

  3. 第五篇 Getting Started with ORACLE EBS(开始学习ORACLE EBS)

    第一篇介绍了ERP软件是供应链管理软件.告诉你这个软件改善或提升企业管理的切入点和着力点.有了着力点才能给力. 第二篇介绍了什么是咨询以及咨询工作共通的章法,告诉了你咨询的套路是什么,就像练习一套拳, ...

  4. [017]string类使用注意事项

    最近自己写着玩,写了一个这样的函数: void foo(const string& iStr) { ; i < iStr.length(); ++i) { string str = iS ...

  5. winform 导出TXT 分类: WinForm 2014-05-15 15:29 128人阅读 评论(0) 收藏

    截图: 代码实现:(导出txt按钮事件) using System.IO; using System.Data.OleDb; private void btnOutTxt_Click(object s ...

  6. 转:更改 centos yum 源

    centos下可以通过yum很方便快捷的安装所需的软件和库,如果yum的源不好,安装速度会非常慢,centos默认官方源似乎都是国外的,所以速度无法保证,我一直使用163的源,感觉速度不错.下面就说说 ...

  7. HGE引擎 - 绘制,声音,碰撞处理

    原帖地址:http://blog.csdn.net/i_dovelemon/article/details/8818037 另外,年代久远,该引擎官网早已上不去了!!! 1.库的安装和下载 从官网上h ...

  8. laravel使用Schema创建数据表

    1.简介 迁移就像数据库的版本控制,允许团队简单轻松的编辑并共享应用的数据库表结构,迁移通常和Laravel的schema构建器结对从而可以很容易地构建应用的数据库表结构.如果你曾经告知小组成员需要手 ...

  9. golang sort包使用

    https://studygolang.com/static/pkgdoc/pkg/sort.htm#StringSlice.Search package main import ( "fm ...

  10. Idea checkstyle插件的使用

    File->Setting 选择Plugins,查询是否已经安装了checkstyle,如果没有安装,可以点击下面的“Browse repositories...”按钮 查询到checkstyl ...