nalloc.c (3173B)
/* nalloc.c: a simple single-arena allocator for command-line-lifetime allocation */
#include "rc.h"

static struct Block {
	size_t used, size;
	char *mem;
	Block *n;
} *fl, *ul;

/* alignto() works only with power of 2 blocks and assumes 2's complement arithmetic */
#define alignto(m, n) ((m + n - 1) & ~(n - 1))
#define BLOCKSIZE ((size_t) 4096)

/* Allocate a block from the free list or malloc one if none in the fl fit */

static void getblock(size_t n) {
	Block *r, *p;
	for (r = fl, p = NULL; r != NULL; p = r, r = r->n)
		if (n <= r->size)
			break;		/* look for a block which fits the request */
	if (r != NULL) {		/* if one is found, take it off the free list */
		if (p != NULL)
			p->n = r->n;
		else
			fl = r->n;
	} else {			/* else allocate a new block */
		r = enew(Block);
		r->mem = ealloc(r->size = alignto(n, BLOCKSIZE));
	}
	r->used = 0;
	r->n = ul;
	ul = r;
}

/*
   A fast single-arena allocator. Looks at the current block, and if
   there is not enough room, it goes to getblock() for more. "ul"
   stands for "used list", and the head of the list is the current
   block. "ulp" is a register cache for "ul"; this routine is hacked
   for speed. (sigh, optimizing RISC compilers still can't cache the
   address of a global in a register)
*/

extern void *nalloc(size_t n) {
	size_t base;
	Block *ulp;
	n = alignto(n, sizeof(align_t));
	ulp = ul;
	if (ulp != NULL && n + (base = ulp->used) < ulp->size) {
		ulp->used = base + n;
		return &ulp->mem[base];
	} else {
		getblock(n);
		assert(ul->used == 0);
		(ulp = ul)->used = n;
		return &ulp->mem[0];
	}
}

/*
   Frees memory from nalloc space by putting it on the free list.
   Returns free blocks to the system, retaining at least MAXMEM bytes
   worth of blocks for nalloc.
*/

#define MAXMEM 500000

extern void nfree() {
	size_t count;
	Block *r;
	if (ul == NULL)
		return;
	for (r = ul; r->n != NULL; r = r->n)
		;			/* get to end of used list */
	r->n = fl;			/* tack free list onto it */
	fl = ul;			/* and make it the free list */
	ul = NULL;			/* finally, zero out the used list */
	for (r = fl, count = r->size; r->n != NULL; r = r->n, count += r->size) {
		if (count >= MAXMEM) {
			Block *tmp = r;
			r = r->n;
			tmp->n = NULL;	/* terminate the free list */
			while (r != NULL) {	/* free memory off the tail of the free list */
				tmp = r->n;
				efree(r->mem);
				efree(r);
				r = tmp;
			}
			return;
		}
	}
}

/*
   "Allocates" a new arena by zeroing out the old one. Up to the
   calling routine to keep the old value of the block around.
*/

extern Block *newblock() {
	Block *old = ul;
	ul = NULL;
	return old;
}

/* "Restores" an arena to its saved value. */

extern void restoreblock(Block *old) {
	nfree();
	ul = old;
}

/* generic memory allocation functions */

extern void *ealloc(size_t n) {
	void *p;

	assert(n);
	p = malloc(n);
	if (p == NULL) {
		uerror("malloc");
		rc_exit(1);
	}
	return p;
}

extern void *erealloc(void *p, size_t n) {
	if (p == NULL)	/* erealloc() has POSIX realloc() semantics */
		return ealloc(n);
	if ((p = realloc(p, n)) == NULL) {
		uerror("realloc");
		rc_exit(1);
	}
	return p;
}

extern void efree(void *p) {
	if (p != NULL)
		free(p);
}
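
The functions above are the whole arena interface the rest of rc sees. As a rough illustration of the intended lifetime pattern, here is a minimal sketch; it is not part of nalloc.c, it assumes only the declarations that rc.h exports (Block, nalloc, nfree, newblock, restoreblock), and run_command is a made-up caller, not a real rc routine.

/* usage sketch, assuming the declarations from rc.h */
#include "rc.h"

void run_command(void) {
	char *argbuf = nalloc(64);	/* lives until the next nfree() */
	Block *saved = newblock();	/* park the current arena, start a fresh one */
	char *scratch = nalloc(128);	/* allocated in the fresh arena */
	argbuf[0] = scratch[0] = '\0';	/* ... use both as ordinary memory ... */
	restoreblock(saved);		/* recycle the fresh arena; argbuf is still valid */
	nfree();			/* command done: its blocks go back on the free list */
}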