Commit 01585b4b authored by Mike Hibler's avatar Mike Hibler
Browse files

Don't be quite so stupid with memory allocation. I was realloc'ing the
array for every 32-byte entry. When building a hashfile with 100,000+
entries, this caused quite a bit of moving memory around!

It is still stupid, I just realloc after every 256KB or so.  Need to
eliminate the need to have the whole thing contiguous and maybe the need
to have the whole thing in memory at once!
parent e49e2f9a
......@@ -333,6 +333,12 @@ readhashinfo(char *name, struct hashinfo **hinfop)
return 0;
}
/*
* We realloc the region array in big chunks so we don't thrash so much.
* This is the number of ~32 byte regions per memory chunk
*/
#define REGPERBLK 8192 /* ~256KB -- must be power of 2 */
static void
addhash(struct hashinfo **hinfop, int chunkno, uint32_t start, uint32_t size,
unsigned char hash[HASH_MAXSIZE])
......@@ -351,17 +357,19 @@ addhash(struct hashinfo **hinfop, int chunkno, uint32_t start, uint32_t size,
if (hinfo == 0) {
nreg = 0;
hinfo = calloc(1, sizeof(*hinfo) + sizeof(struct hashregion));
hinfo = calloc(1, sizeof(*hinfo));
} else {
nreg = hinfo->nregions;
hinfo = realloc(hinfo, sizeof(*hinfo) +
(nreg+1) * sizeof(struct hashregion));
}
if (hinfo == 0) {
fprintf(stderr, "out of memory for hash map\n");
exit(1);
if ((nreg % REGPERBLK) == 0) {
hinfo = realloc(hinfo, sizeof(*hinfo) +
(nreg+REGPERBLK) * sizeof(struct hashregion));
if (hinfo == 0) {
fprintf(stderr, "out of memory for hash map\n");
exit(1);
}
*hinfop = hinfo;
}
*hinfop = hinfo;
hinfo->regions[nreg].chunkno = chunkno;
hinfo->regions[nreg].region.start = start;
......
Markdown is supported
Attach a file by drag &amp; drop or click to upload.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment