/*-*************************************
* Dependencies
***************************************/
#include <stdio.h>  /* fprintf */
#include <stdlib.h> /* malloc, free, qsort */
#include <string.h> /* memset */
#include <time.h>   /* clock */

#include "mem.h" /* read */
#include "pool.h"
#include "threading.h"
#include "cover.h"
#include "zstd_internal.h" /* includes zstd.h */
#ifndef ZDICT_STATIC_LINKING_ONLY
#define ZDICT_STATIC_LINKING_ONLY
#endif
#include "zdict.h"


/*-*************************************
* Constants
***************************************/
#define FASTCOVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((U32)-1) : ((U32)1 GB))
#define FASTCOVER_MAX_F 31
#define FASTCOVER_MAX_ACCEL 10
#define DEFAULT_SPLITPOINT 0.75
#define DEFAULT_F 20
#define DEFAULT_ACCEL 1


/*-*************************************
* Console display
***************************************/
static int g_displayLevel = 2;
#define DISPLAY(...)                                                           \
  {                                                                            \
    fprintf(stderr, __VA_ARGS__);                                              \
    fflush(stderr);                                                            \
  }
#define LOCALDISPLAYLEVEL(displayLevel, l, ...)                                \
  if (displayLevel >= l) {                                                     \
    DISPLAY(__VA_ARGS__);                                                      \
  } /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */
#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__)

#define LOCALDISPLAYUPDATE(displayLevel, l, ...)                               \
  if (displayLevel >= l) {                                                     \
    if ((clock() - g_time > refreshRate) || (displayLevel >= 4)) {             \
      g_time = clock();                                                        \
      DISPLAY(__VA_ARGS__);                                                    \
    }                                                                          \
  }
#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__)
static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100;
static clock_t g_time = 0;


/*-*************************************
* Hash Functions
***************************************/
static const U64 prime6bytes = 227718039650203ULL;
static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; }
static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }

static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }


/**
 * Hash the d-byte value pointed to by p and mod 2^f
 */
static size_t FASTCOVER_hashPtrToIndex(const void* p, U32 h, unsigned d) {
  if (d == 6) {
    return ZSTD_hash6Ptr(p, h) & (((size_t)1 << h) - 1);   /* (size_t)1 avoids undefined behavior when h == 31 (FASTCOVER_MAX_F) */
  }
  return ZSTD_hash8Ptr(p, h) & (((size_t)1 << h) - 1);
}
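
/*
 * Sizing example (illustrative): the f-bit index above addresses tables of
 * 2^f entries. With the default f = 20, the U32 frequency table allocated in
 * FASTCOVER_ctx_init() occupies (1 << 20) * sizeof(U32) = 4 MiB and the U16
 * segmentFreqs table used during segment selection occupies 2 MiB; at the
 * maximum f = FASTCOVER_MAX_F = 31, the frequency table alone would require
 * 8 GiB.
 */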


/*-*************************************
* Acceleration
***************************************/
typedef struct {
  unsigned finalize;    /* Percentage of training samples used for ZDICT_finalizeDictionary */
  unsigned skip;        /* Number of dmers skipped between each dmer counted in computeFrequency */
} FASTCOVER_accel_t;


static const FASTCOVER_accel_t FASTCOVER_defaultAccelParameters[FASTCOVER_MAX_ACCEL+1] = {
  { 100, 0 },   /* accel = 0, should not happen because accel = 0 defaults to accel = 1 */
  { 100, 0 },   /* accel = 1 */
  { 50, 1 },    /* accel = 2 */
  { 34, 2 },    /* accel = 3 */
  { 25, 3 },    /* accel = 4 */
  { 20, 4 },    /* accel = 5 */
  { 17, 5 },    /* accel = 6 */
  { 14, 6 },    /* accel = 7 */
  { 13, 7 },    /* accel = 8 */
  { 11, 8 },    /* accel = 9 */
  { 10, 9 },    /* accel = 10 */
};


/*-*************************************
* Context
***************************************/
typedef struct {
  const BYTE *samples;
  size_t *offsets;
  const size_t *samplesSizes;
  size_t nbSamples;
  size_t nbTrainSamples;
  size_t nbTestSamples;
  size_t nbDmers;
  U32 *freqs;
  unsigned d;
  unsigned f;
  FASTCOVER_accel_t accelParams;
} FASTCOVER_ctx_t;


/*-*************************************
* Helper functions
***************************************/
/**
 * Selects the best segment in an epoch.
 * Segments are scored according to the function:
 *
 * Let F(d) be the frequency of all dmers with hash value d.
 * Let S_i be the hash value of the dmer at position i of segment S, which has length k.
 *
 * Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
 *
 * where each distinct hash value contributes its frequency at most once per
 * segment (enforced via the segmentFreqs occurrence counts).
 * Once the dmer with hash value d is in the dictionary we set F(d) = 0.
 */
static COVER_segment_t FASTCOVER_selectSegment(const FASTCOVER_ctx_t *ctx,
                                               U32 *freqs, U32 begin, U32 end,
                                               ZDICT_cover_params_t parameters,
                                               U16* segmentFreqs) {
  /* Constants */
  const U32 k = parameters.k;
  const U32 d = parameters.d;
  const U32 f = ctx->f;
  const U32 dmersInK = k - d + 1;

  /* Try each segment (activeSegment) and save the best (bestSegment) */
  COVER_segment_t bestSegment = {0, 0, 0};
  COVER_segment_t activeSegment;

  /* Reset the active segment: it starts empty at the beginning of the epoch. */
  activeSegment.begin = begin;
  activeSegment.end = begin;
  activeSegment.score = 0;

  /* Slide the activeSegment through the whole epoch.
   * Save the best segment in bestSegment.
   */
  while (activeSegment.end < end) {
    /* Get hash value of current dmer */
    const size_t index = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.end, f, d);

    /* Add frequency of this index to score if this is the first occurrence of index in active segment */
    if (segmentFreqs[index] == 0) {
      activeSegment.score += freqs[index];
    }
    /* Increment end of segment and segmentFreqs */
    activeSegment.end += 1;
    segmentFreqs[index] += 1;
    /* If the window is now too large, drop the first position */
    if (activeSegment.end - activeSegment.begin == dmersInK + 1) {
      /* Get hash value of the dmer to be eliminated from active segment */
      const size_t delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d);
      segmentFreqs[delIndex] -= 1;
      /* Subtract frequency of this index from score if this is the last occurrence of this index in active segment */
      if (segmentFreqs[delIndex] == 0) {
        activeSegment.score -= freqs[delIndex];
      }
      /* Increment start of segment */
      activeSegment.begin += 1;
    }

    /* If this segment is the best so far save it */
    if (activeSegment.score > bestSegment.score) {
      bestSegment = activeSegment;
    }
  }

  /* Zero out rest of segmentFreqs array */
  while (activeSegment.begin < end) {
    const size_t delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d);
    segmentFreqs[delIndex] -= 1;
    activeSegment.begin += 1;
  }

  {
    /* Zero the frequency of hash value of each dmer covered by the chosen segment. */
    U32 pos;
    for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
      const size_t i = FASTCOVER_hashPtrToIndex(ctx->samples + pos, f, d);
      freqs[i] = 0;
    }
  }

  return bestSegment;
}
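
/*
 * Worked example (illustrative, with made-up numbers): suppose k = 16 and
 * d = 8, so a segment spans dmersInK = k - d + 1 = 9 dmer positions. If those
 * 9 dmers hash to the indices {a, a, b, c, c, c, d, e, e}, the segment score
 * is F(a) + F(b) + F(c) + F(d) + F(e): each distinct index is counted once,
 * which is exactly what the segmentFreqs occurrence counts enforce as the
 * window slides.
 */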


static int FASTCOVER_checkParameters(ZDICT_cover_params_t parameters,
                                     size_t maxDictSize, unsigned f,
                                     unsigned accel) {
  /* k, d, and f are required parameters */
  if (parameters.d == 0 || parameters.k == 0) {
    return 0;
  }
  /* d has to be 6 or 8 */
  if (parameters.d != 6 && parameters.d != 8) {
    return 0;
  }
  /* k <= maxDictSize */
  if (parameters.k > maxDictSize) {
    return 0;
  }
  /* d <= k */
  if (parameters.d > parameters.k) {
    return 0;
  }
  /* 0 < f <= FASTCOVER_MAX_F */
  if (f > FASTCOVER_MAX_F || f == 0) {
    return 0;
  }
  /* 0 < splitPoint <= 1 */
  if (parameters.splitPoint <= 0 || parameters.splitPoint > 1) {
    return 0;
  }
  /* 0 < accel <= 10 */
  if (accel > 10 || accel == 0) {
    return 0;
  }
  return 1;
}


/**
 * Clean up a context initialized with `FASTCOVER_ctx_init()`.
 */
static void
FASTCOVER_ctx_destroy(FASTCOVER_ctx_t* ctx)
{
    if (!ctx) return;

    free(ctx->freqs);
    ctx->freqs = NULL;

    free(ctx->offsets);
    ctx->offsets = NULL;
}


/**
 * Calculate the frequency of the hash value of each dmer in ctx->samples
 */
static void
FASTCOVER_computeFrequency(U32* freqs, const FASTCOVER_ctx_t* ctx)
{
    const unsigned f = ctx->f;
    const unsigned d = ctx->d;
    const unsigned skip = ctx->accelParams.skip;
    const unsigned readLength = MAX(d, 8);
    size_t i;
    assert(ctx->nbTrainSamples >= 5);
    assert(ctx->nbTrainSamples <= ctx->nbSamples);
    for (i = 0; i < ctx->nbTrainSamples; i++) {
        size_t start = ctx->offsets[i];  /* start of current dmer */
        size_t const currSampleEnd = ctx->offsets[i+1];
        while (start + readLength <= currSampleEnd) {
            const size_t dmerIndex = FASTCOVER_hashPtrToIndex(ctx->samples + start, f, d);
            freqs[dmerIndex]++;
            start = start + skip + 1;
        }
    }
}
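
/*
 * Acceleration example (illustrative): with accel = 1 the table above gives
 * skip = 0, so every dmer position in the training samples is hashed and
 * counted. With accel = 2, skip = 1, so only every other position is counted,
 * roughly halving the work of this pass at some cost in frequency accuracy.
 */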


/**
 * Prepare a context for dictionary building.
 * The context is only dependent on the parameter `d` and can be used multiple
 * times.
 * Returns 1 on success or zero on error.
 * The context must be destroyed with `FASTCOVER_ctx_destroy()`.
 */
static int
FASTCOVER_ctx_init(FASTCOVER_ctx_t* ctx,
                   const void* samplesBuffer,
                   const size_t* samplesSizes, unsigned nbSamples,
                   unsigned d, double splitPoint, unsigned f,
                   FASTCOVER_accel_t accelParams)
{
    const BYTE* const samples = (const BYTE*)samplesBuffer;
    const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples);
    /* Split samples into testing and training sets */
    const unsigned nbTrainSamples = splitPoint < 1.0 ? (unsigned)((double)nbSamples * splitPoint) : nbSamples;
    const unsigned nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples;
    const size_t trainingSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize;
    const size_t testSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize;

    /* Checks */
    if (totalSamplesSize < MAX(d, sizeof(U64)) ||
        totalSamplesSize >= (size_t)FASTCOVER_MAX_SAMPLES_SIZE) {
        DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n",
                     (U32)(totalSamplesSize >> 20), (FASTCOVER_MAX_SAMPLES_SIZE >> 20));
        return 0;
    }

    /* Check if there are at least 5 training samples */
    if (nbTrainSamples < 5) {
        DISPLAYLEVEL(1, "Total number of training samples is %u and is invalid\n", nbTrainSamples);
        return 0;
    }

    /* Check if there is at least one testing sample */
    if (nbTestSamples < 1) {
        DISPLAYLEVEL(1, "Total number of testing samples is %u and is invalid.\n", nbTestSamples);
        return 0;
    }

    /* Zero the context */
    memset(ctx, 0, sizeof(*ctx));
    DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbTrainSamples,
                 (U32)trainingSamplesSize);
    DISPLAYLEVEL(2, "Testing on %u samples of total size %u\n", nbTestSamples,
                 (U32)testSamplesSize);

    ctx->samples = samples;
    ctx->samplesSizes = samplesSizes;
    ctx->nbSamples = nbSamples;
    ctx->nbTrainSamples = nbTrainSamples;
    ctx->nbTestSamples = nbTestSamples;
    ctx->nbDmers = trainingSamplesSize - MAX(d, sizeof(U64)) + 1;
    ctx->d = d;
    ctx->f = f;
    ctx->accelParams = accelParams;

    /* The offsets of each file */
    ctx->offsets = (size_t*)calloc((nbSamples + 1), sizeof(size_t));
    if (ctx->offsets == NULL) {
        DISPLAYLEVEL(1, "Failed to allocate scratch buffers \n");
        FASTCOVER_ctx_destroy(ctx);
        return 0;
    }

    /* Fill offsets from the samplesSizes */
    { U32 i;
      ctx->offsets[0] = 0;
      assert(nbSamples >= 5);
      for (i = 1; i <= nbSamples; ++i) {
          ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1];
      }
    }

    /* Initialize frequency array of size 2^f */
    ctx->freqs = (U32*)calloc(((U64)1 << f), sizeof(U32));
    if (ctx->freqs == NULL) {
        DISPLAYLEVEL(1, "Failed to allocate frequency table \n");
        FASTCOVER_ctx_destroy(ctx);
        return 0;
    }

    DISPLAYLEVEL(2, "Computing frequencies\n");
    FASTCOVER_computeFrequency(ctx->freqs, ctx);

    return 1;
}


/**
 * Given the prepared context build the dictionary.
 */
static size_t
FASTCOVER_buildDictionary(const FASTCOVER_ctx_t* ctx,
                          U32* freqs,
                          void* dictBuffer, size_t dictBufferCapacity,
                          ZDICT_cover_params_t parameters,
                          U16* segmentFreqs)
{
  BYTE *const dict = (BYTE *)dictBuffer;
  size_t tail = dictBufferCapacity;
  /* Divide the data up into epochs of equal size.
   * We will select at least one segment from each epoch.
   */
  const U32 epochs = MAX(1, (U32)(dictBufferCapacity / parameters.k));
  const U32 epochSize = (U32)(ctx->nbDmers / epochs);
  size_t epoch;
  DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n", epochs,
               epochSize);
  /* Loop through the epochs until there are no more segments or the dictionary
   * is full.
   */
  for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs) {
    const U32 epochBegin = (U32)(epoch * epochSize);
    const U32 epochEnd = epochBegin + epochSize;
    size_t segmentSize;
    /* Select a segment */
    COVER_segment_t segment = FASTCOVER_selectSegment(
        ctx, freqs, epochBegin, epochEnd, parameters, segmentFreqs);

    /* If the segment covers no dmers, then we are out of content */
    if (segment.score == 0) {
      break;
    }

    /* Trim the segment if necessary and if it is too small then we are done */
    segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail);
    if (segmentSize < parameters.d) {
      break;
    }

    /* We fill the dictionary from the back to allow the best segments to be
     * referenced with the smallest offsets.
     */
    tail -= segmentSize;
    memcpy(dict + tail, ctx->samples + segment.begin, segmentSize);
    DISPLAYUPDATE(
        2, "\r%u%% ",
        (U32)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity));
  }
  DISPLAYLEVEL(2, "\r%79s\r", "");
  return tail;
}
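
/*
 * Epoch sizing example (illustrative, hypothetical numbers): with a
 * dictBufferCapacity of 112640 bytes (110 KB) and k = 1024, the dictionary is
 * built from MAX(1, 112640 / 1024) = 110 epochs; if the training data holds
 * 10,000,000 dmers, each epoch spans about 90,909 dmer positions, and one
 * best-scoring segment is taken per epoch in round-robin order until the
 * buffer is full or no scoring segment remains.
 */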


/**
 * Parameters for FASTCOVER_tryParameters().
 */
typedef struct FASTCOVER_tryParameters_data_s {
  const FASTCOVER_ctx_t* ctx;
  COVER_best_t* best;
  size_t dictBufferCapacity;
  ZDICT_cover_params_t parameters;
} FASTCOVER_tryParameters_data_t;


/**
 * Tries a set of parameters and updates the COVER_best_t with the results.
 * This function is thread safe if zstd is compiled with multithreaded support.
 * It takes its parameters as an *OWNING* opaque pointer to support threading.
 */
static void FASTCOVER_tryParameters(void *opaque)
{
  /* Save parameters as local variables */
  FASTCOVER_tryParameters_data_t *const data = (FASTCOVER_tryParameters_data_t *)opaque;
  const FASTCOVER_ctx_t *const ctx = data->ctx;
  const ZDICT_cover_params_t parameters = data->parameters;
  size_t dictBufferCapacity = data->dictBufferCapacity;
  size_t totalCompressedSize = ERROR(GENERIC);
  /* Initialize array to keep track of frequency of dmer within activeSegment */
  U16* segmentFreqs = (U16 *)calloc(((U64)1 << ctx->f), sizeof(U16));
  /* Allocate space for hash table, dict, and freqs */
  BYTE *const dict = (BYTE * const)malloc(dictBufferCapacity);
  U32 *freqs = (U32*) malloc(((U64)1 << ctx->f) * sizeof(U32));
  if (!segmentFreqs || !dict || !freqs) {
    DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n");
    goto _cleanup;
  }
  /* Copy the frequencies because we need to modify them */
  memcpy(freqs, ctx->freqs, ((U64)1 << ctx->f) * sizeof(U32));
  /* Build the dictionary */
  { const size_t tail = FASTCOVER_buildDictionary(ctx, freqs, dict, dictBufferCapacity,
                                                  parameters, segmentFreqs);
    const unsigned nbFinalizeSamples = (unsigned)(ctx->nbTrainSamples * ctx->accelParams.finalize / 100);
    dictBufferCapacity = ZDICT_finalizeDictionary(
        dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
        ctx->samples, ctx->samplesSizes, nbFinalizeSamples, parameters.zParams);
    if (ZDICT_isError(dictBufferCapacity)) {
      DISPLAYLEVEL(1, "Failed to finalize dictionary\n");
      goto _cleanup;
    }
  }
  /* Check total compressed size */
  totalCompressedSize = COVER_checkTotalCompressedSize(parameters, ctx->samplesSizes,
                                                       ctx->samples, ctx->offsets,
                                                       ctx->nbTrainSamples, ctx->nbSamples,
                                                       dict, dictBufferCapacity);
_cleanup:
  COVER_best_finish(data->best, totalCompressedSize, parameters, dict,
                    dictBufferCapacity);
  free(data);
  free(segmentFreqs);
  free(dict);
  free(freqs);
}


static void
FASTCOVER_convertToCoverParams(ZDICT_fastCover_params_t fastCoverParams,
                               ZDICT_cover_params_t* coverParams)
{
    coverParams->k = fastCoverParams.k;
    coverParams->d = fastCoverParams.d;
    coverParams->steps = fastCoverParams.steps;
    coverParams->nbThreads = fastCoverParams.nbThreads;
    coverParams->splitPoint = fastCoverParams.splitPoint;
    coverParams->zParams = fastCoverParams.zParams;
}


static void
FASTCOVER_convertToFastCoverParams(ZDICT_cover_params_t coverParams,
                                   ZDICT_fastCover_params_t* fastCoverParams,
                                   unsigned f, unsigned accel)
{
    fastCoverParams->k = coverParams.k;
    fastCoverParams->d = coverParams.d;
    fastCoverParams->steps = coverParams.steps;
    fastCoverParams->nbThreads = coverParams.nbThreads;
    fastCoverParams->splitPoint = coverParams.splitPoint;
    fastCoverParams->f = f;
    fastCoverParams->accel = accel;
    fastCoverParams->zParams = coverParams.zParams;
}


ZDICTLIB_API size_t
ZDICT_trainFromBuffer_fastCover(void* dictBuffer, size_t dictBufferCapacity,
                                const void* samplesBuffer,
                                const size_t* samplesSizes, unsigned nbSamples,
                                ZDICT_fastCover_params_t parameters)
{
    BYTE* const dict = (BYTE*)dictBuffer;
    FASTCOVER_ctx_t ctx;
    ZDICT_cover_params_t coverParams;
    FASTCOVER_accel_t accelParams;
    /* Initialize global data */
    g_displayLevel = parameters.zParams.notificationLevel;
    /* Force splitPoint to 1.0 (no test split); assign f and accel if not provided */
    parameters.splitPoint = 1.0;
    parameters.f = parameters.f == 0 ? DEFAULT_F : parameters.f;
    parameters.accel = parameters.accel == 0 ? DEFAULT_ACCEL : parameters.accel;
    /* Convert to cover parameters */
    memset(&coverParams, 0, sizeof(coverParams));
    FASTCOVER_convertToCoverParams(parameters, &coverParams);
    /* Checks */
    if (!FASTCOVER_checkParameters(coverParams, dictBufferCapacity, parameters.f,
                                   parameters.accel)) {
        DISPLAYLEVEL(1, "FASTCOVER parameters incorrect\n");
        return ERROR(GENERIC);
    }
    if (nbSamples == 0) {
        DISPLAYLEVEL(1, "FASTCOVER must have at least one input file\n");
        return ERROR(GENERIC);
    }
    if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
        DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
                     ZDICT_DICTSIZE_MIN);
        return ERROR(dstSize_tooSmall);
    }
    /* Assign corresponding FASTCOVER_accel_t to accelParams */
    accelParams = FASTCOVER_defaultAccelParameters[parameters.accel];
    /* Initialize context */
    if (!FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples,
                            coverParams.d, parameters.splitPoint, parameters.f,
                            accelParams)) {
        DISPLAYLEVEL(1, "Failed to initialize context\n");
        return ERROR(GENERIC);
    }
    /* Build the dictionary */
    DISPLAYLEVEL(2, "Building dictionary\n");
    {
        /* Initialize array to keep track of frequency of dmer within activeSegment */
        U16* segmentFreqs = (U16 *)calloc(((U64)1 << parameters.f), sizeof(U16));
        const size_t tail = FASTCOVER_buildDictionary(&ctx, ctx.freqs, dictBuffer,
                                                      dictBufferCapacity, coverParams, segmentFreqs);
        const unsigned nbFinalizeSamples = (unsigned)(ctx.nbTrainSamples * ctx.accelParams.finalize / 100);
        const size_t dictionarySize = ZDICT_finalizeDictionary(
            dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
            samplesBuffer, samplesSizes, nbFinalizeSamples, coverParams.zParams);
        if (!ZSTD_isError(dictionarySize)) {
            DISPLAYLEVEL(2, "Constructed dictionary of size %u\n",
                         (U32)dictionarySize);
        }
        FASTCOVER_ctx_destroy(&ctx);
        free(segmentFreqs);
        return dictionarySize;
    }
}
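
/*
 * Usage sketch (illustrative only, not part of the library): a caller with a
 * concatenated sample buffer might invoke the single-shot trainer roughly as
 * below. The buffer names and the chosen k/d values are hypothetical.
 *
 *     ZDICT_fastCover_params_t params;
 *     size_t dictSize;
 *     memset(&params, 0, sizeof(params));
 *     params.k = 200;                       // segment size (required here)
 *     params.d = 8;                         // dmer size, must be 6 or 8
 *     params.f = 0;                         // 0 selects DEFAULT_F (20)
 *     params.accel = 0;                     // 0 selects DEFAULT_ACCEL (1)
 *     dictSize = ZDICT_trainFromBuffer_fastCover(dictBuffer, dictCapacity,
 *                                                samplesBuffer, samplesSizes,
 *                                                nbSamples, params);
 *     if (ZDICT_isError(dictSize)) { ... }  // handle the error
 */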


ZDICTLIB_API size_t
ZDICT_optimizeTrainFromBuffer_fastCover(
                    void* dictBuffer, size_t dictBufferCapacity,
                    const void* samplesBuffer,
                    const size_t* samplesSizes, unsigned nbSamples,
                    ZDICT_fastCover_params_t* parameters)
{
    ZDICT_cover_params_t coverParams;
    FASTCOVER_accel_t accelParams;
    /* constants */
    const unsigned nbThreads = parameters->nbThreads;
    const double splitPoint =
        parameters->splitPoint <= 0.0 ? DEFAULT_SPLITPOINT : parameters->splitPoint;
    const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d;
    const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d;
    const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k;
    const unsigned kMaxK = parameters->k == 0 ? 2000 : parameters->k;
    const unsigned kSteps = parameters->steps == 0 ? 40 : parameters->steps;
    const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1);
    const unsigned kIterations =
        (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);
    const unsigned f = parameters->f == 0 ? DEFAULT_F : parameters->f;
    const unsigned accel = parameters->accel == 0 ? DEFAULT_ACCEL : parameters->accel;
    /* Local variables */
    const int displayLevel = parameters->zParams.notificationLevel;
    unsigned iteration = 1;
    unsigned d;
    unsigned k;
    COVER_best_t best;
    POOL_ctx *pool = NULL;
    /* Checks */
    if (splitPoint <= 0 || splitPoint > 1) {
        LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect splitPoint\n");
        return ERROR(GENERIC);
    }
    if (accel == 0 || accel > FASTCOVER_MAX_ACCEL) {
        LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect accel\n");
        return ERROR(GENERIC);
    }
    if (kMinK < kMaxD || kMaxK < kMinK) {
        LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect k\n");
        return ERROR(GENERIC);
    }
    if (nbSamples == 0) {
        LOCALDISPLAYLEVEL(displayLevel, 1, "FASTCOVER must have at least one input file\n");
        return ERROR(GENERIC);
    }
    if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
        LOCALDISPLAYLEVEL(displayLevel, 1, "dictBufferCapacity must be at least %u\n",
                          ZDICT_DICTSIZE_MIN);
        return ERROR(dstSize_tooSmall);
    }
    if (nbThreads > 1) {
        pool = POOL_create(nbThreads, 1);
        if (!pool) {
            return ERROR(memory_allocation);
        }
    }
    /* Initialization */
    COVER_best_init(&best);
    memset(&coverParams, 0, sizeof(coverParams));
    FASTCOVER_convertToCoverParams(*parameters, &coverParams);
    accelParams = FASTCOVER_defaultAccelParameters[accel];
    /* Turn down global display level to clean up display at level 2 and below */
    g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1;
    /* Loop through d first because each new value needs a new context */
    LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n",
                      kIterations);
    for (d = kMinD; d <= kMaxD; d += 2) {
        /* Initialize the context for this value of d */
        FASTCOVER_ctx_t ctx;
        LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d);
        if (!FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint, f, accelParams)) {
            LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n");
            COVER_best_destroy(&best);
            POOL_free(pool);
            return ERROR(GENERIC);
        }
        /* Loop through k reusing the same context */
        for (k = kMinK; k <= kMaxK; k += kStepSize) {
            /* Prepare the arguments */
            FASTCOVER_tryParameters_data_t *data = (FASTCOVER_tryParameters_data_t *)malloc(
                sizeof(FASTCOVER_tryParameters_data_t));
            LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k);
            if (!data) {
                LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n");
                COVER_best_destroy(&best);
                FASTCOVER_ctx_destroy(&ctx);
                POOL_free(pool);
                return ERROR(GENERIC);
            }
            data->ctx = &ctx;
            data->best = &best;
            data->dictBufferCapacity = dictBufferCapacity;
            data->parameters = coverParams;
            data->parameters.k = k;
            data->parameters.d = d;
            data->parameters.splitPoint = splitPoint;
            data->parameters.steps = kSteps;
            data->parameters.zParams.notificationLevel = g_displayLevel;
            /* Check the parameters */
            if (!FASTCOVER_checkParameters(data->parameters, dictBufferCapacity,
                                           data->ctx->f, accel)) {
                DISPLAYLEVEL(1, "FASTCOVER parameters incorrect\n");
                free(data);
                continue;
            }
            /* Call the function and pass ownership of data to it */
            COVER_best_start(&best);
            if (pool) {
                POOL_add(pool, &FASTCOVER_tryParameters, data);
            } else {
                FASTCOVER_tryParameters(data);
            }
            /* Print status */
            LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%% ",
                               (U32)((iteration * 100) / kIterations));
            ++iteration;
        }
        COVER_best_wait(&best);
        FASTCOVER_ctx_destroy(&ctx);
    }
    LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", "");
    /* Fill the output buffer and parameters with output of the best parameters */
    {
        const size_t dictSize = best.dictSize;
        if (ZSTD_isError(best.compressedSize)) {
            const size_t compressedSize = best.compressedSize;
            COVER_best_destroy(&best);
            POOL_free(pool);
            return compressedSize;
        }
        FASTCOVER_convertToFastCoverParams(best.parameters, parameters, f, accel);
        memcpy(dictBuffer, best.dict, dictSize);
        COVER_best_destroy(&best);
        POOL_free(pool);
        return dictSize;
    }

}
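
/*
 * Usage sketch (illustrative only, not part of the library): to let fastCover
 * search for k and d automatically, a caller can leave them at 0 and read the
 * winning values back from the params struct afterwards. Buffer names below
 * are hypothetical.
 *
 *     ZDICT_fastCover_params_t params;
 *     size_t dictSize;
 *     memset(&params, 0, sizeof(params));   // k = d = 0 => search the default ranges
 *     params.steps = 40;                    // number of k values to try
 *     params.nbThreads = 4;                 // >1 only effective in multithreaded builds
 *     dictSize = ZDICT_optimizeTrainFromBuffer_fastCover(dictBuffer, dictCapacity,
 *                                                        samplesBuffer, samplesSizes,
 *                                                        nbSamples, &params);
 *     if (!ZDICT_isError(dictSize)) {
 *         // params.k and params.d now hold the best parameters found;
 *         // params.f and params.accel echo the values that were used.
 *     }
 */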