*
* A little about how it works, and probability theory:
*
- * When given an identifier (which we will denote I), we're essentially
+ * When given an identifier (which we will denote I), we're essentially
* just trying to choose the most likely correction for that identifier.
* (the actual "correction" can very well be the identifier itself).
 * There is actually no way to know for sure that certain identifiers
 * out of all possible corrections that maximizes the probability of C
 * for the original identifier I.
*
- * Thankfully there exists some theroies for probalistic interpretations
+ * Thankfully there exists some theories for probabilistic interpretations
 * of data. Since we're operating on two distinctive interpretations, the
* transposition from I to C. We need something that can express how much
* degree of I should rationally change to become C. this is called the
* AC P(I|C) P(C) / P(I)
*
* However since P(I) is the same for every possibility of I, we can
- * complete ignore it giving just:
+ * completely ignore it giving just:
* AC P(I|C) P(C)
*
* This greatly helps visualize how the parts of the expression are performed
* enumerates all feasible values of C, to determine the one that
* gives the greatest probability score.
*
- * In reality the requirement for a more complex expression involving
+ * In reality the requirement for a more complex expression involving
 * two separate models is considerably a waste. But one must recognize
* that P(C|I) is already conflating two factors. It's just much simpler
 * to separate the two models and deal with them explicitly. To properly
*
* A little information on additional algorithms used:
*
- * Initially when I implemented this corrector, it was very slow.
+ * Initially when I implemented this corrector, it was very slow.
* Need I remind you this is essentially a brute force attack on strings,
* and since every transformation requires dynamic memory allocations,
* you can easily imagine where most of the runtime conflated. Yes
* shock to me. A forward allocator (or as some call it a bump-point
* allocator, or just a memory pool) was implemented. To combat this.
*
- * But of course even other factors were making it slow. Initially
+ * But of course even other factors were making it slow. Initially
* this used a hashtable. And hashtables have a good constant lookup
* time complexity. But the problem wasn't in the hashtable, it was
* in the hashing (despite having one of the fastest hash functions
*
* Future Work (If we really need it)
*
- * Currently we can only distinguishes one source of error in the
+ * Currently we can only distinguish one source of error in the
* language model we use. This could become an issue for identifiers
* that have close colliding rates, e.g colate->coat yields collate.
*
- * Currently the error model has been fairly trivial, the smaller the
+ * Currently the error model has been fairly trivial, the smaller the
* edit distance the smaller the error. This usually causes some un-
* expected problems. e.g reciet->recite yields recipt. For QuakeC
* this could become a problem when lots of identifiers are involved.
*
- * Our control mechanisim could use a limit, i.e limit the number of
+ * Our control mechanism could use a limit, i.e. limit the number of
* sets of edits for distance X. This would also increase execution
* speed considerably.
- *
*/
-#define CORRECT_POOLSIZE (128*1024*1024)
+#define CORRECT_POOL_SIZE (128*1024*1024)
/*
 * A forward allocator for the corrector. This corrector requires a lot
* of allocations. This forward allocator combats all those allocations
static GMQCC_INLINE void correct_pool_new(void) {
correct_pool_addr = 0;
- correct_pool_this = (unsigned char *)mem_a(CORRECT_POOLSIZE);
+ correct_pool_this = (unsigned char *)mem_a(CORRECT_POOL_SIZE);
vec_push(correct_pool_data, correct_pool_this);
}
static GMQCC_INLINE void *correct_pool_alloc(size_t bytes) {
void *data;
- if (correct_pool_addr + bytes >= CORRECT_POOLSIZE)
+ if (correct_pool_addr + bytes>= CORRECT_POOL_SIZE)
correct_pool_new();
- data = correct_pool_this;
+ data = (void*)correct_pool_this;
correct_pool_this += bytes;
correct_pool_addr += bytes;
-
return data;
}
void* correct_trie_get(const correct_trie_t *t, const char *key) {
const unsigned char *data = (const unsigned char*)key;
+
while (*data) {
- unsigned char ch = *data;
- const size_t vs = vec_size(t->entries);
- size_t i;
const correct_trie_t *entries = t->entries;
+ unsigned char ch = *data;
+ const size_t vs = vec_size(entries);
+ size_t i;
+
for (i = 0; i < vs; ++i) {
if (entries[i].ch == ch) {
t = &entries[i];
void correct_trie_set(correct_trie_t *t, const char *key, void * const value) {
const unsigned char *data = (const unsigned char*)key;
while (*data) {
- const size_t vs = vec_size(t->entries);
- unsigned char ch = *data;
correct_trie_t *entries = t->entries;
+ const size_t vs = vec_size(entries);
+ unsigned char ch = *data;
size_t i;
for (i = 0; i < vs; ++i) {
 * Implementation of the corrector algorithm commences. A very efficient
* brute-force attack (thanks to tries and mempool :-)).
*/
-static size_t *correct_find(correct_trie_t *table, const char *word) {
+static GMQCC_INLINE size_t *correct_find(correct_trie_t *table, const char *word) {
return (size_t*)correct_trie_get(table, word);
}
-static int correct_update(correct_trie_t* *table, const char *word) {
+static GMQCC_INLINE bool correct_update(correct_trie_t* *table, const char *word) {
size_t *data = correct_find(*table, word);
if (!data)
- return 0;
+ return false;
(*data)++;
- return 1;
+ return true;
}
void correct_add(correct_trie_t* table, size_t ***size, const char *ident) {
*/
static int correct_exist(char **array, size_t rows, char *ident) {
size_t itr;
- for (itr = 0; itr < rows; itr++)
+ /*
+ * As an experiment I tried the following assembly for memcmp here:
+ *
+ * correct_cmp_loop:
+ * incl %eax ; eax = LHS
+ * incl %edx ; edx = RHS
+ * cmpl %eax, %ebx ; ebx = &LHS[END_POS]
+ *
+ * jbe correct_cmp_eq
+ * movb (%edx), %cl ; micro-optimized on even atoms :-)
+ * cmpb %cl, (%eax) ; ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ * jg correct_cmp_gt
+ * jge correct_cmp_loop
+ * ...
+ *
+ * Despite how much optimization went in to this, the speed was the
+ * being conflicted by the strlen(ident) used for &LHS[END_POS]
+ * If we could eliminate the strlen with what I suggested on line
+ * 311 ... we can accelerate this whole damn thing quite a bit.
+ *
+ * However there is still something we can do here that does give
+ * us a little more speed. Although one more branch, we know for
+ * sure there is at least one byte to compare, if that one byte
+ * simply isn't the same we can skip the full check. Which means
+ * we skip a whole strlen call.
+ */
+ for (itr = 0; itr < rows; itr++) {
if (!memcmp(array[itr], ident, strlen(ident)))
return 1;
+ }
return 0;
}
static GMQCC_INLINE char **correct_known_resize(char **res, size_t *allocated, size_t size) {
size_t oldallocated = *allocated;
char **out;
- if (size+1 < *allocated)
+ if (size+1 < oldallocated)
return res;
- *allocated += 32;
- out = correct_pool_alloc(sizeof(*res) * *allocated);
+ out = correct_pool_alloc(sizeof(*res) * oldallocated + 32);
memcpy(out, res, sizeof(*res) * oldallocated);
+
+ *allocated += 32;
return out;
}