| /* fts2 has a design flaw which can lead to database corruption (see |
| ** below). It is recommended not to use it any longer, instead use |
| ** fts3 (or higher). If you believe that your use of fts2 is safe, |
| ** add -DSQLITE_ENABLE_BROKEN_FTS2=1 to your CFLAGS. |
| */ |
| #if (!defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2)) \ |
| && !defined(SQLITE_ENABLE_BROKEN_FTS2) |
| #error fts2 has a design flaw and has been deprecated. |
| #endif |
| /* The flaw is that fts2 uses the content table's unaliased rowid as |
| ** the unique docid. fts2 embeds the rowid in the index it builds, |
| ** and expects the rowid to not change. The SQLite VACUUM operation |
| ** will renumber such rowids, thereby breaking fts2. If you are using |
| ** fts2 in a system which has disabled VACUUM, then you can continue |
| ** to use it safely. Note that PRAGMA auto_vacuum does NOT disable |
| ** VACUUM, though systems using auto_vacuum are unlikely to invoke |
| ** VACUUM. |
| ** |
| ** Unlike fts1, which is safe across VACUUM if you never delete |
| ** documents, fts2 has a second exposure to this flaw, in the segments |
| ** table. So fts2 should be considered unsafe across VACUUM in all |
| ** cases. |
| */ |
| |
| /* |
| ** 2006 Oct 10 |
| ** |
| ** The author disclaims copyright to this source code. In place of |
| ** a legal notice, here is a blessing: |
| ** |
| ** May you do good and not evil. |
| ** May you find forgiveness for yourself and forgive others. |
| ** May you share freely, never taking more than you give. |
| ** |
| ****************************************************************************** |
| ** |
| ** This is an SQLite module implementing full-text search. |
| */ |
| |
| /* |
| ** The code in this file is only compiled if: |
| ** |
| ** * The FTS2 module is being built as an extension |
| ** (in which case SQLITE_CORE is not defined), or |
| ** |
| ** * The FTS2 module is being built into the core of |
| ** SQLite (in which case SQLITE_ENABLE_FTS2 is defined). |
| */ |
| |
| /* TODO(shess) Consider exporting this comment to an HTML file or the |
| ** wiki. |
| */ |
| /* The full-text index is stored in a series of b+tree (-like) |
| ** structures called segments which map terms to doclists. The |
| ** structures are like b+trees in layout, but are constructed from the |
| ** bottom up in optimal fashion and are not updatable. Since trees |
| ** are built from the bottom up, things will be described from the |
| ** bottom up. |
| ** |
| ** |
| **** Varints **** |
| ** The basic unit of encoding is a variable-length integer called a |
| ** varint. We encode variable-length integers in little-endian order |
** using seven bits per byte as follows:
| ** |
| ** KEY: |
| ** A = 0xxxxxxx 7 bits of data and one flag bit |
| ** B = 1xxxxxxx 7 bits of data and one flag bit |
| ** |
| ** 7 bits - A |
| ** 14 bits - BA |
| ** 21 bits - BBA |
| ** and so on. |
| ** |
** This is similar to how sqlite encodes varints (see util.c), though
** sqlite's own varints are big-endian rather than little-endian.
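**
** For example (illustrative), the value 300 (0x12C) is encoded as the
** two bytes 0xAC 0x02: the low seven bits (0x2C) with the flag bit set,
** then the remaining bits (0x02) with the flag bit clear.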
| ** |
| ** |
| **** Document lists **** |
| ** A doclist (document list) holds a docid-sorted list of hits for a |
| ** given term. Doclists hold docids, and can optionally associate |
| ** token positions and offsets with docids. |
| ** |
| ** A DL_POSITIONS_OFFSETS doclist is stored like this: |
| ** |
| ** array { |
| ** varint docid; |
| ** array { (position list for column 0) |
| ** varint position; (delta from previous position plus POS_BASE) |
| ** varint startOffset; (delta from previous startOffset) |
| ** varint endOffset; (delta from startOffset) |
| ** } |
| ** array { |
| ** varint POS_COLUMN; (marks start of position list for new column) |
| ** varint column; (index of new column) |
| ** array { |
| ** varint position; (delta from previous position plus POS_BASE) |
| ** varint startOffset;(delta from previous startOffset) |
| ** varint endOffset; (delta from startOffset) |
| ** } |
| ** } |
| ** varint POS_END; (marks end of positions for this document. |
| ** } |
| ** |
| ** Here, array { X } means zero or more occurrences of X, adjacent in |
| ** memory. A "position" is an index of a token in the token stream |
| ** generated by the tokenizer, while an "offset" is a byte offset, |
| ** both based at 0. Note that POS_END and POS_COLUMN occur in the |
** same logical place as the position element, and act as sentinels
| ** ending a position list array. |
| ** |
| ** A DL_POSITIONS doclist omits the startOffset and endOffset |
| ** information. A DL_DOCIDS doclist omits both the position and |
| ** offset information, becoming an array of varint-encoded docids. |
| ** |
| ** On-disk data is stored as type DL_DEFAULT, so we don't serialize |
| ** the type. Due to how deletion is implemented in the segmentation |
| ** system, on-disk doclists MUST store at least positions. |
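**
** For example (illustrative), a DL_POSITIONS doclist for docid 5 with
** hits at token positions 1 and 4 of column 0 is the varint sequence
** 5, 3, 5, 0: the docid, then 1-0+POS_BASE, then 4-1+POS_BASE, then
** POS_END.  Docids after the first in a doclist are encoded as deltas
** from the preceding docid.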
| ** |
| ** |
| **** Segment leaf nodes **** |
| ** Segment leaf nodes store terms and doclists, ordered by term. Leaf |
| ** nodes are written using LeafWriter, and read using LeafReader (to |
| ** iterate through a single leaf node's data) and LeavesReader (to |
| ** iterate through a segment's entire leaf layer). Leaf nodes have |
| ** the format: |
| ** |
| ** varint iHeight; (height from leaf level, always 0) |
| ** varint nTerm; (length of first term) |
| ** char pTerm[nTerm]; (content of first term) |
| ** varint nDoclist; (length of term's associated doclist) |
| ** char pDoclist[nDoclist]; (content of doclist) |
| ** array { |
| ** (further terms are delta-encoded) |
| ** varint nPrefix; (length of prefix shared with previous term) |
| ** varint nSuffix; (length of unshared suffix) |
| ** char pTermSuffix[nSuffix];(unshared suffix of next term) |
| ** varint nDoclist; (length of term's associated doclist) |
| ** char pDoclist[nDoclist]; (content of doclist) |
| ** } |
| ** |
| ** Here, array { X } means zero or more occurrences of X, adjacent in |
| ** memory. |
| ** |
| ** Leaf nodes are broken into blocks which are stored contiguously in |
| ** the %_segments table in sorted order. This means that when the end |
| ** of a node is reached, the next term is in the node with the next |
| ** greater node id. |
| ** |
| ** New data is spilled to a new leaf node when the current node |
| ** exceeds LEAF_MAX bytes (default 2048). New data which itself is |
| ** larger than STANDALONE_MIN (default 1024) is placed in a standalone |
| ** node (a leaf node with a single term and doclist). The goal of |
| ** these settings is to pack together groups of small doclists while |
| ** making it efficient to directly access large doclists. The |
| ** assumption is that large doclists represent terms which are more |
| ** likely to be query targets. |
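**
** For example (illustrative), if the previous term is "linear" and the
** next term is "linux", the next term is stored as nPrefix=3,
** nSuffix=2, pTermSuffix="ux".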
| ** |
| ** TODO(shess) It may be useful for blocking decisions to be more |
| ** dynamic. For instance, it may make more sense to have a 2.5k leaf |
| ** node rather than splitting into 2k and .5k nodes. My intuition is |
| ** that this might extend through 2x or 4x the pagesize. |
| ** |
| ** |
| **** Segment interior nodes **** |
| ** Segment interior nodes store blockids for subtree nodes and terms |
** to describe what data is stored by each subtree.  Interior
| ** nodes are written using InteriorWriter, and read using |
| ** InteriorReader. InteriorWriters are created as needed when |
| ** SegmentWriter creates new leaf nodes, or when an interior node |
| ** itself grows too big and must be split. The format of interior |
| ** nodes: |
| ** |
| ** varint iHeight; (height from leaf level, always >0) |
| ** varint iBlockid; (block id of node's leftmost subtree) |
| ** optional { |
| ** varint nTerm; (length of first term) |
| ** char pTerm[nTerm]; (content of first term) |
| ** array { |
| ** (further terms are delta-encoded) |
| ** varint nPrefix; (length of shared prefix with previous term) |
| ** varint nSuffix; (length of unshared suffix) |
| ** char pTermSuffix[nSuffix]; (unshared suffix of next term) |
| ** } |
| ** } |
| ** |
| ** Here, optional { X } means an optional element, while array { X } |
| ** means zero or more occurrences of X, adjacent in memory. |
| ** |
| ** An interior node encodes n terms separating n+1 subtrees. The |
| ** subtree blocks are contiguous, so only the first subtree's blockid |
| ** is encoded. The subtree at iBlockid will contain all terms less |
| ** than the first term encoded (or all terms if no term is encoded). |
| ** Otherwise, for terms greater than or equal to pTerm[i] but less |
| ** than pTerm[i+1], the subtree for that term will be rooted at |
| ** iBlockid+i. Interior nodes only store enough term data to |
| ** distinguish adjacent children (if the rightmost term of the left |
| ** child is "something", and the leftmost term of the right child is |
| ** "wicked", only "w" is stored). |
| ** |
| ** New data is spilled to a new interior node at the same height when |
| ** the current node exceeds INTERIOR_MAX bytes (default 2048). |
| ** INTERIOR_MIN_TERMS (default 7) keeps large terms from monopolizing |
| ** interior nodes and making the tree too skinny. The interior nodes |
| ** at a given height are naturally tracked by interior nodes at |
| ** height+1, and so on. |
| ** |
| ** |
| **** Segment directory **** |
| ** The segment directory in table %_segdir stores meta-information for |
| ** merging and deleting segments, and also the root node of the |
| ** segment's tree. |
| ** |
| ** The root node is the top node of the segment's tree after encoding |
| ** the entire segment, restricted to ROOT_MAX bytes (default 1024). |
| ** This could be either a leaf node or an interior node. If the top |
| ** node requires more than ROOT_MAX bytes, it is flushed to %_segments |
| ** and a new root interior node is generated (which should always fit |
| ** within ROOT_MAX because it only needs space for 2 varints, the |
| ** height and the blockid of the previous root). |
| ** |
| ** The meta-information in the segment directory is: |
| ** level - segment level (see below) |
| ** idx - index within level |
| ** - (level,idx uniquely identify a segment) |
| ** start_block - first leaf node |
| ** leaves_end_block - last leaf node |
| ** end_block - last block (including interior nodes) |
| ** root - contents of root node |
| ** |
| ** If the root node is a leaf node, then start_block, |
| ** leaves_end_block, and end_block are all 0. |
| ** |
| ** |
| **** Segment merging **** |
** To amortize update costs, segments are grouped into levels and
** merged in batches.  Each increase in level represents exponentially
| ** more documents. |
| ** |
| ** New documents (actually, document updates) are tokenized and |
| ** written individually (using LeafWriter) to a level 0 segment, with |
| ** incrementing idx. When idx reaches MERGE_COUNT (default 16), all |
| ** level 0 segments are merged into a single level 1 segment. Level 1 |
| ** is populated like level 0, and eventually MERGE_COUNT level 1 |
| ** segments are merged to a single level 2 segment (representing |
| ** MERGE_COUNT^2 updates), and so on. |
| ** |
| ** A segment merge traverses all segments at a given level in |
| ** parallel, performing a straightforward sorted merge. Since segment |
** leaf nodes are written into the %_segments table in order, this
| ** merge traverses the underlying sqlite disk structures efficiently. |
| ** After the merge, all segment blocks from the merged level are |
| ** deleted. |
| ** |
| ** MERGE_COUNT controls how often we merge segments. 16 seems to be |
| ** somewhat of a sweet spot for insertion performance. 32 and 64 show |
| ** very similar performance numbers to 16 on insertion, though they're |
| ** a tiny bit slower (perhaps due to more overhead in merge-time |
| ** sorting). 8 is about 20% slower than 16, 4 about 50% slower than |
| ** 16, 2 about 66% slower than 16. |
| ** |
| ** At query time, high MERGE_COUNT increases the number of segments |
| ** which need to be scanned and merged. For instance, with 100k docs |
| ** inserted: |
| ** |
| ** MERGE_COUNT segments |
| ** 16 25 |
| ** 8 12 |
| ** 4 10 |
| ** 2 6 |
| ** |
| ** This appears to have only a moderate impact on queries for very |
| ** frequent terms (which are somewhat dominated by segment merge |
| ** costs), and infrequent and non-existent terms still seem to be fast |
| ** even with many segments. |
| ** |
| ** TODO(shess) That said, it would be nice to have a better query-side |
| ** argument for MERGE_COUNT of 16. Also, it is possible/likely that |
| ** optimizations to things like doclist merging will swing the sweet |
| ** spot around. |
| ** |
| ** |
| ** |
| **** Handling of deletions and updates **** |
| ** Since we're using a segmented structure, with no docid-oriented |
| ** index into the term index, we clearly cannot simply update the term |
| ** index when a document is deleted or updated. For deletions, we |
| ** write an empty doclist (varint(docid) varint(POS_END)), for updates |
| ** we simply write the new doclist. Segment merges overwrite older |
| ** data for a particular docid with newer data, so deletes or updates |
| ** will eventually overtake the earlier data and knock it out. The |
| ** query logic likewise merges doclists so that newer data knocks out |
| ** older data. |
| ** |
| ** TODO(shess) Provide a VACUUM type operation to clear out all |
| ** deletions and duplications. This would basically be a forced merge |
| ** into a single segment. |
| */ |
| |
| #if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) |
| |
| #if defined(SQLITE_ENABLE_FTS2) && !defined(SQLITE_CORE) |
| # define SQLITE_CORE 1 |
| #endif |
| |
| #include <assert.h> |
| #include <stdlib.h> |
| #include <stdio.h> |
| #include <string.h> |
| #include "fts2.h" |
| #include "fts2_hash.h" |
| #include "fts2_tokenizer.h" |
| #include "sqlite3.h" |
| #include "sqlite3ext.h" |
| SQLITE_EXTENSION_INIT1 |
| |
| |
| /* TODO(shess) MAN, this thing needs some refactoring. At minimum, it |
| ** would be nice to order the file better, perhaps something along the |
| ** lines of: |
| ** |
| ** - utility functions |
| ** - table setup functions |
| ** - table update functions |
| ** - table query functions |
| ** |
| ** Put the query functions last because they're likely to reference |
| ** typedefs or functions from the table update section. |
| */ |
| |
| #if 0 |
| # define TRACE(A) printf A; fflush(stdout) |
| #else |
| # define TRACE(A) |
| #endif |
| |
| /* It is not safe to call isspace(), tolower(), or isalnum() on |
| ** hi-bit-set characters. This is the same solution used in the |
| ** tokenizer. |
| */ |
| /* TODO(shess) The snippet-generation code should be using the |
| ** tokenizer-generated tokens rather than doing its own local |
| ** tokenization. |
| */ |
| /* TODO(shess) Is __isascii() a portable version of (c&0x80)==0? */ |
| static int safe_isspace(char c){ |
| return c==' ' || c=='\t' || c=='\n' || c=='\r' || c=='\v' || c=='\f'; |
| } |
| static int safe_tolower(char c){ |
| return (c>='A' && c<='Z') ? (c - 'A' + 'a') : c; |
| } |
| static int safe_isalnum(char c){ |
| return (c>='0' && c<='9') || (c>='A' && c<='Z') || (c>='a' && c<='z'); |
| } |
| |
| typedef enum DocListType { |
| DL_DOCIDS, /* docids only */ |
| DL_POSITIONS, /* docids + positions */ |
| DL_POSITIONS_OFFSETS /* docids + positions + offsets */ |
| } DocListType; |
| |
| /* |
| ** By default, only positions and not offsets are stored in the doclists. |
| ** To change this so that offsets are stored too, compile with |
| ** |
| ** -DDL_DEFAULT=DL_POSITIONS_OFFSETS |
| ** |
| ** If DL_DEFAULT is set to DL_DOCIDS, your table can only be inserted |
| ** into (no deletes or updates). |
| */ |
| #ifndef DL_DEFAULT |
| # define DL_DEFAULT DL_POSITIONS |
| #endif |
| |
| enum { |
| POS_END = 0, /* end of this position list */ |
| POS_COLUMN, /* followed by new column number */ |
| POS_BASE |
| }; |
| |
| /* MERGE_COUNT controls how often we merge segments (see comment at |
| ** top of file). |
| */ |
| #define MERGE_COUNT 16 |
| |
| /* utility functions */ |
| |
| /* CLEAR() and SCRAMBLE() abstract memset() on a pointer to a single |
| ** record to prevent errors of the form: |
| ** |
| ** my_function(SomeType *b){ |
| ** memset(b, '\0', sizeof(b)); // sizeof(b)!=sizeof(*b) |
| ** } |
| */ |
| /* TODO(shess) Obvious candidates for a header file. */ |
| #define CLEAR(b) memset(b, '\0', sizeof(*(b))) |
| |
| #ifndef NDEBUG |
| # define SCRAMBLE(b) memset(b, 0x55, sizeof(*(b))) |
| #else |
| # define SCRAMBLE(b) |
| #endif |
| |
| /* We may need up to VARINT_MAX bytes to store an encoded 64-bit integer. */ |
| #define VARINT_MAX 10 |
| |
| /* Write a 64-bit variable-length integer to memory starting at p[0]. |
| * The length of data written will be between 1 and VARINT_MAX bytes. |
| * The number of bytes written is returned. */ |
| static int putVarint(char *p, sqlite_int64 v){ |
| unsigned char *q = (unsigned char *) p; |
| sqlite_uint64 vu = v; |
| do{ |
| *q++ = (unsigned char) ((vu & 0x7f) | 0x80); |
| vu >>= 7; |
| }while( vu!=0 ); |
| q[-1] &= 0x7f; /* turn off high bit in final byte */ |
| assert( q - (unsigned char *)p <= VARINT_MAX ); |
| return (int) (q - (unsigned char *)p); |
| } |
| |
| /* Read a 64-bit variable-length integer from memory starting at p[0]. |
| * Return the number of bytes read, or 0 on error. |
| * The value is stored in *v. */ |
| static int getVarint(const char *p, sqlite_int64 *v){ |
| const unsigned char *q = (const unsigned char *) p; |
| sqlite_uint64 x = 0, y = 1; |
| while( (*q & 0x80) == 0x80 ){ |
| x += y * (*q++ & 0x7f); |
| y <<= 7; |
| if( q - (unsigned char *)p >= VARINT_MAX ){ /* bad data */ |
| assert( 0 ); |
| return 0; |
| } |
| } |
| x += y * (*q++); |
| *v = (sqlite_int64) x; |
| return (int) (q - (unsigned char *)p); |
| } |
| |
| static int getVarint32(const char *p, int *pi){ |
| sqlite_int64 i; |
| int ret = getVarint(p, &i); |
| *pi = (int) i; |
| assert( *pi==i ); |
| return ret; |
| } |
| |
| /*******************************************************************/ |
| /* DataBuffer is used to collect data into a buffer in piecemeal |
| ** fashion. It implements the usual distinction between amount of |
| ** data currently stored (nData) and buffer capacity (nCapacity). |
| ** |
| ** dataBufferInit - create a buffer with given initial capacity. |
| ** dataBufferReset - forget buffer's data, retaining capacity. |
| ** dataBufferDestroy - free buffer's data. |
| ** dataBufferSwap - swap contents of two buffers. |
| ** dataBufferExpand - expand capacity without adding data. |
| ** dataBufferAppend - append data. |
| ** dataBufferAppend2 - append two pieces of data at once. |
| ** dataBufferReplace - replace buffer's data. |
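**
** Expected usage is something like (illustrative):
**
**   DataBuffer buf;
**   dataBufferInit(&buf, 0);
**   dataBufferAppend(&buf, "abc", 3);
**   // buf.pData now holds 3 bytes, buf.nData==3.
**   dataBufferDestroy(&buf);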
| */ |
| typedef struct DataBuffer { |
| char *pData; /* Pointer to malloc'ed buffer. */ |
| int nCapacity; /* Size of pData buffer. */ |
| int nData; /* End of data loaded into pData. */ |
| } DataBuffer; |
| |
| static void dataBufferInit(DataBuffer *pBuffer, int nCapacity){ |
| assert( nCapacity>=0 ); |
| pBuffer->nData = 0; |
| pBuffer->nCapacity = nCapacity; |
| pBuffer->pData = nCapacity==0 ? NULL : sqlite3_malloc(nCapacity); |
| } |
| static void dataBufferReset(DataBuffer *pBuffer){ |
| pBuffer->nData = 0; |
| } |
| static void dataBufferDestroy(DataBuffer *pBuffer){ |
| if( pBuffer->pData!=NULL ) sqlite3_free(pBuffer->pData); |
| SCRAMBLE(pBuffer); |
| } |
| static void dataBufferSwap(DataBuffer *pBuffer1, DataBuffer *pBuffer2){ |
| DataBuffer tmp = *pBuffer1; |
| *pBuffer1 = *pBuffer2; |
| *pBuffer2 = tmp; |
| } |
| static void dataBufferExpand(DataBuffer *pBuffer, int nAddCapacity){ |
| assert( nAddCapacity>0 ); |
| /* TODO(shess) Consider expanding more aggressively. Note that the |
| ** underlying malloc implementation may take care of such things for |
| ** us already. |
| */ |
| if( pBuffer->nData+nAddCapacity>pBuffer->nCapacity ){ |
| pBuffer->nCapacity = pBuffer->nData+nAddCapacity; |
| pBuffer->pData = sqlite3_realloc(pBuffer->pData, pBuffer->nCapacity); |
| } |
| } |
| static void dataBufferAppend(DataBuffer *pBuffer, |
| const char *pSource, int nSource){ |
| assert( nSource>0 && pSource!=NULL ); |
| dataBufferExpand(pBuffer, nSource); |
| memcpy(pBuffer->pData+pBuffer->nData, pSource, nSource); |
| pBuffer->nData += nSource; |
| } |
| static void dataBufferAppend2(DataBuffer *pBuffer, |
| const char *pSource1, int nSource1, |
| const char *pSource2, int nSource2){ |
| assert( nSource1>0 && pSource1!=NULL ); |
| assert( nSource2>0 && pSource2!=NULL ); |
| dataBufferExpand(pBuffer, nSource1+nSource2); |
| memcpy(pBuffer->pData+pBuffer->nData, pSource1, nSource1); |
| memcpy(pBuffer->pData+pBuffer->nData+nSource1, pSource2, nSource2); |
| pBuffer->nData += nSource1+nSource2; |
| } |
| static void dataBufferReplace(DataBuffer *pBuffer, |
| const char *pSource, int nSource){ |
| dataBufferReset(pBuffer); |
| dataBufferAppend(pBuffer, pSource, nSource); |
| } |
| |
| /* StringBuffer is a null-terminated version of DataBuffer. */ |
| typedef struct StringBuffer { |
| DataBuffer b; /* Includes null terminator. */ |
| } StringBuffer; |
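
/* Expected usage is something like (illustrative):
**
**   StringBuffer sb;
**   initStringBuffer(&sb);
**   append(&sb, "hello ");
**   append(&sb, "world");
**   // stringBufferData(&sb) is "hello world"; stringBufferLength(&sb)==11.
**   stringBufferDestroy(&sb);
*/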
| |
| static void initStringBuffer(StringBuffer *sb){ |
| dataBufferInit(&sb->b, 100); |
| dataBufferReplace(&sb->b, "", 1); |
| } |
| static int stringBufferLength(StringBuffer *sb){ |
| return sb->b.nData-1; |
| } |
| static char *stringBufferData(StringBuffer *sb){ |
| return sb->b.pData; |
| } |
| static void stringBufferDestroy(StringBuffer *sb){ |
| dataBufferDestroy(&sb->b); |
| } |
| |
| static void nappend(StringBuffer *sb, const char *zFrom, int nFrom){ |
| assert( sb->b.nData>0 ); |
| if( nFrom>0 ){ |
| sb->b.nData--; |
| dataBufferAppend2(&sb->b, zFrom, nFrom, "", 1); |
| } |
| } |
| static void append(StringBuffer *sb, const char *zFrom){ |
| nappend(sb, zFrom, strlen(zFrom)); |
| } |
| |
| /* Append a list of strings separated by commas. */ |
| static void appendList(StringBuffer *sb, int nString, char **azString){ |
| int i; |
| for(i=0; i<nString; ++i){ |
| if( i>0 ) append(sb, ", "); |
| append(sb, azString[i]); |
| } |
| } |
| |
| static int endsInWhiteSpace(StringBuffer *p){ |
| return stringBufferLength(p)>0 && |
| safe_isspace(stringBufferData(p)[stringBufferLength(p)-1]); |
| } |
| |
| /* If the StringBuffer ends in something other than white space, add a |
| ** single space character to the end. |
| */ |
| static void appendWhiteSpace(StringBuffer *p){ |
| if( stringBufferLength(p)==0 ) return; |
| if( !endsInWhiteSpace(p) ) append(p, " "); |
| } |
| |
| /* Remove white space from the end of the StringBuffer */ |
| static void trimWhiteSpace(StringBuffer *p){ |
| while( endsInWhiteSpace(p) ){ |
| p->b.pData[--p->b.nData-1] = '\0'; |
| } |
| } |
| |
| /*******************************************************************/ |
| /* DLReader is used to read document elements from a doclist. The |
| ** current docid is cached, so dlrDocid() is fast. DLReader does not |
| ** own the doclist buffer. |
| ** |
| ** dlrAtEnd - true if there's no more data to read. |
| ** dlrDocid - docid of current document. |
| ** dlrDocData - doclist data for current document (including docid). |
| ** dlrDocDataBytes - length of same. |
| ** dlrAllDataBytes - length of all remaining data. |
| ** dlrPosData - position data for current document. |
| ** dlrPosDataLen - length of pos data for current document (incl POS_END). |
| ** dlrStep - step to current document. |
** dlrInit - initialize for a doclist of given type against given data.
| ** dlrDestroy - clean up. |
| ** |
| ** Expected usage is something like: |
| ** |
| ** DLReader reader; |
| ** dlrInit(&reader, pData, nData); |
| ** while( !dlrAtEnd(&reader) ){ |
| ** // calls to dlrDocid() and kin. |
| ** dlrStep(&reader); |
| ** } |
| ** dlrDestroy(&reader); |
| */ |
| typedef struct DLReader { |
| DocListType iType; |
| const char *pData; |
| int nData; |
| |
| sqlite_int64 iDocid; |
| int nElement; |
| } DLReader; |
| |
| static int dlrAtEnd(DLReader *pReader){ |
| assert( pReader->nData>=0 ); |
| return pReader->nData==0; |
| } |
| static sqlite_int64 dlrDocid(DLReader *pReader){ |
| assert( !dlrAtEnd(pReader) ); |
| return pReader->iDocid; |
| } |
| static const char *dlrDocData(DLReader *pReader){ |
| assert( !dlrAtEnd(pReader) ); |
| return pReader->pData; |
| } |
| static int dlrDocDataBytes(DLReader *pReader){ |
| assert( !dlrAtEnd(pReader) ); |
| return pReader->nElement; |
| } |
| static int dlrAllDataBytes(DLReader *pReader){ |
| assert( !dlrAtEnd(pReader) ); |
| return pReader->nData; |
| } |
| /* TODO(shess) Consider adding a field to track iDocid varint length |
| ** to make these two functions faster. This might matter (a tiny bit) |
| ** for queries. |
| */ |
| static const char *dlrPosData(DLReader *pReader){ |
| sqlite_int64 iDummy; |
| int n = getVarint(pReader->pData, &iDummy); |
| assert( !dlrAtEnd(pReader) ); |
| return pReader->pData+n; |
| } |
| static int dlrPosDataLen(DLReader *pReader){ |
| sqlite_int64 iDummy; |
| int n = getVarint(pReader->pData, &iDummy); |
| assert( !dlrAtEnd(pReader) ); |
| return pReader->nElement-n; |
| } |
| static void dlrStep(DLReader *pReader){ |
| assert( !dlrAtEnd(pReader) ); |
| |
| /* Skip past current doclist element. */ |
| assert( pReader->nElement<=pReader->nData ); |
| pReader->pData += pReader->nElement; |
| pReader->nData -= pReader->nElement; |
| |
| /* If there is more data, read the next doclist element. */ |
| if( pReader->nData!=0 ){ |
| sqlite_int64 iDocidDelta; |
| int iDummy, n = getVarint(pReader->pData, &iDocidDelta); |
| pReader->iDocid += iDocidDelta; |
| if( pReader->iType>=DL_POSITIONS ){ |
| assert( n<pReader->nData ); |
| while( 1 ){ |
| n += getVarint32(pReader->pData+n, &iDummy); |
| assert( n<=pReader->nData ); |
| if( iDummy==POS_END ) break; |
| if( iDummy==POS_COLUMN ){ |
| n += getVarint32(pReader->pData+n, &iDummy); |
| assert( n<pReader->nData ); |
| }else if( pReader->iType==DL_POSITIONS_OFFSETS ){ |
| n += getVarint32(pReader->pData+n, &iDummy); |
| n += getVarint32(pReader->pData+n, &iDummy); |
| assert( n<pReader->nData ); |
| } |
| } |
| } |
| pReader->nElement = n; |
| assert( pReader->nElement<=pReader->nData ); |
| } |
| } |
| static void dlrInit(DLReader *pReader, DocListType iType, |
| const char *pData, int nData){ |
| assert( pData!=NULL && nData!=0 ); |
| pReader->iType = iType; |
| pReader->pData = pData; |
| pReader->nData = nData; |
| pReader->nElement = 0; |
| pReader->iDocid = 0; |
| |
| /* Load the first element's data. There must be a first element. */ |
| dlrStep(pReader); |
| } |
| static void dlrDestroy(DLReader *pReader){ |
| SCRAMBLE(pReader); |
| } |
| |
| #ifndef NDEBUG |
| /* Verify that the doclist can be validly decoded. Also returns the |
| ** last docid found because it is convenient in other assertions for |
| ** DLWriter. |
| */ |
| static void docListValidate(DocListType iType, const char *pData, int nData, |
| sqlite_int64 *pLastDocid){ |
| sqlite_int64 iPrevDocid = 0; |
| assert( nData>0 ); |
| assert( pData!=0 ); |
| assert( pData+nData>pData ); |
| while( nData!=0 ){ |
| sqlite_int64 iDocidDelta; |
| int n = getVarint(pData, &iDocidDelta); |
| iPrevDocid += iDocidDelta; |
| if( iType>DL_DOCIDS ){ |
| int iDummy; |
| while( 1 ){ |
| n += getVarint32(pData+n, &iDummy); |
| if( iDummy==POS_END ) break; |
| if( iDummy==POS_COLUMN ){ |
| n += getVarint32(pData+n, &iDummy); |
| }else if( iType>DL_POSITIONS ){ |
| n += getVarint32(pData+n, &iDummy); |
| n += getVarint32(pData+n, &iDummy); |
| } |
| assert( n<=nData ); |
| } |
| } |
| assert( n<=nData ); |
| pData += n; |
| nData -= n; |
| } |
| if( pLastDocid ) *pLastDocid = iPrevDocid; |
| } |
| #define ASSERT_VALID_DOCLIST(i, p, n, o) docListValidate(i, p, n, o) |
| #else |
| #define ASSERT_VALID_DOCLIST(i, p, n, o) assert( 1 ) |
| #endif |
| |
| /*******************************************************************/ |
| /* DLWriter is used to write doclist data to a DataBuffer. DLWriter |
| ** always appends to the buffer and does not own it. |
| ** |
** dlwInit - initialize to write a doclist of a given type to a buffer.
| ** dlwDestroy - clear the writer's memory. Does not free buffer. |
| ** dlwAppend - append raw doclist data to buffer. |
| ** dlwCopy - copy next doclist from reader to writer. |
| ** dlwAdd - construct doclist element and append to buffer. |
| ** Only apply dlwAdd() to DL_DOCIDS doclists (else use PLWriter). |
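**
** Expected usage for a DL_DOCIDS doclist is something like (illustrative):
**
**   DataBuffer buf;
**   DLWriter writer;
**   dataBufferInit(&buf, 0);
**   dlwInit(&writer, DL_DOCIDS, &buf);
**   dlwAdd(&writer, 17);
**   dlwAdd(&writer, 42);    // docids must be added in ascending order
**   dlwDestroy(&writer);
**   // buf now holds the encoded doclist; the caller still owns buf.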
| */ |
| typedef struct DLWriter { |
| DocListType iType; |
| DataBuffer *b; |
| sqlite_int64 iPrevDocid; |
| #ifndef NDEBUG |
| int has_iPrevDocid; |
| #endif |
| } DLWriter; |
| |
| static void dlwInit(DLWriter *pWriter, DocListType iType, DataBuffer *b){ |
| pWriter->b = b; |
| pWriter->iType = iType; |
| pWriter->iPrevDocid = 0; |
| #ifndef NDEBUG |
| pWriter->has_iPrevDocid = 0; |
| #endif |
| } |
| static void dlwDestroy(DLWriter *pWriter){ |
| SCRAMBLE(pWriter); |
| } |
| /* iFirstDocid is the first docid in the doclist in pData. It is |
| ** needed because pData may point within a larger doclist, in which |
| ** case the first item would be delta-encoded. |
| ** |
| ** iLastDocid is the final docid in the doclist in pData. It is |
| ** needed to create the new iPrevDocid for future delta-encoding. The |
| ** code could decode the passed doclist to recreate iLastDocid, but |
| ** the only current user (docListMerge) already has decoded this |
| ** information. |
| */ |
| /* TODO(shess) This has become just a helper for docListMerge. |
| ** Consider a refactor to make this cleaner. |
| */ |
| static void dlwAppend(DLWriter *pWriter, |
| const char *pData, int nData, |
| sqlite_int64 iFirstDocid, sqlite_int64 iLastDocid){ |
| sqlite_int64 iDocid = 0; |
| char c[VARINT_MAX]; |
| int nFirstOld, nFirstNew; /* Old and new varint len of first docid. */ |
| #ifndef NDEBUG |
| sqlite_int64 iLastDocidDelta; |
| #endif |
| |
| /* Recode the initial docid as delta from iPrevDocid. */ |
| nFirstOld = getVarint(pData, &iDocid); |
| assert( nFirstOld<nData || (nFirstOld==nData && pWriter->iType==DL_DOCIDS) ); |
| nFirstNew = putVarint(c, iFirstDocid-pWriter->iPrevDocid); |
| |
| /* Verify that the incoming doclist is valid AND that it ends with |
| ** the expected docid. This is essential because we'll trust this |
| ** docid in future delta-encoding. |
| */ |
| ASSERT_VALID_DOCLIST(pWriter->iType, pData, nData, &iLastDocidDelta); |
| assert( iLastDocid==iFirstDocid-iDocid+iLastDocidDelta ); |
| |
| /* Append recoded initial docid and everything else. Rest of docids |
| ** should have been delta-encoded from previous initial docid. |
| */ |
| if( nFirstOld<nData ){ |
| dataBufferAppend2(pWriter->b, c, nFirstNew, |
| pData+nFirstOld, nData-nFirstOld); |
| }else{ |
| dataBufferAppend(pWriter->b, c, nFirstNew); |
| } |
| pWriter->iPrevDocid = iLastDocid; |
| } |
| static void dlwCopy(DLWriter *pWriter, DLReader *pReader){ |
| dlwAppend(pWriter, dlrDocData(pReader), dlrDocDataBytes(pReader), |
| dlrDocid(pReader), dlrDocid(pReader)); |
| } |
| static void dlwAdd(DLWriter *pWriter, sqlite_int64 iDocid){ |
| char c[VARINT_MAX]; |
| int n = putVarint(c, iDocid-pWriter->iPrevDocid); |
| |
| /* Docids must ascend. */ |
| assert( !pWriter->has_iPrevDocid || iDocid>pWriter->iPrevDocid ); |
| assert( pWriter->iType==DL_DOCIDS ); |
| |
| dataBufferAppend(pWriter->b, c, n); |
| pWriter->iPrevDocid = iDocid; |
| #ifndef NDEBUG |
| pWriter->has_iPrevDocid = 1; |
| #endif |
| } |
| |
| /*******************************************************************/ |
| /* PLReader is used to read data from a document's position list. As |
| ** the caller steps through the list, data is cached so that varints |
| ** only need to be decoded once. |
| ** |
| ** plrInit, plrDestroy - create/destroy a reader. |
| ** plrColumn, plrPosition, plrStartOffset, plrEndOffset - accessors |
| ** plrAtEnd - at end of stream, only call plrDestroy once true. |
| ** plrStep - step to the next element. |
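**
** Expected usage, with pDLReader positioned on a document, is
** something like:
**
**   PLReader plReader;
**   plrInit(&plReader, pDLReader);
**   while( !plrAtEnd(&plReader) ){
**     // calls to plrColumn(), plrPosition(), and kin.
**     plrStep(&plReader);
**   }
**   plrDestroy(&plReader);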
| */ |
| typedef struct PLReader { |
| /* These refer to the next position's data. nData will reach 0 when |
| ** reading the last position, so plrStep() signals EOF by setting |
| ** pData to NULL. |
| */ |
| const char *pData; |
| int nData; |
| |
| DocListType iType; |
| int iColumn; /* the last column read */ |
| int iPosition; /* the last position read */ |
| int iStartOffset; /* the last start offset read */ |
| int iEndOffset; /* the last end offset read */ |
| } PLReader; |
| |
| static int plrAtEnd(PLReader *pReader){ |
| return pReader->pData==NULL; |
| } |
| static int plrColumn(PLReader *pReader){ |
| assert( !plrAtEnd(pReader) ); |
| return pReader->iColumn; |
| } |
| static int plrPosition(PLReader *pReader){ |
| assert( !plrAtEnd(pReader) ); |
| return pReader->iPosition; |
| } |
| static int plrStartOffset(PLReader *pReader){ |
| assert( !plrAtEnd(pReader) ); |
| return pReader->iStartOffset; |
| } |
| static int plrEndOffset(PLReader *pReader){ |
| assert( !plrAtEnd(pReader) ); |
| return pReader->iEndOffset; |
| } |
| static void plrStep(PLReader *pReader){ |
| int i, n; |
| |
| assert( !plrAtEnd(pReader) ); |
| |
| if( pReader->nData==0 ){ |
| pReader->pData = NULL; |
| return; |
| } |
| |
| n = getVarint32(pReader->pData, &i); |
| if( i==POS_COLUMN ){ |
| n += getVarint32(pReader->pData+n, &pReader->iColumn); |
| pReader->iPosition = 0; |
| pReader->iStartOffset = 0; |
| n += getVarint32(pReader->pData+n, &i); |
| } |
| /* Should never see adjacent column changes. */ |
| assert( i!=POS_COLUMN ); |
| |
| if( i==POS_END ){ |
| pReader->nData = 0; |
| pReader->pData = NULL; |
| return; |
| } |
| |
| pReader->iPosition += i-POS_BASE; |
| if( pReader->iType==DL_POSITIONS_OFFSETS ){ |
| n += getVarint32(pReader->pData+n, &i); |
| pReader->iStartOffset += i; |
| n += getVarint32(pReader->pData+n, &i); |
| pReader->iEndOffset = pReader->iStartOffset+i; |
| } |
| assert( n<=pReader->nData ); |
| pReader->pData += n; |
| pReader->nData -= n; |
| } |
| |
| static void plrInit(PLReader *pReader, DLReader *pDLReader){ |
| pReader->pData = dlrPosData(pDLReader); |
| pReader->nData = dlrPosDataLen(pDLReader); |
| pReader->iType = pDLReader->iType; |
| pReader->iColumn = 0; |
| pReader->iPosition = 0; |
| pReader->iStartOffset = 0; |
| pReader->iEndOffset = 0; |
| plrStep(pReader); |
| } |
| static void plrDestroy(PLReader *pReader){ |
| SCRAMBLE(pReader); |
| } |
| |
| /*******************************************************************/ |
| /* PLWriter is used in constructing a document's position list. As a |
| ** convenience, if iType is DL_DOCIDS, PLWriter becomes a no-op. |
| ** PLWriter writes to the associated DLWriter's buffer. |
| ** |
| ** plwInit - init for writing a document's poslist. |
| ** plwDestroy - clear a writer. |
| ** plwAdd - append position and offset information. |
| ** plwCopy - copy next position's data from reader to writer. |
| ** plwTerminate - add any necessary doclist terminator. |
| ** |
| ** Calling plwAdd() after plwTerminate() may result in a corrupt |
| ** doclist. |
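**
** Expected usage, given a DLWriter pDLWriter, is something like
** (illustrative):
**
**   PLWriter plWriter;
**   plwInit(&plWriter, pDLWriter, iDocid);
**   plwAdd(&plWriter, 0, 13, 0, 0);    // column 0, position 13
**   plwTerminate(&plWriter);
**   plwDestroy(&plWriter);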
| */ |
| /* TODO(shess) Until we've written the second item, we can cache the |
| ** first item's information. Then we'd have three states: |
| ** |
| ** - initialized with docid, no positions. |
| ** - docid and one position. |
| ** - docid and multiple positions. |
| ** |
| ** Only the last state needs to actually write to dlw->b, which would |
| ** be an improvement in the DLCollector case. |
| */ |
| typedef struct PLWriter { |
| DLWriter *dlw; |
| |
| int iColumn; /* the last column written */ |
| int iPos; /* the last position written */ |
| int iOffset; /* the last start offset written */ |
| } PLWriter; |
| |
| /* TODO(shess) In the case where the parent is reading these values |
| ** from a PLReader, we could optimize to a copy if that PLReader has |
| ** the same type as pWriter. |
| */ |
| static void plwAdd(PLWriter *pWriter, int iColumn, int iPos, |
| int iStartOffset, int iEndOffset){ |
| /* Worst-case space for POS_COLUMN, iColumn, iPosDelta, |
| ** iStartOffsetDelta, and iEndOffsetDelta. |
| */ |
| char c[5*VARINT_MAX]; |
| int n = 0; |
| |
| /* Ban plwAdd() after plwTerminate(). */ |
| assert( pWriter->iPos!=-1 ); |
| |
| if( pWriter->dlw->iType==DL_DOCIDS ) return; |
| |
| if( iColumn!=pWriter->iColumn ){ |
| n += putVarint(c+n, POS_COLUMN); |
| n += putVarint(c+n, iColumn); |
| pWriter->iColumn = iColumn; |
| pWriter->iPos = 0; |
| pWriter->iOffset = 0; |
| } |
| assert( iPos>=pWriter->iPos ); |
| n += putVarint(c+n, POS_BASE+(iPos-pWriter->iPos)); |
| pWriter->iPos = iPos; |
| if( pWriter->dlw->iType==DL_POSITIONS_OFFSETS ){ |
| assert( iStartOffset>=pWriter->iOffset ); |
| n += putVarint(c+n, iStartOffset-pWriter->iOffset); |
| pWriter->iOffset = iStartOffset; |
| assert( iEndOffset>=iStartOffset ); |
| n += putVarint(c+n, iEndOffset-iStartOffset); |
| } |
| dataBufferAppend(pWriter->dlw->b, c, n); |
| } |
| static void plwCopy(PLWriter *pWriter, PLReader *pReader){ |
| plwAdd(pWriter, plrColumn(pReader), plrPosition(pReader), |
| plrStartOffset(pReader), plrEndOffset(pReader)); |
| } |
| static void plwInit(PLWriter *pWriter, DLWriter *dlw, sqlite_int64 iDocid){ |
| char c[VARINT_MAX]; |
| int n; |
| |
| pWriter->dlw = dlw; |
| |
| /* Docids must ascend. */ |
| assert( !pWriter->dlw->has_iPrevDocid || iDocid>pWriter->dlw->iPrevDocid ); |
| n = putVarint(c, iDocid-pWriter->dlw->iPrevDocid); |
| dataBufferAppend(pWriter->dlw->b, c, n); |
| pWriter->dlw->iPrevDocid = iDocid; |
| #ifndef NDEBUG |
| pWriter->dlw->has_iPrevDocid = 1; |
| #endif |
| |
| pWriter->iColumn = 0; |
| pWriter->iPos = 0; |
| pWriter->iOffset = 0; |
| } |
| /* TODO(shess) Should plwDestroy() also terminate the doclist? But |
| ** then plwDestroy() would no longer be just a destructor, it would |
| ** also be doing work, which isn't consistent with the overall idiom. |
| ** Another option would be for plwAdd() to always append any necessary |
| ** terminator, so that the output is always correct. But that would |
| ** add incremental work to the common case with the only benefit being |
| ** API elegance. Punt for now. |
| */ |
| static void plwTerminate(PLWriter *pWriter){ |
| if( pWriter->dlw->iType>DL_DOCIDS ){ |
| char c[VARINT_MAX]; |
| int n = putVarint(c, POS_END); |
| dataBufferAppend(pWriter->dlw->b, c, n); |
| } |
| #ifndef NDEBUG |
| /* Mark as terminated for assert in plwAdd(). */ |
| pWriter->iPos = -1; |
| #endif |
| } |
| static void plwDestroy(PLWriter *pWriter){ |
| SCRAMBLE(pWriter); |
| } |
| |
| /*******************************************************************/ |
| /* DLCollector wraps PLWriter and DLWriter to provide a |
| ** dynamically-allocated doclist area to use during tokenization. |
| ** |
| ** dlcNew - malloc up and initialize a collector. |
| ** dlcDelete - destroy a collector and all contained items. |
| ** dlcAddPos - append position and offset information. |
| ** dlcAddDoclist - add the collected doclist to the given buffer. |
| ** dlcNext - terminate the current document and open another. |
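**
** Expected usage during tokenization is something like (illustrative):
**
**   DLCollector *pCollector = dlcNew(iDocid, DL_DEFAULT);
**   dlcAddPos(pCollector, iColumn, iPos, iStartOffset, iEndOffset);
**   ...                     // further positions for the same docid
**   dlcAddDoclist(pCollector, pBuffer);
**   dlcDelete(pCollector);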
| */ |
| typedef struct DLCollector { |
| DataBuffer b; |
| DLWriter dlw; |
| PLWriter plw; |
| } DLCollector; |
| |
| /* TODO(shess) This could also be done by calling plwTerminate() and |
| ** dataBufferAppend(). I tried that, expecting nominal performance |
| ** differences, but it seemed to pretty reliably be worth 1% to code |
| ** it this way. I suspect it is the incremental malloc overhead (some |
| ** percentage of the plwTerminate() calls will cause a realloc), so |
| ** this might be worth revisiting if the DataBuffer implementation |
| ** changes. |
| */ |
| static void dlcAddDoclist(DLCollector *pCollector, DataBuffer *b){ |
| if( pCollector->dlw.iType>DL_DOCIDS ){ |
| char c[VARINT_MAX]; |
| int n = putVarint(c, POS_END); |
| dataBufferAppend2(b, pCollector->b.pData, pCollector->b.nData, c, n); |
| }else{ |
| dataBufferAppend(b, pCollector->b.pData, pCollector->b.nData); |
| } |
| } |
| static void dlcNext(DLCollector *pCollector, sqlite_int64 iDocid){ |
| plwTerminate(&pCollector->plw); |
| plwDestroy(&pCollector->plw); |
| plwInit(&pCollector->plw, &pCollector->dlw, iDocid); |
| } |
| static void dlcAddPos(DLCollector *pCollector, int iColumn, int iPos, |
| int iStartOffset, int iEndOffset){ |
| plwAdd(&pCollector->plw, iColumn, iPos, iStartOffset, iEndOffset); |
| } |
| |
| static DLCollector *dlcNew(sqlite_int64 iDocid, DocListType iType){ |
| DLCollector *pCollector = sqlite3_malloc(sizeof(DLCollector)); |
| dataBufferInit(&pCollector->b, 0); |
| dlwInit(&pCollector->dlw, iType, &pCollector->b); |
| plwInit(&pCollector->plw, &pCollector->dlw, iDocid); |
| return pCollector; |
| } |
| static void dlcDelete(DLCollector *pCollector){ |
| plwDestroy(&pCollector->plw); |
| dlwDestroy(&pCollector->dlw); |
| dataBufferDestroy(&pCollector->b); |
| SCRAMBLE(pCollector); |
| sqlite3_free(pCollector); |
| } |
| |
| |
| /* Copy the doclist data of iType in pData/nData into *out, trimming |
| ** unnecessary data as we go. Only columns matching iColumn are |
** copied; all columns are copied if iColumn is -1.  Elements with no
| ** matching columns are dropped. The output is an iOutType doclist. |
| */ |
| /* NOTE(shess) This code is only valid after all doclists are merged. |
| ** If this is run before merges, then doclist items which represent |
| ** deletion will be trimmed, and will thus not effect a deletion |
| ** during the merge. |
| */ |
| static void docListTrim(DocListType iType, const char *pData, int nData, |
| int iColumn, DocListType iOutType, DataBuffer *out){ |
| DLReader dlReader; |
| DLWriter dlWriter; |
| |
| assert( iOutType<=iType ); |
| |
| dlrInit(&dlReader, iType, pData, nData); |
| dlwInit(&dlWriter, iOutType, out); |
| |
| while( !dlrAtEnd(&dlReader) ){ |
| PLReader plReader; |
| PLWriter plWriter; |
| int match = 0; |
| |
| plrInit(&plReader, &dlReader); |
| |
| while( !plrAtEnd(&plReader) ){ |
| if( iColumn==-1 || plrColumn(&plReader)==iColumn ){ |
| if( !match ){ |
| plwInit(&plWriter, &dlWriter, dlrDocid(&dlReader)); |
| match = 1; |
| } |
| plwAdd(&plWriter, plrColumn(&plReader), plrPosition(&plReader), |
| plrStartOffset(&plReader), plrEndOffset(&plReader)); |
| } |
| plrStep(&plReader); |
| } |
| if( match ){ |
| plwTerminate(&plWriter); |
| plwDestroy(&plWriter); |
| } |
| |
| plrDestroy(&plReader); |
| dlrStep(&dlReader); |
| } |
| dlwDestroy(&dlWriter); |
| dlrDestroy(&dlReader); |
| } |
| |
| /* Used by docListMerge() to keep doclists in the ascending order by |
| ** docid, then ascending order by age (so the newest comes first). |
| */ |
| typedef struct OrderedDLReader { |
| DLReader *pReader; |
| |
| /* TODO(shess) If we assume that docListMerge pReaders is ordered by |
| ** age (which we do), then we could use pReader comparisons to break |
| ** ties. |
| */ |
| int idx; |
| } OrderedDLReader; |
| |
| /* Order eof to end, then by docid asc, idx desc. */ |
| static int orderedDLReaderCmp(OrderedDLReader *r1, OrderedDLReader *r2){ |
| if( dlrAtEnd(r1->pReader) ){ |
| if( dlrAtEnd(r2->pReader) ) return 0; /* Both atEnd(). */ |
| return 1; /* Only r1 atEnd(). */ |
| } |
| if( dlrAtEnd(r2->pReader) ) return -1; /* Only r2 atEnd(). */ |
| |
| if( dlrDocid(r1->pReader)<dlrDocid(r2->pReader) ) return -1; |
| if( dlrDocid(r1->pReader)>dlrDocid(r2->pReader) ) return 1; |
| |
| /* Descending on idx. */ |
| return r2->idx-r1->idx; |
| } |
| |
| /* Bubble p[0] to appropriate place in p[1..n-1]. Assumes that |
| ** p[1..n-1] is already sorted. |
| */ |
| /* TODO(shess) Is this frequent enough to warrant a binary search? |
| ** Before implementing that, instrument the code to check. In most |
| ** current usage, I expect that p[0] will be less than p[1] a very |
| ** high proportion of the time. |
| */ |
| static void orderedDLReaderReorder(OrderedDLReader *p, int n){ |
| while( n>1 && orderedDLReaderCmp(p, p+1)>0 ){ |
| OrderedDLReader tmp = p[0]; |
| p[0] = p[1]; |
| p[1] = tmp; |
| n--; |
| p++; |
| } |
| } |
| |
| /* Given an array of doclist readers, merge their doclist elements |
| ** into out in sorted order (by docid), dropping elements from older |
| ** readers when there is a duplicate docid. pReaders is assumed to be |
| ** ordered by age, oldest first. |
| */ |
| /* TODO(shess) nReaders must be <= MERGE_COUNT. This should probably |
| ** be fixed. |
| */ |
| static void docListMerge(DataBuffer *out, |
| DLReader *pReaders, int nReaders){ |
| OrderedDLReader readers[MERGE_COUNT]; |
| DLWriter writer; |
| int i, n; |
| const char *pStart = 0; |
| int nStart = 0; |
| sqlite_int64 iFirstDocid = 0, iLastDocid = 0; |
| |
| assert( nReaders>0 ); |
| if( nReaders==1 ){ |
| dataBufferAppend(out, dlrDocData(pReaders), dlrAllDataBytes(pReaders)); |
| return; |
| } |
| |
| assert( nReaders<=MERGE_COUNT ); |
| n = 0; |
| for(i=0; i<nReaders; i++){ |
| assert( pReaders[i].iType==pReaders[0].iType ); |
| readers[i].pReader = pReaders+i; |
| readers[i].idx = i; |
| n += dlrAllDataBytes(&pReaders[i]); |
| } |
| /* Conservatively size output to sum of inputs. Output should end |
| ** up strictly smaller than input. |
| */ |
| dataBufferExpand(out, n); |
| |
| /* Get the readers into sorted order. */ |
| while( i-->0 ){ |
| orderedDLReaderReorder(readers+i, nReaders-i); |
| } |
| |
| dlwInit(&writer, pReaders[0].iType, out); |
| while( !dlrAtEnd(readers[0].pReader) ){ |
| sqlite_int64 iDocid = dlrDocid(readers[0].pReader); |
| |
| /* If this is a continuation of the current buffer to copy, extend |
    ** that buffer.  memcpy() seems to be more efficient if it has
    ** lots of data to copy.
| */ |
| if( dlrDocData(readers[0].pReader)==pStart+nStart ){ |
| nStart += dlrDocDataBytes(readers[0].pReader); |
| }else{ |
| if( pStart!=0 ){ |
| dlwAppend(&writer, pStart, nStart, iFirstDocid, iLastDocid); |
| } |
| pStart = dlrDocData(readers[0].pReader); |
| nStart = dlrDocDataBytes(readers[0].pReader); |
| iFirstDocid = iDocid; |
| } |
| iLastDocid = iDocid; |
| dlrStep(readers[0].pReader); |
| |
| /* Drop all of the older elements with the same docid. */ |
| for(i=1; i<nReaders && |
| !dlrAtEnd(readers[i].pReader) && |
| dlrDocid(readers[i].pReader)==iDocid; i++){ |
| dlrStep(readers[i].pReader); |
| } |
| |
| /* Get the readers back into order. */ |
| while( i-->0 ){ |
| orderedDLReaderReorder(readers+i, nReaders-i); |
| } |
| } |
| |
| /* Copy over any remaining elements. */ |
| if( nStart>0 ) dlwAppend(&writer, pStart, nStart, iFirstDocid, iLastDocid); |
| dlwDestroy(&writer); |
| } |
| |
/* Helper function for posListUnion().  Compares the current positions
** of left and right, returning, per the standard C idiom, <0 if
| ** left<right, >0 if left>right, and 0 if left==right. "End" always |
| ** compares greater. |
| */ |
| static int posListCmp(PLReader *pLeft, PLReader *pRight){ |
| assert( pLeft->iType==pRight->iType ); |
| if( pLeft->iType==DL_DOCIDS ) return 0; |
| |
| if( plrAtEnd(pLeft) ) return plrAtEnd(pRight) ? 0 : 1; |
| if( plrAtEnd(pRight) ) return -1; |
| |
| if( plrColumn(pLeft)<plrColumn(pRight) ) return -1; |
| if( plrColumn(pLeft)>plrColumn(pRight) ) return 1; |
| |
| if( plrPosition(pLeft)<plrPosition(pRight) ) return -1; |
| if( plrPosition(pLeft)>plrPosition(pRight) ) return 1; |
| if( pLeft->iType==DL_POSITIONS ) return 0; |
| |
| if( plrStartOffset(pLeft)<plrStartOffset(pRight) ) return -1; |
| if( plrStartOffset(pLeft)>plrStartOffset(pRight) ) return 1; |
| |
| if( plrEndOffset(pLeft)<plrEndOffset(pRight) ) return -1; |
| if( plrEndOffset(pLeft)>plrEndOffset(pRight) ) return 1; |
| |
| return 0; |
| } |
| |
| /* Write the union of position lists in pLeft and pRight to pOut. |
| ** "Union" in this case meaning "All unique position tuples". Should |
| ** work with any doclist type, though both inputs and the output |
| ** should be the same type. |
| */ |
| static void posListUnion(DLReader *pLeft, DLReader *pRight, DLWriter *pOut){ |
| PLReader left, right; |
| PLWriter writer; |
| |
| assert( dlrDocid(pLeft)==dlrDocid(pRight) ); |
| assert( pLeft->iType==pRight->iType ); |
| assert( pLeft->iType==pOut->iType ); |
| |
| plrInit(&left, pLeft); |
| plrInit(&right, pRight); |
| plwInit(&writer, pOut, dlrDocid(pLeft)); |
| |
| while( !plrAtEnd(&left) || !plrAtEnd(&right) ){ |
| int c = posListCmp(&left, &right); |
| if( c<0 ){ |
| plwCopy(&writer, &left); |
| plrStep(&left); |
| }else if( c>0 ){ |
| plwCopy(&writer, &right); |
| plrStep(&right); |
| }else{ |
| plwCopy(&writer, &left); |
| plrStep(&left); |
| plrStep(&right); |
| } |
| } |
| |
| plwTerminate(&writer); |
| plwDestroy(&writer); |
| plrDestroy(&left); |
| plrDestroy(&right); |
| } |
| |
| /* Write the union of doclists in pLeft and pRight to pOut. For |
| ** docids in common between the inputs, the union of the position |
| ** lists is written. Inputs and outputs are always type DL_DEFAULT. |
| */ |
| static void docListUnion( |
| const char *pLeft, int nLeft, |
| const char *pRight, int nRight, |
| DataBuffer *pOut /* Write the combined doclist here */ |
| ){ |
| DLReader left, right; |
| DLWriter writer; |
| |
| if( nLeft==0 ){ |
| if( nRight!=0) dataBufferAppend(pOut, pRight, nRight); |
| return; |
| } |
| if( nRight==0 ){ |
| dataBufferAppend(pOut, pLeft, nLeft); |
| return; |
| } |
| |
| dlrInit(&left, DL_DEFAULT, pLeft, nLeft); |
| dlrInit(&right, DL_DEFAULT, pRight, nRight); |
| dlwInit(&writer, DL_DEFAULT, pOut); |
| |
| while( !dlrAtEnd(&left) || !dlrAtEnd(&right) ){ |
| if( dlrAtEnd(&right) ){ |
| dlwCopy(&writer, &left); |
| dlrStep(&left); |
| }else if( dlrAtEnd(&left) ){ |
| dlwCopy(&writer, &right); |
| dlrStep(&right); |
| }else if( dlrDocid(&left)<dlrDocid(&right) ){ |
| dlwCopy(&writer, &left); |
| dlrStep(&left); |
| }else if( dlrDocid(&left)>dlrDocid(&right) ){ |
| dlwCopy(&writer, &right); |
| dlrStep(&right); |
| }else{ |
| posListUnion(&left, &right, &writer); |
| dlrStep(&left); |
| dlrStep(&right); |
| } |
| } |
| |
| dlrDestroy(&left); |
| dlrDestroy(&right); |
| dlwDestroy(&writer); |
| } |
| |
| /* pLeft and pRight are DLReaders positioned to the same docid. |
| ** |
| ** If there are no instances in pLeft or pRight where the position |
| ** of pLeft is one less than the position of pRight, then this |
| ** routine adds nothing to pOut. |
| ** |
| ** If there are one or more instances where positions from pLeft |
| ** are exactly one less than positions from pRight, then add a new |
| ** document record to pOut. If pOut wants to hold positions, then |
| ** include the positions from pRight that are one more than a |
| ** position in pLeft. In other words: pRight.iPos==pLeft.iPos+1. |
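**
** For example (illustrative), if pLeft has positions {3, 7} and pRight
** has positions {4, 10} in the same column, only the pair (3, 4)
** matches; a document record is added to pOut, with position 4 (from
** pRight) included when pOut holds positions.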
| */ |
| static void posListPhraseMerge(DLReader *pLeft, DLReader *pRight, |
| DLWriter *pOut){ |
| PLReader left, right; |
| PLWriter writer; |
| int match = 0; |
| |
| assert( dlrDocid(pLeft)==dlrDocid(pRight) ); |
| assert( pOut->iType!=DL_POSITIONS_OFFSETS ); |
| |
| plrInit(&left, pLeft); |
| plrInit(&right, pRight); |
| |
| while( !plrAtEnd(&left) && !plrAtEnd(&right) ){ |
| if( plrColumn(&left)<plrColumn(&right) ){ |
| plrStep(&left); |
| }else if( plrColumn(&left)>plrColumn(&right) ){ |
| plrStep(&right); |
| }else if( plrPosition(&left)+1<plrPosition(&right) ){ |
| plrStep(&left); |
| }else if( plrPosition(&left)+1>plrPosition(&right) ){ |
| plrStep(&right); |
| }else{ |
| if( !match ){ |
| plwInit(&writer, pOut, dlrDocid(pLeft)); |
| match = 1; |
| } |
| plwAdd(&writer, plrColumn(&right), plrPosition(&right), 0, 0); |
| plrStep(&left); |
| plrStep(&right); |
| } |
| } |
| |
| if( match ){ |
| plwTerminate(&writer); |
| plwDestroy(&writer); |
| } |
| |
| plrDestroy(&left); |
| plrDestroy(&right); |
| } |
| |
| /* We have two doclists with positions: pLeft and pRight. |
| ** Write the phrase intersection of these two doclists into pOut. |
| ** |
** A phrase intersection means that a document only matches if it
** contains an instance where pLeft.iPos+1==pRight.iPos.
| ** |
| ** iType controls the type of data written to pOut. If iType is |
| ** DL_POSITIONS, the positions are those from pRight. |
| */ |
| static void docListPhraseMerge( |
| const char *pLeft, int nLeft, |
| const char *pRight, int nRight, |
| DocListType iType, |
| DataBuffer *pOut /* Write the combined doclist here */ |
| ){ |
| DLReader left, right; |
| DLWriter writer; |
| |
| if( nLeft==0 || nRight==0 ) return; |
| |
| assert( iType!=DL_POSITIONS_OFFSETS ); |
| |
| dlrInit(&left, DL_POSITIONS, pLeft, nLeft); |
| dlrInit(&right, DL_POSITIONS, pRight, nRight); |
| dlwInit(&writer, iType, pOut); |
| |
| while( !dlrAtEnd(&left) && !dlrAtEnd(&right) ){ |
| if( dlrDocid(&left)<dlrDocid(&right) ){ |
| dlrStep(&left); |
| }else if( dlrDocid(&right)<dlrDocid(&left) ){ |
| dlrStep(&right); |
| }else{ |
| posListPhraseMerge(&left, &right, &writer); |
| dlrStep(&left); |
| dlrStep(&right); |
| } |
| } |
| |
| dlrDestroy(&left); |
| dlrDestroy(&right); |
| dlwDestroy(&writer); |
| } |
| |
| /* We have two DL_DOCIDS doclists: pLeft and pRight. |
| ** Write the intersection of these two doclists into pOut as a |
| ** DL_DOCIDS doclist. |
| */ |
| static void docListAndMerge( |
| const char *pLeft, int nLeft, |
| const char *pRight, int nRight, |
| DataBuffer *pOut /* Write the combined doclist here */ |
| ){ |
| DLReader left, right; |
| DLWriter writer; |
| |
| if( nLeft==0 || nRight==0 ) return; |
| |
| dlrInit(&left, DL_DOCIDS, pLeft, nLeft); |
| dlrInit(&right, DL_DOCIDS, pRight, nRight); |
| dlwInit(&writer, DL_DOCIDS, pOut); |
| |
| while( !dlrAtEnd(&left) && !dlrAtEnd(&right) ){ |
| if( dlrDocid(&left)<dlrDocid(&right) ){ |
| dlrStep(&left); |
| }else if( dlrDocid(&right)<dlrDocid(&left) ){ |
| dlrStep(&right); |
| }else{ |
| dlwAdd(&writer, dlrDocid(&left)); |
| dlrStep(&left); |
| dlrStep(&right); |
| } |
| } |
| |
| dlrDestroy(&left); |
| dlrDestroy(&right); |
| dlwDestroy(&writer); |
| } |
| |
| /* We have two DL_DOCIDS doclists: pLeft and pRight. |
| ** Write the union of these two doclists into pOut as a |
| ** DL_DOCIDS doclist. |
| */ |
| static void docListOrMerge( |
| const char *pLeft, int nLeft, |
| const char *pRight, int nRight, |
| DataBuffer *pOut /* Write the combined doclist here */ |
| ){ |
| DLReader left, right; |
| DLWriter writer; |
| |
| if( nLeft==0 ){ |
| if( nRight!=0 ) dataBufferAppend(pOut, pRight, nRight); |
| return; |
| } |
| if( nRight==0 ){ |
| dataBufferAppend(pOut, pLeft, nLeft); |
| return; |
| } |
| |
| dlrInit(&left, DL_DOCIDS, pLeft, nLeft); |
| dlrInit(&right, DL_DOCIDS, pRight, nRight); |
| dlwInit(&writer, DL_DOCIDS, pOut); |
| |
| while( !dlrAtEnd(&left) || !dlrAtEnd(&right) ){ |
| if( dlrAtEnd(&right) ){ |
| dlwAdd(&writer, dlrDocid(&left)); |
| dlrStep(&left); |
| }else if( dlrAtEnd(&left) ){ |
| dlwAdd(&writer, dlrDocid(&right)); |
| dlrStep(&right); |
| }else if( dlrDocid(&left)<dlrDocid(&right) ){ |
| dlwAdd(&writer, dlrDocid(&left)); |
| dlrStep(&left); |
| }else if( dlrDocid(&right)<dlrDocid(&left) ){ |
| dlwAdd(&writer, dlrDocid(&right)); |
| dlrStep(&right); |
| }else{ |
| dlwAdd(&writer, dlrDocid(&left)); |
| dlrStep(&left); |
| dlrStep(&right); |
| } |
| } |
| |
| dlrDestroy(&left); |
| dlrDestroy(&right); |
| dlwDestroy(&writer); |
| } |
| |
| /* We have two DL_DOCIDS doclists: pLeft and pRight. |
| ** Write into pOut as DL_DOCIDS doclist containing all documents that |
| ** occur in pLeft but not in pRight. |
| */ |
| static void docListExceptMerge( |
| const char *pLeft, int nLeft, |
| const char *pRight, int nRight, |
| DataBuffer *pOut /* Write the combined doclist here */ |
| ){ |
| DLReader left, right; |
| DLWriter writer; |
| |
| if( nLeft==0 ) return; |
| if( nRight==0 ){ |
| dataBufferAppend(pOut, pLeft, nLeft); |
| return; |
| } |
| |
| dlrInit(&left, DL_DOCIDS, pLeft, nLeft); |
| dlrInit(&right, DL_DOCIDS, pRight, nRight); |
| dlwInit(&writer, DL_DOCIDS, pOut); |
| |
| while( !dlrAtEnd(&left) ){ |
| while( !dlrAtEnd(&right) && dlrDocid(&right)<dlrDocid(&left) ){ |
| dlrStep(&right); |
| } |
| if( dlrAtEnd(&right) || dlrDocid(&left)<dlrDocid(&right) ){ |
| dlwAdd(&writer, dlrDocid(&left)); |
| } |
| dlrStep(&left); |
| } |
| |
| dlrDestroy(&left); |
| dlrDestroy(&right); |
| dlwDestroy(&writer); |
| } |
| |
| static char *string_dup_n(const char *s, int n){ |
| char *str = sqlite3_malloc(n + 1); |
| memcpy(str, s, n); |
| str[n] = '\0'; |
| return str; |
| } |
| |
/* Duplicate a string; the caller must sqlite3_free() the returned string.
| * (We don't use strdup() since it is not part of the standard C library and |
| * may not be available everywhere.) */ |
| static char *string_dup(const char *s){ |
| return string_dup_n(s, strlen(s)); |
| } |
| |
| /* Format a string, replacing each occurrence of the % character with |
 * zDb.zName.  This may be more convenient than sqlite3_mprintf()
 * when one string is used repeatedly in a format string.
 * The caller must sqlite3_free() the returned string. */
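/* For example (illustrative): string_format("SELECT rowid FROM %_content",
** "main", "t1") returns "SELECT rowid FROM main.t1_content".
*/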
| static char *string_format(const char *zFormat, |
| const char *zDb, const char *zName){ |
| const char *p; |
| size_t len = 0; |
| size_t nDb = strlen(zDb); |
| size_t nName = strlen(zName); |
| size_t nFullTableName = nDb+1+nName; |
| char *result; |
| char *r; |
| |
| /* first compute length needed */ |
| for(p = zFormat ; *p ; ++p){ |
| len += (*p=='%' ? nFullTableName : 1); |
| } |
| len += 1; /* for null terminator */ |
| |
| r = result = sqlite3_malloc(len); |
| for(p = zFormat; *p; ++p){ |
| if( *p=='%' ){ |
| memcpy(r, zDb, nDb); |
| r += nDb; |
| *r++ = '.'; |
| memcpy(r, zName, nName); |
| r += nName; |
| } else { |
| *r++ = *p; |
| } |
| } |
| *r++ = '\0'; |
| assert( r == result + len ); |
| return result; |
| } |
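| /* For example (illustrative arguments): |
| ** |
| **   string_format("select * from %_content", "main", "email") |
| ** |
| ** returns "select * from main.email_content". */ |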
| |
| static int sql_exec(sqlite3 *db, const char *zDb, const char *zName, |
| const char *zFormat){ |
| char *zCommand = string_format(zFormat, zDb, zName); |
| int rc; |
| TRACE(("FTS2 sql: %s\n", zCommand)); |
| rc = sqlite3_exec(db, zCommand, NULL, 0, NULL); |
| sqlite3_free(zCommand); |
| return rc; |
| } |
| |
| static int sql_prepare(sqlite3 *db, const char *zDb, const char *zName, |
| sqlite3_stmt **ppStmt, const char *zFormat){ |
| char *zCommand = string_format(zFormat, zDb, zName); |
| int rc; |
| TRACE(("FTS2 prepare: %s\n", zCommand)); |
| rc = sqlite3_prepare_v2(db, zCommand, -1, ppStmt, NULL); |
| sqlite3_free(zCommand); |
| return rc; |
| } |
| |
| /* end utility functions */ |
| |
| /* Forward reference */ |
| typedef struct fulltext_vtab fulltext_vtab; |
| |
| /* A single term in a query is represented by an instance of |
| ** the following structure. |
| */ |
| typedef struct QueryTerm { |
| short int nPhrase; /* How many following terms are part of the same phrase */ |
| short int iPhrase; /* This is the i-th term of a phrase. */ |
| short int iColumn; /* Column of the index that must match this term */ |
| signed char isOr; /* this term is preceded by "OR" */ |
| signed char isNot; /* this term is preceded by "-" */ |
| signed char isPrefix; /* this term is followed by "*" */ |
| char *pTerm; /* text of the term. '\000' terminated. malloced */ |
| int nTerm; /* Number of bytes in pTerm[] */ |
| } QueryTerm; |
| |
| |
| /* A query string is parsed into a Query structure. |
| * |
| * We could, in theory, allow query strings to be complicated |
| * nested expressions with precedence determined by parentheses. |
| * But none of the major search engines do this. (Perhaps the |
| * feeling is that a parenthesized expression is too complex an |
| * idea for the average user to grasp.) Taking our lead from |
| * the major search engines, we will allow queries to be a list |
| * of terms (with an implied AND operator) or phrases in double-quotes, |
| * with a single optional "-" before each non-phrase term to designate |
| * negation and an optional OR connector. |
| * |
| * OR binds more tightly than the implied AND, which is what the |
| * major search engines seem to do. So, for example: |
| * |
| * [one two OR three] ==> one AND (two OR three) |
| * [one OR two three] ==> (one OR two) AND three |
| * |
| * A "-" before a term matches all entries that lack that term. |
| * The "-" must occur immediately before the term with in intervening |
| * space. This is how the search engines do it. |
| * |
| * A NOT term cannot be the right-hand operand of an OR. If this |
| * occurs in the query string, the NOT is ignored: |
| * |
| * [one OR -two] ==> one OR two |
| * |
| */ |
| typedef struct Query { |
| fulltext_vtab *pFts; /* The full text index */ |
| int nTerms; /* Number of terms in the query */ |
| QueryTerm *pTerms; /* Array of terms. Space obtained from malloc() */ |
| int nextIsOr; /* Set the isOr flag on the next inserted term */ |
| int nextColumn; /* Next word parsed must be in this column */ |
| int dfltColumn; /* The default column */ |
| } Query; |
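| /* For example, the query [one two OR three] parses into three |
| ** QueryTerms (a sketch of the relevant flags, per the rules above): |
| ** "one" (isOr==0), "two" (isOr==0), and "three" (isOr==1, because it |
| ** is preceded by OR). */ |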
| |
| |
| /* |
| ** An instance of the following structure keeps track of generated |
| ** matching-word offset information and snippets. |
| */ |
| typedef struct Snippet { |
| int nMatch; /* Total number of matches */ |
| int nAlloc; /* Space allocated for aMatch[] */ |
| struct snippetMatch { /* One entry for each matching term */ |
| char snStatus; /* Status flag for use while constructing snippets */ |
| short int iCol; /* The column that contains the match */ |
| short int iTerm; /* The index in Query.pTerms[] of the matching term */ |
| short int nByte; /* Number of bytes in the term */ |
| int iStart; /* The offset to the first character of the term */ |
| } *aMatch; /* Points to space obtained from malloc */ |
| char *zOffset; /* Text rendering of aMatch[] */ |
| int nOffset; /* strlen(zOffset) */ |
| char *zSnippet; /* Snippet text */ |
| int nSnippet; /* strlen(zSnippet) */ |
| } Snippet; |
| |
| |
| typedef enum QueryType { |
| QUERY_GENERIC, /* table scan */ |
| QUERY_ROWID, /* lookup by rowid */ |
| QUERY_FULLTEXT /* QUERY_FULLTEXT + [i] is a full-text search for column i */ |
| } QueryType; |
| |
| typedef enum fulltext_statement { |
| CONTENT_INSERT_STMT, |
| CONTENT_SELECT_STMT, |
| CONTENT_UPDATE_STMT, |
| CONTENT_DELETE_STMT, |
| CONTENT_EXISTS_STMT, |
| |
| BLOCK_INSERT_STMT, |
| BLOCK_SELECT_STMT, |
| BLOCK_DELETE_STMT, |
| BLOCK_DELETE_ALL_STMT, |
| |
| SEGDIR_MAX_INDEX_STMT, |
| SEGDIR_SET_STMT, |
| SEGDIR_SELECT_LEVEL_STMT, |
| SEGDIR_SPAN_STMT, |
| SEGDIR_DELETE_STMT, |
| SEGDIR_SELECT_SEGMENT_STMT, |
| SEGDIR_SELECT_ALL_STMT, |
| SEGDIR_DELETE_ALL_STMT, |
| SEGDIR_COUNT_STMT, |
| |
| MAX_STMT /* Always at end! */ |
| } fulltext_statement; |
| |
| /* These must exactly match the enum above. */ |
| /* TODO(shess): Is there some risk that a statement will be used in two |
| ** cursors at once, e.g. if a query joins a virtual table to itself? |
| ** If so perhaps we should move some of these to the cursor object. |
| */ |
| static const char *const fulltext_zStatement[MAX_STMT] = { |
| /* CONTENT_INSERT */ NULL, /* generated in contentInsertStatement() */ |
| /* CONTENT_SELECT */ "select * from %_content where rowid = ?", |
| /* CONTENT_UPDATE */ NULL, /* generated in contentUpdateStatement() */ |
| /* CONTENT_DELETE */ "delete from %_content where rowid = ?", |
| /* CONTENT_EXISTS */ "select rowid from %_content limit 1", |
| |
| /* BLOCK_INSERT */ "insert into %_segments values (?)", |
| /* BLOCK_SELECT */ "select block from %_segments where rowid = ?", |
| /* BLOCK_DELETE */ "delete from %_segments where rowid between ? and ?", |
| /* BLOCK_DELETE_ALL */ "delete from %_segments", |
| |
| /* SEGDIR_MAX_INDEX */ "select max(idx) from %_segdir where level = ?", |
| /* SEGDIR_SET */ "insert into %_segdir values (?, ?, ?, ?, ?, ?)", |
| /* SEGDIR_SELECT_LEVEL */ |
| "select start_block, leaves_end_block, root from %_segdir " |
| " where level = ? order by idx", |
| /* SEGDIR_SPAN */ |
| "select min(start_block), max(end_block) from %_segdir " |
| " where level = ? and start_block <> 0", |
| /* SEGDIR_DELETE */ "delete from %_segdir where level = ?", |
| |
| /* NOTE(shess): The first three results of the following two |
| ** statements must match. |
| */ |
| /* SEGDIR_SELECT_SEGMENT */ |
| "select start_block, leaves_end_block, root from %_segdir " |
| " where level = ? and idx = ?", |
| /* SEGDIR_SELECT_ALL */ |
| "select start_block, leaves_end_block, root from %_segdir " |
| " order by level desc, idx asc", |
| /* SEGDIR_DELETE_ALL */ "delete from %_segdir", |
| /* SEGDIR_COUNT */ "select count(*), ifnull(max(level),0) from %_segdir", |
| }; |
| |
| /* |
| ** A connection to a fulltext index is an instance of the following |
| ** structure. The xCreate and xConnect methods create an instance |
| ** of this structure and xDestroy and xDisconnect free that instance. |
| ** All other methods receive a pointer to the structure as one of their |
| ** arguments. |
| */ |
| struct fulltext_vtab { |
| sqlite3_vtab base; /* Base class used by SQLite core */ |
| sqlite3 *db; /* The database connection */ |
| const char *zDb; /* logical database name */ |
| const char *zName; /* virtual table name */ |
| int nColumn; /* number of columns in virtual table */ |
| char **azColumn; /* column names. malloced */ |
| char **azContentColumn; /* column names in content table; malloced */ |
| sqlite3_tokenizer *pTokenizer; /* tokenizer for inserts and queries */ |
| |
| /* Precompiled statements which we keep as long as the table is |
| ** open. |
| */ |
| sqlite3_stmt *pFulltextStatements[MAX_STMT]; |
| |
| /* Precompiled statements used for segment merges. We run a |
| ** separate select across the leaf level of each tree being merged. |
| */ |
| sqlite3_stmt *pLeafSelectStmts[MERGE_COUNT]; |
| /* The statement used to prepare pLeafSelectStmts. */ |
| #define LEAF_SELECT \ |
| "select block from %_segments where rowid between ? and ? order by rowid" |
| |
| /* These buffer pending index updates during transactions. |
| ** nPendingData estimates the memory size of the pending data. It |
| ** doesn't include the hash-bucket overhead, nor any malloc |
| ** overhead. When nPendingData exceeds kPendingThreshold, the |
| ** buffer is flushed even before the transaction closes. |
| ** pendingTerms stores the data, and is only valid when nPendingData |
| ** is >=0 (nPendingData<0 means pendingTerms has not been |
| ** initialized). iPrevDocid is the last docid written, used to make |
| ** certain we're inserting in sorted order. |
| */ |
| int nPendingData; |
| #define kPendingThreshold (1*1024*1024) |
| sqlite_int64 iPrevDocid; |
| fts2Hash pendingTerms; |
| }; |
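| /* A sketch of the flush policy described above; flushPendingTerms is |
| ** a stand-in name here for the flush routine defined later in this |
| ** file: |
| ** |
| **   if( v->nPendingData>kPendingThreshold ){ |
| **     rc = flushPendingTerms(v); |
| **   } |
| */ |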
| |
| /* |
| ** When the core wants to do a query, it creates a cursor using a |
| ** call to xOpen. This structure is an instance of a cursor. It |
| ** is destroyed by xClose. |
| */ |
| typedef struct fulltext_cursor { |
| sqlite3_vtab_cursor base; /* Base class used by SQLite core */ |
| QueryType iCursorType; /* Copy of sqlite3_index_info.idxNum */ |
| sqlite3_stmt *pStmt; /* Prepared statement in use by the cursor */ |
| int eof; /* True if at End Of Results */ |
| Query q; /* Parsed query string */ |
| Snippet snippet; /* Cached snippet for the current row */ |
| int iColumn; /* Column being searched */ |
| DataBuffer result; /* Doclist results from fulltextQuery */ |
| DLReader reader; /* Result reader if result not empty */ |
| } fulltext_cursor; |
| |
| static struct fulltext_vtab *cursor_vtab(fulltext_cursor *c){ |
| return (fulltext_vtab *) c->base.pVtab; |
| } |
| |
| static const sqlite3_module fts2Module; /* forward declaration */ |
| |
| /* Return a dynamically generated statement of the form |
| * insert into %_content (rowid, ...) values (?, ...) |
| */ |
| static const char *contentInsertStatement(fulltext_vtab *v){ |
| StringBuffer sb; |
| int i; |
| |
| initStringBuffer(&sb); |
| append(&sb, "insert into %_content (rowid, "); |
| appendList(&sb, v->nColumn, v->azContentColumn); |
| append(&sb, ") values (?"); |
| for(i=0; i<v->nColumn; ++i) |
| append(&sb, ", ?"); |
| append(&sb, ")"); |
| return stringBufferData(&sb); |
| } |
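| /* For example, with content columns c0subject and c1body |
| ** (illustrative names), this generates: |
| ** |
| **   insert into %_content (rowid, c0subject, c1body) values (?, ?, ?) |
| */ |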
| |
| /* Return a dynamically generated statement of the form |
| * update %_content set [col_0] = ?, [col_1] = ?, ... |
| * where rowid = ? |
| */ |
| static const char *contentUpdateStatement(fulltext_vtab *v){ |
| StringBuffer sb; |
| int i; |
| |
| initStringBuffer(&sb); |
| append(&sb, "update %_content set "); |
| for(i=0; i<v->nColumn; ++i) { |
| if( i>0 ){ |
| append(&sb, ", "); |
| } |
| append(&sb, v->azContentColumn[i]); |
| append(&sb, " = ?"); |
| } |
| append(&sb, " where rowid = ?"); |
| return stringBufferData(&sb); |
| } |
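| /* With the same illustrative columns, this generates: |
| ** |
| **   update %_content set c0subject = ?, c1body = ? where rowid = ? |
| */ |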
| |
| /* Puts a freshly-prepared statement determined by iStmt in *ppStmt. |
| ** If the indicated statement has never been prepared, it is prepared |
| ** and cached, otherwise the cached version is reset. |
| */ |
| static int sql_get_statement(fulltext_vtab *v, fulltext_statement iStmt, |
| sqlite3_stmt **ppStmt){ |
| assert( iStmt<MAX_STMT ); |
| if( v->pFulltextStatements[iStmt]==NULL ){ |
| const char *zStmt; |
| int rc; |
| switch( iStmt ){ |
| case CONTENT_INSERT_STMT: |
| zStmt = contentInsertStatement(v); break; |
| case CONTENT_UPDATE_STMT: |
| zStmt = contentUpdateStatement(v); break; |
| default: |
| zStmt = fulltext_zStatement[iStmt]; |
| } |
| rc = sql_prepare(v->db, v->zDb, v->zName, &v->pFulltextStatements[iStmt], |
| zStmt); |
| if( zStmt != fulltext_zStatement[iStmt]) sqlite3_free((void *) zStmt); |
| if( rc!=SQLITE_OK ) return rc; |
| } else { |
| int rc = sqlite3_reset(v->pFulltextStatements[iStmt]); |
| if( rc!=SQLITE_OK ) return rc; |
| } |
| |
| *ppStmt = v->pFulltextStatements[iStmt]; |
| return SQLITE_OK; |
| } |
| |
| /* Like sqlite3_step(), but convert SQLITE_DONE to SQLITE_OK. Any |
| ** other result code, including an unexpected SQLITE_ROW, is returned |
| ** unchanged. Useful for statements like UPDATE, where we expect |
| ** no results. |
| */ |
| static int sql_single_step(sqlite3_stmt *s){ |
| int rc = sqlite3_step(s); |
| return (rc==SQLITE_DONE) ? SQLITE_OK : rc; |
| } |
| |
| /* Like sql_get_statement(), but for special replicated LEAF_SELECT |
| ** statements. idx -1 is a special case for an uncached version of |
| ** the statement (used in the optimize implementation). |
| */ |
| /* TODO(shess) Write version for generic statements and then share |
| ** that between the cached-statement functions. |
| */ |
| static int sql_get_leaf_statement(fulltext_vtab *v, int idx, |
| sqlite3_stmt **ppStmt){ |
| assert( idx>=-1 && idx<MERGE_COUNT ); |
| if( idx==-1 ){ |
| return sql_prepare(v->db, v->zDb, v->zName, ppStmt, LEAF_SELECT); |
| }else if( v->pLeafSelectStmts[idx]==NULL ){ |
| int rc = sql_prepare(v->db, v->zDb, v->zName, &v->pLeafSelectStmts[idx], |
| LEAF_SELECT); |
| if( rc!=SQLITE_OK ) return rc; |
| }else{ |
| int rc = sqlite3_reset(v->pLeafSelectStmts[idx]); |
| if( rc!=SQLITE_OK ) return rc; |
| } |
| |
| *ppStmt = v->pLeafSelectStmts[idx]; |
| return SQLITE_OK; |
| } |
| |
| /* insert into %_content (rowid, ...) values ([rowid], [pValues]) */ |
| static int content_insert(fulltext_vtab *v, sqlite3_value *rowid, |
| sqlite3_value **pValues){ |
| sqlite3_stmt *s; |
| int i; |
| int rc = sql_get_statement(v, CONTENT_INSERT_STMT, &s); |
| if( rc!=SQLITE_OK ) return rc; |
| |
| rc = sqlite3_bind_value(s, 1, rowid); |
| if( rc!=SQLITE_OK ) return rc; |
| |
| for(i=0; i<v->nColumn; ++i){ |
| rc = sqlite3_bind_value(s, 2+i, pValues[i]); |
| if( rc!=SQLITE_OK ) return rc; |
| } |
| |
| return sql_single_step(s); |
| } |
| |
| /* update %_content set col0 = pValues[0], col1 = pValues[1], ... |
| * where rowid = [iRowid] */ |
| static int content_update(fulltext_vtab *v, sqlite3_value **pValues, |
| sqlite_int64 iRowid){ |
| sqlite3_stmt *s; |
| int i; |
| int rc = sql_get_statement(v, CONTENT_UPDATE_STMT, &s); |
| if( rc!=SQLITE_OK ) return rc; |
| |
| for(i=0; i<v->nColumn; ++i){ |
| rc = sqlite3_bind_value(s, 1+i, pValues[i]); |
| if( rc!=SQLITE_OK ) return rc; |
| } |
| |
| rc = sqlite3_bind_int64(s, 1+v->nColumn, iRowid); |
| if( rc!=SQLITE_OK ) return rc; |
| |
| return sql_single_step(s); |
| } |
| |
| static void freeStringArray(int nString, const char **pString){ |
| int i; |
| |
| for (i=0 ; i < nString ; ++i) { |
| if( pString[i]!=NULL ) sqlite3_free((void *) pString[i]); |
| } |
| sqlite3_free((void *) pString); |
| } |
| |
| /* select * from %_content where rowid = [iRow] |
| * The caller must free the returned array and all strings in it |
| * (freeStringArray() does both). Fields that are SQL NULL come back |
| * as NULL pointers in the returned array. |
| * |
| * TODO: Perhaps we should return pointer/length strings here for consistency |
| * with other code which uses pointer/length. */ |
| static int content_select(fulltext_vtab *v, sqlite_int64 iRow, |
| const char ***pValues){ |
| sqlite3_stmt *s; |
| const char **values; |
| int i; |
| int rc; |
| |
| *pValues = NULL; |
| |
| rc = sql_get_statement(v, CONTENT_SELECT_STMT, &s); |
| if( rc!=SQLITE_OK ) return rc; |
| |
| rc = sqlite3_bind_int64(s, 1, iRow); |
| if( rc!=SQLITE_OK ) return rc; |
| |
| rc = sqlite3_step(s); |
| if( rc!=SQLITE_ROW ) return rc; |
| |
| values = (const char **) sqlite3_malloc(v->nColumn * sizeof(const char *)); |
| for(i=0; i<v->nColumn; ++i){ |
| if( sqlite3_column_type(s, i)==SQLITE_NULL ){ |
| values[i] = NULL; |
| }else{ |
| values[i] = string_dup((char*)sqlite3_column_text(s, i)); |
| } |
| } |
| |
| /* We expect only one row. We must execute another sqlite3_step() |
| * to complete the iteration; otherwise the table will remain locked. */ |
| rc = sqlite3_step(s); |
| if( rc==SQLITE_DONE ){ |
| *pValues = values; |
| return SQLITE_OK; |
| } |
| |
| freeStringArray(v->nColumn, values); |
| return rc; |
| } |
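| /* Illustrative use of the contract above (hypothetical caller): |
| ** |
| **   const char **values; |
| **   int rc = content_select(v, iRow, &values); |
| **   if( rc==SQLITE_OK ){ |
| **     ... use values[0..nColumn-1], any entry may be NULL ... |
| **     freeStringArray(v->nColumn, values); |
| **   } |
| */ |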
| |
| /* delete from %_content where rowid = [iRow] */ |
| static int content_delete(fulltext_vtab *v, sqlite_int64 iRow){ |
| sqlite3_stmt *s; |
| int rc = sql_get_statement(v, CONTENT_DELETE_STMT, &s); |
| if( rc!=SQLITE_OK ) return rc; |
| |
| rc = sqlite3_bind_int64(s, 1, iRow); |
| if( rc!=SQLITE_OK ) return rc; |
| |
| return sql_single_step(s); |
| } |
| |
| /* Returns SQLITE_ROW if any rows exist in %_content, SQLITE_DONE if |
| ** no rows exist, and any error in case of failure. |
| */ |
| static int content_exists(fulltext_vtab *v){ |
| sqlite3_stmt *s; |
| int rc = sql_get_statement(v, CONTENT_EXISTS_STMT, &s); |
| if( rc!=SQLITE_OK ) return rc; |
| |
| rc = sqlite3_step(s); |
| if( rc!=SQLITE_ROW ) return rc; |
| |
| /* We expect only one row. We must execute another sqlite3_step() |
| * to complete the iteration; otherwise the table will remain locked. */ |
| rc = sqlite3_step(s); |
| if( rc==SQLITE_DONE ) return SQLITE_ROW; |
| if( rc==SQLITE_ROW ) return SQLITE_ERROR; |
| return rc; |
| } |
| |
| /* insert into %_segments values ([pData]) |
| ** returns assigned rowid in *piBlockid |
| */ |
| static int block_insert(fulltext_vtab *v, const char *pData, int nData, |
| sqlite_int64 *piBlockid){ |
| sqlite3_stmt *s; |
| int rc = sql_get_statement(v, BLOCK_INSERT_STMT, &s); |
| if( rc!=SQLITE_OK ) return rc; |
| |
| rc = sqlite3_bind_blob(s, 1, pData, nData, SQLITE_STATIC); |
| if( rc!=SQLITE_OK ) return rc; |
| |
| rc = sqlite3_step(s); |
| if( rc==SQLITE_ROW ) return SQLITE_ERROR; |
| if( rc!=SQLITE_DONE ) return rc; |
| |
| *piBlockid = sqlite3_last_insert_rowid(v->db); |
| return SQLITE_OK; |
| } |
| |
| /* delete from %_segments |
| ** where rowid between [iStartBlockid] and [iEndBlockid] |
| ** |
| ** Deletes the range of blocks, inclusive. This is used to delete |
| ** the blocks which form a segment. |
| */ |
| static int block_delete(fulltext_vtab *v, |
| sqlite_int64 iStartBlockid, sqlite_int64 iEndBlockid){ |
| sqlite3_stmt *s; |
| int rc = sql_get_statement(v, BLOCK_DELETE_STMT, &s); |
| if( rc!=SQLITE_OK ) return rc; |
| |
| rc = sqlite3_bind_int64(s, 1, iStartBlockid); |
| if( rc!=SQLITE_OK ) return rc; |
| |
| rc = sqlite3_bind_int64(s, 2, iEndBlockid); |
| if( rc!=SQLITE_OK ) return rc; |
| |
| return sql_single_step(s); |
| } |
| |
| /* Returns SQLITE_ROW with *pidx set to the maximum segment idx found |
| ** at iLevel. Returns SQLITE_DONE if there are no segments at |
| ** iLevel. Otherwise returns an error. |
| */ |
| static int segdir_max_index(fulltext_vtab *v, int iLevel, int *pidx){ |
| sqlite3_stmt *s; |
| int rc = sql_get_statement(v, SEGDIR_MAX_INDEX_STMT, &s); |
| if( rc!=SQLITE_OK ) return rc; |
| |
| rc = sqlite3_bind_int(s, 1, iLevel); |
| if( rc!=SQLITE_OK ) return rc; |
| |
| rc = sqlite3_step(s); |
| /* Should always get at least one row due to how max() works. */ |
| if( rc==SQLITE_DONE ) return SQLITE_DONE; |
| if( rc!=SQLITE_ROW ) return rc; |
| |
| /* NULL means that there were no inputs to max(). */ |
| if( SQLITE_NULL==sqlite3_column_type(s, 0) ){ |
| rc = sqlite3_step(s); |
| if( rc==SQLITE_ROW ) return SQLITE_ERROR; |
| return rc; |
| } |
| |
| *pidx = sqlite3_column_int(s, 0); |
| |
| /* We expect only one row. We must execute another sqlite3_step() |
| * to complete the iteration; otherwise the table will remain locked. */ |
| rc = sqlite3_step(s); |
| if( rc==SQLITE_ROW ) return SQLITE_ERROR; |
| if( rc!=SQLITE_DONE ) return rc; |
| return SQLITE_ROW; |
| } |
| |
| /* insert into %_segdir values ( |
| ** [iLevel], [idx], |
| ** [iStartBlockid], [iLeavesEndBlockid], [iEndBlockid], |
| ** [pRootData] |
| ** ) |
| */ |
| static int segdir_set(fulltext_vtab *v, int iLevel, int idx, |
| sqlite_int64 iStartBlockid, |
| sqlite_int64 iLeavesEndBlockid, |
| sqlite_int64 iEndBlockid, |
| const char *pRootData, int nRootData){ |
| sqlite3_stmt *s; |
| int rc = sql_get_statement(v, SEGDIR_SET_STMT, &s); |
| if( rc!=SQLITE_OK ) return rc; |
| |
| rc = sqlite3_bind_int(s, 1, iLevel); |
| if( rc!=SQLITE_OK ) return rc; |
| |
| rc = sqlite3_bind_int(s, 2, idx); |
| if( rc!=SQLITE_OK ) return rc; |
| |
| rc = sqlite3_bind_int64(s, 3, iStartBlockid); |
| if( rc!=SQLITE_OK ) return rc; |
| |
| rc = sqlite3_bind_int64(s, 4, iLeavesEndBlockid); |
| if( rc!=SQLITE_OK ) return rc; |
| |
| rc = sqlite3_bind_int64(s, 5, iEndBlockid); |
| if( rc!=SQLITE_OK ) return rc; |
| |
| rc = sqlite3_bind_blob(s, 6, pRootData, nRootData, SQLITE_STATIC); |
| if( rc!=SQLITE_OK ) return rc; |
| |
| return sql_single_step(s); |
| } |
| |
| /* Queries %_segdir for the block span of the segments in level |
| ** iLevel. Returns SQLITE_DONE if there are no blocks for iLevel, |
| ** SQLITE_ROW if there are blocks, else an error. |
| */ |
| static int segdir_span(fulltext_vtab *v, int iLevel, |
| sqlite_int64 *piStartBlockid, |
| sqlite_int64 *piEndBlockid){ |
| sqlite3_stmt *s; |
| int rc = sql_get_statement(v, SEGDIR_SPAN_STMT, &s); |
| if( rc!=SQLITE_OK ) return rc; |
| |
| rc = sqlite3_bind_int(s, 1, iLevel); |
| if( rc!=SQLITE_OK ) return rc; |
| |
| rc = sqlite3_step(s); |
| if( rc==SQLITE_DONE ) return SQLITE_DONE; /* Should never happen */ |
| if( rc!=SQLITE_ROW ) return rc; |
| |
| /* This happens if all segments at this level are entirely inline. */ |
| if( SQLITE_NULL==sqlite3_column_type(s, 0) ){ |
| /* We expect only one row. We must execute another sqlite3_step() |
| * to complete the iteration; otherwise the table will remain locked. */ |
| int rc2 = sqlite3_step(s); |
| if( rc2==SQLITE_ROW ) return SQLITE_ERROR; |
| return rc2; |
| } |
| |
| *piStartBlockid = sqlite3_column_int64(s, 0); |
| *piEndBlockid = sqlite3_column_int64(s, 1); |
| |
| /* We expect only one row. We must execute another sqlite3_step() |
| * to complete the iteration; otherwise the table will remain locked. */ |
| rc = sqlite3_step(s); |
| if( rc==SQLITE_ROW ) return SQLITE_ERROR; |
| if( rc!=SQLITE_DONE ) return rc; |
| return SQLITE_ROW; |
| } |
| |
| /* Delete the segment blocks and segment directory records for all |
| ** segments at iLevel. |
| */ |
| static int segdir_delete(fulltext_vtab *v, int iLevel){ |
| sqlite3_stmt *s; |
| sqlite_int64 iStartBlockid, iEndBlockid; |
| int rc = segdir_span(v, iLevel, &iStartBlockid, &iEndBlockid); |
| if( rc!=SQLITE_ROW && rc!=SQLITE_DONE ) return rc; |
| |
| if( rc==SQLITE_ROW ){ |
| rc = block_delete(v, iStartBlockid, iEndBlockid); |
| if( rc!=SQLITE_OK ) return rc; |
| } |
| |
| /* Delete the segment directory itself. */ |
| rc = sql_get_statement(v, SEGDIR_DELETE_STMT, &s); |
| if( rc!=SQLITE_OK ) return rc; |
| |
| rc = sqlite3_bind_int64(s, 1, iLevel); |
| if( rc!=SQLITE_OK ) return rc; |
| |
| return sql_single_step(s); |
| } |
| |
| /* Delete the entire fts index. Returns SQLITE_OK on success, or the |
| ** relevant error code on failure. |
| */ |
| static int segdir_delete_all(fulltext_vtab *v){ |
| sqlite3_stmt *s; |
| int rc = sql_get_statement(v, SEGDIR_DELETE_ALL_STMT, &s); |
| if( rc!=SQLITE_OK ) return rc; |
| |
| rc = sql_single_step(s); |
| if( rc!=SQLITE_OK ) return rc; |
| |
| rc = sql_get_statement(v, BLOCK_DELETE_ALL_STMT, &s); |
| if( rc!=SQLITE_OK ) return rc; |
| |
| return sql_single_step(s); |
| } |
| |
| /* Returns SQLITE_OK with *pnSegments set to the number of entries in |
| ** %_segdir and *piMaxLevel set to the highest level which has a |
| ** segment. Otherwise returns the SQLite error which caused failure. |
| */ |
| static int segdir_count(fulltext_vtab *v, int *pnSegments, int *piMaxLevel){ |
| sqlite3_stmt *s; |
| int rc = sql_get_statement(v, SEGDIR_COUNT_STMT, &s); |
| if( rc!=SQLITE_OK ) return rc; |
| |
| rc = sqlite3_step(s); |
| /* TODO(shess): This case should not be possible? Should stronger |
| ** measures be taken if it happens? |
| */ |
| if( rc==SQLITE_DONE ){ |
| *pnSegments = 0; |
| *piMaxLevel = 0; |
| return SQLITE_OK; |
| } |
| if( rc!=SQLITE_ROW ) return rc; |
| |
| *pnSegments = sqlite3_column_int(s, 0); |
| *piMaxLevel = sqlite3_column_int(s, 1); |
| |
| /* We expect only one row. We must execute another sqlite3_step() |
| * to complete the iteration; otherwise the table will remain locked. */ |
| rc = sqlite3_step(s); |
| if( rc==SQLITE_DONE ) return SQLITE_OK; |
| if( rc==SQLITE_ROW ) return SQLITE_ERROR; |
| return rc; |
| } |
| |
| /* TODO(shess) clearPendingTerms() is far down the file because |
| ** writeZeroSegment() is far down the file because LeafWriter is far |
| ** down the file. Consider refactoring the code to move the non-vtab |
| ** code above the vtab code so that we don't need this forward |
| ** reference. |
| */ |
| static int clearPendingTerms(fulltext_vtab *v); |
| |
| /* |
| ** Free the memory used to contain a fulltext_vtab structure. |
| */ |
| static void fulltext_vtab_destroy(fulltext_vtab *v){ |
| int iStmt, i; |
| |
| TRACE(("FTS2 Destroy %p\n", v)); |
| for( iStmt=0; iStmt<MAX_STMT; iStmt++ ){ |
| if( v->pFulltextStatements[iStmt]!=NULL ){ |
| sqlite3_finalize(v->pFulltextStatements[iStmt]); |
| v->pFulltextStatements[iStmt] = NULL; |
| } |
| } |
| |
| for( i=0; i<MERGE_COUNT; i++ ){ |
| if( v->pLeafSelectStmts[i]!=NULL ){ |
| sqlite3_finalize(v->pLeafSelectStmts[i]); |
| v->pLeafSelectStmts[i] = NULL; |
| } |
| } |
| |
| if( v->pTokenizer!=NULL ){ |
| v->pTokenizer->pModule->xDestroy(v->pTokenizer); |
| v->pTokenizer = NULL; |
| } |
| |
| clearPendingTerms(v); |
| |
| sqlite3_free(v->azColumn); |
| for(i = 0; i < v->nColumn; ++i) { |
| sqlite3_free(v->azContentColumn[i]); |
| } |
| sqlite3_free(v->azContentColumn); |
| sqlite3_free(v); |
| } |
| |
| /* |
| ** Token types for parsing the arguments to xConnect or xCreate. |
| */ |
| #define TOKEN_EOF 0 /* End of file */ |
| #define TOKEN_SPACE 1 /* Any kind of whitespace */ |
| #define TOKEN_ID 2 /* An identifier */ |
| #define TOKEN_STRING 3 /* A string literal */ |
| #define TOKEN_PUNCT 4 /* A single punctuation character */ |
| |
| /* |
| ** If X is a character that can be used in an identifier then |
| ** IdChar(X) will be true. Otherwise it is false. |
| ** |
| ** Any character with the high-order bit set (i.e., non-ASCII) is |
| ** allowed in an identifier. For 7-bit characters, |
| ** isIdChar[X-0x20] must be 1. |
| ** |
| ** Ticket #1066. The SQL standard does not allow '$' in the |
| ** middle of identifiers. But many SQL implementations do. |
| ** SQLite will allow '$' in identifiers for compatibility. |
| ** But the feature is undocumented. |
| ** |
| ** Note that the IdChar(C) macro below assigns to a variable named c, |
| ** so an int c must be in scope wherever the macro is used. |
| */ |
| static const char isIdChar[] = { |
| /* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */ |
| 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2x */ |
| 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 3x */ |
| 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4x */ |
| 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, /* 5x */ |
| 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6x */ |
| 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 7x */ |
| }; |
| #define IdChar(C) (((c=C)&0x80)!=0 || (c>0x1f && isIdChar[c-0x20])) |
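| /* Per the table above: IdChar('x'), IdChar('_'), and IdChar('$') are |
| ** all true, while IdChar(' ') and IdChar(',') are false. Because the |
| ** macro assigns to a local c, callers declare e.g. "int i, c;" as |
| ** getToken() does below. */ |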
| |
| |
| /* |
| ** Return the length of the token that begins at z[0]. |
| ** Store the token type in *tokenType before returning. |
| */ |
| static int getToken(const char *z, int *tokenType){ |
| int i, c; |
| switch( *z ){ |
| case 0: { |
| *tokenType = TOKEN_EOF; |
| return 0; |
| } |
| case ' ': case '\t': case '\n': case '\f': case '\r': { |
| for(i=1; safe_isspace(z[i]); i++){} |
| *tokenType = TOKEN_SPACE; |
| return i; |
| } |
| case '`': |
| case '\'': |
| case '"': { |
| int delim = z[0]; |
| for(i=1; (c=z[i])!=0; i++){ |
| if( c==delim ){ |
| if( z[i+1]==delim ){ |
| i++; |
| }else{ |
| break; |
| } |
| } |
| } |
| *tokenType = TOKEN_STRING; |
| return i + (c!=0); |
| } |
| case '[': { |
| for(i=1, c=z[0]; c!=']' && (c=z[i])!=0; i++){} |
| *tokenType = TOKEN_ID; |
| return i; |
| } |
| default: { |
| if( !IdChar(*z) ){ |
| break; |
| } |
| for(i=1; IdChar(z[i]); i++){} |
| *tokenType = TOKEN_ID; |
| return i; |
| } |
| } |
| *tokenType = TOKEN_PUNCT; |
| return 1; |
| } |
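| /* For example, given z = "tokenize simple" (illustrative input), |
| ** successive getToken() calls starting at z would return: |
| ** |
| **   8, TOKEN_ID      ("tokenize") |
| **   1, TOKEN_SPACE   (" ") |
| **   6, TOKEN_ID      ("simple") |
| **   0, TOKEN_EOF |
| */ |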
| |
| /* |
| ** A token extracted from a string is an instance of the following |
| ** structure. |
| */ |
| typedef struct Token { |
| const char *z; /* Pointer to token text. Not '\000' terminated */ |
| short int n; /* Length of the token text in bytes. */ |
| } Token; |
| |
| /* |
| ** Given an input string (which is really one of the argv[] parameters |
| ** passed into xConnect or xCreate) split the string up into tokens. |
| ** Return an array of pointers to '\000' terminated strings, one string |
| ** for each non-whitespace token. |
| ** |
| ** The returned array is terminated by a single NULL pointer. |
| ** |
| ** Space to hold the returned array is obtained from a single |
| ** malloc and should be freed by passing the return value to sqlite3_free(). |
| ** The individual strings within the token list are all a part of |
| ** the single memory allocation and will all be freed at once. |
| */ |
| static char **tokenizeString(const char *z, int *pnToken){ |
| int nToken = 0; |
| Token *aToken = sqlite3_malloc( (strlen(z)+1) * sizeof(aToken[0]) ); |
| int n = 1; |
| int e, i; |
| int totalSize = 0; |
| char **azToken; |
| char *zCopy; |
| while( n>0 ){ |
| n = getToken(z, &e); |
| if( e!=TOKEN_SPACE ){ |
| aToken[nToken].z = z; |
| aToken[nToken].n = n; |
| nToken++; |
| totalSize += n+1; |
| } |
| z += n; |
| } |
| azToken = (char**)sqlite3_malloc( nToken*sizeof(char*) + totalSize ); |
| zCopy = (char*)&azToken[nToken]; |
| nToken--; |
| for(i=0; i<nToken; i++){ |
| azToken[i] = zCopy; |
| n = aToken[i].n; |
| memcpy(zCopy, aToken[i].z, n); |
| zCopy[n] = 0; |
| zCopy += n+1; |
| } |
| azToken[nToken] = 0; |
| sqlite3_free(aToken); |
| *pnToken = nToken; |
| return azToken; |
| } |
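| /* For example (illustrative input), tokenizeString("subject, body", &n) |
| ** returns {"subject", ",", "body", NULL} with n==3; only whitespace |
| ** tokens are dropped, punctuation tokens are kept. */ |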
| |
| /* |
| ** Convert an SQL-style quoted string into a normal string by removing |
| ** the quote characters. The conversion is done in-place. If the |
| ** input does not begin with a quote character, then this routine |
| ** is a no-op. |
| ** |
| ** Examples: |
| ** |
| ** "abc" becomes abc |
| ** 'xyz' becomes xyz |
| ** [pqr] becomes pqr |
| ** `mno` becomes mno |
| */ |
| static void dequoteString(char *z){ |
| int quote; |
| int i, j; |
| if( z==0 ) return; |
| quote = z[0]; |
| switch( quote ){ |
| case '\'': break; |
| case '"': break; |
| case '`': break; /* For MySQL compatibility */ |
| case '[': quote = ']'; break; /* For MS SqlServer compatibility */ |
| default: return; |
| } |
| for(i=1, j=0; z[i]; i++){ |
| if( z[i]==quote ){ |
| if( z[i+1]==quote ){ |
| z[j++] = quote; |
| i++; |
| }else{ |
| z[j++] = 0; |
| break; |
| } |
| }else{ |
| z[j++] = z[i]; |
| } |
| } |
| } |
| |
| /* |
| ** The input azIn is a NULL-terminated list of tokens. Remove the first |
| ** token and all punctuation tokens. Remove the quotes from |
| ** around string literal tokens. |
| ** |
| ** Example: |
| ** |
| ** input: tokenize chinese ( 'simplified' , 'mixed' ) |
| ** output: chinese simplified mixed |
| ** |
| ** Another example: |
| ** |
| ** input: delimiters ( '[' , ']' , '...' ) |
| ** output: [ ] ... |
| */ |
| static void tokenListToIdList(char **azIn){ |
| int i, j; |
| if( azIn ){ |
| for(i=0, j=-1; azIn[i]; i++){ |
| if( safe_isalnum(azIn[i][0]) || azIn[i][1] ){ |
| dequoteString(azIn[i]); |
| if( j>=0 ){ |
| azIn[j] = azIn[i]; |
| } |
| j++; |
| } |
| } |
| if( j<0 ) j = 0; /* no tokens survived; terminate an empty list */ |
| azIn[j] = 0; |
| } |
| } |
| |
| |
| /* |
| ** Find the first alphanumeric token in the string zIn. Null-terminate |
| ** this token. Remove any quotation marks. And return a pointer to |
| ** the result. |
| */ |
| static char *firstToken(char *zIn, char **pzTail){ |
| int n, ttype; |
| while(1){ |
| n = getToken(zIn, &ttype); |
| if( ttype==TOKEN_SPACE ){ |
| zIn += n; |
| }else if( ttype==TOKEN_EOF ){ |
| *pzTail = zIn; |
| return 0; |
| }else{ |
| zIn[n] = 0; |
| *pzTail = &zIn[n+1]; |
| dequoteString(zIn); |
| return zIn; |
| } |
| } |
| /*NOTREACHED*/ |
| } |
| |
| /* Return true if... |
| ** |
| ** * s begins with the string t, ignoring case |
| ** * s is longer than t |
| ** * The first character of s beyond t is not an alphanumeric |
| ** character or an underscore |
| ** |
| ** Ignore leading space in *s. |
| ** |
| ** To put it another way, return true if the first token of |
| ** s[] is t[]. |
| */ |
| static int startsWith(const char *s, const char *t){ |
| while( safe_isspace(*s) ){ s++; } |
| while( *t ){ |
| if( safe_tolower(*s++)!=safe_tolower(*t++) ) return 0; |
| } |
| return *s!='_' && !safe_isalnum(*s); |
| } |
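| /* For example (illustrative inputs), |
| ** startsWith(" tokenize simple(arg)", "tokenize") is true, while |
| ** startsWith("tokenizer x", "tokenize") is false because the |
| ** character after the prefix is alphanumeric. */ |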
| |
| /* |
| ** An instance of this structure defines the "spec" of a |
| ** full text index. This structure is populated by parseSpec |
| ** and used by fulltextConnect and fulltextCreate. |
| */ |
| typedef struct TableSpec { |
| const char *zDb; /* Logical database name */ |
| const char *zName; /* Name of the full-text index */ |
| int nColumn; /* Number of columns to be indexed */ |
| char **azColumn; /* Original names of columns to be indexed */ |
| char **azContentColumn; /* Column names for %_content */ |
| char **azTokenizer; /* Name of tokenizer and its arguments */ |
| } TableSpec; |
| |
| /* |
| ** Reclaim all of the memory used by a TableSpec |
| */ |
| static void clearTableSpec(TableSpec *p) { |
| sqlite3_free(p->azColumn); |
| sqlite3_free(p->azContentColumn); |
| sqlite3_free(p->azTokenizer); |
| } |
| |
| /* Parse a CREATE VIRTUAL TABLE statement, which looks like this: |
| * |
| * CREATE VIRTUAL TABLE email |
| * USING fts2(subject, body, tokenize mytokenizer(myarg)) |
| * |
| * We return parsed information in a TableSpec structure. |
| * |
| */ |
| static int parseSpec(TableSpec *pSpec, int argc, const char *const*argv, |
| char**pzErr){ |
| int i, n; |
| char *z, *zDummy; |
| char **azArg; |
| const char *zTokenizer = 0; /* argv[] entry describing the tokenizer */ |
| |
| assert( argc>=3 ); |
| /* Current interface: |
| ** argv[0] - module name |
| ** argv[1] - database name |
| ** argv[2] - table name |
| ** argv[3..] - columns, optionally followed by tokenizer specification |
| ** and snippet delimiters specification. |
| */ |
| |
| /* Make a copy of the complete argv[][] array in a single allocation. |
| ** The argv[][] array is read-only and transient. We can write to the |
| ** copy in order to modify things and the copy is persistent. |
| */ |
| CLEAR(pSpec); |
| for(i=n=0; i<argc; i++){ |
| n += strlen(argv[i]) + 1; |
| } |
| azArg = sqlite3_malloc( sizeof(char*)*argc + n ); |
| if( azArg==0 ){ |
| return SQLITE_NOMEM; |
| } |
| z = (char*)&azArg[argc]; |
| for(i=0; i<argc; i++){ |
| azArg[i] = z; |
| strcpy(z, argv[i]); |
| z += strlen(z)+1; |
| } |
| |
| /* Identify the column names and the tokenizer and delimiter arguments |
| ** in the argv[][] array. |
| */ |
| pSpec->zDb = azArg[1]; |
| pSpec->zName = azArg[2]; |
| pSpec->nColumn = 0; |
| pSpec->azColumn = azArg; |
| zTokenizer = "tokenize simple"; |
| for(i=3; i<argc; ++i){ |
| if( startsWith(azArg[i],"tokenize") ){ |
| zTokenizer = azArg[i]; |
| }else{ |
| z = azArg[pSpec->nColumn] = firstToken(azArg[i], &zDummy); |
| pSpec->nColumn++; |
| } |
| } |
| if( pSpec->nColumn==0 ){ |
| azArg[0] = "content"; |
| pSpec->nColumn = 1; |
| } |
| |
| /* |
| ** Construct the list of content column names. |
| ** |
| ** Each content column name will be of the form cNNAAAA |
| ** where NN is the column number and AAAA is the sanitized |
| ** column name. "sanitized" means that special characters are |
| ** converted to "_". The cNN prefix guarantees that all column |
| ** names are unique. |
| ** |
| ** The AAAA suffix is not strictly necessary. It is included |
| ** for the convenience of people who might examine the generated |
| ** %_content table and wonder what the columns are used for. |
| */ |
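| /* For example (illustrative names), columns (subject, body) yield |
| ** content columns c0subject and c1body; a column named "e-mail" |
| ** would become c1e_mail. */ |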
| pSpec->azContentColumn = sqlite3_malloc( pSpec->nColumn * sizeof(char *) ); |
| if( pSpec->azContentColumn==0 ){ |
| clearTableSpec(pSpec); |
| return SQLITE_NOMEM; |
| } |
| for(i=0; i<pSpec->nColumn; i++){ |
| char *p; |
| pSpec->azContentColumn[i] = sqlite3_mprintf("c%d%s", i, azArg[i]); |
| for (p = pSpec->azContentColumn[i]; *p ; ++p) { |
| if( !safe_isalnum(*p) ) *p = '_'; |
| } |
| } |
| |
| /* |
| ** Parse the tokenizer specification string. |
| */ |
|