NCBI C++ ToolKit
SeqTable_sparse_index.cpp
1 /* $Id: SeqTable_sparse_index.cpp 99064 2023-02-08 19:14:27Z ucko $
2  * ===========================================================================
3  *
4  * PUBLIC DOMAIN NOTICE
5  * National Center for Biotechnology Information
6  *
7  * This software/database is a "United States Government Work" under the
8  * terms of the United States Copyright Act. It was written as part of
9  * the author's official duties as a United States Government employee and
10  * thus cannot be copyrighted. This software/database is freely available
11  * to the public for use. The National Library of Medicine and the U.S.
12  * Government have not placed any restriction on its use or reproduction.
13  *
14  * Although all reasonable efforts have been taken to ensure the accuracy
15  * and reliability of the software and data, the NLM and the U.S.
16  * Government do not and cannot warrant the performance or results that
17  * may be obtained by using this software or data. The NLM and the U.S.
18  * Government disclaim all warranties, express or implied, including
19  * warranties of performance, merchantability or fitness for any particular
20  * purpose.
21  *
22  * Please cite the author in any work or product based on this material.
23  *
24  * ===========================================================================
25  *
26  * Author: Eugene Vasilchenko
27  *
28  * File Description:
29  * Support for SeqTable-sparse-index ASN.1 type
30  * API to work with different representations of sparse index
31  *
32  * Remark:
33  * This code was originally generated by application DATATOOL
34  * using the following specifications:
35  * 'seqtable.asn'.
36  */
37 
38 // standard includes
39 #include <ncbi_pch.hpp>
40 
41 // generated includes
42 #include <objects/seqtable/SeqTable_sparse_index.hpp>
43 
44 #include <objects/seqtable/seq_table_exception.hpp>
45 #include <util/bitset/bmfunc.h>
46 #include <util/bitset/bmserial.h>
47 
49 
50 // generated classes
51 
52 BEGIN_NCBI_SCOPE
53 
54 BEGIN_objects_SCOPE // namespace ncbi::objects::
55 
56 // destructor
57 CSeqTable_sparse_index::~CSeqTable_sparse_index(void)
58 {
59 }
60 
61 
62 #ifdef NCBI_COMPILER_GCC
63 const size_t CSeqTable_sparse_index::kSkipped;
64 const size_t CSeqTable_sparse_index::kInvalidRow;
65 #endif
66 
67 DEFINE_STATIC_MUTEX(sx_PrepareMutex_sparse_index);
68 
69 
70 static inline size_t sx_CalcByteBitCount(Uint4 word)
71 {
72  return bm::word_bitcount(word);
73 }
74 
75 
76 static inline size_t sx_CalcByteBitCount(Uint1 byte)
77 {
78  return bm::bit_count_table<true>::_count[byte];
79 }
80 
81 
82 static inline size_t sx_CalcBlockBitCount(const char* block, size_t size)
83 {
84  const bm::word_t* word_block = reinterpret_cast<const bm::word_t*>(block);
85  const bm::word_t* word_block_end = word_block + size/sizeof(bm::word_t);
86  size_t ret = bm::bit_count_min_unroll(word_block, word_block_end);
87 
88  const char* tail_ptr = reinterpret_cast<const char*>(word_block_end);
89  const char* block_end = block + size;
90  for ( ; tail_ptr != block_end; ++tail_ptr ) {
91  ret += sx_CalcByteBitCount(Uint1(*tail_ptr));
92  }
93 
94  return ret;
95 }
96 
97 
98 // return number of the first set bit in a byte
99 // bit 0x80 - 0
100 // bit 0x40 - 1
101 // ...
102 // bit 0x01 - 7
103 // if there are no set bits return kInvalidRow
104 static inline size_t sx_FindFirstNonZeroBit(Uint1 b)
105 {
106  for ( size_t i = 0; i < 8; ++i, b <<= 1 ) {
107  if ( b&0x80 ) {
108  return i;
109  }
110  }
111  return CSeqTable_sparse_index::kInvalidRow;
112 }
113 
114 
115 // return number of the next set bit in a byte starting with bit prev_i
116 // bit 0x80 - 0
117 // bit 0x40 - 1
118 // ...
119 // bit 0x01 - 7
120 // if there are no more set bits return kInvalidRow
121 static inline size_t sx_FindNextNonZeroBit(Uint1 b, size_t prev_i)
122 {
123  b <<= prev_i+1;
124  for ( size_t i = prev_i+1; i < 8; ++i, b <<= 1 ) {
125  if ( b&0x80 ) {
126  return i;
127  }
128  }
129  return CSeqTable_sparse_index::kInvalidRow;
130 }
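
// --- Illustrative example (not part of the original source) ----------------
// The two helpers above number bits from the most-significant end, matching
// the Bit-set octet layout in which bit 0x80 of byte N marks row N*8.
// A minimal, hypothetical self-check of that convention:
static inline void sx_DemoBitNumbering(void)
{
    _ASSERT(sx_FindFirstNonZeroBit(0x80) == 0);   // MSB set -> bit offset 0
    _ASSERT(sx_FindFirstNonZeroBit(0x01) == 7);   // LSB set -> bit offset 7
    _ASSERT(sx_FindNextNonZeroBit(0xA0, 0) == 2); // 0xA0 = 1010 0000
}
// ----------------------------------------------------------------------------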
131 
132 
133 // return number of the first non-zero byte in a range
134 // if there are no non-zero bytes return kInvalidRow
135 static inline size_t sx_FindFirstNonZeroByte(const char* beg,
136  const char* end)
137 {
138  typedef Uint8 TBig; // faster scan using bigger type than char
139  const char* ptr = beg;
140  // align to bigger type
141  for ( ; ptr != end && reinterpret_cast<size_t>(ptr)%sizeof(TBig); ++ptr ) {
142  if ( *ptr ) {
143  return ptr - beg;
144  }
145  }
146  // scan using bigger type
147  for ( ; ptr+sizeof(TBig) <= end; ptr += sizeof(TBig) ) {
148  if ( *reinterpret_cast<const TBig*>(ptr) != 0 ) {
149  break;
150  }
151  }
152  // scan for exact char position
153  for ( ; ptr != end; ++ptr ) {
154  if ( *ptr ) {
155  return ptr - beg;
156  }
157  }
158  return CSeqTable_sparse_index::kInvalidRow;
159 }
160 
161 
162 // return number of the first non-zero byte in a vector, starting with index
163 // if there are no more non-zero bytes return kInvalidRow
164 static inline size_t sx_FindFirstNonZeroByte(const vector<char>& bytes,
165  size_t index)
166 {
167  size_t size = bytes.size();
168  const char* ptr = &bytes[0];
169  size_t offset = sx_FindFirstNonZeroByte(ptr+index, ptr+size);
170  if ( offset == CSeqTable_sparse_index::kInvalidRow ) {
171  return CSeqTable_sparse_index::kInvalidRow;
172  }
173  return index + offset;
174 }
175 
176 
178 
179 
180 size_t CSeqTable_sparse_index::x_GetBitSetCache(size_t byte_count) const
181 {
182  const TBit_set& bytes = GetBit_set();
183  size_t size = bytes.size();
184  CMutexGuard guard(sx_PrepareMutex_sparse_index);
185  if ( !m_Cache ) {
186  m_Cache.Reset(new SBitsInfo);
187  }
188  SBitsInfo& info= dynamic_cast<SBitsInfo&>(*m_Cache);
189  static const size_t kBlockSize = SBitsInfo::kBlockSize;
190  size_t block_index = byte_count / kBlockSize;
191  size_t block_offset = byte_count % kBlockSize;
192  for ( ; block_index > info.m_BlocksFilled; ) {
193  if ( !info.m_Blocks ) {
194  size_t block_count = size / kBlockSize;
195  info.m_Blocks.reset(new size_t[block_count]);
196  }
197  size_t next_index = info.m_BlocksFilled;
198  size_t count = sx_CalcBlockBitCount(&bytes[next_index*kBlockSize],
199  kBlockSize);
200  if ( next_index > 0 ) {
201  count += info.m_Blocks[next_index-1];
202  }
203  info.m_Blocks[next_index] = count;
204  info.m_BlocksFilled = next_index+1;
205  }
206  size_t ret = block_index? info.m_Blocks[block_index-1]: 0;
207  if ( block_offset ) {
208  if ( block_index != info.m_CacheBlockIndex ) {
209  if ( !info.m_CacheBlockInfo ) {
210  info.m_CacheBlockInfo.reset(new size_t[kBlockSize]);
211  }
212  size_t count = 0;
213  size_t block_pos = block_index*kBlockSize;
214  size_t block_size = min(kBlockSize, size-block_pos);
215  const char* block = &bytes[block_pos];
216  for ( size_t i = 0; i < block_size; ++i ) {
217  count += sx_CalcByteBitCount(Uint1(block[i]));
218  info.m_CacheBlockInfo[i] = count;
219  }
220  info.m_CacheBlockIndex = block_index;
221  }
222  ret += info.m_CacheBlockInfo[block_offset-1];
223  }
224  return ret;
225 }
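
// --- Illustrative sketch (not part of the original source) -----------------
// x_GetBitSetCache(byte_count) returns how many bits are set in the Bit-set
// before byte 'byte_count', i.e. the rank of that byte's first row.  The
// method above amortizes this over kBlockSize-byte blocks plus a per-byte
// cache; an uncached equivalent, using only the helpers defined earlier:
static inline size_t sx_DemoCountBitsBefore(const vector<char>& bytes,
                                            size_t byte_count)
{
    size_t rank = 0;
    for ( size_t i = 0; i < byte_count; ++i ) {
        rank += sx_CalcByteBitCount(Uint1(bytes[i]));
    }
    return rank; // same value, but O(byte_count) work on every call
}
// ----------------------------------------------------------------------------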
226 
227 
228 /////////////////////////////////////////////////////////////////////////////
229 // CIndexDeltaSumCache
230 /////////////////////////////////////////////////////////////////////////////
231 
232 //#define USE_DELTA_CACHE
233 const size_t CIndexDeltaSumCache::kBlockSize = 128;
236 
237 
238 CIndexDeltaSumCache::CIndexDeltaSumCache(size_t size)
239  : m_Blocks(new TValue[(size+kBlockSize-1)/kBlockSize]),
240  m_BlocksFilled(0),
241 #ifdef USE_DELTA_CACHE
242  m_CacheBlockInfo(new TValue[kBlockSize]),
243 #endif
244  m_CacheBlockIndex(size_t(0)-1)
245 {
246 }
247 
248 
249 CIndexDeltaSumCache::~CIndexDeltaSumCache(void)
250 {
251 }
252 
253 
254 inline
255 CIndexDeltaSumCache::TValue
256 CIndexDeltaSumCache::x_GetDeltaSum2(const TDeltas& deltas,
257  size_t block_index,
258  size_t block_offset)
259 {
260 #ifdef USE_DELTA_CACHE
261  _ASSERT(block_index <= m_BlocksFilled);
262  if ( block_index != m_CacheBlockIndex ) {
263  size_t size = deltas.size();
264  size_t block_pos = block_index*kBlockSize;
265  _ASSERT(block_pos < size);
266  size_t block_size = min(kBlockSize, size-block_pos);
267  _ASSERT(block_offset < block_size);
268  TValue sum = block_index == 0? 0: m_Blocks[block_index-1];
269  const TDeltas::value_type* block = &deltas[block_pos];
270  for ( size_t i = 0; i < block_size; ++i ) {
271  sum += block[i];
272  m_CacheBlockInfo[i] = sum;
273  }
274  m_CacheBlockIndex = block_index;
275  if ( block_index == m_BlocksFilled ) {
276  m_Blocks[block_index] = sum;
277  m_BlocksFilled = block_index+1;
278  }
279  }
280  return m_CacheBlockInfo[block_offset];
281 #else
282  size_t size = deltas.size();
283  size_t block_pos = block_index*kBlockSize;
284  _ASSERT(block_pos < size);
285  size_t block_size = min(kBlockSize, size-block_pos);
286  _ASSERT(block_offset < block_size);
287  TValue sum = block_index == 0? 0: m_Blocks[block_index-1];
288  const TDeltas::value_type* block = &deltas[block_pos];
289  for ( size_t i = 0; i <= block_offset; ++i ) {
290  sum += block[i];
291  }
292  if ( block_index == m_BlocksFilled ) {
293  TValue sum2 = sum;
294  for ( size_t i = block_offset+1; i < block_size; ++i ) {
295  sum2 += block[i];
296  }
297  m_Blocks[block_index] = sum2;
298  m_BlocksFilled = block_index+1;
299  }
300  return sum;
301 #endif
302 }
303 
304 
305 inline
306 size_t CIndexDeltaSumCache::x_FindDeltaSum2(const TDeltas& deltas,
307  size_t block_index,
308  TValue find_sum)
309 {
310  _ASSERT(block_index<=m_BlocksFilled);
311  _ASSERT(block_index==m_BlocksFilled || find_sum<=m_Blocks[block_index]);
312  _ASSERT(block_index==0 || find_sum>m_Blocks[block_index-1]);
313  size_t size = deltas.size();
314  size_t block_pos = block_index*kBlockSize;
315  _ASSERT(block_pos < size);
316  size_t block_size = min(kBlockSize, size-block_pos);
317 #ifdef USE_DELTA_CACHE
318  if ( block_index < m_BlocksFilled && find_sum > m_Blocks[block_index] ) {
319  return kBlockTooLow;
320  }
321  x_GetDeltaSum2(deltas, block_index, 0);
322  _ASSERT(block_index < m_BlocksFilled);
323  if ( find_sum > m_Blocks[block_index] ) {
324  return kBlockTooLow;
325  }
326  _ASSERT(find_sum <= m_Blocks[block_index]);
327  size_t value_index = lower_bound(&m_CacheBlockInfo[0],
328  &m_CacheBlockInfo[block_size],
329  find_sum) - &m_CacheBlockInfo[0];
330  _ASSERT(value_index < block_size);
331  _ASSERT(find_sum <= m_CacheBlockInfo[value_index]);
332  _ASSERT(value_index==0 || find_sum > m_CacheBlockInfo[value_index-1]);
333  if ( find_sum != m_CacheBlockInfo[value_index] ) {
334  return kInvalidRow;
335  }
336  return block_pos + value_index;
337 #else
338  TValue sum = block_index == 0? 0: m_Blocks[block_index-1];
339  const TDeltas::value_type* block = &deltas[block_pos];
340  for ( size_t i = 0; i < block_size; ++i ) {
341  sum += block[i];
342  if ( sum >= find_sum ) {
343  if ( block_index == m_BlocksFilled ) {
344  TValue sum2 = sum;
345  for ( size_t j = i+1; j < block_size; ++j ) {
346  sum2 += block[j];
347  }
348  m_Blocks[block_index] = sum2;
349  m_BlocksFilled = block_index+1;
350  }
351  return sum > find_sum? kInvalidRow: block_pos+i;
352  }
353  }
354  if ( block_index == m_BlocksFilled ) {
355  m_Blocks[block_index] = sum;
356  m_BlocksFilled = block_index+1;
357  }
358  return kBlockTooLow;
359 #endif
360 }
361 
362 
363 CIndexDeltaSumCache::TValue
364 CIndexDeltaSumCache::GetDeltaSum(const TDeltas& deltas,
365  size_t index)
366 {
367  _ASSERT(index < deltas.size());
368  size_t block_index = index / kBlockSize;
369  size_t block_offset = index % kBlockSize;
370  while ( block_index >= m_BlocksFilled ) {
371  x_GetDeltaSum2(deltas, m_BlocksFilled, 0);
372  }
373  return x_GetDeltaSum2(deltas, block_index, block_offset);
374 }
375 
376 
377 size_t CIndexDeltaSumCache::FindDeltaSum(const TDeltas& deltas,
378  TValue find_sum)
379 {
380  size_t size = deltas.size();
381  size_t block_index;
382  if ( m_BlocksFilled > 0 && find_sum <= m_Blocks[m_BlocksFilled-1] ) {
383  // sum is within already preprocessed block, locate the block
384  block_index = lower_bound(&m_Blocks[0],
385  &m_Blocks[m_BlocksFilled],
386  find_sum) - &m_Blocks[0];
387  size_t row = x_FindDeltaSum2(deltas, block_index, find_sum);
388  _ASSERT(row != kBlockTooLow);
389  return row;
390  }
391  else {
392  // preprocess blocks sequentially until the requested sum falls within a processed block
393  for ( ;; ) {
394  block_index = m_BlocksFilled;
395  if ( block_index*kBlockSize >= size ) {
396  // last block is processed but the required sum is still bigger
397  return kInvalidRow;
398  }
399  size_t row = x_FindDeltaSum2(deltas, block_index, find_sum);
400  if ( row != kBlockTooLow ) {
401  return row;
402  }
403  }
404  }
405 }
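
// --- Illustrative sketch (not part of the original source) -----------------
// In the Indexes-delta representation the rows with values are stored as
// differences: rows {3, 7, 10} are encoded as deltas {3, 4, 3}.  GetDeltaSum()
// turns a value index back into an absolute row, and FindDeltaSum() is the
// inverse lookup; both use the per-block prefix sums cached in m_Blocks.
// Uncached equivalents over a hypothetical plain vector of deltas:
static inline size_t sx_DemoDeltaSum(const vector<size_t>& deltas, size_t index)
{
    size_t row = 0;
    for ( size_t i = 0; i <= index; ++i ) {
        row += deltas[i];
    }
    return row; // absolute row of value number 'index'
}
static inline size_t sx_DemoFindDeltaSum(const vector<size_t>& deltas,
                                         size_t find_row)
{
    size_t row = 0;
    for ( size_t i = 0; i < deltas.size(); ++i ) {
        row += deltas[i];
        if ( row == find_row ) return i;  // value index of the requested row
        if ( row > find_row ) break;      // overshoot: the row has no value
    }
    return CSeqTable_sparse_index::kInvalidRow;
}
// ----------------------------------------------------------------------------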
406 
407 /////////////////////////////////////////////////////////////////////////////
408 
409 
410 CIndexDeltaSumCache& CSeqTable_sparse_index::x_GetDeltaCache(void) const
411 {
412  CIndexDeltaSumCache* info =
413  dynamic_cast<CIndexDeltaSumCache*>(m_Cache.GetNCPointerOrNull());
414  if ( !info ) {
415  m_Cache.Reset(info = new CIndexDeltaSumCache(GetIndexes_delta().size()));
416  }
417  return *info;
418 }
419 
420 
421 inline
422 size_t CSeqTable_sparse_index::x_GetDeltaSum(size_t index) const
423 {
424  CMutexGuard guard(sx_PrepareMutex_sparse_index);
425  return x_GetDeltaCache().GetDeltaSum(GetIndexes_delta(), index);
426 }
427 
428 
429 inline
430 size_t CSeqTable_sparse_index::x_FindDeltaSum(size_t sum) const
431 {
432  CMutexGuard guard(sx_PrepareMutex_sparse_index);
433  return x_GetDeltaCache().FindDeltaSum(GetIndexes_delta(), sum);
434 }
435 
436 
437 size_t CSeqTable_sparse_index::GetSize(void) const
438 {
439  switch ( Which() ) {
440  case e_Indexes:
441  {
442  const TIndexes& indexes = GetIndexes();
443  return indexes.empty()? 0: indexes.back()+1;
444  }
445  case e_Indexes_delta:
446  {
447  const TIndexes_delta& deltas = GetIndexes_delta();
448  return deltas.empty()? 0: x_GetDeltaSum(deltas.size()-1)+1;
449  }
450  case e_Bit_set:
451  return GetBit_set().size()*8;
452  case e_Bit_set_bvector:
453  return GetBit_set_bvector().GetSize();
454  default:
455  return 0;
456  }
457 }
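
// --- Illustrative example (not part of the original source) ----------------
// GetSize() reports how many rows the sparse index spans, not how many rows
// carry values: one past the last row with a value for the index-based
// representations, and the full bit capacity for the bit-set representations.
// A hypothetical demonstration using the Indexes-delta variant:
static inline size_t sx_DemoSpanOfDeltas(void)
{
    CSeqTable_sparse_index index;
    CSeqTable_sparse_index::TIndexes_delta& deltas = index.SetIndexes_delta();
    deltas.push_back(3); // row 3
    deltas.push_back(4); // row 7
    deltas.push_back(3); // row 10
    return index.GetSize(); // one past the last row with a value -> 11
}
// ----------------------------------------------------------------------------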
458 
459 
460 size_t CSeqTable_sparse_index::x_GetFirstRowWithValue(void) const
461 {
462  switch ( Which() ) {
463  case e_Indexes:
464  {
465  const TIndexes& indexes = GetIndexes();
466  return indexes.empty()? kInvalidRow: indexes.front();
467  }
468  case e_Indexes_delta:
469  {
470  const TIndexes_delta& deltas = GetIndexes_delta();
471  return deltas.empty()? kInvalidRow: deltas.front();
472  }
473  case e_Bit_set:
474  {
475  const TBit_set& bytes = GetBit_set();
476  size_t byte_index = sx_FindFirstNonZeroByte(bytes, 0);
477  if ( byte_index == kInvalidRow ) {
478  return kInvalidRow;
479  }
480  return byte_index*8 + sx_FindFirstNonZeroBit(Uint1(bytes[byte_index]));
481  }
482  case e_Bit_set_bvector:
483  return GetBit_set_bvector().GetBitVector().get_first();
484  default:
485  return kInvalidRow;
486  }
487 }
488 
489 
490 size_t CSeqTable_sparse_index::x_GetNextRowWithValue(size_t row,
491  size_t value_index) const
492 {
493  switch ( Which() ) {
494  case e_Indexes:
495  {
496  const TIndexes& indexes = GetIndexes();
497  return ++value_index >= indexes.size()?
498  kInvalidRow: indexes[value_index];
499  }
500  case e_Indexes_delta:
501  {
502  const TIndexes_delta& deltas = GetIndexes_delta();
503  return ++value_index >= deltas.size()?
504  kInvalidRow: row + deltas[value_index];
505  }
506  case e_Bit_set:
507  {
508  const TBit_set& bytes = GetBit_set();
509  size_t byte_index = row / 8;
510  size_t bit_index = row % 8;
511  bit_index = sx_FindNextNonZeroBit(Uint1(bytes[byte_index]), bit_index);
512  if ( bit_index != kInvalidRow ) {
513  return byte_index*8 + bit_index;
514  }
515  byte_index = sx_FindFirstNonZeroByte(bytes, byte_index + 1);
516  if ( byte_index == kInvalidRow ) {
517  return kInvalidRow;
518  }
519  return byte_index*8 + sx_FindFirstNonZeroBit(Uint1(bytes[byte_index]));
520  }
521  case e_Bit_set_bvector:
522  {
523  row = GetBit_set_bvector().GetBitVector().get_next(
524  static_cast<bm::bvector<>::size_type>(row));
525  return row == 0? kInvalidRow: row;
526  }
527  default:
528  return kInvalidRow;
529  };
530 }
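
// --- Illustrative usage sketch (not part of the original source) -----------
// x_GetFirstRowWithValue()/x_GetNextRowWithValue() back the class's
// const_iterator, which the conversion methods below use as
// "for ( const_iterator it = begin(); it; ++it )".  A hypothetical caller
// collecting every row that has a value:
static inline void sx_DemoCollectRows(const CSeqTable_sparse_index& index,
                                      vector<size_t>& rows)
{
    for ( CSeqTable_sparse_index::const_iterator it = index.begin(); it; ++it ) {
        rows.push_back(it.GetRow());
    }
}
// ----------------------------------------------------------------------------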
531 
532 
533 size_t CSeqTable_sparse_index::GetIndexAt(size_t row) const
534 {
535  switch ( Which() ) {
536  case e_Indexes:
537  {
538  const TIndexes& indexes = GetIndexes();
539  TIndexes::const_iterator iter =
540  lower_bound(indexes.begin(), indexes.end(), row);
541  if ( iter != indexes.end() && *iter == row ) {
542  return iter - indexes.begin();
543  }
544  else {
545  return kSkipped;
546  }
547  }
548  case e_Indexes_delta:
549  return x_FindDeltaSum(row);
550  case e_Bit_set:
551  {
552  const TBit_set& bytes = GetBit_set();
553  size_t byte_index = row/8;
554  if ( byte_index >= bytes.size() ) {
555  return kSkipped;
556  }
557  Uint1 byte = bytes[byte_index];
558  size_t bit_index = row%8; // most significant bit has index 0
559  if ( !((byte<<bit_index)&0x80) ) {
560  return kSkipped;
561  }
562  size_t count = sx_CalcByteBitCount(Uint1(byte>>(8-bit_index)));
563  if ( byte_index ) {
564  count += x_GetBitSetCache(byte_index);
565  }
566  return count;
567  }
568  case e_Bit_set_bvector:
569  {
570  const bm::bvector<>& bv = GetBit_set_bvector().GetBitVector();
571  auto bv_row = static_cast<bm::bvector<>::size_type>(row);
572  if ( row >= bv.size() || !bv.get_bit(bv_row) ) {
573  return kSkipped;
574  }
575  return row == 0 ? 0 : bv.count_range(0, bv_row - 1);
576  }
577  default:
578  return kSkipped;
579  }
580 }
581 
582 
583 bool CSeqTable_sparse_index::HasValueAt(size_t row) const
584 {
585  switch ( Which() ) {
586  case e_Indexes:
587  {
588  const TIndexes& indexes = GetIndexes();
589  TIndexes::const_iterator iter =
590  lower_bound(indexes.begin(), indexes.end(), row);
591  return iter != indexes.end() && *iter == row;
592  }
593  case e_Indexes_delta:
594  return x_FindDeltaSum(row) != kInvalidRow;
595  case e_Bit_set:
596  {
597  const TBit_set& bits = GetBit_set();
598  size_t i = row/8, j = row%8;
599  return i < bits.size() && ((bits[i]<<j)&0x80) != 0;
600  }
601  case e_Bit_set_bvector:
602  {
603  const bm::bvector<>& bv = GetBit_set_bvector().GetBitVector();
604  return row < bv.size()
605  && bv.get_bit(static_cast<bm::bvector<>::size_type>(row));
606  }
607  default:
608  return false;
609  }
610 }
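
// --- Illustrative usage sketch (not part of the original source) -----------
// GetIndexAt() maps a table row to the position of its value among the rows
// that do have values (or kSkipped when the row is absent), while HasValueAt()
// answers only the yes/no question.  A hypothetical caller reading a sparse
// column whose packed values live in 'values':
static inline bool sx_DemoGetValue(const CSeqTable_sparse_index& index,
                                   const vector<int>& values,
                                   size_t row, int& value)
{
    size_t value_index = index.GetIndexAt(row);
    if ( value_index == CSeqTable_sparse_index::kSkipped ) {
        return false; // this row has no value in the column
    }
    value = values[value_index];
    return true;
}
// ----------------------------------------------------------------------------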
611 
612 
613 void CSeqTable_sparse_index::ChangeTo(E_Choice type)
614 {
615  if ( Which() == type ) {
616  return;
617  }
618  switch ( type ) {
619  case e_Indexes:
620  ChangeToIndexes();
621  break;
622  case e_Indexes_delta:
623  ChangeToIndexes_delta();
624  break;
625  case e_Bit_set:
626  ChangeToBit_set();
627  break;
628  case e_Bit_set_bvector:
629  ChangeToBit_set_bvector();
630  break;
631  default:
632  NCBI_THROW(CSeqTableException, eIncompatibleValueType,
633  "CSeqTable_sparse_index::ChangeTo(): "
634  "requested sparse index type is invalid");
635  }
636 }
637 
638 
639 void CSeqTable_sparse_index::ChangeToIndexes(void)
640 {
641  if ( IsIndexes() ) {
642  return;
643  }
644  TIndexes indexes;
645  if ( IsIndexes_delta() ) {
646  // convert delta to running sum
647  indexes.swap(SetIndexes_delta());
648  TIndexes::value_type row = 0;
649  NON_CONST_ITERATE ( TIndexes, it, indexes ) {
650  row += *it;
651  *it = row;
652  }
653  }
654  else {
655  for ( const_iterator it = begin(); it; ++it ) {
656  indexes.push_back(static_cast<TIndexes::value_type>(it.GetRow()));
657  }
658  }
659  SetIndexes().swap(indexes);
660 }
661 
662 
663 void CSeqTable_sparse_index::ChangeToIndexes_delta(void)
664 {
665  if ( IsIndexes_delta() ) {
666  return;
667  }
668  TIndexes_delta indexes;
669  if ( IsIndexes() ) {
670  // convert to delta
671  indexes.swap(SetIndexes());
672  TIndexes_delta::value_type prev_row = 0;
673  NON_CONST_ITERATE ( TIndexes_delta, it, indexes ) {
674  TIndexes_delta::value_type row = *it;
675  *it = row - prev_row;
676  prev_row = row;
677  }
678  }
679  else {
680  TIndexes_delta::value_type prev_row = 0;
681  for ( const_iterator it = begin(); it; ++it ) {
682  auto row = static_cast<TIndexes_delta::value_type>(it.GetRow());
683  indexes.push_back(row-prev_row);
684  prev_row = row;
685  }
686  }
687  SetIndexes_delta().swap(indexes);
688 }
689 
690 
691 
692 void CSeqTable_sparse_index::ChangeToBit_set(void)
693 {
694  if ( IsBit_set() ) {
695  return;
696  }
697  TBit_set bytes;
698  size_t size = GetSize();
699  if ( size != kInvalidRow ) {
700  bytes.reserve((GetSize()+7)/8);
701  }
702  size_t last_byte_index = 0;
703  Uint1 last_byte = 0;
704  for ( const_iterator it = begin(); it; ++it ) {
705  size_t row = it.GetRow();
706  size_t byte_index = row / 8;
707  if ( byte_index != last_byte_index ) {
708  if ( bytes.capacity() < byte_index+1 ) {
709  bytes.reserve(max(bytes.capacity(), byte_index+1)*2);
710  }
711  bytes.resize(last_byte_index);
712  bytes.push_back(last_byte);
713  last_byte_index = byte_index;
714  last_byte = 0;
715  }
716  size_t bit_index = row % 8;
717  last_byte |= 0x80 >> bit_index;
718  }
719  if ( last_byte ) {
720  bytes.reserve(last_byte_index+1);
721  bytes.resize(last_byte_index);
722  bytes.push_back(last_byte);
723  }
724  SetBit_set().swap(bytes);
725 }
726 
727 
728 
729 void CSeqTable_sparse_index::ChangeToBit_set_bvector(void)
730 {
731  if ( IsBit_set_bvector() ) {
732  return;
733  }
734  typedef bm::bvector<>::size_type TBVSize;
735  AutoPtr<bm::bvector<> > bv(
736  new bm::bvector<>(static_cast<TBVSize>(GetSize())));
737  for ( const_iterator it = begin(); it; ++it ) {
738  size_t row = it.GetRow();
739  bv->set_bit(static_cast<TBVSize>(row));
740  }
741  bv->optimize();
742  SetBit_set_bvector().SetBitVector(bv.release());
743 }
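
// --- Illustrative usage sketch (not part of the original source) -----------
// ChangeTo() switches between the four interchangeable representations while
// preserving the set of rows with values.  A hypothetical caller repacking a
// large index into the serialized-bvector form:
static inline void sx_DemoRepack(CSeqTable_sparse_index& index)
{
    if ( index.IsIndexes() || index.IsIndexes_delta() ) {
        index.ChangeTo(CSeqTable_sparse_index::e_Bit_set_bvector);
    }
}
// ----------------------------------------------------------------------------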
744 
745 
746 END_objects_SCOPE // namespace ncbi::objects::
747 
748 END_NCBI_SCOPE