Commit 42c4fdadd5e1cb7937f1be3b66c5493e9098a2c1
1 parent: f0dc837e
Code simplification by removing short type optimization
Showing 1 changed file with 64 additions and 208 deletions
openbr/core/boost.cpp
| ... | ... | @@ -21,7 +21,6 @@ logRatio( double val ) |
| 21 | 21 | |
| 22 | 22 | #define CV_CMP_NUM_IDX(i,j) (aux[i] < aux[j]) |
| 23 | 23 | static CV_IMPLEMENT_QSORT_EX( icvSortIntAux, int, CV_CMP_NUM_IDX, const float* ) |
| 24 | -static CV_IMPLEMENT_QSORT_EX( icvSortUShAux, unsigned short, CV_CMP_NUM_IDX, const float* ) | |
| 25 | 24 | |
| 26 | 25 | #define CV_THRESHOLD_EPS (0.00001F) |
| 27 | 26 | |
| ... | ... | @@ -186,7 +185,7 @@ struct CascadeBoostTrainData : CvDTreeTrainData |
| 186 | 185 | return ((uint64_t)work_var_count + 1) * sample_count; |
| 187 | 186 | } |
| 188 | 187 | |
| 189 | - virtual const int* getBufferValues(CvDTreeNode* n, int* labelsBuf, uint64_t vi); | |
| 188 | + virtual const int* getBufferValues(CvDTreeNode* n, uint64_t vi); | |
| 190 | 189 | virtual const int* get_class_labels(CvDTreeNode* n, int* labelsBuf); |
| 191 | 190 | virtual const int* get_cv_labels(CvDTreeNode* n, int* labelsBuf); |
| 192 | 191 | virtual const int* get_sample_indices(CvDTreeNode* n, int* indicesBuf); |
| ... | ... | @@ -298,62 +297,29 @@ CvDTreeNode* CascadeBoostTrainData::subsample_data( const CvMat* _subsample_idx |
| 298 | 297 | int num_valid = data_root->get_num_valid(vi); |
| 299 | 298 | CV_Assert( num_valid == sample_count ); |
| 300 | 299 | |
| 301 | - if (is_buf_16u) | |
| 300 | + int* idst_idx = buf->data.i + root->buf_idx*getLength() + (uint64_t)vi*sample_count + root->offset; | |
| 301 | + for( int i = 0; i < num_valid; i++ ) | |
| 302 | 302 | { |
| 303 | - unsigned short* udst_idx = (unsigned short*)(buf->data.s + root->buf_idx*getLength() + (uint64_t)vi*sample_count + data_root->offset); | |
| 304 | - for( int i = 0; i < num_valid; i++ ) | |
| 305 | - { | |
| 306 | - idx = src_idx[i]; | |
| 307 | - count_i = co[idx*2]; | |
| 308 | - if( count_i ) | |
| 309 | - for( cur_ofs = co[idx*2+1]; count_i > 0; count_i--, j++, cur_ofs++ ) | |
| 310 | - udst_idx[j] = (unsigned short)cur_ofs; | |
| 311 | - } | |
| 312 | - } | |
| 313 | - else | |
| 314 | - { | |
| 315 | - int* idst_idx = buf->data.i + root->buf_idx*getLength() + (uint64_t)vi*sample_count + root->offset; | |
| 316 | - for( int i = 0; i < num_valid; i++ ) | |
| 317 | - { | |
| 318 | - idx = src_idx[i]; | |
| 319 | - count_i = co[idx*2]; | |
| 320 | - if( count_i ) | |
| 321 | - for( cur_ofs = co[idx*2+1]; count_i > 0; count_i--, j++, cur_ofs++ ) | |
| 322 | - idst_idx[j] = cur_ofs; | |
| 323 | - } | |
| 303 | + idx = src_idx[i]; | |
| 304 | + count_i = co[idx*2]; | |
| 305 | + if( count_i ) | |
| 306 | + for( cur_ofs = co[idx*2+1]; count_i > 0; count_i--, j++, cur_ofs++ ) | |
| 307 | + idst_idx[j] = cur_ofs; | |
| 324 | 308 | } |
| 325 | 309 | } |
| 326 | 310 | |
| 327 | 311 | // subsample cv_labels |
| 328 | 312 | const int* src_lbls = get_cv_labels(data_root, (int*)(uchar*)inn_buf); |
| 329 | - if (is_buf_16u) | |
| 330 | - { | |
| 331 | - unsigned short* udst = (unsigned short*)(buf->data.s + root->buf_idx*getLength() + (workVarCount-1)*sample_count + root->offset); | |
| 332 | - for( int i = 0; i < count; i++ ) | |
| 333 | - udst[i] = (unsigned short)src_lbls[sidx[i]]; | |
| 334 | - } | |
| 335 | - else | |
| 336 | - { | |
| 337 | - int* idst = buf->data.i + root->buf_idx*getLength() + | |
| 338 | - (workVarCount-1)*sample_count + root->offset; | |
| 339 | - for( int i = 0; i < count; i++ ) | |
| 340 | - idst[i] = src_lbls[sidx[i]]; | |
| 341 | - } | |
| 313 | + int* idst = buf->data.i + root->buf_idx*getLength() + | |
| 314 | + (workVarCount-1)*sample_count + root->offset; | |
| 315 | + for( int i = 0; i < count; i++ ) | |
| 316 | + idst[i] = src_lbls[sidx[i]]; | |
| 342 | 317 | |
| 343 | 318 | // subsample sample_indices |
| 344 | 319 | const int* sample_idx_src = get_sample_indices(data_root, (int*)(uchar*)inn_buf); |
| 345 | - if (is_buf_16u) | |
| 346 | - { | |
| 347 | - unsigned short* sample_idx_dst = (unsigned short*)(buf->data.s + root->buf_idx*getLength() + workVarCount*sample_count + root->offset); | |
| 348 | - for( int i = 0; i < count; i++ ) | |
| 349 | - sample_idx_dst[i] = (unsigned short)sample_idx_src[sidx[i]]; | |
| 350 | - } | |
| 351 | - else | |
| 352 | - { | |
| 353 | - int* sample_idx_dst = buf->data.i + root->buf_idx*getLength() + workVarCount*sample_count + root->offset; | |
| 354 | - for( int i = 0; i < count; i++ ) | |
| 355 | - sample_idx_dst[i] = sample_idx_src[sidx[i]]; | |
| 356 | - } | |
| 320 | + int* sample_idx_dst = buf->data.i + root->buf_idx*getLength() + workVarCount*sample_count + root->offset; | |
| 321 | + for( int i = 0; i < count; i++ ) | |
| 322 | + sample_idx_dst[i] = sample_idx_src[sidx[i]]; | |
| 357 | 323 | |
| 358 | 324 | for( int vi = 0; vi < var_count; vi++ ) |
| 359 | 325 | root->set_num_valid(vi, count); |
| ... | ... | @@ -459,12 +425,10 @@ void CascadeBoostTrainData::setData( const FeatureEvaluator* _featureEvaluator, |
| 459 | 425 | sample_count = _numSamples; |
| 460 | 426 | |
| 461 | 427 | is_buf_16u = false; |
| 462 | - if (sample_count < 65536) | |
| 463 | - is_buf_16u = true; | |
| 464 | 428 | |
| 465 | 429 | // 1048576 is the number of bytes in a megabyte |
| 466 | 430 | numPrecalcVal = min( cvRound((double)_precalcValBufSize*1048576. / (sizeof(float)*sample_count)), var_count ); |
| 467 | - numPrecalcIdx = min( cvRound((double)_precalcIdxBufSize*1048576. / ((is_buf_16u ? sizeof(unsigned short) : sizeof (int))*sample_count)), var_count ); | |
| 431 | + numPrecalcIdx = min( cvRound((double)_precalcIdxBufSize*1048576. / (sizeof (int)*sample_count)), var_count ); | |
| 468 | 432 | |
| 469 | 433 | assert( numPrecalcIdx >= 0 && numPrecalcVal >= 0 ); |
| 470 | 434 | |
| ... | ... | @@ -510,13 +474,9 @@ void CascadeBoostTrainData::setData( const FeatureEvaluator* _featureEvaluator, |
| 510 | 474 | effective_buf_width *= buf_count; |
| 511 | 475 | |
| 512 | 476 | if (effective_buf_width * effective_buf_height != effective_buf_size) |
| 513 | - { | |
| 514 | 477 | CV_Error(CV_StsBadArg, "The memory buffer cannot be allocated since its size exceeds integer fields limit"); |
| 515 | - } | |
| 516 | - if ( is_buf_16u ) | |
| 517 | - buf = cvCreateMat( effective_buf_height, effective_buf_width, CV_16UC(channels) ); | |
| 518 | - else | |
| 519 | - buf = cvCreateMat( effective_buf_height, effective_buf_width, CV_32SC(channels) ); | |
| 478 | + | |
| 479 | + buf = cvCreateMat( effective_buf_height, effective_buf_width, CV_32SC(channels) ); | |
| 520 | 480 | |
| 521 | 481 | cat_count = cvCreateMat( 1, cat_var_count + 1, CV_32SC(channels) ); |
| 522 | 482 | |
| ... | ... | @@ -542,10 +502,7 @@ void CascadeBoostTrainData::setData( const FeatureEvaluator* _featureEvaluator, |
| 542 | 502 | data_root = new_node( 0, sample_count, 0, 0 ); |
| 543 | 503 | |
| 544 | 504 | // set sample labels |
| 545 | - if (is_buf_16u) | |
| 546 | - udst = (unsigned short*)(buf->data.s + (uint64_t)work_var_count*sample_count); | |
| 547 | - else | |
| 548 | - idst = buf->data.i + (uint64_t)work_var_count*sample_count; | |
| 505 | + idst = buf->data.i + (uint64_t)work_var_count*sample_count; | |
| 549 | 506 | |
| 550 | 507 | for (int si = 0; si < sample_count; si++) |
| 551 | 508 | { |
| ... | ... | @@ -594,29 +551,21 @@ const int* CascadeBoostTrainData::get_class_labels( CvDTreeNode* n, int* labelsB |
| 594 | 551 | return labelsBuf; |
| 595 | 552 | } |
| 596 | 553 | |
| 597 | -const int* CascadeBoostTrainData::getBufferValues(CvDTreeNode* n, int* indicesBuf, uint64_t vi) | |
| 554 | +const int* CascadeBoostTrainData::getBufferValues(CvDTreeNode* n, uint64_t vi) | |
| 598 | 555 | { |
| 599 | - const int* cat_values = 0; | |
| 600 | - if (!is_buf_16u) | |
| 601 | - cat_values = buf->data.i + n->buf_idx*getLength() + vi*sample_count + n->offset; | |
| 602 | - else { | |
| 603 | - const unsigned short* short_values = (const unsigned short*)(buf->data.s + n->buf_idx*getLength() + vi*sample_count + n->offset); | |
| 604 | - for (int i = 0; i < n->sample_count; i++) | |
| 605 | - indicesBuf[i] = short_values[i]; | |
| 606 | - cat_values = indicesBuf; | |
| 607 | - } | |
| 608 | - | |
| 609 | - return cat_values; | |
| 556 | + return buf->data.i + n->buf_idx*getLength() + vi*sample_count + n->offset; | |
| 610 | 557 | } |
| 611 | 558 | |
| 612 | -const int* CascadeBoostTrainData::get_sample_indices( CvDTreeNode* n, int* indicesBuf ) | |
| 559 | +const int* CascadeBoostTrainData::get_sample_indices(CvDTreeNode* n, int* indicesBuf) | |
| 613 | 560 | { |
| 614 | - return getBufferValues(n,indicesBuf,get_work_var_count()); | |
| 561 | + Q_UNUSED(indicesBuf) | |
| 562 | + return getBufferValues(n,get_work_var_count()); | |
| 615 | 563 | } |
| 616 | 564 | |
| 617 | -const int* CascadeBoostTrainData::get_cv_labels( CvDTreeNode* n, int* indicesBuf ) | |
| 565 | +const int* CascadeBoostTrainData::get_cv_labels(CvDTreeNode* n, int* indicesBuf) | |
| 618 | 566 | { |
| 619 | - return getBufferValues(n,indicesBuf,get_work_var_count()-1); | |
| 567 | + Q_UNUSED(indicesBuf) | |
| 568 | + return getBufferValues(n,get_work_var_count()-1); | |
| 620 | 569 | } |
| 621 | 570 | |
| 622 | 571 | void CascadeBoostTrainData::get_ord_var_data( CvDTreeNode* n, int vi, float* ordValuesBuf, int* sortedIndicesBuf, const float** ordValues, const int** sortedIndices, int* sampleIndicesBuf ) |
| ... | ... | @@ -629,14 +578,7 @@ void CascadeBoostTrainData::get_ord_var_data( CvDTreeNode* n, int vi, float* ord |
| 629 | 578 | // For this feature (this code refers to features as values, hence vi == value index), |
| 630 | 579 | // have we precalculated (presorted) the training samples by their feature response? |
| 631 | 580 | if (vi < numPrecalcIdx) { |
| 632 | - if (!is_buf_16u) | |
| 633 | - *sortedIndices = buf->data.i + n->buf_idx*getLength() + (uint64_t)vi*sample_count + n->offset; | |
| 634 | - else { | |
| 635 | - const unsigned short* shortIndices = (const unsigned short*)(buf->data.s + n->buf_idx*getLength() + (uint64_t)vi*sample_count + n->offset ); | |
| 636 | - for (int i = 0; i < nodeSampleCount; i++) | |
| 637 | - sortedIndicesBuf[i] = shortIndices[i]; | |
| 638 | - *sortedIndices = sortedIndicesBuf; | |
| 639 | - } | |
| 581 | + *sortedIndices = getBufferValues(n, vi); | |
| 640 | 582 | |
| 641 | 583 | // For this feature, have we precalculated all of the feature responses? |
| 642 | 584 | if (vi < numPrecalcVal) { |
| ... | ... | @@ -731,26 +673,20 @@ struct IndexPrecalc : Precalc |
| 731 | 673 | { |
| 732 | 674 | int* idst; |
| 733 | 675 | unsigned short* udst; |
| 734 | - bool isBufShort; | |
| 735 | 676 | |
| 736 | - IndexPrecalc(const FeatureEvaluator* featureEvaluator, CvMat* buf, int sampleCount, bool isBufShort) : | |
| 677 | + IndexPrecalc(const FeatureEvaluator* featureEvaluator, CvMat* buf, int sampleCount) : | |
| 737 | 678 | Precalc(featureEvaluator, sampleCount), |
| 738 | - isBufShort(isBufShort) | |
| 739 | - { | |
| 740 | - udst = (unsigned short*)buf->data.s; | |
| 741 | - idst = buf->data.i; | |
| 742 | - } | |
| 679 | + idst(buf->data.i) | |
| 680 | + {} | |
| 743 | 681 | |
| 744 | 682 | void setBuffer(uint64_t fi, uint64_t si) const |
| 745 | 683 | { |
| 746 | - if (isBufShort) *(udst + fi*sampleCount + si) = (unsigned short)si; | |
| 747 | - else *(idst + fi*sampleCount + si) = si; | |
| 684 | + *(idst + fi*sampleCount + si) = si; | |
| 748 | 685 | } |
| 749 | 686 | |
| 750 | 687 | void sortBuffer(uint64_t fi, float *valCachePtr) const |
| 751 | 688 | { |
| 752 | - if (isBufShort) icvSortUShAux(udst + fi*sampleCount, sampleCount, valCachePtr); | |
| 753 | - else icvSortIntAux(idst + fi*sampleCount, sampleCount, valCachePtr); | |
| 689 | + icvSortIntAux(idst + fi*sampleCount, sampleCount, valCachePtr); | |
| 754 | 690 | } |
| 755 | 691 | |
| 756 | 692 | virtual void operator()(const Range& range) const |
| ... | ... | @@ -771,8 +707,8 @@ struct FeatureAndIndexPrecalc : IndexPrecalc |
| 771 | 707 | { |
| 772 | 708 | Mat *valCache; |
| 773 | 709 | |
| 774 | - FeatureAndIndexPrecalc(const FeatureEvaluator* featureEvaluator, CvMat* buf, Mat* valCache, int sampleCount, bool isBufShort) : | |
| 775 | - IndexPrecalc(featureEvaluator, buf, sampleCount, isBufShort), | |
| 710 | + FeatureAndIndexPrecalc(const FeatureEvaluator* featureEvaluator, CvMat* buf, Mat* valCache, int sampleCount) : | |
| 711 | + IndexPrecalc(featureEvaluator, buf, sampleCount), | |
| 776 | 712 | valCache(valCache) |
| 777 | 713 | {} |
| 778 | 714 | |
| ... | ... | @@ -816,11 +752,11 @@ void CascadeBoostTrainData::precalculate() |
| 816 | 752 | |
| 817 | 753 | // Compute features and sort training samples for feature indices we are not going to cache |
| 818 | 754 | parallel_for_(Range(numPrecalcVal, numPrecalcIdx), |
| 819 | - IndexPrecalc(featureEvaluator, buf, sample_count, is_buf_16u!=0)); | |
| 755 | + IndexPrecalc(featureEvaluator, buf, sample_count)); | |
| 820 | 756 | |
| 821 | 757 | // Compute features and sort training samples for features indices we are going to cache |
| 822 | 758 | parallel_for_(Range(0, minPrecalc), |
| 823 | - FeatureAndIndexPrecalc(featureEvaluator, buf, &valCache, sample_count, is_buf_16u!=0)); | |
| 759 | + FeatureAndIndexPrecalc(featureEvaluator, buf, &valCache, sample_count)); | |
| 824 | 760 | |
| 825 | 761 | // Compute feature values for feature indices for which we are not going to sort training samples |
| 826 | 762 | parallel_for_(Range(minPrecalc, numPrecalcVal), |
| ... | ... | @@ -880,8 +816,7 @@ void CascadeBoostTree::split_node_data( CvDTreeNode* node ) |
| 880 | 816 | |
| 881 | 817 | complete_node_dir(node); |
| 882 | 818 | |
| 883 | - for (uint64_t i = nLeft = nRight = 0; i < nodeSampleCount; i++) | |
| 884 | - { | |
| 819 | + for (uint64_t i = nLeft = nRight = 0; i < nodeSampleCount; i++) { | |
| 885 | 820 | int d = dir[i]; |
| 886 | 821 | // initialize new indices for splitting ordered variables |
| 887 | 822 | newIdx[i] = (nLeft & (d-1)) | (nRight & -d); // d ? ri : li |
| ... | ... | @@ -900,59 +835,27 @@ void CascadeBoostTree::split_node_data( CvDTreeNode* node ) |
| 900 | 835 | if( ci >= 0 || !splitInputData ) |
| 901 | 836 | continue; |
| 902 | 837 | |
| 903 | - int n1 = node->get_num_valid(vi); | |
| 904 | - float *src_val_buf = (float*)(tempBuf + nodeSampleCount); | |
| 905 | - int *src_sorted_idx_buf = (int*)(src_val_buf + nodeSampleCount); | |
| 906 | - int *src_sample_idx_buf = src_sorted_idx_buf + nodeSampleCount; | |
| 907 | - const int* src_sorted_idx = 0; | |
| 908 | - const float* src_val = 0; | |
| 909 | - | |
| 910 | - data->get_ord_var_data(node, vi, src_val_buf, src_sorted_idx_buf, &src_val, &src_sorted_idx, src_sample_idx_buf); | |
| 911 | - | |
| 838 | + const int* src_sorted_idx = ((CascadeBoostTrainData*)data)->getBufferValues(node, vi); | |
| 912 | 839 | for(uint64_t i = 0; i < nodeSampleCount; i++) |
| 913 | 840 | tempBuf[i] = src_sorted_idx[i]; |
| 914 | 841 | |
| 915 | - if (data->is_buf_16u) { | |
| 916 | - ushort *ldst, *rdst; | |
| 917 | - ldst = (ushort*)(buf->data.s + left->buf_idx*length_buf_row + vi*sampleCount + left->offset); | |
| 918 | - rdst = (ushort*)(ldst + nLeft); | |
| 919 | - | |
| 920 | - // split sorted | |
| 921 | - for (int i = 0; i < n1; i++) { | |
| 922 | - int idx = tempBuf[i]; | |
| 923 | - int d = dir[idx]; | |
| 924 | - idx = newIdx[idx]; | |
| 925 | - if (d) { | |
| 926 | - *rdst = (ushort)idx; | |
| 927 | - rdst++; | |
| 928 | - } else { | |
| 929 | - *ldst = (ushort)idx; | |
| 930 | - ldst++; | |
| 931 | - } | |
| 932 | - } | |
| 933 | - } | |
| 934 | - else | |
| 935 | - { | |
| 936 | - int *ldst, *rdst; | |
| 937 | - ldst = buf->data.i + left->buf_idx*length_buf_row + vi*sampleCount + left->offset; | |
| 938 | - rdst = buf->data.i + right->buf_idx*length_buf_row + vi*sampleCount + right->offset; | |
| 842 | + int *ldst, *rdst; | |
| 843 | + ldst = buf->data.i + left->buf_idx*length_buf_row + vi*sampleCount + left->offset; | |
| 844 | + rdst = buf->data.i + right->buf_idx*length_buf_row + vi*sampleCount + right->offset; | |
| 939 | 845 | |
| 940 | - // split sorted | |
| 941 | - for( int i = 0; i < n1; i++ ) | |
| 942 | - { | |
| 943 | - int idx = tempBuf[i]; | |
| 944 | - int d = dir[idx]; | |
| 945 | - idx = newIdx[idx]; | |
| 946 | - if (d) | |
| 947 | - { | |
| 948 | - *rdst = idx; | |
| 949 | - rdst++; | |
| 950 | - } | |
| 951 | - else | |
| 952 | - { | |
| 953 | - *ldst = idx; | |
| 954 | - ldst++; | |
| 955 | - } | |
| 846 | + int n1 = node->get_num_valid(vi); | |
| 847 | + | |
| 848 | + // split sorted | |
| 849 | + for (int i = 0; i < n1; i++) { | |
| 850 | + int idx = tempBuf[i]; | |
| 851 | + int d = dir[idx]; | |
| 852 | + idx = newIdx[idx]; | |
| 853 | + if (d) { | |
| 854 | + *rdst = idx; | |
| 855 | + rdst++; | |
| 856 | + } else { | |
| 857 | + *ldst = idx; | |
| 858 | + ldst++; | |
| 956 | 859 | } |
| 957 | 860 | } |
| 958 | 861 | } |
| ... | ... | @@ -964,37 +867,16 @@ void CascadeBoostTree::split_node_data( CvDTreeNode* node ) |
| 964 | 867 | for(uint64_t i = 0; i < nodeSampleCount; i++) |
| 965 | 868 | tempBuf[i] = src_lbls[i]; |
| 966 | 869 | |
| 967 | - if (data->is_buf_16u) { | |
| 968 | - unsigned short *ldst = (unsigned short *)(buf->data.s + left->buf_idx*length_buf_row + (workVarCount-1)*sampleCount + left->offset); | |
| 969 | - unsigned short *rdst = (unsigned short *)(buf->data.s + right->buf_idx*length_buf_row + (workVarCount-1)*sampleCount + right->offset); | |
| 970 | - | |
| 971 | - for( uint64_t i = 0; i < nodeSampleCount; i++ ) { | |
| 972 | - int idx = tempBuf[i]; | |
| 973 | - if (dir[i]) { | |
| 974 | - *rdst = (unsigned short)idx; | |
| 975 | - rdst++; | |
| 976 | - } else { | |
| 977 | - *ldst = (unsigned short)idx; | |
| 978 | - ldst++; | |
| 979 | - } | |
| 980 | - } | |
| 981 | - | |
| 982 | - } | |
| 983 | - else | |
| 984 | 870 | { |
| 985 | 871 | int *ldst = buf->data.i + left->buf_idx*length_buf_row + (workVarCount-1)*sampleCount + left->offset; |
| 986 | 872 | int *rdst = buf->data.i + right->buf_idx*length_buf_row + (workVarCount-1)*sampleCount + right->offset; |
| 987 | 873 | |
| 988 | - for( uint64_t i = 0; i < nodeSampleCount; i++ ) | |
| 989 | - { | |
| 874 | + for (uint64_t i = 0; i < nodeSampleCount; i++) { | |
| 990 | 875 | int idx = tempBuf[i]; |
| 991 | - if (dir[i]) | |
| 992 | - { | |
| 876 | + if (dir[i]) { | |
| 993 | 877 | *rdst = idx; |
| 994 | 878 | rdst++; |
| 995 | - } | |
| 996 | - else | |
| 997 | - { | |
| 879 | + } else { | |
| 998 | 880 | *ldst = idx; |
| 999 | 881 | ldst++; |
| 1000 | 882 | } |
| ... | ... | @@ -1008,35 +890,15 @@ void CascadeBoostTree::split_node_data( CvDTreeNode* node ) |
| 1008 | 890 | for(uint64_t i = 0; i < nodeSampleCount; i++) |
| 1009 | 891 | tempBuf[i] = sampleIdx_src[i]; |
| 1010 | 892 | |
| 1011 | - if (data->is_buf_16u) { | |
| 1012 | - unsigned short* ldst = (unsigned short*)(buf->data.s + left->buf_idx*length_buf_row + workVarCount*sampleCount + left->offset); | |
| 1013 | - unsigned short* rdst = (unsigned short*)(buf->data.s + right->buf_idx*length_buf_row + workVarCount*sampleCount + right->offset); | |
| 1014 | - for (uint64_t i = 0; i < nodeSampleCount; i++) { | |
| 1015 | - unsigned short idx = (unsigned short)tempBuf[i]; | |
| 1016 | - if (dir[i]) { | |
| 1017 | - *rdst = idx; | |
| 1018 | - rdst++; | |
| 1019 | - } else { | |
| 1020 | - *ldst = idx; | |
| 1021 | - ldst++; | |
| 1022 | - } | |
| 1023 | - } | |
| 1024 | - } | |
| 1025 | - else | |
| 1026 | 893 | { |
| 1027 | 894 | int* ldst = buf->data.i + left->buf_idx*length_buf_row + workVarCount*sampleCount + left->offset; |
| 1028 | 895 | int* rdst = buf->data.i + right->buf_idx*length_buf_row + workVarCount*sampleCount + right->offset; |
| 1029 | - for (uint64_t i = 0; i < nodeSampleCount; i++) | |
| 1030 | - { | |
| 896 | + for (uint64_t i = 0; i < nodeSampleCount; i++) { | |
| 1031 | 897 | int idx = tempBuf[i]; |
| 1032 | - if (dir[i]) | |
| 1033 | - { | |
| 1034 | - | |
| 898 | + if (dir[i]) { | |
| 1035 | 899 | *rdst = idx; |
| 1036 | 900 | rdst++; |
| 1037 | - } | |
| 1038 | - else | |
| 1039 | - { | |
| 901 | + } else { | |
| 1040 | 902 | *ldst = idx; |
| 1041 | 903 | ldst++; |
| 1042 | 904 | } |
| ... | ... | @@ -1167,15 +1029,9 @@ void CascadeBoost::update_weights(CvBoostTree* tree) |
| 1167 | 1029 | |
| 1168 | 1030 | // set the labels to find (from within weak tree learning proc) |
| 1169 | 1031 | // the particular sample weight, and where to store the response. |
| 1170 | - if (data->is_buf_16u) { | |
| 1171 | - unsigned short* labels = (unsigned short*)(buf->data.s + data->data_root->buf_idx*length_buf_row + data->data_root->offset + (uint64_t)(data->work_var_count-1)*data->sample_count); | |
| 1172 | - for (int i = 0; i < n; i++) | |
| 1173 | - labels[i] = (unsigned short)i; | |
| 1174 | - } else { | |
| 1175 | - int* labels = buf->data.i + data->data_root->buf_idx*length_buf_row + data->data_root->offset + (uint64_t)(data->work_var_count-1)*data->sample_count; | |
| 1176 | - for( int i = 0; i < n; i++ ) | |
| 1177 | - labels[i] = i; | |
| 1178 | - } | |
| 1032 | + int* labels = buf->data.i + data->data_root->buf_idx*length_buf_row + data->data_root->offset + (uint64_t)(data->work_var_count-1)*data->sample_count; | |
| 1033 | + for( int i = 0; i < n; i++ ) | |
| 1034 | + labels[i] = i; | |
| 1179 | 1035 | |
| 1180 | 1036 | for (int i = 0; i < n; i++) { |
| 1181 | 1037 | // save original categorical responses {0,1}, convert them to {-1,1} | ... | ... |