00001 #include <string>
00002 #include <map>
00003 #include <limits>
00004 #include <vector>
00005
00006 #include <boost/unordered_map.hpp>
00007 #include <boost/functional/hash.hpp>
00008
00009 #include "moses/FF/StatefulFeatureFunction.h"
00010 #include "moses/PP/CountsPhraseProperty.h"
00011 #include "moses/TranslationOptionList.h"
00012 #include "moses/TranslationOption.h"
00013 #include "moses/Util.h"
00014 #include "moses/TypeDef.h"
00015 #include "moses/StaticData.h"
00016 #include "moses/Phrase.h"
00017 #include "moses/AlignmentInfo.h"
00018 #include "moses/AlignmentInfoCollection.h"
00019 #include "moses/Word.h"
00020 #include "moses/FactorCollection.h"
00021
00022 #include "Normalizer.h"
00023 #include "Classifier.h"
00024 #include "VWFeatureBase.h"
00025 #include "TabbedSentence.h"
00026 #include "ThreadLocalByFeatureStorage.h"
00027 #include "TrainingLoss.h"
00028 #include "VWTargetSentence.h"
00029 #include "VWState.h"
00030 #include "VW.h"
00031
00032 namespace Moses
00033 {
00034
// Construct the VW feature function from its moses.ini configuration line.
// Registers a single dense score component, sets up thread-local storage for
// the training target sentence, and allocates the classifier factory plus the
// per-thread caches used during decoding.
VW::VW(const std::string &line)
  : StatefulFeatureFunction(1, line)
  , TLSTargetSentence(this)
  , m_train(false)                 // prediction mode unless "train=true" is given
  , m_sentenceStartWord(Word())
{
  ReadParameters();

  // Training mode writes the model to m_modelPath; prediction mode reads it
  // and forwards the extra VW option string.
  // NOTE(review): classifierFactory is never stored or deleted — it appears
  // to live for the lifetime of the process; confirm this is intentional.
  Discriminative::ClassifierFactory *classifierFactory = m_train
      ? new Discriminative::ClassifierFactory(m_modelPath)
      : new Discriminative::ClassifierFactory(m_modelPath, m_vwOptions);

  m_tlsClassifier = new TLSClassifier(this, *classifierFactory);

  // Per-thread caches: future scores and per-option feature vectors computed
  // in EvaluateTranslationOptionListWithSourceContext(), plus the state
  // extensions cache used by EvaluateWhenApplied().
  m_tlsFutureScores = new TLSFloatHashMap(this);
  m_tlsComputedStateExtensions = new TLSStateExtensions(this);
  m_tlsTranslationOptionFeatures = new TLSFeatureVectorMap(this);
  m_tlsTargetContextFeatures = new TLSFeatureVectorMap(this);

  // Fall back to defaults when SetParameter() did not configure these.
  if (! m_normalizer) {
    VERBOSE(1, "VW :: No loss function specified, assuming logistic loss.\n");
    m_normalizer = (Discriminative::Normalizer *) new Discriminative::LogisticLossNormalizer();
  }

  if (! m_trainingLoss) {
    VERBOSE(1, "VW :: Using basic 1/0 loss calculation in training.\n");
    m_trainingLoss = (TrainingLoss *) new TrainingLossBasic();
  }

  // Build the sentence-start word (<s> in every factor slot); it is used to
  // pad the target-side context at the beginning of a sentence.
  const Factor *bosFactor = FactorCollection::Instance().AddFactor(BOS_);
  for (size_t i = 0; i < MAX_NUM_FACTORS; i++)
    m_sentenceStartWord.SetFactor(i, bosFactor);
}
00068
00069 VW::~VW()
00070 {
00071 delete m_tlsClassifier;
00072 delete m_normalizer;
00073
00074 }
00075
// Stateful scoring: when a hypothesis is applied, re-evaluate its translation
// option together with the target-side context carried in prevState, and add
// the (normalized, log-transformed) classifier loss to the accumulator.
// Results are cached per (previous state, source span) so that competing
// options over the same span with the same context are scored only once.
FFState* VW::EvaluateWhenApplied(
  const Hypothesis& curHypo,
  const FFState* prevState,
  ScoreComponentCollection* accumulator) const
{
  VERBOSE(3, "VW :: Evaluating translation options\n");

  const VWState& prevVWState = *static_cast<const VWState *>(prevState);

  const std::vector<VWFeatureBase*>& contextFeatures =
    VWFeatureBase::GetTargetContextFeatures(GetScoreProducerDescription());

  if (contextFeatures.empty()) {
    // No target-context features configured: everything was already scored in
    // EvaluateTranslationOptionListWithSourceContext(), so the state carries
    // no information and all hypotheses can share an empty state.
    return new VWState();
  }

  size_t spanStart = curHypo.GetTranslationOption().GetStartPos();
  size_t spanEnd = curHypo.GetTranslationOption().GetEndPos();

  // Cache key combines the previous (context) state and the source span.
  size_t cacheKey = MakeCacheKey(prevState, spanStart, spanEnd);

  boost::unordered_map<size_t, FloatHashMap> &computedStateExtensions
    = *m_tlsComputedStateExtensions->GetStored();

  if (computedStateExtensions.find(cacheKey) == computedStateExtensions.end()) {
    // Cache miss: score all translation options covering this span under the
    // current target context.
    const TranslationOptionList *topts =
      curHypo.GetManager().getSntTranslationOptions()->GetTranslationOptionList(spanStart, spanEnd);

    const InputType& input = curHypo.GetManager().GetSource();

    Discriminative::Classifier &classifier = *m_tlsClassifier->GetStored();

    size_t contextHash = prevVWState.hash();

    FeatureVectorMap &contextFeaturesCache = *m_tlsTargetContextFeatures->GetStored();

    // Second-level cache: the label-independent (context) feature vector is
    // keyed by the hash of the previous state alone.
    FeatureVectorMap::const_iterator contextIt = contextFeaturesCache.find(contextHash);
    if (contextIt == contextFeaturesCache.end()) {
      // Extract context features; the extractors also feed them into the
      // classifier as a side effect, so no explicit Add...() call is needed.
      const Phrase &targetContext = prevVWState.GetPhrase();
      Discriminative::FeatureVector contextVector;
      const AlignmentInfo *alignInfo = TransformAlignmentInfo(curHypo, targetContext.GetSize());
      for(size_t i = 0; i < contextFeatures.size(); ++i)
        (*contextFeatures[i])(input, targetContext, *alignInfo, classifier, contextVector);

      contextFeaturesCache[contextHash] = contextVector;
      VERBOSE(3, "VW :: context cache miss\n");
    } else {
      // Replay the previously extracted context features into the classifier.
      classifier.AddLabelIndependentFeatureVector(contextIt->second);
      VERBOSE(3, "VW :: context cache hit\n");
    }

    std::vector<float> losses(topts->size());

    for (size_t toptIdx = 0; toptIdx < topts->size(); toptIdx++) {
      const TranslationOption *topt = topts->Get(toptIdx);
      const TargetPhrase &targetPhrase = topt->GetTargetPhrase();
      size_t toptHash = hash_value(*topt);

      // Start from the future-score correction stored during the earlier
      // source-context pass (assumes the entry exists for every option —
      // no end() check here).
      losses[toptIdx] = m_tlsFutureScores->GetStored()->find(toptHash)->second;

      // Replay the cached label-dependent (target-side) features for this
      // option, then predict the loss with the full context.
      const Discriminative::FeatureVector &targetFeatureVector =
        m_tlsTranslationOptionFeatures->GetStored()->find(toptHash)->second;

      classifier.AddLabelDependentFeatureVector(targetFeatureVector);

      losses[toptIdx] += classifier.Predict(MakeTargetLabel(targetPhrase));
    }

    // Turn raw losses into a normalized distribution over the options.
    (*m_normalizer)(losses);

    // Store log-domain scores for every option under this cache key.
    FloatHashMap &toptScores = computedStateExtensions[cacheKey];
    for (size_t toptIdx = 0; toptIdx < topts->size(); toptIdx++) {
      const TranslationOption *topt = topts->Get(toptIdx);
      size_t toptHash = hash_value(*topt);
      toptScores[toptHash] = FloorScore(TransformScore(losses[toptIdx]));
    }

    VERBOSE(3, "VW :: cache miss\n");
  } else {
    VERBOSE(3, "VW :: cache hit\n");
  }

  // Look up the score of the option actually applied by this hypothesis.
  std::vector<float> newScores(m_numScoreComponents);
  size_t toptHash = hash_value(curHypo.GetTranslationOption());
  newScores[0] = computedStateExtensions[cacheKey][toptHash];
  VERBOSE(3, "VW :: adding score: " << newScores[0] << "\n");
  accumulator->PlusEquals(this, newScores);

  // The new state extends the previous target context with this hypothesis.
  return new VWState(prevVWState, curHypo);
}
00182
00183 const FFState* VW::EmptyHypothesisState(const InputType &input) const
00184 {
00185 size_t maxContextSize = VWFeatureBase::GetMaximumContextSize(GetScoreProducerDescription());
00186 Phrase initialPhrase;
00187 for (size_t i = 0; i < maxContextSize; i++)
00188 initialPhrase.AddWord(m_sentenceStartWord);
00189
00190 return new VWState(initialPhrase);
00191 }
00192
// Evaluate (or, in training mode, train on) all translation options for one
// source span. In training mode, VW training examples are generated against
// the annotated target sentence; in prediction mode, classifier losses are
// either applied directly to the options' scores (no target-context features)
// or stashed as future-score corrections for EvaluateWhenApplied().
void VW::EvaluateTranslationOptionListWithSourceContext(const InputType &input
    , const TranslationOptionList &translationOptionList) const
{
  Discriminative::Classifier &classifier = *m_tlsClassifier->GetStored();

  if (translationOptionList.size() == 0)
    return;

  VERBOSE(3, "VW :: Evaluating translation options\n");

  // Feature extractors registered for this feature function instance.
  const std::vector<VWFeatureBase*>& sourceFeatures =
    VWFeatureBase::GetSourceFeatures(GetScoreProducerDescription());

  const std::vector<VWFeatureBase*>& contextFeatures =
    VWFeatureBase::GetTargetContextFeatures(GetScoreProducerDescription());

  const std::vector<VWFeatureBase*>& targetFeatures =
    VWFeatureBase::GetTargetFeatures(GetScoreProducerDescription());

  size_t maxContextSize = VWFeatureBase::GetMaximumContextSize(GetScoreProducerDescription());

  bool haveTargetContextFeatures = ! contextFeatures.empty();

  // All options in the list share the same source span.
  const Range &sourceRange = translationOptionList.Get(0)->GetSourceWordsRange();

  if (m_train) {
    // --- training branch ---
    // Determine, for every option, whether it matches the annotated target
    // sentence and at which target position the match starts.
    std::vector<bool> correct(translationOptionList.size());
    std::vector<int> startsAt(translationOptionList.size());
    std::set<int> uncoveredStartingPositions;

    for (size_t i = 0; i < translationOptionList.size(); i++) {
      std::pair<bool, int> isCorrect = IsCorrectTranslationOption(* translationOptionList.Get(i));
      correct[i] = isCorrect.first;
      startsAt[i] = isCorrect.second;
      if (isCorrect.first) {
        uncoveredStartingPositions.insert(isCorrect.second);
      }
    }

    // Optionally discount this sentence's own counts from the phrase table
    // (leave-one-out) to decide which options to keep as training examples.
    std::vector<bool> keep = (m_leaveOneOut.size() > 0)
                             ? LeaveOneOut(translationOptionList, correct)
                             : std::vector<bool>(translationOptionList.size(), true);

    // Generate one multi-class training example per distinct target start
    // position that has at least one correct option.
    while (! uncoveredStartingPositions.empty()) {
      int currentStart = *uncoveredStartingPositions.begin();
      uncoveredStartingPositions.erase(uncoveredStartingPositions.begin());

      // Find the first kept correct option starting at this position.
      int firstCorrect = -1;
      for (size_t i = 0; i < translationOptionList.size(); i++) {
        if (keep[i] && correct[i] && startsAt[i] == currentStart) {
          firstCorrect = i;
          break;
        }
      }

      if (firstCorrect == -1) {
        VERBOSE(3, "VW :: skipping topt collection, no correct translation for span at current tgt start position\n");
        continue;
      }

      // The reference translation used for computing the training loss.
      const TargetPhrase &correctPhrase = translationOptionList.Get(firstCorrect)->GetTargetPhrase();

      // Extractors feed features into the classifier as a side effect; the
      // collected vector itself is not needed here.
      Discriminative::FeatureVector dummyVector;

      // Label-independent features: source side.
      for(size_t i = 0; i < sourceFeatures.size(); ++i)
        (*sourceFeatures[i])(input, sourceRange, classifier, dummyVector);

      // Target context: BOS padding followed by the reference prefix up to
      // the current start position.
      Phrase targetContext;
      for (size_t i = 0; i < maxContextSize; i++)
        targetContext.AddWord(m_sentenceStartWord);

      const Phrase *targetSent = GetStored()->m_sentence;

      // Alignment of the context words, shifted to account for the padding.
      AlignmentInfo contextAlignment = TransformAlignmentInfo(*GetStored()->m_alignment, maxContextSize, currentStart);

      if (currentStart > 0)
        targetContext.Append(targetSent->GetSubString(Range(0, currentStart - 1)));

      // Label-independent features: target-side context.
      for(size_t i = 0; i < contextFeatures.size(); ++i)
        (*contextFeatures[i])(input, targetContext, contextAlignment, classifier, dummyVector);

      // One training call per kept option; correct options starting at the
      // current position get zero/low loss, all others a positive loss.
      for (size_t toptIdx = 0; toptIdx < translationOptionList.size(); toptIdx++) {

        if (! keep[toptIdx])
          continue;

        // Label-dependent features: the candidate target phrase.
        const TargetPhrase &targetPhrase = translationOptionList.Get(toptIdx)->GetTargetPhrase();
        for(size_t i = 0; i < targetFeatures.size(); ++i)
          (*targetFeatures[i])(input, targetPhrase, classifier, dummyVector);

        bool isCorrect = correct[toptIdx] && startsAt[toptIdx] == currentStart;
        float loss = (*m_trainingLoss)(targetPhrase, correctPhrase, isCorrect);

        classifier.Train(MakeTargetLabel(targetPhrase), loss);
      }
    }
  } else {
    // --- prediction branch ---
    std::vector<float> losses(translationOptionList.size());

    Discriminative::FeatureVector outFeaturesSourceNamespace;

    // Label-independent features: source side only (no target context yet).
    for(size_t i = 0; i < sourceFeatures.size(); ++i)
      (*sourceFeatures[i])(input, sourceRange, classifier, outFeaturesSourceNamespace);

    for (size_t toptIdx = 0; toptIdx < translationOptionList.size(); toptIdx++) {
      const TranslationOption *topt = translationOptionList.Get(toptIdx);
      const TargetPhrase &targetPhrase = topt->GetTargetPhrase();
      Discriminative::FeatureVector outFeaturesTargetNamespace;

      // Label-dependent features for this option.
      for(size_t i = 0; i < targetFeatures.size(); ++i)
        (*targetFeatures[i])(input, targetPhrase, classifier, outFeaturesTargetNamespace);

      // Cache the target feature vector under the option hash so that
      // EvaluateWhenApplied() can replay it without re-extraction.
      size_t toptHash = hash_value(*topt);
      m_tlsTranslationOptionFeatures->GetStored()->insert(
        std::make_pair(toptHash, outFeaturesTargetNamespace));

      losses[toptIdx] = classifier.Predict(MakeTargetLabel(targetPhrase));
    }

    // Keep the raw losses for the future-score computation below; normalize
    // a copy for direct scoring.
    std::vector<float> rawLosses = losses;
    (*m_normalizer)(losses);

    for (size_t toptIdx = 0; toptIdx < translationOptionList.size(); toptIdx++) {
      TranslationOption *topt = *(translationOptionList.begin() + toptIdx);
      if (! haveTargetContextFeatures) {
        // No context features: the loss is final, apply it to the option's
        // score breakdown immediately.
        std::vector<float> newScores(m_numScoreComponents);
        newScores[0] = FloorScore(TransformScore(losses[toptIdx]));

        ScoreComponentCollection &scoreBreakDown = topt->GetScoreBreakdown();
        scoreBreakDown.PlusEquals(this, newScores);

        topt->UpdateScore();
      } else {
        // Context features exist: the final score depends on the (not yet
        // known) target context. Store the part of the raw loss that is NOT
        // explained by target-only features as a future-score correction.
        size_t toptHash = hash_value(*topt);

        // Predict with empty source features to isolate the target-only
        // contribution (VW_DUMMY_LABEL: presumably a placeholder label for
        // this probe prediction — confirm in Classifier.h).
        Discriminative::FeatureVector emptySource;
        const Discriminative::FeatureVector &targetFeatureVector =
          m_tlsTranslationOptionFeatures->GetStored()->find(toptHash)->second;
        classifier.AddLabelIndependentFeatureVector(emptySource);
        classifier.AddLabelDependentFeatureVector(targetFeatureVector);
        float targetOnlyLoss = classifier.Predict(VW_DUMMY_LABEL);

        float futureScore = rawLosses[toptIdx] - targetOnlyLoss;
        m_tlsFutureScores->GetStored()->insert(std::make_pair(toptHash, futureScore));
      }
    }
  }
}
00378
00379 void VW::SetParameter(const std::string& key, const std::string& value)
00380 {
00381 if (key == "train") {
00382 m_train = Scan<bool>(value);
00383 } else if (key == "path") {
00384 m_modelPath = value;
00385 } else if (key == "vw-options") {
00386 m_vwOptions = value;
00387 } else if (key == "leave-one-out-from") {
00388 m_leaveOneOut = value;
00389 } else if (key == "training-loss") {
00390
00391 if (value == "basic") {
00392 m_trainingLoss = (TrainingLoss *) new TrainingLossBasic();
00393 } else if (value == "bleu") {
00394 m_trainingLoss = (TrainingLoss *) new TrainingLossBLEU();
00395 } else {
00396 UTIL_THROW2("Unknown training loss type:" << value);
00397 }
00398 } else if (key == "loss") {
00399
00400
00401 if (value == "logistic") {
00402 m_normalizer = (Discriminative::Normalizer *) new Discriminative::LogisticLossNormalizer();
00403 } else if (value == "squared") {
00404 m_normalizer = (Discriminative::Normalizer *) new Discriminative::SquaredLossNormalizer();
00405 } else {
00406 UTIL_THROW2("Unknown loss type:" << value);
00407 }
00408 } else {
00409 StatefulFeatureFunction::SetParameter(key, value);
00410 }
00411 }
00412
// Per-sentence initialization: clear the per-thread caches and, in training
// mode, parse the annotated target sentence and word alignment from the
// TabbedSentence input.
void VW::InitializeForInput(ttasksptr const& ttask)
{
  // Future-score corrections from the previous sentence are stale.
  m_tlsFutureScores->GetStored()->clear();

  // Likewise the per-(state, span) score cache.
  m_tlsComputedStateExtensions->GetStored()->clear();

  // And the cached feature vectors (keyed by hashes that are only meaningful
  // within one sentence).
  m_tlsTargetContextFeatures->GetStored()->clear();
  m_tlsTranslationOptionFeatures->GetStored()->clear();

  InputType const& source = *(ttask->GetSource().get());

  // Everything below is only needed when generating training examples.
  if (! m_train)
    return;

  UTIL_THROW_IF2(source.GetType() != TabbedSentenceInput,
                 "This feature function requires the TabbedSentence input type");

  const TabbedSentence& tabbedSentence = static_cast<const TabbedSentence&>(source);
  UTIL_THROW_IF2(tabbedSentence.GetColumns().size() < 2,
                 "TabbedSentence must contain target<tab>alignment");

  // Column 0: the reference target sentence.
  // NOTE(review): ownership of 'target' and 'alignment' is handed to the
  // thread-local VWTargetSentence; presumably freed by its Clear()/destructor
  // — confirm in VWTargetSentence.h.
  Phrase *target = new Phrase();
  target->CreateFromString(
    Output
    , StaticData::Instance().options()->output.factor_order
    , tabbedSentence.GetColumns()[0]
    , NULL);

  // Column 1: the source-target word alignment.
  AlignmentInfo *alignment = new AlignmentInfo(tabbedSentence.GetColumns()[1]);

  VWTargetSentence &targetSent = *GetStored();
  targetSent.Clear();
  targetSent.m_sentence = target;
  targetSent.m_alignment = alignment;

  // Precompute the source/target position constraints used by
  // IsCorrectTranslationOption().
  targetSent.SetConstraints(source.GetSize());
}
00461
00462
00463
00464
00465
// Build the word alignment for the last contextSize target words preceding
// curHypo, walking backwards through the hypothesis chain. Target indices in
// the result are relative to the context window (0 .. contextSize-1); source
// indices are absolute sentence positions.
const AlignmentInfo *VW::TransformAlignmentInfo(const Hypothesis &curHypo, size_t contextSize) const
{
  std::set<std::pair<size_t, size_t> > alignmentPoints;
  const Hypothesis *contextHypo = curHypo.GetPrevHypo();
  int idxInContext = contextSize - 1;     // fill the window from its right edge
  int processedWordsInHypo = 0;           // words consumed from the current hypothesis
  while (idxInContext >= 0 && contextHypo) {
    // Index of the next (right-to-left) target word within contextHypo.
    int idxInHypo = contextHypo->GetCurrTargetLength() - 1 - processedWordsInHypo;
    if (idxInHypo >= 0) {
      // Re-base the hypothesis-local alignment points: source side shifted by
      // the hypothesis' source offset, target side mapped into the window.
      const AlignmentInfo &hypoAlign = contextHypo->GetCurrTargetPhrase().GetAlignTerm();
      std::set<size_t> alignedToTgt = hypoAlign.GetAlignmentsForTarget(idxInHypo);
      size_t srcOffset = contextHypo->GetCurrSourceWordsRange().GetStartPos();
      BOOST_FOREACH(size_t srcIdx, alignedToTgt) {
        alignmentPoints.insert(std::make_pair(srcOffset + srcIdx, idxInContext));
      }
      processedWordsInHypo++;
      idxInContext--;
    } else {
      // Current hypothesis exhausted; continue with its predecessor.
      processedWordsInHypo = 0;
      contextHypo = contextHypo->GetPrevHypo();
    }
  }

  // Note: if the chain ends before the window is full, the remaining (BOS
  // padding) positions simply stay unaligned.
  return AlignmentInfoCollection::Instance().Add(alignmentPoints);
}
00491
00492 AlignmentInfo VW::TransformAlignmentInfo(const AlignmentInfo &alignInfo, size_t contextSize, int currentStart) const
00493 {
00494 std::set<std::pair<size_t, size_t> > alignmentPoints;
00495 for (int i = std::max(0, currentStart - (int)contextSize); i < currentStart; i++) {
00496 std::set<size_t> alignedToTgt = alignInfo.GetAlignmentsForTarget(i);
00497 BOOST_FOREACH(size_t srcIdx, alignedToTgt) {
00498 alignmentPoints.insert(std::make_pair(srcIdx, i + contextSize));
00499 }
00500 }
00501 return AlignmentInfo(alignmentPoints);
00502 }
00503
// Decide whether a translation option is consistent with the annotated target
// sentence. Returns (true, targetStartPosition) when the option's target
// phrase occurs in the reference at a position compatible with the word
// alignment constraints, otherwise (false, -1).
std::pair<bool, int> VW::IsCorrectTranslationOption(const TranslationOption &topt) const
{
  int sourceStart = topt.GetSourceWordsRange().GetStartPos();
  int sourceEnd = topt.GetSourceWordsRange().GetEndPos();

  const VWTargetSentence &targetSentence = *GetStored();

  // Minimal target span [targetStart, targetEnd] that the aligned source
  // words force the option to cover.
  int targetStart = targetSentence.m_sentence->GetSize();
  int targetEnd = -1;

  // Shrink/grow the span using the per-source-word alignment constraints.
  for(int i = sourceStart; i <= sourceEnd; ++i) {
    if(targetSentence.m_sourceConstraints[i].IsSet()) {
      if(targetStart > targetSentence.m_sourceConstraints[i].GetMin())
        targetStart = targetSentence.m_sourceConstraints[i].GetMin();
      if(targetEnd < targetSentence.m_sourceConstraints[i].GetMax())
        targetEnd = targetSentence.m_sourceConstraints[i].GetMax();
    }
  }

  // Fully unaligned source span: cannot be verified against the reference.
  if(targetEnd == -1)
    return std::make_pair(false, -1);

  // Widen the span over adjacent unaligned target words: targetStart2 walks
  // left and targetEnd2 walks right until a target word with its own
  // alignment constraint is hit.
  int targetStart2 = targetStart;
  for(int i = targetStart2; i >= 0 && !targetSentence.m_targetConstraints[i].IsSet(); --i)
    targetStart2 = i;

  int targetEnd2 = targetEnd;
  for(int i = targetEnd2;
      i < targetSentence.m_sentence->GetSize() && !targetSentence.m_targetConstraints[i].IsSet();
      ++i)
    targetEnd2 = i;

  const TargetPhrase &tphrase = topt.GetTargetPhrase();

  // The phrase must cover at least the minimal span ...
  if(tphrase.GetSize() < targetEnd - targetStart + 1)
    return std::make_pair(false, -1);

  // ... and fit inside the maximal (widened) span.
  if(tphrase.GetSize() > targetEnd2 - targetStart2 + 1)
    return std::make_pair(false, -1);

  // Try every admissible start position and compare word by word against the
  // reference sentence; the first match wins.
  for(int tempStart = targetStart2; tempStart <= targetStart; tempStart++) {
    bool found = true;

    for(int i = tempStart; i <= targetEnd2 && i < tphrase.GetSize() + tempStart; ++i) {
      if(tphrase.GetWord(i - tempStart) != targetSentence.m_sentence->GetWord(i)) {
        found = false;
        break;
      }
    }

    if(found) {
      return std::make_pair(true, tempStart);
    }
  }

  return std::make_pair(false, -1);
}
00576
// Leave-one-out for training: discount the current sentence's own occurrence
// from the phrase-table counts (feature m_leaveOneOut), rescore the affected
// translation scores, and return a keep/discard mask over the options.
// Options whose joint count drops to zero after discounting are discarded.
std::vector<bool> VW::LeaveOneOut(const TranslationOptionList &topts, const std::vector<bool> &correct) const
{
  UTIL_THROW_IF2(m_leaveOneOut.size() == 0 || ! m_train, "LeaveOneOut called in wrong setting!");

  float sourceRawCount = 0.0;
  // Slightly more than 1 to guard against float rounding when counts were
  // stored imprecisely.
  const float ONE = 1.0001;

  std::vector<bool> keepOpt;

  for (size_t i = 0; i < topts.size(); i++) {
    TranslationOption *topt = *(topts.begin() + i);
    const TargetPhrase &targetPhrase = topt->GetTargetPhrase();

    // Occurrence counts attached to the phrase pair by the phrase table.
    const CountsPhraseProperty *property =
      static_cast<const CountsPhraseProperty *>(targetPhrase.GetProperty("Counts"));

    if (! property) {
      VERBOSE(2, "VW :: Counts not found for topt! Is this an OOV?\n");
      // Without counts we cannot discount anything: keep every option as-is.
      keepOpt.assign(topts.size(), true);
      return keepOpt;
    }

    // The source marginal is shared by all options over this span; compute
    // the discounted value once.
    if (sourceRawCount == 0.0) {
      sourceRawCount = property->GetSourceMarginal() - ONE;
      if (sourceRawCount <= 0) {
        // The source phrase only occurred in this sentence: discard the span
        // entirely, it carries no independent evidence.
        keepOpt.assign(topts.size(), false);
        return keepOpt;
      }
    }

    // Only discount options that actually match the reference (their counts
    // include this very sentence).
    float discount = correct[i] ? ONE : 0.0;
    float target = property->GetTargetMarginal() - discount;
    float joint = property->GetJointCount() - discount;
    if (discount != 0.0) VERBOSE(3, "VW :: leaving one out!\n");

    if (joint > 0) {
      // Recompute the phrase translation probabilities from the discounted
      // counts. The feature is expected to have 4 scores; indices 0 and 2
      // are overwritten — presumably P(f|e) and P(e|f) — confirm against the
      // phrase-table feature's score layout.
      const FeatureFunction *feature = &FindFeatureFunction(m_leaveOneOut);
      std::vector<float> scores = targetPhrase.GetScoreBreakdown().GetScoresForProducer(feature);
      UTIL_THROW_IF2(scores.size() != 4, "Unexpected number of scores in feature " << m_leaveOneOut);
      scores[0] = TransformScore(joint / target);
      scores[2] = TransformScore(joint / sourceRawCount);

      ScoreComponentCollection &scoreBreakDown = topt->GetScoreBreakdown();
      scoreBreakDown.Assign(feature, scores);
      topt->UpdateScore();
      keepOpt.push_back(true);
    } else {
      // All evidence for this phrase pair came from the current sentence.
      VERBOSE(2, "VW :: discarded topt when leaving one out\n");
      keepOpt.push_back(false);
    }
  }

  return keepOpt;
}
00636
00637 }