Commit 8694c7b0 authored by rprenger

Fix an off-by-one bug in the generation loop. Without this change, asking for 1 token returns 2, asking for 2 returns 3, and so on.

parent 055a673e
+1 −1
@@ -241,7 +241,7 @@ def sample_sequence_batch(model, context_tokens, context_lengths,
        
        lengths = torch.ones([batch_size]).long().cuda() * maxlen

-        while context_length <= (maxlen):
+        while context_length < maxlen:
            types2use = None
            if counter == 0:
                tokens2use = tokens[:, :context_length]
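
Why the inclusive bound over-generates, as a minimal standalone sketch: the hypothetical tokens_emitted helper below is not part of the patched code. It assumes, as the loop above suggests, that one token is sampled per iteration and context_length grows by one each time, so looping while context_length <= maxlen runs maxlen - context_length + 1 times instead of maxlen - context_length.

def tokens_emitted(context_length, maxlen, inclusive):
    # Count loop iterations, i.e. tokens sampled, under each comparison.
    generated = 0
    while (context_length <= maxlen) if inclusive else (context_length < maxlen):
        generated += 1       # one new token is sampled per iteration
        context_length += 1  # the context grows by that token
    return generated

# Asking for 1 new token on a 5-token context means maxlen = 6:
print(tokens_emitted(5, 6, inclusive=True))   # 2, the old <= condition, off by one
print(tokens_emitted(5, 6, inclusive=False))  # 1, the fixed < condition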