Reviewed-on: #2
@ -221,8 +221,6 @@ def perplexity(model, data, batch_size=32):
# Progress update
processed = min(i + batch_size, total_sequences)
print(f"\rppl {processed}/{total_sequences} ({processed/total_sequences*100:.1f}%)", end="", flush=True)
print() # Final newline
return np.exp(total_loss_sum / total_tokens_count)
#################################### Model #########################################
@ -223,8 +223,6 @@ def perplexity(model, data, batch_size=32):
@ -327,8 +327,6 @@ def perplexity(model, data, batch_size=32):
@ -326,8 +326,6 @@ def perplexity(model, data, batch_size=32):
@ -325,8 +325,6 @@ def perplexity(model, data, batch_size=32):
@ -343,8 +343,6 @@ def perplexity(model, data, batch_size=32):
#################################### Model #########################################
@ -341,8 +341,6 @@ def perplexity(model, data, batch_size=32):