From 98bbd73b6929cabef234d39cc58ab30e144a75fc Mon Sep 17 00:00:00 2001
From: Henri Vasserman <henv@hot.ee>
Date: Mon, 3 Jul 2023 03:37:49 +0300
Subject: [PATCH] server : fix crash when sampled token id is -1

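llama.doCompletion() can return a completion_token_output whose tok is
-1, a sentinel for "no token sampled". Feeding that sentinel straight
into llama_token_to_str() is what crashed the server: the function
resolves the token by indexing into the vocabulary table, so -1 is an
out-of-bounds read. A minimal sketch of the lookup, assuming the
then-current implementation:

    const char * llama_token_to_str(const struct llama_context * ctx, llama_token token) {
        return ctx->vocab.id_to_token[token].tok.c_str(); // token == -1 indexes out of bounds
    }

Guard both generation loops by substituting an empty string for the -1
sentinel before conversion; the loop then proceeds as it would for any
empty token text.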
---
 examples/server/server.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index e4ddbe9865506..3bf98595763e2 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -906,7 +906,7 @@ int main(int argc, char ** argv) {
 
             while (llama.has_next_token) {
                 const completion_token_output token_with_probs = llama.doCompletion();
-                const std::string token_text = llama_token_to_str(llama.ctx, token_with_probs.tok);
+                const std::string token_text = token_with_probs.tok == -1 ? "" : llama_token_to_str(llama.ctx, token_with_probs.tok);
 
                 stop_pos = llama.findStoppingStrings(llama.generated_text,
                     token_text.size(), STOP_FULL);
@@ -933,7 +933,7 @@ int main(int argc, char ** argv) {
 
                 while (llama.has_next_token) {
                     const completion_token_output token_with_probs = llama.doCompletion();
-                    const std::string token_text = llama_token_to_str(llama.ctx, token_with_probs.tok);
+                    const std::string token_text = token_with_probs.tok == -1 ? "" : llama_token_to_str(llama.ctx, token_with_probs.tok);
                     if (llama.multibyte_pending > 0) {
                         continue;
                     }