From b8acb6c9b845ab25ca564da3c1cdf9016bd9a586 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Mon, 16 Oct 2023 00:20:03 +0300
Subject: [PATCH] swift : fix build

ggml-ci
---
 examples/batched.swift/Sources/main.swift | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/examples/batched.swift/Sources/main.swift b/examples/batched.swift/Sources/main.swift
index 938f30512..a1ac6562d 100644
--- a/examples/batched.swift/Sources/main.swift
+++ b/examples/batched.swift/Sources/main.swift
@@ -69,7 +69,7 @@ for id: llama_token in tokens {
 
 print("\n")
 
-var batch = llama_batch_init(max(Int32(tokens.count), Int32(n_parallel)), 0)
+var batch = llama_batch_init(max(Int32(tokens.count), Int32(n_parallel)), 0, 1)
 defer {
     llama_batch_free(batch)
 }
@@ -80,7 +80,8 @@ batch.n_tokens = Int32(tokens.count)
 for (i, token) in tokens.enumerated() {
     batch.token[i] = token
     batch.pos[i] = Int32(i)
-    batch.seq_id[i] = 0
+    batch.n_seq_id[i] = 1
+    batch.seq_id[i][0] = 0
     batch.logits[i] = 0
 }
 
@@ -169,7 +170,8 @@ while n_cur <= n_len {
         // push this new token for next evaluation
         batch.token[Int(batch.n_tokens)] = new_token_id
         batch.pos[Int(batch.n_tokens)] = n_cur
-        batch.seq_id[Int(batch.n_tokens)] = Int32(i)
+        batch.n_seq_id[Int(batch.n_tokens)] = 1
+        batch.seq_id[Int(batch.n_tokens)][0] = Int32(i)
         batch.logits[Int(batch.n_tokens)] = 1
 
         i_batch[i] = batch.n_tokens