llama : fix msvc warnings
This commit is contained in:
parent
9f5ac6d2d2
commit
5eb626225d
1 changed file with 3 additions and 3 deletions
|
@ -1275,7 +1275,7 @@ struct llama_hparams {
|
|||
if (this->rope_finetuned != other.rope_finetuned) return true;
|
||||
if (this->n_yarn_orig_ctx != other.n_yarn_orig_ctx) return true;
|
||||
|
||||
const float EPSILON = 1e-9;
|
||||
const float EPSILON = 1e-9f;
|
||||
|
||||
if (!is_float_close(this->f_norm_eps, other.f_norm_eps, EPSILON)) return true;
|
||||
if (!is_float_close(this->f_norm_rms_eps, other.f_norm_rms_eps, EPSILON)) return true;
|
||||
|
@ -10294,7 +10294,7 @@ int llama_token_to_piece(const struct llama_model * model, llama_token token, ch
|
|||
std::string result = model->vocab.id_to_token[token].text;
|
||||
llama_unescape_whitespace(result);
|
||||
if (length < (int) result.length()) {
|
||||
return -result.length();
|
||||
return -(int) result.length();
|
||||
}
|
||||
memcpy(buf, result.c_str(), result.length());
|
||||
return result.length();
|
||||
|
@ -10324,7 +10324,7 @@ int llama_token_to_piece(const struct llama_model * model, llama_token token, ch
|
|||
std::string result = model->vocab.id_to_token[token].text;
|
||||
result = llama_decode_text(result);
|
||||
if (length < (int) result.length()) {
|
||||
return -result.length();
|
||||
return -(int) result.length();
|
||||
}
|
||||
memcpy(buf, result.c_str(), result.length());
|
||||
return result.length();
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue