diff --git a/ggml-cuda/rope.cu b/ggml-cuda/rope.cu
index 8aba089f4..bf0342e32 100644
--- a/ggml-cuda/rope.cu
+++ b/ggml-cuda/rope.cu
@@ -291,7 +291,7 @@ void ggml_cuda_op_rope(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
             freq_factors = (const float *) src2->data;
         }
     } else {
-        GGML_ASSERT(src2 == nullptr && "TODO: freq_factors not implemented for mode 1");
+        GGML_ASSERT(src2 == nullptr && "TODO: freq_factors not implemented for !is_neox");
     }
 
     rope_corr_dims corr_dims;
diff --git a/ggml-metal.m b/ggml-metal.m
index 7bc75f39b..5d5ad20ad 100644
--- a/ggml-metal.m
+++ b/ggml-metal.m
@@ -2263,18 +2263,8 @@ static enum ggml_status ggml_metal_graph_compute(
 
                         GGML_ASSERT(!is_glm && "GLM RoPE not implemented in Metal");
 
-                        if (is_neox) {
-                            // TODO: move these asserts to ggml.c
-                            GGML_ASSERT(src1->type == GGML_TYPE_I32);
-                            GGML_ASSERT(src1->ne[0] == ne2);
-
-                            if (id_src2 != nil) {
-                                // TODO: move these asserts to ggml.c
-                                GGML_ASSERT(src2->type == GGML_TYPE_F32);
-                                GGML_ASSERT(src2->ne[0] >= n_dims / 2);
-                            }
-                        } else {
-                            GGML_ASSERT(id_src2 == nil && "TODO: freq_factors not implemented for mode 1");
+                        if (!is_neox) {
+                            GGML_ASSERT(id_src2 == nil && "TODO: freq_factors not implemented for !is_neox");
                         }
 
                         id pipeline = nil;