tests : remove TMP_ATTN_BENCH

ggml-ci

This commit is contained in:
parent 1f77f49787
commit ff2c64a9f4

1 changed file with 0 additions and 70 deletions
@@ -15,9 +15,6 @@
 #include <thread>
 #include <vector>
 
-// TODO: remove before merging
-//#define TMP_ATTN_BENCH
-
 static void init_tensor_uniform(ggml_tensor * tensor, float min = -1.0f, float max = 1.0f) {
     // static RNG initialization (revisit if n_threads stops being constant)
     static const size_t n_threads = std::thread::hardware_concurrency();
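Note: `TMP_ATTN_BENCH` was a temporary compile-time switch used while the flash-attention path was being developed. When defined, it swapped the normal op-duplication logic in `test_case`'s perf evaluation for a whole-graph benchmark and registered an unfused `test_attn` reference case alongside `test_flash_attn_ext`. As the `// TODO: remove before merging` comment promised, this commit deletes the switch and everything behind it.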
@@ -574,19 +571,9 @@ struct test_case {
         // duplicate the op
         size_t target_size = ggml_backend_is_cpu(backend) ? 1ULL << 33 : 1ULL << 35; // 8 GB CPU, 32 GB GPU
         int n_runs = std::min((size_t)gf->size - gf->n_nodes, target_size / op_size(out)) + 1;
-#ifndef TMP_ATTN_BENCH
         for (int i = 1; i < n_runs; i++) {
             gf->nodes[gf->n_nodes++] = out;
         }
-#else
-        int n_nodes = gf->n_nodes;
-        n_runs = 1000;
-        for (int i = 1; i < n_runs; i++) {
-            for (int j = 0; j < n_nodes; j++) {
-                gf->nodes[gf->n_nodes++] = gf->nodes[j];
-            }
-        }
-#endif
 
         // calculate memory
         size_t mem = n_runs * op_size(out);
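For scale: the surviving path sizes the measurement loop by memory rather than by a fixed count. On CPU, `target_size` is 2^33 bytes (8 GiB), so an op whose tensors total 1 MiB (2^20 bytes) would get `n_runs = 2^33 / 2^20 + 1 = 8193` repetitions, clamped by the graph's remaining node capacity (`gf->size - gf->n_nodes`). The deleted `#else` branch instead pinned `n_runs` at 1000 and replicated every existing node, so the timing covered the full unfused attention sequence rather than just its final op.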
@@ -1522,50 +1509,6 @@ struct test_flash_attn_ext : public test_case {
     }
 };
 
-#ifdef TMP_ATTN_BENCH
-// ATTN
-struct test_attn : public test_case {
-    const int64_t hs; // head size
-    const int64_t nh; // num heads
-    const int64_t kv; // kv size
-    const int64_t nb; // batch size
-
-    std::string op_desc(ggml_tensor * t) override {
-        return "ATTN";
-
-        GGML_UNUSED(t);
-    }
-
-    std::string vars() override {
-        return VARS_TO_STR4(hs, nh, kv, nb);
-    }
-
-    double max_nmse_err() override {
-        return 5e-4;
-    }
-
-    test_attn(int64_t hs = 128, int64_t nh = 32, int64_t kv = 96, int64_t nb = 8)
-        : hs(hs), nh(nh), kv(kv), nb(nb) {}
-
-    ggml_tensor * build_graph(ggml_context * ctx) override {
-        ggml_tensor * q = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, hs, nb, nh, 1);
-        ggml_tensor * k = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, hs, kv, nh, 1);
-        ggml_tensor * v = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, kv, hs, nh, 1); // transposed
-        ggml_tensor * mask = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, kv, nb, 1, 1);
-
-        struct ggml_tensor * cur;
-
-        cur = ggml_mul_mat     (ctx, k, q);
-        cur = ggml_soft_max_ext(ctx, cur, mask, nullptr, 1.0f/sqrtf(hs), 0.0f);
-        cur = ggml_mul_mat     (ctx, v, cur);
-        cur = ggml_permute     (ctx, cur, 0, 2, 1, 3);
-        cur = ggml_cont_2d     (ctx, cur, hs*nh, nb);
-
-        return cur;
-    }
-};
-#endif
-
 enum llm_norm_type {
     LLM_NORM,
     LLM_NORM_RMS,
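For contrast with the unfused graph deleted above (`mul_mat` → `soft_max_ext` → `mul_mat` → `permute` → `cont`), here is a minimal sketch of the fused call that the retained `test_flash_attn_ext` case exercises. This is illustrative rather than code from this commit: it assumes the six-argument `ggml_flash_attn_ext(ctx, q, k, v, mask, scale)` form from the flash-attention development branch; later ggml revisions extend the signature and add padding requirements on the mask.

    // fused equivalent of the deleted test_attn graph (sketch, see caveats above)
    ggml_tensor * q    = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, hs, nb, nh, 1);
    ggml_tensor * k    = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, hs, kv, nh, 1);
    ggml_tensor * v    = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, hs, kv, nh, 1); // same layout as k, not transposed
    ggml_tensor * mask = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, kv, nb, 1, 1);

    // one op replaces the five-node unfused chain; the scale matches 1/sqrt(head_size)
    ggml_tensor * cur  = ggml_flash_attn_ext(ctx, q, k, v, mask, 1.0f/sqrtf(hs));

Keeping both graphs behind a single define made it easy to benchmark the fused kernel against the naive decomposition on identical shapes.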
@@ -2230,18 +2173,6 @@ static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op
     test_cases.emplace_back(new test_timestep_embedding());
     test_cases.emplace_back(new test_leaky_relu());
 
-#ifdef TMP_ATTN_BENCH
-    for (int hs : { 128, 256, 64, 80, }) {
-        for (int nh : { 32, }) {
-            for (int kv : { 512, 1024, 2048, 4096, }) {
-                for (int nb : { 1, 2, 4, 8, 512, 1024, 2048, }) {
-                    test_cases.emplace_back(new test_attn          (hs, nh, kv, nb));
-                    test_cases.emplace_back(new test_flash_attn_ext(hs, nh, kv, nb));
-                }
-            }
-        }
-    }
-#else
     for (int hs : { 64, 80, 128, 256, }) {
         for (int nh : { 32, }) {
             for (int kv : { 512, 1024, }) {
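The deleted bench grid was deliberately heavy: 4 head sizes × 1 head count × 4 kv lengths (up to 4096) × 7 batch sizes (up to 2048) gives 112 shape combinations, each registered twice so the unfused `test_attn` and fused `test_flash_attn_ext` could be compared shape for shape. The retained sweep keeps the suite fast while still covering the common head sizes. To benchmark attention without the define, the tool's perf mode with an op filter is the usual route, e.g. `test-backend-ops perf -o FLASH_ATTN_EXT` (flag spelling may differ across revisions; check the tool's usage output).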
@@ -2251,7 +2182,6 @@ static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op
             }
         }
     }
-#endif
 
     // these tests are disabled to save execution time, but they can be handy for debugging
 #if 0