@@ -9416,7 +9416,7 @@ static void diag_mask_inf_f32(const float * x, float * dst, const int ncols, con
 
 
 template <bool vals_smem, int ncols_template, int block_size_template>
-static void soft_max_f32(const float * x, const float * mask, const float *pos, float * dst, const int ncols_par,
+static void soft_max_f32(const float * x, const float * mask, float * dst, const int ncols_par,
                          const int nrows_y, const float scale, const float max_bias, const float m0,
                          const float m1, uint32_t n_head_log2, const sycl::nd_item<3> &item_ct1, float *buf) {
     const int ncols = ncols_template == 0 ? ncols_par : ncols_template;
@@ -9430,7 +9430,7 @@ static void soft_max_f32(const float * x, const float * mask, const float *pos,
     const int warp_id = item_ct1.get_local_id(2) / WARP_SIZE;
     const int lane_id = item_ct1.get_local_id(2) % WARP_SIZE;
 
-    float slope = 0.0f;
+    float slope = 1.0f;
 
     // ALiBi
     if (max_bias > 0.0f) {
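
Note on the `slope` default: the kernel now applies the slope to the mask itself (`slope*mask[iy]` below), so when max_bias == 0 and the ALiBi branch is skipped, the slope must be 1.0f for the mask to pass through unscaled. The old code added `slope*pos[col]` as a separate term, so a 0.0f default was harmless there. For context, a minimal sketch of how this kernel family derives the per-head slope from m0, m1 and n_head_log2 inside the `if (max_bias > 0.0f)` branch (assumed here, not shown in this hunk):

    // sketch only: the head index comes from the row being processed
    const uint32_t h = rowx / nrows_y;
    const float base = h < n_head_log2 ? m0 : m1;
    const int   exp  = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1;
    slope = sycl::pow(base, float(exp));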
@@ -9455,7 +9455,7 @@ static void soft_max_f32(const float * x, const float * mask, const float *pos,
         const int ix = rowx*ncols + col;
         const int iy = rowy*ncols + col;
 
-        const float val = x[ix]*scale + (mask ? mask[iy] : 0.0f) + (pos ? slope*pos[col] : 0.0f);
+        const float val = x[ix]*scale + (mask ? slope*mask[iy] : 0.0f);
 
         vals[col] = val;
         max_val = sycl::max(max_val, val);
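
With the position term folded into the mask there is a single fused bias term per element. A scalar reference of what one row computes end to end, assuming a precomputed slope (illustrative C++, not the SYCL kernel):

    #include <algorithm>
    #include <cmath>

    // One softmax row: dst = softmax(x*scale + slope*mask), numerically
    // stabilized by subtracting the row maximum before exponentiating.
    void soft_max_row_ref(const float * x, const float * mask, float * dst,
                          int ncols, float scale, float slope) {
        float max_val = -INFINITY;
        for (int c = 0; c < ncols; ++c) {
            dst[c] = x[c]*scale + (mask ? slope*mask[c] : 0.0f);
            max_val = std::max(max_val, dst[c]);
        }
        float sum = 0.0f;
        for (int c = 0; c < ncols; ++c) {
            dst[c] = expf(dst[c] - max_val);
            sum += dst[c];
        }
        for (int c = 0; c < ncols; ++c) {
            dst[c] /= sum;
        }
    }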
@@ -13017,7 +13017,7 @@ static void diag_mask_inf_f32_sycl(const float *x, float *dst,
 }
 
 template <bool vals_smem, int ncols_template, int block_size_template>
-static void soft_max_f32_submitter(const float * x, const float * mask, const float *pos, float * dst, const int ncols_par,
+static void soft_max_f32_submitter(const float * x, const float * mask, float * dst, const int ncols_par,
                                    const int nrows_y, const float scale, const float max_bias, const float m0,
                                    const float m1, uint32_t n_head_log2, sycl::range<3> block_nums, sycl::range<3> block_dims,
                                    const size_t n_local_scratch, dpct::queue_ptr stream) {
@@ -13027,15 +13027,15 @@ static void soft_max_f32_submitter(const float * x, const float * mask, const fl
         cgh.parallel_for(
             sycl::nd_range<3>(block_nums * block_dims, block_dims),
             [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
-                soft_max_f32<vals_smem, ncols_template, block_size_template>(x, mask, pos, dst, ncols_par,
+                soft_max_f32<vals_smem, ncols_template, block_size_template>(x, mask, dst, ncols_par,
                                                                              nrows_y, scale, max_bias, m0,
                                                                              m1, n_head_log2, item_ct1,
                                                                              local_buf_acc.get_pointer());
             });
     });
 }
 
-static void soft_max_f32_sycl(const float * x, const float * mask, const float * pos,
+static void soft_max_f32_sycl(const float * x, const float * mask,
                               float * dst, const int ncols_x, const int nrows_x,
                               const int nrows_y, const float scale, const float max_bias,
                               dpct::queue_ptr stream) {
@@ -13057,60 +13057,60 @@ static void soft_max_f32_sycl(const float * x, const float * mask, const float *
     const size_t local_mem_size = stream->get_device().get_info<sycl::info::device::local_mem_size>();
     if (n_local_scratch*sizeof(float) < local_mem_size) {
         if (ncols_x > max_block_size) {
-            soft_max_f32_submitter<true, 0, 0>(x, mask, pos, dst, ncols_x, nrows_y, scale,
+            soft_max_f32_submitter<true, 0, 0>(x, mask, dst, ncols_x, nrows_y, scale,
                                                max_bias, m0, m1, n_head_log2, block_nums,
                                                block_dims, n_local_scratch, stream);
             return;
         }
         switch (ncols_x) {
             case 32:
-                soft_max_f32_submitter<true, 32, 32>(x, mask, pos, dst, ncols_x, nrows_y, scale,
+                soft_max_f32_submitter<true, 32, 32>(x, mask, dst, ncols_x, nrows_y, scale,
                                                      max_bias, m0, m1, n_head_log2, block_nums,
                                                      block_dims, n_local_scratch, stream);
                 break;
             case 64:
-                soft_max_f32_submitter<true, 64, 64>(x, mask, pos, dst, ncols_x, nrows_y, scale,
+                soft_max_f32_submitter<true, 64, 64>(x, mask, dst, ncols_x, nrows_y, scale,
                                                      max_bias, m0, m1, n_head_log2, block_nums,
                                                      block_dims, n_local_scratch, stream);
                 break;
             case 128:
-                soft_max_f32_submitter<true, 128, 128>(x, mask, pos, dst, ncols_x, nrows_y, scale,
+                soft_max_f32_submitter<true, 128, 128>(x, mask, dst, ncols_x, nrows_y, scale,
                                                        max_bias, m0, m1, n_head_log2, block_nums,
                                                        block_dims, n_local_scratch, stream);
                 break;
             case 256:
-                soft_max_f32_submitter<true, 256, 256>(x, mask, pos, dst, ncols_x, nrows_y, scale,
+                soft_max_f32_submitter<true, 256, 256>(x, mask, dst, ncols_x, nrows_y, scale,
                                                        max_bias, m0, m1, n_head_log2, block_nums,
                                                        block_dims, n_local_scratch, stream);
                 break;
             case 512:
-                soft_max_f32_submitter<true, 512, 512>(x, mask, pos, dst, ncols_x, nrows_y, scale,
+                soft_max_f32_submitter<true, 512, 512>(x, mask, dst, ncols_x, nrows_y, scale,
                                                        max_bias, m0, m1, n_head_log2, block_nums,
                                                        block_dims, n_local_scratch, stream);
                 break;
             case 1024:
-                soft_max_f32_submitter<true, 1024, 1024>(x, mask, pos, dst, ncols_x, nrows_y, scale,
+                soft_max_f32_submitter<true, 1024, 1024>(x, mask, dst, ncols_x, nrows_y, scale,
                                                          max_bias, m0, m1, n_head_log2, block_nums,
                                                          block_dims, n_local_scratch, stream);
                 break;
             case 2048:
-                soft_max_f32_submitter<true, 2048, 1024>(x, mask, pos, dst, ncols_x, nrows_y, scale,
+                soft_max_f32_submitter<true, 2048, 1024>(x, mask, dst, ncols_x, nrows_y, scale,
                                                          max_bias, m0, m1, n_head_log2, block_nums,
                                                          block_dims, n_local_scratch, stream);
                 break;
             case 4096:
-                soft_max_f32_submitter<true, 4096, 1024>(x, mask, pos, dst, ncols_x, nrows_y, scale,
+                soft_max_f32_submitter<true, 4096, 1024>(x, mask, dst, ncols_x, nrows_y, scale,
                                                          max_bias, m0, m1, n_head_log2, block_nums,
                                                          block_dims, n_local_scratch, stream);
                 break;
             default:
-                soft_max_f32_submitter<true, 0, 0>(x, mask, pos, dst, ncols_x, nrows_y, scale,
+                soft_max_f32_submitter<true, 0, 0>(x, mask, dst, ncols_x, nrows_y, scale,
                                                    max_bias, m0, m1, n_head_log2, block_nums,
                                                    block_dims, n_local_scratch, stream);
                 break;
         }
     } else {
-        soft_max_f32_submitter<false, 0, 0>(x, mask, pos, dst, ncols_x, nrows_y, scale,
+        soft_max_f32_submitter<false, 0, 0>(x, mask, dst, ncols_x, nrows_y, scale,
                                             max_bias, m0, m1, n_head_log2, block_nums,
                                             block_dims, WARP_SIZE, stream);
     }
@@ -14675,12 +14675,9 @@ inline void ggml_sycl_op_soft_max(const ggml_tensor *src0,
     GGML_ASSERT(src0->type == GGML_TYPE_F32);
     GGML_ASSERT( dst->type == GGML_TYPE_F32);
 
-    const ggml_tensor * src2 = dst->src[2];
-
-#pragma message("TODO: add ggml_sycl_op_soft_max() F16 src1 and src2 support")
+#pragma message("TODO: add ggml_sycl_op_soft_max() F16 src1 support")
 #pragma message("ref: https://github.com/ggerganov/llama.cpp/pull/5021")
     GGML_ASSERT(!src1 || src1->type == GGML_TYPE_F32); // src1 contains mask and it is optional
-    GGML_ASSERT(!src2 || src2->type == GGML_TYPE_F32); // src2 contains positions and it is optional
 
     const int64_t ne00 = src0->ne[0];
     const int64_t nrows_x = ggml_nrows(src0);
@@ -14692,25 +14689,7 @@ inline void ggml_sycl_op_soft_max(const ggml_tensor *src0,
     memcpy(&scale, dst->op_params + 0, sizeof(float));
     memcpy(&max_bias, dst->op_params + 1, sizeof(float));
 
-    // positions tensor
-    float * src2_dd = nullptr;
-    sycl_pool_alloc<float> src2_f;
-
-    const bool use_src2 = src2 != nullptr;
-
-    if (use_src2) {
-        const bool src2_on_device = src2->backend == GGML_BACKEND_TYPE_GPU;
-
-        if (src2_on_device) {
-            ggml_tensor_extra_gpu * src2_extra = (ggml_tensor_extra_gpu *) src2->extra;
-            src2_dd = (float *) src2_extra->data_device[g_main_device];
-        } else {
-            src2_dd = src2_f.alloc(ggml_nelements(src2));
-            SYCL_CHECK(ggml_sycl_cpy_tensor_2d(src2_dd, src2, 0, 0, 0, 1, main_stream));
-        }
-    }
-
-    soft_max_f32_sycl(src0_dd, src1 ? src1_dd : nullptr, src2_dd, dst_dd, ne00,
+    soft_max_f32_sycl(src0_dd, src1 ? src1_dd : nullptr, dst_dd, ne00,
                       nrows_x, nrows_y, scale, max_bias, main_stream);
 }
 
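
Removing src2 also removes a whole host-side staging path: the positions no longer need their own device buffer or a ggml_sycl_cpy_tensor_2d upload, because any position-dependent ALiBi bias now travels inside the mask and is scaled per head in the kernel. A hypothetical caller-side fold (the names kq_mask, n_kv and p are illustrative, not from this commit): for a causal row at query position p, storing the non-positive distance in the mask makes the kernel's slope*mask term reproduce the old slope*pos bias, while -INFINITY entries stay -INFINITY under any positive slope.

    // illustrative only: build one row of an ALiBi-ready F32 mask
    for (int j = 0; j < n_kv; ++j) {
        kq_mask[j] = (j <= p) ? (float) (j - p) : -INFINITY;
    }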