[[kernel, max_total_threads_per_threadgroup(WM * WN * 32)]]
void attention(
    const device T* Q [[buffer(0)]],
    const device T* K [[buffer(1)]],
    const device T* V [[buffer(2)]],
    device T* O [[buffer(3)]],
    const constant AttnParams* params [[buffer(4)]],
    uint simd_lane_id [[thread_index_in_simdgroup]],
    uint simd_group_id [[simdgroup_index_in_threadgroup]],
    uint3 tid [[threadgroup_position_in_grid]],
    uint3 lid [[thread_position_in_threadgroup]]) {
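  // Move to the correct block: offset each pointer by its batch, head, and
  // query-block strides. With grouped-query attention, gqa_factor query heads
  // map onto a single KV head.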
  ulong3 tidl{tid.x, tid.y, tid.z};

  Q += tidl.z * params->Q_strides[0] +
      tidl.y * params->Q_strides[1] +
      tidl.x * BQ * params->Q_strides[2];

  ulong kv_head_idx = int(tid.y) / params->gqa_factor;
  K += tidl.z * params->K_strides[0] +
      kv_head_idx * params->K_strides[1];

  V += tidl.z * params->V_strides[0] +
      kv_head_idx * params->V_strides[1];

  O += tidl.z * params->O_strides[0] +
      tidl.y * params->O_strides[1] +
      tidl.x * BQ * params->O_strides[2];
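
  // Threadgroup memory for the Q, K, and V blocks. The pad* constants allow
  // optional padding of the leading dimensions; K is staged with BK (+ padK)
  // as its leading dimension, i.e. transposed relative to Q and V.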
  constexpr short padQ = 0;
  constexpr short padK = 0;
  constexpr short padV = 0;

  constexpr short LDQ_tgp = BD + padQ;
  constexpr short LDK_tgp = BK + padK;
  constexpr short LDV_tgp = BD + padV;

  threadgroup T Qs[BQ * (BD + padQ)];
  threadgroup T Ks[(BK + padK) * BD];
  threadgroup T Vs[BK * (BD + padV)];
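
  // Block loaders (QBlockLoader/KBlockLoader/VBlockLoader, assumed defined in
  // the omitted portion above) cooperatively stage tiles of Q, K, and V from
  // device memory into the threadgroup buffers.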
  QBlockLoader loader_q(
      Q, params->Q_strides[2], Qs, simd_group_id, simd_lane_id);
  KBlockLoader loader_k(
      K, params->K_strides[2], Ks, simd_group_id, simd_lane_id);
  VBlockLoader loader_v(
      V, params->V_strides[2], Vs, simd_group_id, simd_lane_id);
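
  // Simdgroup MMA setup: work is tiled into 8x8 fragments. Each of the
  // WM * WN simdgroups owns its own rows of the Q block, while all simdgroups
  // traverse the same K/V fragments. The fragment type (MMAFrag_acc_t) and
  // the Qtile/Ktile/Stile/Vtile/Otile register tiles are assumed to be
  // declared in the omitted portions of this kernel.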
  constexpr short kFragSize = 8;

  constexpr int kNWarps = WM * WN;
  static_assert(
      BQ >= (kNWarps * kFragSize) && BQ % (kNWarps * kFragSize) == 0,
      "Each simdgroup must host at least 1 simdgroup matrix along Q sequence.");

  // Q-sequence fragments per simdgroup
  constexpr int TQ = BQ / (kNWarps * kFragSize);
  // KV-sequence fragments (all simdgroups traverse the same fragments)
  constexpr int TK = BK / kFragSize;
  // Head-dimension fragments (all simdgroups traverse the same fragments)
  constexpr int TD = BD / kFragSize;

  static_assert(TQ == 1, "Check TQ");
  const short2 simd_coord = MMAFrag_acc_t::get_coord(simd_lane_id);
  const short sm = simd_coord.y;
  const short sn = simd_coord.x;
  const short tm = kFragSize * TQ * simd_group_id;

  const short Qs_offset = (tm + sm) * LDQ_tgp + sn;
  const short Ks_offset = sm * LDK_tgp + sn;
  const short Vs_offset = sm * LDV_tgp + sn;

  constexpr short Qs_tile_stride = kFragSize;
  constexpr short Ks_tile_stride = kFragSize * LDK_tgp;

  threadgroup_barrier(mem_flags::mem_threadgroup);
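
  // Load the Q block, taking the bounds-checked path only for the ragged last
  // query block, then apply the softmax scale in place (ts is assumed to be a
  // scale transform built from params->scale in the omitted setup code).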
  if (!align_Q && int(tid.x) == (params->NQ_aligned)) {
    loader_q.load_safe(short2(BD, params->qL - params->NQ_aligned * BQ));
  } else {
    loader_q.load_unsafe();
  }

  loader_q.apply_inplace_op(ts);
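
  // Per-row online-softmax state kept in registers: the running row maximum
  // and the running sum of exponentials, updated once per KV block.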
  constexpr short kRowsPT = decltype(Stile)::kRowsPerThread;

  AccumType max_score[kRowsPT];
  AccumType sum_score[kRowsPT] = {0};

  for (short i = 0; i < kRowsPT; ++i) {
    // Initialize the running row max to -inf
    max_score[i] = -metal::numeric_limits<AccumType>::infinity();
  }
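
  // Main loop over KV-sequence blocks (flash-attention style: compute scores
  // for one block at a time and rescale the partial output as the max grows).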
  for (int kb = 0; kb < params->NK; kb++) {
    // Load the K block (bounds-checked only for the last, unaligned block)
    threadgroup_barrier(mem_flags::mem_threadgroup);
    if (!align_K && kb == (params->NK_aligned)) {
      loader_k.load_safe(short2(BD, params->kL - params->NK_aligned * BK));
    } else {
      loader_k.load_unsafe();
    }
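
    // Wait for the K block, then compute S = Q @ K^T for this block.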
    threadgroup_barrier(mem_flags::mem_threadgroup);

    // Accumulate Q @ K^T over head-dimension fragments
    Stile.clear();
    for (short dd = 0; dd < TD; dd++) {
      simdgroup_barrier(mem_flags::mem_none);

      Qtile.template load<T, 1, 1, LDQ_tgp, 1>(
          &Qs[Qs_offset + dd * Qs_tile_stride]);
      Ktile.template load<T, 1, 1, LDK_tgp, 1>(
          &Ks[Ks_offset + dd * Ks_tile_stride]);

      simdgroup_barrier(mem_flags::mem_none);

      tile_matmad(Stile, Qtile, Ktile, Stile);
    }
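
    // Mask columns past the end of the KV sequence in the last (ragged) K
    // block so they contribute nothing after the exponential.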
    if (!align_K && kb == (params->NK_aligned)) {
      using stile_t = decltype(Stile);
      using selem_t = typename stile_t::elem_type;
      constexpr auto neg_inf = -metal::numeric_limits<selem_t>::infinity();
      const short lim = params->kL - params->NK_aligned * BK;

      for (short i = 0; i < stile_t::kTileRows; i++) {
        for (short j = 0; j < stile_t::kTileCols; j++) {
          short col_pos = sn + (j * stile_t::kFragCols);
          for (short jj = 0; jj < stile_t::MMAFrag_t::kElemCols; jj++) {
            if ((col_pos + jj) >= lim) {
              Stile.frag_at(i, j)[jj] = neg_inf; // score -> -inf, exp() -> 0
            }
          }
        }
      }
    }

    simdgroup_barrier(mem_flags::mem_none);
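
    // Stage the V block into threadgroup memory (bounds-checked only for the
    // last block); it is consumed after the barrier further below.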
    if (!align_K && kb == (params->NK_aligned)) {
      loader_v.load_safe(short2(BD, params->kL - params->NK_aligned * BK));
    } else {
      loader_v.load_unsafe();
    }
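
    // Online softmax update for this block: compute the new row maxima and
    // the factor used to rescale previously accumulated sums and outputs.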
    AccumType new_max[kRowsPT];
    AccumType factor[kRowsPT];
    for (short i = 0; i < kRowsPT; ++i) {
      new_max[i] = max_score[i];
    }

    // Row max over the current block (seeded with the previous running max)
    Stile.template row_reduce<MaxOp>(new_max);

    // S := exp(S - rowmax(S))
    Stile.template row_bin_op<ExpSubOp>(new_max);

    // Rescale factor for previously accumulated values
    for (short i = 0; i < kRowsPT; ++i) {
      factor[i] = fast::exp(max_score[i] - new_max[i]);
    }

    // Save the new running max for the next iteration
    for (short i = 0; i < kRowsPT; ++i) {
      max_score[i] = new_max[i];
    }

    // Row sum of the current block
    AccumType sum_score_tmp[kRowsPT] = {0};
    Stile.template row_reduce<SumOp>(sum_score_tmp);

    // Update the running softmax denominator
    for (short i = 0; i < kRowsPT; ++i) {
      sum_score[i] = sum_score[i] * factor[i] + sum_score_tmp[i];
    }

    // Rescale the previously accumulated output rows
    Otile.template row_bin_op<MulOp>(factor);
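
    // Multiply the exponentiated scores by V and accumulate into O.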
    threadgroup_barrier(mem_flags::mem_threadgroup);
    Vtile.template load<T, 1, 1, LDV_tgp, 1>(&Vs[Vs_offset]);

    simdgroup_barrier(mem_flags::mem_none);

    tile_matmad(Otile, Stile, Vtile, Otile);

    // Advance the K and V loaders to the next block
    loader_k.next();
    loader_v.next();
  }
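
  // Epilogue: divide by the softmax denominator and write this simdgroup's
  // rows of O back to device memory (bounds-checked for the ragged last
  // query block).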
  Otile.template row_bin_op<DivOp>(sum_score);
  threadgroup_barrier(mem_flags::mem_none);

  O += (tm + sm) * params->O_strides[2] + sn;

  if (!align_Q && int(tid.x) == (params->NQ_aligned)) {
    auto dst_tile_dims =
        short2(BD - sn, params->qL - BQ * params->NQ_aligned - (tm + sm));

    if (dst_tile_dims.x <= 0 || dst_tile_dims.y <= 0)
      return;

    Otile.template store_safe<T, 1, 1>(O, params->O_strides[2], dst_tile_dims);
  } else {
    Otile.template store<T, 1, 1>(O, params->O_strides[2]);
  }
}