@@ -24,8 +24,8 @@ def DecomposeOp : Op<Transform_Dialect, "structured.decompose",
     (depthwise) convolutions, into combinations of lower-dimensional equivalents
     when possible.

-    Return modes:
-    =============
+    #### Return modes
+
     This operation ignores non-Linalg ops and drops them in the return.
     If all the operations referred to by the `target` PDLOperation decompose
     properly, the transform succeeds. Otherwise the transform silently fails.
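For orientation, a minimal usage sketch of the op being documented (not part of the patch; `%conv` is an assumed handle to previously matched convolution payload ops):

```
// %conv is assumed to hold matched linalg convolution ops whose window has a
// unit-size dimension; each is rewritten into a lower-dimensional equivalent,
// and a handle to the decomposed ops is returned.
%decomposed = transform.structured.decompose %conv
```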
@@ -68,11 +68,11 @@ def GeneralizeOp : Op<Transform_Dialect, "structured.generalize",
     [FunctionalStyleTransformOpTrait, MemoryEffectsOpInterface,
      TransformOpInterface, TransformEachOpTrait]> {
   let description = [{
-    Transforms a named structued operation into the generic form with the
+    Transforms a named structured operation into the generic form with the
     explicit attached region.

-    Return modes:
-    =============
+    #### Return modes
+
     This operation ignores non-Linalg ops and drops them in the return.
     If all the operations referred to by the `target` PDLOperation generalize
     properly, the transform succeeds. Otherwise the transform silently fails.
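To illustrate what generalization produces (a sketch under the usual linalg semantics, not taken from the patch), a named `linalg.matmul` becomes an equivalent `linalg.generic` with the indexing maps, iterator types, and region written out explicitly:

```
// Named form.
%0 = linalg.matmul ins(%A, %B : tensor<4x8xf32>, tensor<8x16xf32>)
                   outs(%C : tensor<4x16xf32>) -> tensor<4x16xf32>

// Generic form after generalization (schematically).
%1 = linalg.generic
       {indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>,
                         affine_map<(d0, d1, d2) -> (d2, d1)>,
                         affine_map<(d0, d1, d2) -> (d0, d1)>],
        iterator_types = ["parallel", "parallel", "reduction"]}
       ins(%A, %B : tensor<4x8xf32>, tensor<8x16xf32>)
       outs(%C : tensor<4x16xf32>) {
     ^bb0(%a: f32, %b: f32, %c: f32):
       %p = arith.mulf %a, %b : f32
       %s = arith.addf %c, %p : f32
       linalg.yield %s : f32
     } -> tensor<4x16xf32>
```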
@@ -100,8 +100,8 @@ def InterchangeOp : Op<Transform_Dialect, "structured.interchange",
     Interchanges the iterators of the operations pointed to by the target handle
     using the iterator interchange attribute.

-    Return modes:
-    =============
+    #### Return modes
+
     This operation ignores non-linalg::Generic ops and drops them in the return.
     This operation fails if the interchange attribute is invalid.
     If all the operations referred to by the `target` PDLOperation interchange
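A usage sketch (assuming `%generic` is a handle to matched `linalg.generic` ops and that the permutation is supplied through the op's `iterator_interchange` attribute; both are assumptions for illustration):

```
// Permute the first two iterators of each matched linalg.generic; an invalid
// permutation makes the transform fail, as described above.
%interchanged = transform.structured.interchange %generic
    { iterator_interchange = [1, 0] }
```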
@@ -134,8 +134,8 @@ def PadOp : Op<Transform_Dialect, "structured.pad",
     Pads the operations pointed to by the target handle using the options
     provides as operation attributes.

-    Return modes:
-    =============
+    #### Return modes
+
     This operation ignores non-Linalg ops and drops them in the return.
     This operation may produce a definiteFailure if the padding fails for any
     reason.
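A hypothetical usage sketch; the attribute names and values below are illustrative assumptions rather than anything specified in this patch:

```
// Pad each operand of the matched ops with a zero constant along the listed
// dimensions (attribute names and values are assumptions for the example).
%padded = transform.structured.pad %target {
    padding_values = [0.0 : f32, 0.0 : f32, 0.0 : f32],
    padding_dimensions = [0, 1, 2]
}
```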
@@ -174,8 +174,8 @@ def ScalarizeOp : Op<Transform_Dialect, "structured.scalarize",
     Indicates that ops of a specific kind in the given function should be
     scalarized (i.e. their dynamic dimensions tiled by 1).

-    Return modes:
-    =============
+    #### Return modes
+
     This operation ignores non-Linalg ops and drops them in the return.
     This operation produces `definiteFailure` if the scalarization fails for any
     reason.
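A minimal usage sketch (the `%matched` handle is an assumption; it would typically come from a matching step earlier in the transform sequence):

```
// Tile every dynamic dimension of the matched linalg ops by 1 so that the
// remaining computation operates on statically shaped unit slices.
%scalarized = transform.structured.scalarize %matched
```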
@@ -259,8 +259,8 @@ def SplitReductionOp : Op<Transform_Dialect, "structured.split_reduction",
       - use_alloc: whether to use an alloc op to allocate the temporary
                    tensor (default: do not use alloc op)

-    Return modes:
-    =============
+    #### Return modes
+
     This operation ignores non-Linalg ops and drops them in the return.
     This operation produces `definiteFailure` if the splitting fails for any
     reason.
@@ -275,8 +275,8 @@ def SplitReductionOp : Op<Transform_Dialect, "structured.split_reduction",
       - the split op and
       - the result-combining op.

-    Example (default: use_scaling_algorithm = false, use_alloc = false):
-    ====================================================================
+    #### Example (default: `use_scaling_algorithm = false, use_alloc = false`):
+
     ```
     %r = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>,
                                           affine_map<(d0) -> ()>],
@@ -314,8 +314,8 @@ def SplitReductionOp : Op<Transform_Dialect, "structured.split_reduction",
     } -> tensor<f32>
     ```

-    Example (use_scaling_algorithm = true, use_alloc = true):
-    =========================================================
+    #### Example (`use_scaling_algorithm = true, use_alloc = true`):
+
     Instead of introducing an ExpandShapeOp, this scaling-based implementation
     rewrites a reduction dimension `k` into `k * split_factor + kk`.
     The dimension `kk` is added as an extra parallel dimension to the
@@ -329,7 +329,7 @@ def SplitReductionOp : Op<Transform_Dialect, "structured.split_reduction",
        b. O(i, j) += O_i(kk, i, j)
     The intermediate tensor O_i is of shape (128/16)x3x5 == 8x3x5.

-    Example:
+    #### Example:

     ```
     %0 = linalg.matmul ins(%A, %B: tensor<16x256xf32>, tensor<256x32xf32>)
@@ -439,8 +439,8 @@ def VectorizeOp : Op<Transform_Dialect, "structured.vectorize",
     Note that this transformation is invalidating the handles to any payload IR
     operation that is contained inside the vectorization target.

-    Return modes:
-    =============
+    #### Return modes
+
     This operation produces `definiteFailure` if vectorization fails for any
     reason.
     The operation always returns the handle to the target op that is expected
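A minimal usage sketch (here `%func` is assumed to be a handle to the enclosing function-like op used as the vectorization target):

```
// Vectorize everything nested inside the target; handles to payload ops
// contained in %func must be considered invalidated afterwards, as noted above.
%vectorized = transform.structured.vectorize %func
```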