Skip to content

Commit 5dafe66

Browse files
authored
[msan][NFCI] Refactor visitIntrinsicInst() into instruction families (llvm#154878)
Currently visitIntrinsicInst() is a long, partly unsorted list. This patch groups them into cross-platform, X86 SIMD, and Arm SIMD families, making the overall intent of visitIntrinsicInst() clearer: ``` void visitIntrinsicInst(IntrinsicInst &I) { if (maybeHandleCrossPlatformIntrinsic(I)) return; if (maybeHandleX86SIMDIntrinsic(I)) return; if (maybeHandleArmSIMDIntrinsic(I)) return; if (maybeHandleUnknownIntrinsic(I)) return; visitInstruction(I); } ``` There is one disadvantage: the compiler will not tell us if the switch statements in the handlers have overlapping coverage.
1 parent fefdb49 commit 5dafe66

File tree

1 file changed

+88
-49
lines changed

1 file changed

+88
-49
lines changed

llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp

Lines changed: 88 additions & 49 deletions
Original file line numberDiff line numberDiff line change
@@ -3263,7 +3263,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
32633263
return true;
32643264
}
32653265

3266-
/// Heuristically instrument unknown intrinsics.
3266+
/// Returns whether it was able to heuristically instrument unknown
3267+
/// intrinsics.
32673268
///
32683269
/// The main purpose of this code is to do something reasonable with all
32693270
/// random intrinsics we might encounter, most importantly - SIMD intrinsics.
@@ -3273,7 +3274,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
32733274
///
32743275
/// We special-case intrinsics where this approach fails. See llvm.bswap
32753276
/// handling as an example of that.
3276-
bool handleUnknownIntrinsicUnlogged(IntrinsicInst &I) {
3277+
bool maybeHandleUnknownIntrinsicUnlogged(IntrinsicInst &I) {
32773278
unsigned NumArgOperands = I.arg_size();
32783279
if (NumArgOperands == 0)
32793280
return false;
@@ -3300,8 +3301,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
33003301
return false;
33013302
}
33023303

3303-
bool handleUnknownIntrinsic(IntrinsicInst &I) {
3304-
if (handleUnknownIntrinsicUnlogged(I)) {
3304+
bool maybeHandleUnknownIntrinsic(IntrinsicInst &I) {
3305+
if (maybeHandleUnknownIntrinsicUnlogged(I)) {
33053306
if (ClDumpHeuristicInstructions)
33063307
dumpInst(I);
33073308

@@ -5262,7 +5263,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
52625263
handleShadowOr(I);
52635264
}
52645265

5265-
void visitIntrinsicInst(IntrinsicInst &I) {
5266+
bool maybeHandleCrossPlatformIntrinsic(IntrinsicInst &I) {
52665267
switch (I.getIntrinsicID()) {
52675268
case Intrinsic::uadd_with_overflow:
52685269
case Intrinsic::sadd_with_overflow:
@@ -5342,6 +5343,32 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
53425343
handleVectorReduceWithStarterIntrinsic(I);
53435344
break;
53445345

5346+
case Intrinsic::scmp:
5347+
case Intrinsic::ucmp: {
5348+
handleShadowOr(I);
5349+
break;
5350+
}
5351+
5352+
case Intrinsic::fshl:
5353+
case Intrinsic::fshr:
5354+
handleFunnelShift(I);
5355+
break;
5356+
5357+
case Intrinsic::is_constant:
5358+
// The result of llvm.is.constant() is always defined.
5359+
setShadow(&I, getCleanShadow(&I));
5360+
setOrigin(&I, getCleanOrigin());
5361+
break;
5362+
5363+
default:
5364+
return false;
5365+
}
5366+
5367+
return true;
5368+
}
5369+
5370+
bool maybeHandleX86SIMDIntrinsic(IntrinsicInst &I) {
5371+
switch (I.getIntrinsicID()) {
53455372
case Intrinsic::x86_sse_stmxcsr:
53465373
handleStmxcsr(I);
53475374
break;
@@ -5392,6 +5419,15 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
53925419
break;
53935420
}
53945421

5422+
// Convert Packed Single Precision Floating-Point Values
5423+
// to Packed Signed Doubleword Integer Values
5424+
//
5425+
// <16 x i32> @llvm.x86.avx512.mask.cvtps2dq.512
5426+
// (<16 x float>, <16 x i32>, i16, i32)
5427+
case Intrinsic::x86_avx512_mask_cvtps2dq_512:
5428+
handleAVX512VectorConvertFPToInt(I, /*LastMask=*/false);
5429+
break;
5430+
53955431
// Convert Packed Double Precision Floating-Point Values
53965432
// to Packed Single Precision Floating-Point Values
53975433
case Intrinsic::x86_sse2_cvtpd2ps:
@@ -5492,23 +5528,6 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
54925528
case Intrinsic::x86_mmx_psrli_q:
54935529
case Intrinsic::x86_mmx_psrai_w:
54945530
case Intrinsic::x86_mmx_psrai_d:
5495-
case Intrinsic::aarch64_neon_rshrn:
5496-
case Intrinsic::aarch64_neon_sqrshl:
5497-
case Intrinsic::aarch64_neon_sqrshrn:
5498-
case Intrinsic::aarch64_neon_sqrshrun:
5499-
case Intrinsic::aarch64_neon_sqshl:
5500-
case Intrinsic::aarch64_neon_sqshlu:
5501-
case Intrinsic::aarch64_neon_sqshrn:
5502-
case Intrinsic::aarch64_neon_sqshrun:
5503-
case Intrinsic::aarch64_neon_srshl:
5504-
case Intrinsic::aarch64_neon_sshl:
5505-
case Intrinsic::aarch64_neon_uqrshl:
5506-
case Intrinsic::aarch64_neon_uqrshrn:
5507-
case Intrinsic::aarch64_neon_uqshl:
5508-
case Intrinsic::aarch64_neon_uqshrn:
5509-
case Intrinsic::aarch64_neon_urshl:
5510-
case Intrinsic::aarch64_neon_ushl:
5511-
// Not handled here: aarch64_neon_vsli (vector shift left and insert)
55125531
handleVectorShiftIntrinsic(I, /* Variable */ false);
55135532
break;
55145533
case Intrinsic::x86_avx2_psllv_d:
@@ -5930,7 +5949,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
59305949
case Intrinsic::x86_avx512_max_pd_512: {
59315950
// These AVX512 variants contain the rounding mode as a trailing flag.
59325951
// Earlier variants do not have a trailing flag and are already handled
5933-
// by maybeHandleSimpleNomemIntrinsic(I, 0) via handleUnknownIntrinsic.
5952+
// by maybeHandleSimpleNomemIntrinsic(I, 0) via
5953+
// maybeHandleUnknownIntrinsic.
59345954
[[maybe_unused]] bool Success =
59355955
maybeHandleSimpleNomemIntrinsic(I, /*trailingFlags=*/1);
59365956
assert(Success);
@@ -5988,15 +6008,6 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
59886008
/*trailingVerbatimArgs=*/1);
59896009
break;
59906010

5991-
// Convert Packed Single Precision Floating-Point Values
5992-
// to Packed Signed Doubleword Integer Values
5993-
//
5994-
// <16 x i32> @llvm.x86.avx512.mask.cvtps2dq.512
5995-
// (<16 x float>, <16 x i32>, i16, i32)
5996-
case Intrinsic::x86_avx512_mask_cvtps2dq_512:
5997-
handleAVX512VectorConvertFPToInt(I, /*LastMask=*/false);
5998-
break;
5999-
60006011
// AVX512 PMOV: Packed MOV, with truncation
60016012
// Precisely handled by applying the same intrinsic to the shadow
60026013
case Intrinsic::x86_avx512_mask_pmov_dw_512:
@@ -6074,15 +6085,33 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
60746085
handleAVXGF2P8Affine(I);
60756086
break;
60766087

6077-
case Intrinsic::fshl:
6078-
case Intrinsic::fshr:
6079-
handleFunnelShift(I);
6080-
break;
6088+
default:
6089+
return false;
6090+
}
60816091

6082-
case Intrinsic::is_constant:
6083-
// The result of llvm.is.constant() is always defined.
6084-
setShadow(&I, getCleanShadow(&I));
6085-
setOrigin(&I, getCleanOrigin());
6092+
return true;
6093+
}
6094+
6095+
bool maybeHandleArmSIMDIntrinsic(IntrinsicInst &I) {
6096+
switch (I.getIntrinsicID()) {
6097+
case Intrinsic::aarch64_neon_rshrn:
6098+
case Intrinsic::aarch64_neon_sqrshl:
6099+
case Intrinsic::aarch64_neon_sqrshrn:
6100+
case Intrinsic::aarch64_neon_sqrshrun:
6101+
case Intrinsic::aarch64_neon_sqshl:
6102+
case Intrinsic::aarch64_neon_sqshlu:
6103+
case Intrinsic::aarch64_neon_sqshrn:
6104+
case Intrinsic::aarch64_neon_sqshrun:
6105+
case Intrinsic::aarch64_neon_srshl:
6106+
case Intrinsic::aarch64_neon_sshl:
6107+
case Intrinsic::aarch64_neon_uqrshl:
6108+
case Intrinsic::aarch64_neon_uqrshrn:
6109+
case Intrinsic::aarch64_neon_uqshl:
6110+
case Intrinsic::aarch64_neon_uqshrn:
6111+
case Intrinsic::aarch64_neon_urshl:
6112+
case Intrinsic::aarch64_neon_ushl:
6113+
// Not handled here: aarch64_neon_vsli (vector shift left and insert)
6114+
handleVectorShiftIntrinsic(I, /* Variable */ false);
60866115
break;
60876116

60886117
// TODO: handling max/min similarly to AND/OR may be more precise
@@ -6233,17 +6262,27 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
62336262
break;
62346263
}
62356264

6236-
case Intrinsic::scmp:
6237-
case Intrinsic::ucmp: {
6238-
handleShadowOr(I);
6239-
break;
6240-
}
6241-
62426265
default:
6243-
if (!handleUnknownIntrinsic(I))
6244-
visitInstruction(I);
6245-
break;
6266+
return false;
62466267
}
6268+
6269+
return true;
6270+
}
6271+
6272+
void visitIntrinsicInst(IntrinsicInst &I) {
6273+
if (maybeHandleCrossPlatformIntrinsic(I))
6274+
return;
6275+
6276+
if (maybeHandleX86SIMDIntrinsic(I))
6277+
return;
6278+
6279+
if (maybeHandleArmSIMDIntrinsic(I))
6280+
return;
6281+
6282+
if (maybeHandleUnknownIntrinsic(I))
6283+
return;
6284+
6285+
visitInstruction(I);
62476286
}
62486287

62496288
void visitLibAtomicLoad(CallBase &CB) {

0 commit comments

Comments (0)