@@ -115,7 +115,7 @@ fn u64_by_u64_div_rem(duo: u64, div: u64) -> (u64, u64) {
115115// microarchitecture can multiply and divide. We decide to be optimistic and assume `trifecta` is
116116// faster if the target pointer width is at least 64.
117117#[ cfg( all(
118- not( all( feature = "asm" , target_arch = "x86_64" ) ) ,
118+ not( all( not ( feature = "no-asm" ) , target_arch = "x86_64" ) ) ,
119119 not( any( target_pointer_width = "16" , target_pointer_width = "32" ) )
120120) ) ]
121121impl_trifecta ! (
@@ -131,7 +131,7 @@ impl_trifecta!(
131131// If the pointer width less than 64, then the target architecture almost certainly does not have
132132// the fast 64 to 128 bit widening multiplication needed for `trifecta` to be faster.
133133#[ cfg( all(
134- not( all( feature = "asm" , target_arch = "x86_64" ) ) ,
134+ not( all( not ( feature = "no-asm" ) , target_arch = "x86_64" ) ) ,
135135 any( target_pointer_width = "16" , target_pointer_width = "32" )
136136) ) ]
137137impl_delegate ! (
@@ -152,7 +152,7 @@ impl_delegate!(
152152///
153153/// If the quotient does not fit in a `u64`, a floating point exception occurs.
154154/// If `div == 0`, then a division by zero exception occurs.
155- #[ cfg( all( feature = "asm" , target_arch = "x86_64" ) ) ]
155+ #[ cfg( all( not ( feature = "no-asm" ) , target_arch = "x86_64" ) ) ]
156156#[ inline]
157157unsafe fn u128_by_u64_div_rem ( duo : u128 , div : u64 ) -> ( u64 , u64 ) {
158158 let duo_lo = duo as u64 ;
@@ -174,7 +174,7 @@ unsafe fn u128_by_u64_div_rem(duo: u128, div: u64) -> (u64, u64) {
174174}
175175
176176// use `asymmetric` instead of `trifecta` on x86_64
177- #[ cfg( all( feature = "asm" , target_arch = "x86_64" ) ) ]
177+ #[ cfg( all( not ( feature = "no-asm" ) , target_arch = "x86_64" ) ) ]
178178impl_asymmetric ! (
179179 u128_div_rem,
180180 zero_div_fn,
@@ -203,7 +203,7 @@ fn u32_by_u32_div_rem(duo: u32, div: u32) -> (u32, u32) {
203203// When not on x86 and the pointer width is not 64, use `delegate` since the division size is larger
204204// than register size.
205205#[ cfg( all(
206- not( all( feature = "asm" , target_arch = "x86" ) ) ,
206+ not( all( not ( feature = "no-asm" ) , target_arch = "x86" ) ) ,
207207 not( target_pointer_width = "64" )
208208) ) ]
209209impl_delegate ! (
@@ -220,7 +220,7 @@ impl_delegate!(
220220
221221// When not on x86 and the pointer width is 64, use `binary_long`.
222222#[ cfg( all(
223- not( all( feature = "asm" , target_arch = "x86" ) ) ,
223+ not( all( not ( feature = "no-asm" ) , target_arch = "x86" ) ) ,
224224 target_pointer_width = "64"
225225) ) ]
226226impl_binary_long ! (
@@ -238,7 +238,7 @@ impl_binary_long!(
238238///
239239/// If the quotient does not fit in a `u32`, a floating point exception occurs.
240240/// If `div == 0`, then a division by zero exception occurs.
241- #[ cfg( all( feature = "asm" , target_arch = "x86" ) ) ]
241+ #[ cfg( all( not ( feature = "no-asm" ) , target_arch = "x86" ) ) ]
242242#[ inline]
243243unsafe fn u64_by_u32_div_rem ( duo : u64 , div : u32 ) -> ( u32 , u32 ) {
244244 let duo_lo = duo as u32 ;
@@ -260,7 +260,7 @@ unsafe fn u64_by_u32_div_rem(duo: u64, div: u32) -> (u32, u32) {
260260}
261261
262262// use `asymmetric` instead of `delegate` on x86
263- #[ cfg( all( feature = "asm" , target_arch = "x86" ) ) ]
263+ #[ cfg( all( not ( feature = "no-asm" ) , target_arch = "x86" ) ) ]
264264impl_asymmetric ! (
265265 u64_div_rem,
266266 zero_div_fn,
0 commit comments