 //! The virtual memory representation of the MIR interpreter.
 
+use std::borrow::Cow;
+use std::convert::TryFrom;
+use std::iter;
+use std::ops::{Deref, DerefMut, Range};
+
+use rustc_ast::ast::Mutability;
+use rustc_data_structures::sorted_map::SortedMap;
+use rustc_target::abi::HasDataLayout;
+
 use super::{
     read_target_uint, write_target_uint, AllocId, InterpResult, Pointer, Scalar, ScalarMaybeUndef,
 };
 
 use crate::ty::layout::{Align, Size};
 
-use rustc_ast::ast::Mutability;
-use rustc_data_structures::sorted_map::SortedMap;
-use rustc_target::abi::HasDataLayout;
-use std::borrow::Cow;
-use std::iter;
-use std::ops::{Deref, DerefMut, Range};
-
 // NOTE: When adding new fields, make sure to adjust the `Snapshot` impl in
 // `src/librustc_mir/interpret/snapshot.rs`.
 #[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)]
@@ -90,7 +92,7 @@ impl<Tag> Allocation<Tag> {
     /// Creates a read-only allocation initialized by the given bytes
     pub fn from_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>, align: Align) -> Self {
         let bytes = slice.into().into_owned();
-        let size = Size::from_bytes(bytes.len() as u64);
+        let size = Size::from_bytes(bytes.len());
         Self {
             bytes,
             relocations: Relocations::new(),
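
Dropping the `as u64` cast here relies on `Size::from_bytes` accepting any integer that converts into `u64`. A minimal sketch of such a constructor, assuming it is generic over `TryInto<u64>` (in scope via `std::convert::TryInto`) and panics on overflow; the field name `raw` is hypothetical:

    // Sketch only: checked widening at the constructor boundary.
    pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
        let bytes = bytes.try_into().ok().expect("Size::from_bytes: overflow");
        Size { raw: bytes }
    }
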
@@ -107,9 +109,8 @@ impl<Tag> Allocation<Tag> {
     }
 
     pub fn undef(size: Size, align: Align) -> Self {
-        assert_eq!(size.bytes() as usize as u64, size.bytes());
         Allocation {
-            bytes: vec![0; size.bytes() as usize],
+            bytes: vec![0; size.bytes_usize()],
             relocations: Relocations::new(),
             undef_mask: UndefMask::new(size, false),
             size,
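
The `bytes_usize` helper replaces the repeated `assert_eq!` plus `as usize` pattern at each call site. A plausible definition, assuming it is a thin wrapper over `usize::try_from` (a sketch, not the exact rustc_target source):

    impl Size {
        // Checked version of `self.bytes() as usize`: panics instead of
        // silently truncating when the `u64` does not fit the host `usize`.
        pub fn bytes_usize(self) -> usize {
            usize::try_from(self.bytes()).unwrap()
        }
    }
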
@@ -152,7 +153,7 @@ impl Allocation<(), ()> {
 /// Raw accessors. Provide access to otherwise private bytes.
 impl<Tag, Extra> Allocation<Tag, Extra> {
     pub fn len(&self) -> usize {
-        self.size.bytes() as usize
+        self.size.bytes_usize()
     }
 
     /// Looks at a slice which may describe undefined bytes or describe a relocation. This differs
@@ -183,20 +184,15 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
     #[inline]
     fn check_bounds(&self, offset: Size, size: Size) -> Range<usize> {
         let end = offset + size; // This does overflow checking.
-        assert_eq!(
-            end.bytes() as usize as u64,
-            end.bytes(),
-            "cannot handle this access on this host architecture"
-        );
-        let end = end.bytes() as usize;
+        let end = usize::try_from(end.bytes()).expect("access too big for this host architecture");
         assert!(
             end <= self.len(),
             "Out-of-bounds access at offset {}, size {} in allocation of size {}",
             offset.bytes(),
             size.bytes(),
             self.len()
         );
-        (offset.bytes() as usize)..end
+        offset.bytes_usize()..end
     }
 
     /// The last argument controls whether we error out when there are undefined
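
The move from `as` casts to `usize::try_from` matters on 16- and 32-bit hosts: a plain `as` cast wraps silently, while `try_from` surfaces the failure. A small illustration (requires `use std::convert::TryFrom;` on this edition):

    let big: u64 = 0x1_0000_0000;
    let truncated = big as usize;       // 0 on a 32-bit host: silent wraparound
    let checked = usize::try_from(big); // Err(..) on a 32-bit host instead
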
@@ -294,11 +290,10 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         cx: &impl HasDataLayout,
         ptr: Pointer<Tag>,
     ) -> InterpResult<'tcx, &[u8]> {
-        assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
-        let offset = ptr.offset.bytes() as usize;
+        let offset = ptr.offset.bytes_usize();
         Ok(match self.bytes[offset..].iter().position(|&c| c == 0) {
             Some(size) => {
-                let size_with_null = Size::from_bytes((size + 1) as u64);
+                let size_with_null = Size::from_bytes(size) + Size::from_bytes(1);
                 // Go through `get_bytes` for checks and AllocationExtra hooks.
                 // We read the null, so we include it in the request, but we want it removed
                 // from the result, so we do subslicing.
@@ -343,7 +338,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         let (lower, upper) = src.size_hint();
         let len = upper.expect("can only write bounded iterators");
         assert_eq!(lower, len, "can only write iterators with a precise length");
-        let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(len as u64))?;
+        let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(len))?;
         // `zip` would stop when the first iterator ends; we want to definitely
         // cover all of `bytes`.
         for dest in bytes {
@@ -386,7 +381,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         } else {
             match self.relocations.get(&ptr.offset) {
                 Some(&(tag, alloc_id)) => {
-                    let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits as u64), tag);
+                    let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits), tag);
                     return Ok(ScalarMaybeUndef::Scalar(ptr.into()));
                 }
                 None => {}
@@ -433,7 +428,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         };
 
         let bytes = match val.to_bits_or_ptr(type_size, cx) {
-            Err(val) => val.offset.bytes() as u128,
+            Err(val) => u128::from(val.offset.bytes()),
             Ok(data) => data,
         };
 
@@ -524,7 +519,7 @@ impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
             )
         };
         let start = ptr.offset;
-        let end = start + size;
+        let end = start + size; // `Size` addition
 
         // Mark parts of the outermost relocations as undefined if they partially fall outside the
         // given range.
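
The new `// \`Size\` addition` comments flag that this arithmetic goes through the overflow-checked operator impls on `Size` rather than raw `u64` math. A sketch of what such an impl looks like, assuming it panics on overflow (the real one lives in rustc_target::abi):

    impl std::ops::Add for Size {
        type Output = Size;
        fn add(self, other: Size) -> Size {
            // Checked addition: overflow is a bug, so fail loudly.
            let sum = self.bytes().checked_add(other.bytes()).expect("Size::add: overflow");
            Size::from_bytes(sum)
        }
    }
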
@@ -563,7 +558,7 @@ impl<'tcx, Tag, Extra> Allocation<Tag, Extra> {
     #[inline]
     fn check_defined(&self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> {
         self.undef_mask
-            .is_range_defined(ptr.offset, ptr.offset + size)
+            .is_range_defined(ptr.offset, ptr.offset + size) // `Size` addition
             .or_else(|idx| throw_ub!(InvalidUndefBytes(Some(Pointer::new(ptr.alloc_id, idx)))))
     }
 
@@ -643,7 +638,7 @@ impl<Tag, Extra> Allocation<Tag, Extra> {
         if defined.ranges.len() <= 1 {
             self.undef_mask.set_range_inbounds(
                 dest.offset,
-                dest.offset + size * repeat,
+                dest.offset + size * repeat, // `Size` operations
                 defined.initial,
             );
             return;
@@ -721,10 +716,10 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
         for i in 0..length {
             new_relocations.extend(relocations.iter().map(|&(offset, reloc)| {
                 // compute offset for current repetition
-                let dest_offset = dest.offset + (i * size);
+                let dest_offset = dest.offset + size * i; // `Size` operations
                 (
                     // shift offsets from source allocation to destination allocation
-                    offset + dest_offset - src.offset,
+                    (offset + dest_offset) - src.offset, // `Size` operations
                     reloc,
                 )
             }));
@@ -861,18 +856,18 @@ impl UndefMask {
         if amount.bytes() == 0 {
             return;
         }
-        let unused_trailing_bits = self.blocks.len() as u64 * Self::BLOCK_SIZE - self.len.bytes();
+        let unused_trailing_bits =
+            u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - self.len.bytes();
         if amount.bytes() > unused_trailing_bits {
             let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1;
-            assert_eq!(additional_blocks as usize as u64, additional_blocks);
             self.blocks.extend(
                 // FIXME(oli-obk): optimize this by repeating `new_state as Block`.
-                iter::repeat(0).take(additional_blocks as usize),
+                iter::repeat(0).take(usize::try_from(additional_blocks).unwrap()),
             );
         }
         let start = self.len;
         self.len += amount;
-        self.set_range_inbounds(start, start + amount, new_state);
+        self.set_range_inbounds(start, start + amount, new_state); // `Size` operation
     }
 }
 
@@ -881,7 +876,5 @@ fn bit_index(bits: Size) -> (usize, usize) {
     let bits = bits.bytes();
     let a = bits / UndefMask::BLOCK_SIZE;
     let b = bits % UndefMask::BLOCK_SIZE;
-    assert_eq!(a as usize as u64, a);
-    assert_eq!(b as usize as u64, b);
-    (a as usize, b as usize)
+    (usize::try_from(a).unwrap(), usize::try_from(b).unwrap())
 }
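
`bit_index` splits a byte offset into a block index and a bit-within-block index, now with checked narrowing instead of paired asserts and casts. A quick sanity check of the arithmetic, assuming `BLOCK_SIZE == 64`:

    // 70 / 64 == 1 and 70 % 64 == 6, so byte 70 is bit 6 of block 1.
    let (block, bit) = bit_index(Size::from_bytes(70));
    assert_eq!((block, bit), (1, 6));
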