@@ -40,7 +40,9 @@ def test_resize_torchvision(self, height_scaling_factor, width_scaling_factor):
         height = int(NASA_VIDEO.get_height() * height_scaling_factor)
         width = int(NASA_VIDEO.get_width() * width_scaling_factor)
 
-        decoder_resize = VideoDecoder(NASA_VIDEO.path, transforms=[v2.Resize(size=(height, width))])
+        decoder_resize = VideoDecoder(
+            NASA_VIDEO.path, transforms=[v2.Resize(size=(height, width))]
+        )
         decoder_full = VideoDecoder(NASA_VIDEO.path)
         for frame_index in [0, 10, 17, 100, 230, 389]:
             expected_shape = (NASA_VIDEO.get_num_color_channels(), height, width)
@@ -60,16 +62,19 @@ def test_resize_ffmpeg(self):
         width = 240
         expected_shape = (NASA_VIDEO.get_num_color_channels(), height, width)
         resize_filtergraph = f"scale={width}:{height}:flags=bilinear"
-        decoder_resize = VideoDecoder(NASA_VIDEO.path, transforms=[v2.Resize(size=(height, width))])
+        decoder_resize = VideoDecoder(
+            NASA_VIDEO.path, transforms=[v2.Resize(size=(height, width))]
+        )
         for frame_index in [17, 230, 389]:
             frame_resize = decoder_resize[frame_index]
-            frame_ref = NASA_VIDEO.get_frame_data_by_index(frame_index, filters=resize_filtergraph)
+            frame_ref = NASA_VIDEO.get_frame_data_by_index(
+                frame_index, filters=resize_filtergraph
+            )
 
             assert frame_resize.shape == expected_shape
             assert frame_ref.shape == expected_shape
             assert_frames_equal(frame_resize, frame_ref)
 
-
     def test_resize_fails(self):
         with pytest.raises(
             ValueError,
@@ -84,6 +89,7 @@ def test_transform_fails(self):
         ):
             VideoDecoder(NASA_VIDEO.path, transforms=[v2.RandomHorizontalFlip(p=1.0)])
 
+
 class TestCoreVideoDecoderTransformOps:
     # We choose arbitrary values for width and height scaling to get better
     # test coverage. Some pairs upscale the image while others downscale it.
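Note: the height/width scaling pairs referred to by the comment above are supplied to test_resize_torchvision via pytest parametrization, but the decorator itself falls outside the hunks shown here. The sketch below illustrates the pattern only; the class name and the specific factor values are assumptions, not taken from this diff.

import pytest

class TestVideoDecoderTransforms:  # hypothetical class name; not visible in this diff
    # Hypothetical scaling pairs: some upscale, some downscale, one keeps the size.
    @pytest.mark.parametrize(
        "height_scaling_factor, width_scaling_factor",
        ((1.31, 1.5), (0.71, 0.5), (1.31, 0.5), (0.71, 1.5), (1.0, 1.0)),
    )
    def test_resize_torchvision(self, height_scaling_factor, width_scaling_factor):
        ...  # body as in the first hunk above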