Skip to content

Commit 3a2df84

Browse files
committed
Lint
1 parent dd24dfa commit 3a2df84

File tree

3 files changed

+14
-9
lines changed

3 files changed

+14
-9
lines changed

src/torchcodec/decoders/_video_decoder.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -103,7 +103,7 @@ def __init__(
103103
dimension_order: Literal["NCHW", "NHWC"] = "NCHW",
104104
num_ffmpeg_threads: int = 1,
105105
device: Optional[Union[str, torch_device]] = "cpu",
106-
transforms: List[Any] = [], # TRANSFORMS TODO: what is the user-facing type?
106+
transforms: List[Any] = [], # TRANSFORMS TODO: what is the user-facing type?
107107
seek_mode: Literal["exact", "approximate"] = "exact",
108108
custom_frame_mappings: Optional[
109109
Union[str, bytes, io.RawIOBase, io.BufferedReader]
@@ -435,6 +435,7 @@ def _get_and_validate_stream_metadata(
435435
num_frames,
436436
)
437437

438+
438439
def make_transform_specs(transforms: List[Any]) -> str:
439440
from torchvision.transforms import v2
440441

@@ -447,11 +448,10 @@ def make_transform_specs(transforms: List[Any]) -> str:
447448
)
448449
transform_specs.append(f"resize, {transform.size[0]}, {transform.size[1]}")
449450
else:
450-
raise ValueError(
451-
f"Unsupported transform {transform}."
452-
)
451+
raise ValueError(f"Unsupported transform {transform}.")
453452
return ";".join(transform_specs)
454453

454+
455455
def _read_custom_frame_mappings(
456456
custom_frame_mappings: Union[str, bytes, io.RawIOBase, io.BufferedReader]
457457
) -> tuple[Tensor, Tensor, Tensor]:

test/generate_reference_resources.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -133,7 +133,6 @@ def generate_nasa_13013_references():
133133
)
134134

135135

136-
137136
def generate_h265_video_references():
138137
# This video was generated by running the following:
139138
# conda install -c conda-forge x265

test/test_transform_ops.py

Lines changed: 10 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,9 @@ def test_resize_torchvision(self, height_scaling_factor, width_scaling_factor):
4040
height = int(NASA_VIDEO.get_height() * height_scaling_factor)
4141
width = int(NASA_VIDEO.get_width() * width_scaling_factor)
4242

43-
decoder_resize = VideoDecoder(NASA_VIDEO.path, transforms=[v2.Resize(size=(height, width))])
43+
decoder_resize = VideoDecoder(
44+
NASA_VIDEO.path, transforms=[v2.Resize(size=(height, width))]
45+
)
4446
decoder_full = VideoDecoder(NASA_VIDEO.path)
4547
for frame_index in [0, 10, 17, 100, 230, 389]:
4648
expected_shape = (NASA_VIDEO.get_num_color_channels(), height, width)
@@ -60,16 +62,19 @@ def test_resize_ffmpeg(self):
6062
width = 240
6163
expected_shape = (NASA_VIDEO.get_num_color_channels(), height, width)
6264
resize_filtergraph = f"scale={width}:{height}:flags=bilinear"
63-
decoder_resize = VideoDecoder(NASA_VIDEO.path, transforms=[v2.Resize(size=(height, width))])
65+
decoder_resize = VideoDecoder(
66+
NASA_VIDEO.path, transforms=[v2.Resize(size=(height, width))]
67+
)
6468
for frame_index in [17, 230, 389]:
6569
frame_resize = decoder_resize[frame_index]
66-
frame_ref = NASA_VIDEO.get_frame_data_by_index(frame_index, filters=resize_filtergraph)
70+
frame_ref = NASA_VIDEO.get_frame_data_by_index(
71+
frame_index, filters=resize_filtergraph
72+
)
6773

6874
assert frame_resize.shape == expected_shape
6975
assert frame_ref.shape == expected_shape
7076
assert_frames_equal(frame_resize, frame_ref)
7177

72-
7378
def test_resize_fails(self):
7479
with pytest.raises(
7580
ValueError,
@@ -84,6 +89,7 @@ def test_transform_fails(self):
8489
):
8590
VideoDecoder(NASA_VIDEO.path, transforms=[v2.RandomHorizontalFlip(p=1.0)])
8691

92+
8793
class TestCoreVideoDecoderTransformOps:
8894
# We choose arbitrary values for width and height scaling to get better
8995
# test coverage. Some pairs upscale the image while others downscale it.

0 commit comments

Comments (0)