helpers

Classes:

  • CropAbs
  • CropRel
  • ScalingArgs

Functions:

  • pre_ss

    Supersamples the input clip, applies a given function to the higher-resolution version, and then downscales it back to the original resolution.

  • scale_var_clip

    Scale a variable clip to constant or variable resolution.

BottomCrop module-attribute

BottomCrop: TypeAlias = int

LeftCrop module-attribute

LeftCrop: TypeAlias = int

RightCrop module-attribute

RightCrop: TypeAlias = int

TopCrop module-attribute

TopCrop: TypeAlias = int

CropAbs

Bases: NamedTuple

Methods:

Attributes:

height instance-attribute

height: int

left class-attribute instance-attribute

left: int = 0

top class-attribute instance-attribute

top: int = 0

width instance-attribute

width: int

to_rel

to_rel(base_clip: VideoNode) -> CropRel
Source code in vsscale/helpers.py
def to_rel(self, base_clip: vs.VideoNode) -> CropRel:
    return CropRel(
        self.left, base_clip.width - self.width - self.left, self.top, base_clip.height - self.height - self.top
    )
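
For illustration, a minimal sketch of converting an absolute crop to a relative one. The 1920x1080 base_clip and the crop values are assumptions for this example, and the import path from vsscale.helpers is assumed here:

    from vsscale.helpers import CropAbs

    # base_clip is assumed to be a 1920x1080 vs.VideoNode from any source filter.
    crop = CropAbs(width=1280, height=720, left=320, top=180)

    # right = 1920 - 1280 - 320 = 320, bottom = 1080 - 720 - 180 = 180
    rel = crop.to_rel(base_clip)  # CropRel(left=320, right=320, top=180, bottom=180)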

CropRel

Bases: NamedTuple

Attributes:

bottom class-attribute instance-attribute

bottom: int = 0

left class-attribute instance-attribute

left: int = 0

right class-attribute instance-attribute

right: int = 0

top class-attribute instance-attribute

top: int = 0

ScalingArgs dataclass

ScalingArgs(
    width: int,
    height: int,
    src_width: float,
    src_height: float,
    src_top: float,
    src_left: float,
    mode: str = "hw",
)

Methods:

Attributes:

height instance-attribute

height: int

mode class-attribute instance-attribute

mode: str = 'hw'

src_height instance-attribute

src_height: float

src_left instance-attribute

src_left: float

src_top instance-attribute

src_top: float

src_width instance-attribute

src_width: float

width instance-attribute

width: int

from_args classmethod

from_args(
    base_clip: VideoNode,
    height: int,
    width: int | None = None,
    *,
    src_top: float = ...,
    src_left: float = ...,
    mode: str = "hw"
) -> Self
from_args(
    base_clip: VideoNode,
    height: float,
    width: float | None = ...,
    base_height: int | None = ...,
    base_width: int | None = ...,
    src_top: float = ...,
    src_left: float = ...,
    crop: (
        tuple[LeftCrop, RightCrop, TopCrop, BottomCrop] | CropRel | CropAbs
    ) = ...,
    mode: str = "hw",
) -> Self
from_args(
    base_clip: VideoNode,
    height: int | float,
    width: int | float | None = None,
    base_height: int | None = None,
    base_width: int | None = None,
    src_top: float = 0,
    src_left: float = 0,
    crop: (
        tuple[LeftCrop, RightCrop, TopCrop, BottomCrop]
        | CropRel
        | CropAbs
        | None
    ) = None,
    mode: str = "hw",
) -> Self

Get (de)scaling arguments for integer scaling.

Parameters:

  • base_clip

    (VideoNode) –

    Source clip.

  • height

    (int | float) –

    Target (de)scaling height. Casting to float will ensure fractional calculations.

  • width

    (int | float | None, default: None ) –

    Target (de)scaling width. Casting to float will ensure fractional calculations. If None, it will be calculated from the height and the aspect ratio of the base_clip.

  • base_height

    (int | None, default: None ) –

    The height from which to contain the clip. If None, it will be calculated from the height.

  • base_width

    (int | None, default: None ) –

    The width from which to contain the clip. If None, it will be calculated from the width.

  • src_top

    (float, default: 0 ) –

    Vertical offset.

  • src_left

    (float, default: 0 ) –

    Horizontal offset.

  • crop

    (tuple[LeftCrop, RightCrop, TopCrop, BottomCrop] | CropRel | CropAbs | None, default: None ) –

    Tuple of cropping values, or relative/absolute crop specification.

  • mode

    (str, default: 'hw' ) –

    Scaling mode:

    • "w" means only the width is calculated.
    • "h" means only the height is calculated.
    • "hw or "wh" mean both width and height are calculated.

Returns:

  • Self

    ScalingArgs object suitable for scaling functions.
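
A hedged usage sketch: the 1920x1080 source clip, the fractional height 719.8, and the Bilinear descale from vskernels are illustrative assumptions rather than recommended values, and the top-level import path is assumed:

    from vskernels import Bilinear
    from vsscale import ScalingArgs

    # Fractional (de)scale arguments for an assumed 1920x1080 clip.
    sargs = ScalingArgs.from_args(clip, height=719.8, base_height=720, mode="hw")

    # sargs.kwargs() provides src_width/src_height/src_top/src_left for the descale call.
    descaled = Bilinear().descale(clip, sargs.width, sargs.height, **sargs.kwargs())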

Source code in vsscale/helpers.py
@classmethod
def from_args(
    cls,
    base_clip: vs.VideoNode,
    height: int | float,
    width: int | float | None = None,
    base_height: int | None = None,
    base_width: int | None = None,
    src_top: float = 0,
    src_left: float = 0,
    crop: tuple[LeftCrop, RightCrop, TopCrop, BottomCrop] | CropRel | CropAbs | None = None,
    mode: str = "hw",
) -> Self:
    """
    Get (de)scaling arguments for integer scaling.

    Args:
        base_clip: Source clip.
        height: Target (de)scaling height. Casting to float will ensure fractional calculations.
        width: Target (de)scaling width. Casting to float will ensure fractional calculations. If None, it will be
            calculated from the height and the aspect ratio of the base_clip.
        base_height: The height from which to contain the clip. If None, it will be calculated from the height.
        base_width: The width from which to contain the clip. If None, it will be calculated from the width.
        src_top: Vertical offset.
        src_left: Horizontal offset.
        crop: Tuple of cropping values, or relative/absolute crop specification.
        mode: Scaling mode:

               - "w" means only the width is calculated.
               - "h" means only the height is calculated.
               - "hw or "wh" mean both width and height are calculated.

    Returns:
        ScalingArgs object suitable for scaling functions.
    """
    if crop:
        if isinstance(crop, CropAbs):
            crop = crop.to_rel(base_clip)
        elif isinstance(crop, CropRel):
            pass
        else:
            crop = CropRel(*crop)
    else:
        crop = CropRel()

    ratio_height = height / base_clip.height

    if width is None:
        width = get_w(height, base_clip, 2) if isinstance(height, int) else ratio_height * base_clip.width

    ratio_width = width / base_clip.width

    if all(
        [
            isinstance(height, int),
            isinstance(width, int),
            base_height is None,
            base_width is None,
            crop == (0, 0, 0, 0),
        ]
    ):
        return cls(int(width), int(height), int(width), int(height), src_top, src_left, mode)

    if base_height is None:
        base_height = mod2(ceil(height))

    if base_width is None:
        base_width = mod2(ceil(width))

    margin_left = (base_width - width) / 2 + ratio_width * crop.left
    margin_right = (base_width - width) / 2 + ratio_width * crop.right
    cropped_width = base_width - floor(margin_left) - floor(margin_right)

    margin_top = (base_height - height) / 2 + ratio_height * crop.top
    margin_bottom = (base_height - height) / 2 + ratio_height * crop.bottom
    cropped_height = base_height - floor(margin_top) - floor(margin_bottom)

    if isinstance(width, int) and crop.left == crop.right == 0:
        cropped_src_width = float(cropped_width)
    else:
        cropped_src_width = ratio_width * (base_clip.width - crop.left - crop.right)

    cropped_src_left = margin_left - floor(margin_left) + src_left

    if isinstance(height, int) and crop.top == crop.bottom == 0:
        cropped_src_height = float(cropped_height)
    else:
        cropped_src_height = ratio_height * (base_clip.height - crop.top - crop.bottom)

    cropped_src_top = margin_top - floor(margin_top) + src_top

    return cls(
        cropped_width,
        cropped_height,
        cropped_src_width,
        cropped_src_height,
        cropped_src_top,
        cropped_src_left,
        mode,
    )

kwargs

kwargs(clip_or_rate: VideoNode | float | None = None) -> KwargsT
Source code in vsscale/helpers.py
def kwargs(self, clip_or_rate: vs.VideoNode | float | None = None, /) -> KwargsT:
    kwargs = dict[str, Any]()

    do_h, do_w = self._do()

    if isinstance(clip_or_rate, (vs.VideoNode, NoneType)):
        up_rate_h, up_rate_w = self._up_rate(clip_or_rate)
    else:
        up_rate_h, up_rate_w = clip_or_rate, clip_or_rate

    if do_h:
        kwargs.update(src_height=self.src_height * up_rate_h, src_top=self.src_top * up_rate_h)

    if do_w:
        kwargs.update(src_width=self.src_width * up_rate_w, src_left=self.src_left * up_rate_w)

    return kwargs
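
Continuing the hedged sketch from from_args above: kwargs rescales the stored src_* values by an upsampling rate, which is useful once the descaled clip has been upscaled again. The 2x doubled clip and the Hermite downscale are assumptions for this example:

    from vskernels import Hermite

    # 'doubled' is assumed to be the descaled clip upscaled 2x by some doubler.
    # Passing the doubled clip (or the rate 2.0) scales the src_* values to the doubled resolution.
    down = Hermite().scale(doubled, clip.width, clip.height, **sargs.kwargs(doubled))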

pre_ss

pre_ss(
    clip: VideoNode,
    function: VSFunctionNoArgs[VideoNode, VideoNode],
    rfactor: float = 2.0,
    sp: type[
        MixedScalerProcess[_ScalerT, Unpack[_BaseScalerTs]]
    ] = ComplexSuperSamplerProcess[Lanczos],
    *,
    mod: int = 4,
    planes: Planes = None,
    func: FuncExcept | None = None
) -> VideoNode
pre_ss(
    clip: VideoNode,
    *,
    rfactor: float = 2.0,
    sp: MixedScalerProcess[_ScalerT, Unpack[_BaseScalerTs]],
    mod: int = 4,
    planes: Planes = None,
    func: FuncExcept | None = None
) -> VideoNode
pre_ss(
    clip: VideoNode,
    function: VSFunctionNoArgs[VideoNode, VideoNode],
    rfactor: float = 2.0,
    *,
    supersampler: ScalerLike | Callable[[VideoNode, int, int], VideoNode],
    downscaler: ScalerLike | Callable[[VideoNode, int, int], VideoNode],
    mod: int = 4,
    planes: Planes = None,
    func: FuncExcept | None = None
) -> VideoNode
pre_ss(
    clip: VideoNode,
    function: VSFunctionNoArgs[VideoNode, VideoNode] | None = None,
    rfactor: float = 2.0,
    sp: (
        type[MixedScalerProcess[_ScalerT, Unpack[_BaseScalerTs]]]
        | MixedScalerProcess[_ScalerT, Unpack[_BaseScalerTs]]
    ) = ComplexSuperSamplerProcess[Lanczos],
    supersampler: (
        ScalerLike | Callable[[VideoNode, int, int], VideoNode] | None
    ) = None,
    downscaler: (
        ScalerLike | Callable[[VideoNode, int, int], VideoNode] | None
    ) = None,
    mod: int = 4,
    planes: Planes = None,
    func: FuncExcept | None = None,
) -> VideoNode

Supersamples the input clip, applies a given function to the higher-resolution version, and then downscales it back to the original resolution.

This function generalizes the behavior of SuperSamplerProcess and ComplexSuperSamplerProcess.

  • Examples:

    out = pre_ss(clip, lambda clip: cool_function(clip, ...), planes=0)

    • Passing NNEDI3 as a supersampler:

      from vsaa import SuperSamplerProcess

      out = pre_ss(clip, lambda clip: cool_function(clip, ...), SuperSamplerProcess)

    • This works too:

      from vsaa import SuperSamplerProcess

      out = pre_ss(clip, sp=SuperSamplerProcess(function=lambda clip: cool_function(clip, ...)))

    • Specifying supersampler and downscaler:

      from vskernels import Point

      out = pre_ss(clip, lambda clip: cool_function(clip, ...), supersampler=Point, downscaler=Point, planes=0)

Parameters:

  • clip

    (VideoNode) –

    Source clip.

  • function

    (VSFunctionNoArgs[VideoNode, VideoNode] | None, default: None ) –

    A function to apply on the supersampled clip.

  • rfactor

    (float, default: 2.0 ) –

    Scaling factor for supersampling. Defaults to 2.

  • sp

    (type[MixedScalerProcess[_ScalerT, Unpack[_BaseScalerTs]]] | MixedScalerProcess[_ScalerT, Unpack[_BaseScalerTs]], default: ComplexSuperSamplerProcess[Lanczos] ) –

    A MixedScalerProcess instance or class. Default is ComplexSuperSamplerProcess[Lanczos]. It upscales with Lanczos and downscales with Point.

  • supersampler

    (ScalerLike | Callable[[VideoNode, int, int], VideoNode] | None, default: None ) –

    Scaler used to upscale the input clip if sp is not specified.

  • downscaler

    (ScalerLike | Callable[[VideoNode, int, int], VideoNode] | None, default: None ) –

    Downscaler used for undoing the upscaling done by the supersampler if sp is not specified.

  • mod

    (int, default: 4 ) –

    Ensures the supersampled resolution is a multiple of this value. Defaults to 4.

  • planes

    (Planes, default: None ) –

    Which planes to process.

  • func

    (FuncExcept | None, default: None ) –

    An optional function to use for error handling.

Returns:

  • VideoNode

    A clip with the given function applied at higher resolution, then downscaled back.
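
As a further hedged sketch, the supersampler and downscaler may also be plain callables taking (clip, width, height), per the overload above; cool_function stands in for any processing function, as in the earlier examples:

    from vskernels import Lanczos, Point

    out = pre_ss(
        clip,
        lambda c: cool_function(c),                            # hypothetical processing function
        supersampler=lambda c, w, h: Lanczos().scale(c, w, h),
        downscaler=lambda c, w, h: Point().scale(c, w, h),
        planes=0,
    )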

Source code in vsscale/helpers.py
def pre_ss(  # pyright: ignore[reportInconsistentOverload]
    clip: vs.VideoNode,
    function: VSFunctionNoArgs[vs.VideoNode, vs.VideoNode] | None = None,
    rfactor: float = 2.0,
    sp: type[MixedScalerProcess[_ScalerT, Unpack[_BaseScalerTs]]]
    | MixedScalerProcess[_ScalerT, Unpack[_BaseScalerTs]] = ComplexSuperSamplerProcess[Lanczos],  # type: ignore[assignment]
    supersampler: ScalerLike | Callable[[vs.VideoNode, int, int], vs.VideoNode] | None = None,
    downscaler: ScalerLike | Callable[[vs.VideoNode, int, int], vs.VideoNode] | None = None,
    mod: int = 4,
    planes: Planes = None,
    func: FuncExcept | None = None,
) -> vs.VideoNode:
    """
    Supersamples the input clip, applies a given function to the higher-resolution version,
    and then downscales it back to the original resolution.

    This function generalizes the behavior of
    [SuperSamplerProcess][vsaa.deinterlacers.SuperSamplerProcess] and
    [ComplexSuperSamplerProcess][vsscale.various.ComplexSuperSamplerProcess].

    - Examples:
        ```
        out = pre_ss(clip, lambda clip: cool_function(clip, ...), planes=0)
        ```

        - Passing NNEDI3 as a supersampler:
        ```
        from vsaa import SuperSamplerProcess

        out = pre_ss(clip, lambda clip: cool_function(clip, ...), SuperSamplerProcess)
        ```
        - This works too:
        ```
        from vsaa import SuperSamplerProcess

        out = pre_ss(clip, sp=SuperSamplerProcess(function=lambda clip: cool_function(clip, ...)))
        ```

        - Specifying `supersampler` and `downscaler`:
        ```
        from vskernels import Point

        out = pre_ss(clip, lambda clip: cool_function(clip, ...), supersampler=Point, downscaler=Point, planes=0)
        ```

    Args:
        clip: Source clip.
        function: A function to apply on the supersampled clip.
        rfactor: Scaling factor for supersampling. Defaults to 2.
        sp: A `MixedScalerProcess` instance or class.
            Default is `ComplexSuperSamplerProcess[Lanczos]`.
            It upscales with Lanczos and downscales with Point.
        supersampler: Scaler used to upscale the input clip if `sp` is not specified.
        downscaler: Downscaler used for undoing the upscaling done by the supersampler if `sp` is not specified.
        mod: Ensures the supersampled resolution is a multiple of this value. Defaults to 4.
        planes: Which planes to process.
        func: An optional function to use for error handling.

    Returns:
        A clip with the given function applied at higher resolution, then downscaled back.
    """
    func_util = FunctionUtil(clip, func or pre_ss, planes)

    args = (
        func_util.work_clip,
        mod_x(func_util.work_clip.width * rfactor, mod),
        mod_x(func_util.work_clip.height * rfactor, mod),
    )

    if isinstance(sp, MixedScalerProcess):
        return func_util.return_clip(sp.scale(*args))

    if supersampler and downscaler and function:
        ss = (
            Scaler.ensure_obj(supersampler, func_util.func).scale(*args)
            if is_scaler_like(supersampler)
            else supersampler(*args)
        )

        processed = function(ss)

        args = processed, clip.width, clip.height
        down = (
            Scaler.ensure_obj(downscaler, func_util.func).scale(*args)
            if is_scaler_like(downscaler)
            else downscaler(*args)
        )

        return func_util.return_clip(down)

    if function:
        return pre_ss(clip, sp=sp(function=function), rfactor=rfactor, mod=mod, planes=planes, func=func)

    raise CustomTypeError

scale_var_clip

scale_var_clip(
    clip: VideoNode,
    scaler: (
        Scaler
        | Callable[[Resolution], Scaler]
        | Callable[[tuple[int, int]], Scaler]
    ),
    width: (
        int
        | Callable[[Resolution], int]
        | Callable[[tuple[int, int]], int]
        | None
    ),
    height: (
        int | Callable[[Resolution], int] | Callable[[tuple[int, int]], int]
    ),
    shift: (
        tuple[float, float] | Callable[[tuple[int, int]], tuple[float, float]]
    ) = (0, 0),
    debug: bool = False,
) -> VideoNode

Scale a variable clip to constant or variable resolution.

Parameters:

  • clip

    (VideoNode) –

    Source clip.

  • scaler

    (Scaler | Callable[[Resolution], Scaler] | Callable[[tuple[int, int]], Scaler]) –

    A scaler instance or a callable that returns a scaler instance.

  • width

    (int | Callable[[Resolution], int] | Callable[[tuple[int, int]], int] | None) –

    A width integer or a callable that returns the width. If None, it will be calculated from the height and the aspect ratio of the clip.

  • height

    (int | Callable[[Resolution], int] | Callable[[tuple[int, int]], int]) –

    A height integer or a callable that returns the height.

  • shift

    (tuple[float, float] | Callable[[tuple[int, int]], tuple[float, float]], default: (0, 0) ) –

    Optional top shift, left shift tuple or a callable that returns the shifts. Defaults to no shift.

  • debug

    (bool, default: False ) –

    If True, the var_width and var_height props will be added to the clip.

Returns:

  • VideoNode

    Scaled clip.
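
A hedged usage sketch, assuming var_clip is a variable-resolution clip, that the top-level import path is available, and using vskernels' Bilinear and Lanczos; the per-resolution scaler choice is purely illustrative:

    from vskernels import Bilinear, Lanczos
    from vsscale import scale_var_clip

    # Scale every input resolution to a constant 1920x1080,
    # choosing the scaler from the frame's resolution.
    out = scale_var_clip(
        var_clip,
        scaler=lambda res: Lanczos() if res.height < 720 else Bilinear(),
        width=1920,
        height=1080,
    )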

Source code in vsscale/helpers.py
def scale_var_clip(
    clip: vs.VideoNode,
    scaler: Scaler | Callable[[Resolution], Scaler] | Callable[[tuple[int, int]], Scaler],
    width: int | Callable[[Resolution], int] | Callable[[tuple[int, int]], int] | None,
    height: int | Callable[[Resolution], int] | Callable[[tuple[int, int]], int],
    shift: tuple[float, float] | Callable[[tuple[int, int]], tuple[float, float]] = (0, 0),
    debug: bool = False,
) -> vs.VideoNode:
    """
    Scale a variable clip to constant or variable resolution.

    Args:
        clip: Source clip.
        scaler: A scaler instance or a callable that returns a scaler instance.
        width: A width integer or a callable that returns the width. If None, it will be calculated from the height and
            the aspect ratio of the clip.
        height: A height integer or a callable that returns the height.
        shift: Optional top shift, left shift tuple or a callable that returns the shifts. Defaults to no shift.
        debug: If True, the `var_width` and `var_height` props will be added to the clip.

    Returns:
        Scaled clip.
    """
    _cached_clips = dict[str, vs.VideoNode]()

    no_accepts_var = list[Scaler]()

    def _eval_scale(f: vs.VideoFrame, n: int) -> vs.VideoNode:
        key = f"{f.width}_{f.height}"

        if key not in _cached_clips:
            res = Resolution(f.width, f.height)

            norm_scaler = scaler(res) if callable(scaler) else scaler
            norm_shift = shift(res) if callable(shift) else shift
            norm_height = height(res) if callable(height) else height

            if width is None:
                norm_width = get_w(norm_height, res.width / res.height)
            else:
                norm_width = width(res) if callable(width) else width

            part_scaler = partial(norm_scaler.scale, width=norm_width, height=norm_height, shift=norm_shift)

            scaled = clip
            if (scaled.width, scaled.height) != (norm_width, norm_height):
                if norm_scaler not in no_accepts_var:
                    try:
                        scaled = part_scaler(clip)
                    except BaseException:
                        no_accepts_var.append(norm_scaler)

                if norm_scaler in no_accepts_var:
                    const_clip = clip.resize.Point(res.width, res.height)

                    scaled = part_scaler(const_clip)

            if debug:
                scaled = scaled.std.SetFrameProps(var_width=res.width, var_height=res.height)

            _cached_clips[key] = scaled

        return _cached_clips[key]

    out_clip = clip if callable(width) or callable(height) else clip.std.BlankClip(width, height)

    return out_clip.std.FrameEval(_eval_scale, clip, clip)