Reorg for converters in hardtanh (FX Converter Refactor [5/N]) <Target: converter_reorg_proto> by apbose · Pull Request #1901 · pytorch/TensorRT

--- py/torch_tensorrt/fx/converters/aten_ops_converters.py	2023-05-11 23:31:10.187297 +0000
+++ py/torch_tensorrt/fx/converters/aten_ops_converters.py	2023-05-11 23:31:26.881529 +0000
@@ -209,17 +209,11 @@
    kwargs: Dict[str, Argument],
    name: str,
) -> Union[TRTTensor, Sequence[TRTTensor]]:

    return activation.hardtanh(
-        network,
-        target,
-        SourceIR.ATEN,
-        name,
-        args[0],
-        args[1],
-        args[2]
+        network, target, SourceIR.ATEN, name, args[0], args[1], args[2]
    )


@tensorrt_converter(torch.ops.aten.linear)
def aten_ops_linear(
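For context, this is how the aten-side converter reads once the formatting change above lands; a minimal sketch, assuming the import locations and the `torch.ops.aten.hardtanh.default` registration target, neither of which appears in the hunk itself.

```python
from typing import Dict, Sequence, Tuple, Union

import torch
from torch.fx.node import Argument, Target

from torch_tensorrt.fx.converter_registry import tensorrt_converter
from torch_tensorrt.fx.converters.converter_utils import SourceIR  # assumed location
from torch_tensorrt.fx.converters.impl import activation  # assumed import path
from torch_tensorrt.fx.types import TRTNetwork, TRTTensor


@tensorrt_converter(torch.ops.aten.hardtanh.default)  # registration target assumed
def aten_ops_hardtanh(
    network: TRTNetwork,
    target: Target,
    args: Tuple[Argument, ...],
    kwargs: Dict[str, Argument],
    name: str,
) -> Union[TRTTensor, Sequence[TRTTensor]]:
    # aten.hardtanh's positional schema is (input, min_val, max_val), so the
    # converter only forwards them to the shared impl in impl/activation.py.
    return activation.hardtanh(
        network, target, SourceIR.ATEN, name, args[0], args[1], args[2]
    )
```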
--- py/torch_tensorrt/fx/converters/impl/activation.py	2023-05-11 23:31:10.187297 +0000
+++ py/torch_tensorrt/fx/converters/impl/activation.py	2023-05-11 23:31:27.090627 +0000
@@ -93,11 +93,11 @@
        source_ir,
        name,
        operation_type,
        input_val,
        alpha,
-        beta, 
+        beta,
        dyn_range_fn=hardtanh_dyn_range_fn,
    )


def relu(
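The shared implementation being touched above roughly takes this shape; a sketch, assuming `convert_activation` is the generic helper defined earlier in impl/activation.py (its name and leading parameters are inferred from the call tail visible in the hunk).

```python
import tensorrt as trt


def hardtanh(network, target, source_ir, name, input_val, alpha, beta):
    # hardtanh(x, min_val, max_val) maps onto TensorRT's CLIP activation,
    # with alpha/beta carrying the clip bounds.
    operation_type = trt.ActivationType.CLIP

    def hardtanh_dyn_range_fn(dyn_range):
        # Clip the (min, max) dynamic range the same way the op clips values.
        def hardtanh_fn(x):
            return max(alpha, min(beta, x))

        return hardtanh_fn(dyn_range[0]), hardtanh_fn(dyn_range[1])

    return convert_activation(  # generic helper assumed to live in this file
        network,
        target,
        source_ir,
        name,
        operation_type,
        input_val,
        alpha,
        beta,
        dyn_range_fn=hardtanh_dyn_range_fn,
    )
```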
--- py/torch_tensorrt/fx/converters/acc_ops_converters.py	2023-05-11 23:31:10.187297 +0000
+++ py/torch_tensorrt/fx/converters/acc_ops_converters.py	2023-05-11 23:31:28.868334 +0000
@@ -3596,11 +3596,11 @@
        target,
        SourceIR.ATEN,
        name,
        kwargs["input"],
        kwargs["min_val"],
-        kwargs["max_val"]
+        kwargs["max_val"],
    )


@tensorrt_converter(acc_ops.interpolate)
def acc_ops_interpolate(
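On the acc path the same delegation applies; a sketch of the converter surrounding the final hunk, assuming the function name and the kwargs-based calling convention acc converters use. Note that the hunk's context lines pass SourceIR.ATEN here as well, which the sketch mirrors.

```python
from torch_tensorrt.fx.converter_registry import tensorrt_converter
from torch_tensorrt.fx.converters.converter_utils import SourceIR  # assumed location
from torch_tensorrt.fx.converters.impl import activation  # assumed import path
from torch_tensorrt.fx.tracer.acc_tracer import acc_ops


@tensorrt_converter(acc_ops.hardtanh)
def acc_ops_hardtanh(network, target, args, kwargs, name):
    # acc converters receive named kwargs rather than positional args.
    return activation.hardtanh(
        network,
        target,
        SourceIR.ATEN,  # mirrors the hunk above; a SourceIR.ACC tag may be intended
        name,
        kwargs["input"],
        kwargs["min_val"],
        kwargs["max_val"],
    )
```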