Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

remove all torch.ones from tests #3494

Closed
wants to merge 2 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
2 changes: 1 addition & 1 deletion backends/xnnpack/test/models/deeplab_v3.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ def forward(self, *args):
class TestDeepLabV3(unittest.TestCase):
dl3 = DL3Wrapper()
dl3 = dl3.eval()
model_inputs = (torch.ones(1, 3, 224, 224),)
model_inputs = (torch.randn(1, 3, 224, 224),)

def test_fp32_dl3(self):

Expand Down
6 changes: 3 additions & 3 deletions backends/xnnpack/test/models/edsr.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,13 +9,12 @@
import torch

from executorch.backends.xnnpack.test.tester import Tester
from executorch.backends.xnnpack.test.tester.tester import Quantize
from torchsr.models import edsr_r16f64


class TestEDSR(unittest.TestCase):
edsr = edsr_r16f64(2, False).eval() # noqa
model_inputs = (torch.ones(1, 3, 224, 224),)
model_inputs = (torch.randn(1, 3, 224, 224),)

def test_fp32_edsr(self):
(
Expand All @@ -28,10 +27,11 @@ def test_fp32_edsr(self):
.run_method_and_compare_outputs()
)

@unittest.skip("T187799178: Debugging Numerical Issues with Calibration")
def test_qs8_edsr(self):
(
Tester(self.edsr, self.model_inputs)
.quantize(Quantize(calibrate=False))
.quantize()
.export()
.to_edge()
.partition()
Expand Down
4 changes: 3 additions & 1 deletion backends/xnnpack/test/models/emformer_rnnt.py
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,9 @@ def get_example_inputs(self):
)
return predict_inputs

@unittest.skip("T183426271")
@unittest.skip(
"T183426271: Emformer Predictor Takes too long to export + partition"
)
def test_fp32_emformer_predictor(self):
predictor = self.Predictor()
(
Expand Down
6 changes: 3 additions & 3 deletions backends/xnnpack/test/models/inception_v3.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,14 +8,13 @@

import torch
from executorch.backends.xnnpack.test.tester import Tester
from executorch.backends.xnnpack.test.tester.tester import Quantize
from torchvision import models


class TestInceptionV3(unittest.TestCase):
# pyre-ignore
ic3 = models.inception_v3(weights="IMAGENET1K_V1").eval() # noqa
model_inputs = (torch.ones(1, 3, 224, 224),)
model_inputs = (torch.randn(1, 3, 224, 224),)

all_operators = {
"executorch_exir_dialects_edge__ops_aten_addmm_default",
Expand Down Expand Up @@ -45,6 +44,7 @@ def test_fp32_ic3(self):
.run_method_and_compare_outputs()
)

@unittest.skip("T187799178: Debugging Numerical Issues with Calibration")
def test_qs8_ic3(self):
# Quantization fuses away batchnorm, so it is no longer in the graph
ops_after_quantization = self.all_operators - {
Expand All @@ -53,7 +53,7 @@ def test_qs8_ic3(self):

(
Tester(self.ic3, self.model_inputs)
.quantize(Quantize(calibrate=False))
.quantize()
.export()
.to_edge()
.check(list(ops_after_quantization))
Expand Down
2 changes: 1 addition & 1 deletion backends/xnnpack/test/models/inception_v4.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@

class TestInceptionV4(unittest.TestCase):
ic4 = inception_v4(pretrained=False).eval()
model_inputs = (torch.ones(3, 299, 299).unsqueeze(0),)
model_inputs = (torch.randn(3, 299, 299).unsqueeze(0),)

all_operators = {
"executorch_exir_dialects_edge__ops_aten_addmm_default",
Expand Down
2 changes: 1 addition & 1 deletion backends/xnnpack/test/models/llama2_et_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,5 +45,5 @@ def _test(self, dtype: torch.dtype = torch.float):
.dump_artifact()
.to_executorch()
.serialize()
.run_method_and_compare_outputs(atol=5e-2)
.run_method_and_compare_outputs(atol=5e-2, inputs=example_inputs)
)
2 changes: 1 addition & 1 deletion backends/xnnpack/test/models/mobilebert.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,5 +38,5 @@ def test_fp32_mobilebert(self):
.check_not(list(self.supported_ops))
.to_executorch()
.serialize()
.run_method_and_compare_outputs()
.run_method_and_compare_outputs(inputs=self.example_inputs)
)
6 changes: 3 additions & 3 deletions backends/xnnpack/test/models/mobilenet_v2.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,15 +8,14 @@

import torch
from executorch.backends.xnnpack.test.tester import Tester
from executorch.backends.xnnpack.test.tester.tester import Quantize
from torchvision import models
from torchvision.models.mobilenetv2 import MobileNet_V2_Weights


class TestMobileNetV2(unittest.TestCase):
mv2 = models.mobilenetv2.mobilenet_v2(weights=MobileNet_V2_Weights)
mv2 = mv2.eval()
model_inputs = (torch.ones(1, 3, 224, 224),)
model_inputs = (torch.randn(1, 3, 224, 224),)

all_operators = {
"executorch_exir_dialects_edge__ops_aten__native_batch_norm_legit_no_training_default",
Expand Down Expand Up @@ -49,6 +48,7 @@ def test_fp32_mv2(self):
.run_method_and_compare_outputs(num_runs=10)
)

@unittest.skip("T187799178: Debugging Numerical Issues with Calibration")
def test_qs8_mv2(self):
# Quantization fuses away batchnorm, so it is no longer in the graph
ops_after_quantization = self.all_operators - {
Expand All @@ -64,7 +64,7 @@ def test_qs8_mv2(self):

(
Tester(self.mv2, self.model_inputs, dynamic_shapes=dynamic_shapes)
.quantize(Quantize(calibrate=False))
.quantize()
.export()
.to_edge()
.check(list(ops_after_quantization))
Expand Down
6 changes: 3 additions & 3 deletions backends/xnnpack/test/models/mobilenet_v3.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,14 +8,13 @@

import torch
from executorch.backends.xnnpack.test.tester import Tester
from executorch.backends.xnnpack.test.tester.tester import Quantize
from torchvision import models


class TestMobileNetV3(unittest.TestCase):
mv3 = models.mobilenetv3.mobilenet_v3_small(pretrained=True)
mv3 = mv3.eval()
model_inputs = (torch.ones(1, 3, 224, 224),)
model_inputs = (torch.randn(1, 3, 224, 224),)
dynamic_shapes = (
{
2: torch.export.Dim("height", min=224, max=455),
Expand Down Expand Up @@ -51,6 +50,7 @@ def test_fp32_mv3(self):
.run_method_and_compare_outputs(num_runs=5)
)

@unittest.skip("T187799178: Debugging Numerical Issues with Calibration")
def test_qs8_mv3(self):
ops_after_quantization = self.all_operators - {
"executorch_exir_dialects_edge__ops_aten__native_batch_norm_legit_no_training_default",
Expand All @@ -59,7 +59,7 @@ def test_qs8_mv3(self):

(
Tester(self.mv3, self.model_inputs, dynamic_shapes=self.dynamic_shapes)
.quantize(Quantize(calibrate=False))
.quantize()
.export()
.to_edge()
.check(list(ops_after_quantization))
Expand Down
13 changes: 5 additions & 8 deletions backends/xnnpack/test/models/resnet.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,11 +10,10 @@
import torchvision

from executorch.backends.xnnpack.test.tester import Tester
from executorch.backends.xnnpack.test.tester.tester import Quantize


class TestResNet18(unittest.TestCase):
inputs = (torch.ones(1, 3, 224, 224),)
inputs = (torch.randn(1, 3, 224, 224),)
dynamic_shapes = (
{
2: torch.export.Dim("height", min=224, max=455),
Expand Down Expand Up @@ -57,20 +56,18 @@ def _test_exported_resnet(self, tester):
def test_fp32_resnet18(self):
self._test_exported_resnet(Tester(torchvision.models.resnet18(), self.inputs))

@unittest.skip("T187799178: Debugging Numerical Issues with Calibration")
def test_qs8_resnet18(self):
quantized_tester = Tester(torchvision.models.resnet18(), self.inputs).quantize(
Quantize(calibrate=False)
)
quantized_tester = Tester(torchvision.models.resnet18(), self.inputs).quantize()
self._test_exported_resnet(quantized_tester)

def test_fp32_resnet18_dynamic(self):
self._test_exported_resnet(
Tester(self.DynamicResNet(), self.inputs, self.dynamic_shapes)
)

@unittest.skip("T187799178: Debugging Numerical Issues with Calibration")
def test_qs8_resnet18_dynamic(self):
self._test_exported_resnet(
Tester(self.DynamicResNet(), self.inputs, self.dynamic_shapes).quantize(
Quantize(calibrate=False)
)
Tester(self.DynamicResNet(), self.inputs, self.dynamic_shapes).quantize()
)
2 changes: 1 addition & 1 deletion backends/xnnpack/test/models/torchvision_vit.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
class TestViT(unittest.TestCase):
vit = models.vision_transformer.vit_b_16(weights="IMAGENET1K_V1")
vit = vit.eval()
model_inputs = (torch.ones(1, 3, 224, 224),)
model_inputs = (torch.randn(1, 3, 224, 224),)
dynamic_shapes = (
{
2: torch.export.Dim("height", min=224, max=455),
Expand Down
10 changes: 5 additions & 5 deletions backends/xnnpack/test/ops/add.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,17 +58,17 @@ def _test_add(self, inputs):
)

def test_fp16_add(self):
inputs = (torch.ones(1).to(torch.float16), torch.ones(1).to(torch.float16))
inputs = (torch.randn(1).to(torch.float16), torch.randn(1).to(torch.float16))
self._test_add(inputs)

def test_fp32_add(self):
inputs = (torch.ones(1), torch.ones(1))
inputs = (torch.randn(1), torch.randn(1))
self._test_add(inputs)

def test_fp32_add_constant(self):
inputs = (torch.randn(4, 4, 4),)
(
Tester(self.AddConstant(torch.ones(4, 4, 4)), inputs)
Tester(self.AddConstant(torch.randn(4, 4, 4)), inputs)
.export()
.check_count({"torch.ops.aten.add.Tensor": 4})
.to_edge()
Expand All @@ -84,7 +84,7 @@ def test_fp32_add_constant(self):
def test_qs8_add_constant(self):
inputs = (torch.randn(4, 4, 4),)
(
Tester(self.AddConstant(torch.ones(4, 4, 4)), inputs)
Tester(self.AddConstant(torch.randn(4, 4, 4)), inputs)
.quantize()
.export()
.check_count({"torch.ops.aten.add.Tensor": 4})
Expand All @@ -95,7 +95,7 @@ def test_qs8_add_constant(self):
.check_not(["executorch_exir_dialects_edge__ops_aten_add_Tensor"])
.to_executorch()
.serialize()
.run_method_compare_outputs()
.run_method_and_compare_outputs()
)

def test_qs8_add(self):
Expand Down
54 changes: 27 additions & 27 deletions backends/xnnpack/test/ops/cat.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,8 +78,8 @@ def test_fp16_cat2(self):
Using Clamp2 because fp16 add is done in fp32 ATM. Need to fix that first.
"""
inputs = (
torch.ones(1, 2, 3).to(torch.float16),
torch.ones(3, 2, 3).to(torch.float16),
torch.randn(1, 2, 3).to(torch.float16),
torch.randn(3, 2, 3).to(torch.float16),
)
self._test_cat(self.Cat2(), inputs)

Expand All @@ -88,9 +88,9 @@ def test_fp16_cat3(self):
Using Clamp2 because fp16 add is done in fp32 ATM. Need to fix that first.
"""
inputs = (
torch.ones(1, 2, 3).to(torch.float16),
torch.ones(3, 2, 3).to(torch.float16),
torch.ones(2, 2, 3).to(torch.float16),
torch.randn(1, 2, 3).to(torch.float16),
torch.randn(3, 2, 3).to(torch.float16),
torch.randn(2, 2, 3).to(torch.float16),
)
self._test_cat(self.Cat3(), inputs)

Expand All @@ -99,44 +99,44 @@ def test_fp16_cat4(self):
Using Clamp2 because fp16 add is done in fp32 ATM. Need to fix that first.
"""
inputs = (
torch.ones(1, 2, 3).to(torch.float16),
torch.ones(3, 2, 3).to(torch.float16),
torch.ones(2, 2, 3).to(torch.float16),
torch.ones(5, 2, 3).to(torch.float16),
torch.randn(1, 2, 3).to(torch.float16),
torch.randn(3, 2, 3).to(torch.float16),
torch.randn(2, 2, 3).to(torch.float16),
torch.randn(5, 2, 3).to(torch.float16),
)
self._test_cat(self.Cat4(), inputs)

def test_fp32_cat2(self):
inputs = (torch.ones(1, 2, 3), torch.ones(3, 2, 3))
inputs = (torch.randn(1, 2, 3), torch.randn(3, 2, 3))
self._test_cat(self.Cat2(), inputs)

def test_fp32_cat3(self):
inputs = (torch.ones(1, 2, 3), torch.ones(3, 2, 3), torch.ones(2, 2, 3))
inputs = (torch.randn(1, 2, 3), torch.randn(3, 2, 3), torch.randn(2, 2, 3))
self._test_cat(self.Cat3(), inputs)

def test_fp32_cat4(self):
inputs = (
torch.ones(1, 2, 3),
torch.ones(3, 2, 3),
torch.ones(2, 2, 3),
torch.ones(5, 2, 3),
torch.randn(1, 2, 3),
torch.randn(3, 2, 3),
torch.randn(2, 2, 3),
torch.randn(5, 2, 3),
)
self._test_cat(self.Cat4(), inputs)

def test_qs8_cat2(self):
inputs = (torch.ones(1, 2, 3), torch.ones(3, 2, 3))
inputs = (torch.randn(1, 2, 3), torch.randn(3, 2, 3))
self._test_cat(self.Cat2(), inputs, cat_num=2, quant=True)

def test_qs8_cat3(self):
inputs = (torch.ones(1, 2, 3), torch.ones(3, 2, 3), torch.ones(2, 2, 3))
inputs = (torch.randn(1, 2, 3), torch.randn(3, 2, 3), torch.randn(2, 2, 3))
self._test_cat(self.Cat3(), inputs, cat_num=3, quant=True)

def test_qs8_cat4(self):
inputs = (
torch.ones(1, 2, 3),
torch.ones(3, 2, 3),
torch.ones(2, 2, 3),
torch.ones(5, 2, 3),
torch.randn(1, 2, 3),
torch.randn(3, 2, 3),
torch.randn(2, 2, 3),
torch.randn(5, 2, 3),
)
self._test_cat(self.Cat4(), inputs, cat_num=4, quant=True)

Expand All @@ -145,11 +145,11 @@ def test_fp32_cat_unsupported(self):
XNNPACK only supports concatenating up to 4 values, so it should not delegate here.
"""
inputs = (
torch.ones(1, 2, 3),
torch.ones(3, 2, 3),
torch.ones(2, 2, 3),
torch.ones(5, 2, 3),
torch.ones(1, 2, 3),
torch.randn(1, 2, 3),
torch.randn(3, 2, 3),
torch.randn(2, 2, 3),
torch.randn(5, 2, 3),
torch.randn(1, 2, 3),
)
(
Tester(self.Cat5(), inputs)
Expand All @@ -169,7 +169,7 @@ def forward(self, x, y):
return torch.cat([x, y], -1)

def test_fp32_cat_negative_dim(self):
inputs = (torch.ones(3, 2, 3), torch.ones(3, 2, 1))
inputs = (torch.randn(3, 2, 3), torch.randn(3, 2, 1))
self._test_cat(self.CatNegativeDim(), inputs)

class CatNhwc(torch.nn.Module):
Expand Down
12 changes: 9 additions & 3 deletions backends/xnnpack/test/ops/div.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,15 +43,21 @@ def _test_div(self, inputs):
)

def test_fp16_div(self):
inputs = (torch.ones(1).to(torch.float16), torch.ones(1).to(torch.float16))
# Adding 4 to move distribution away from 0, 4 Std Dev should be far enough
inputs = (
(torch.randn(1) + 4).to(torch.float16),
(torch.randn(1) + 4).to(torch.float16),
)
self._test_div(inputs)

def test_fp32_div(self):
inputs = (torch.ones(1), torch.ones(1))
# Adding 4 to move distribution away from 0, 4 Std Dev should be far enough
inputs = (torch.randn(1) + 4, torch.randn(1) + 4)
self._test_div(inputs)

def test_fp32_div_single_input(self):
inputs = (torch.ones(1),)
# Adding 4 to move distribution away from 0, 4 Std Dev should be far enough
inputs = (torch.randn(1) + 4,)
(
Tester(self.DivSingleInput(), inputs)
.export()
Expand Down