"Didn't find engine for operation quantized::linear_prepack" error when dynamically quantizing a Hugging Face Transformers model

joe*_*oel 5 quantization deep-learning pytorch

I am trying to apply dynamic quantization (quantizing the weights and activations) to a PyTorch pretrained model from the Hugging Face Transformers library. I consulted this link and found dynamic quantization to be the best fit, since I will be running the quantized model on CPU.

The Hugging Face model in question is linked here.

Torch version: 1.6.0 (installed via pip)

Pretrained model

from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext")
model = AutoModel.from_pretrained("microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext")

Dynamic quantization

import torch

# Swap every torch.nn.Linear in the model for a dynamically quantized int8 version.
quantized_model = torch.quantization.quantize_dynamic(
    model, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8
)

print(quantized_model)

Error

---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-7-df2355c17e0b> in <module>
      1 quantized_model = torch.quantization.quantize_dynamic(
----> 2     model, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8
      3 )
      4 
      5 print(quantized_model)

~/.virtualenvs/python3/lib64/python3.6/site-packages/torch/quantization/quantize.py in quantize_dynamic(model, qconfig_spec, dtype, mapping, inplace)
    283     model.eval()
    284     propagate_qconfig_(model, qconfig_spec)
--> 285     convert(model, mapping, inplace=True)
    286     _remove_qconfig(model)
    287     return model

~/.virtualenvs/python3/lib64/python3.6/site-packages/torch/quantization/quantize.py in convert(module, mapping, inplace)
    363     for name, mod in module.named_children():
    364         if type(mod) not in SWAPPABLE_MODULES:
--> 365             convert(mod, mapping, inplace=True)
    366         reassign[name] = swap_module(mod, mapping)
    367 

~/.virtualenvs/python3/lib64/python3.6/site-packages/torch/quantization/quantize.py in convert(module, mapping, inplace)
    363     for name, mod in module.named_children():
    364         if type(mod) not in SWAPPABLE_MODULES:
--> 365             convert(mod, mapping, inplace=True)
    366         reassign[name] = swap_module(mod, mapping)
    367 

~/.virtualenvs/python3/lib64/python3.6/site-packages/torch/quantization/quantize.py in convert(module, mapping, inplace)
    363     for name, mod in module.named_children():
    364         if type(mod) not in SWAPPABLE_MODULES:
--> 365             convert(mod, mapping, inplace=True)
    366         reassign[name] = swap_module(mod, mapping)
    367 

~/.virtualenvs/python3/lib64/python3.6/site-packages/torch/quantization/quantize.py in convert(module, mapping, inplace)
    363     for name, mod in module.named_children():
    364         if type(mod) not in SWAPPABLE_MODULES:
--> 365             convert(mod, mapping, inplace=True)
    366         reassign[name] = swap_module(mod, mapping)
    367 

~/.virtualenvs/python3/lib64/python3.6/site-packages/torch/quantization/quantize.py in convert(module, mapping, inplace)
    363     for name, mod in module.named_children():
    364         if type(mod) not in SWAPPABLE_MODULES:
--> 365             convert(mod, mapping, inplace=True)
    366         reassign[name] = swap_module(mod, mapping)
    367 

~/.virtualenvs/python3/lib64/python3.6/site-packages/torch/quantization/quantize.py in convert(module, mapping, inplace)
    364         if type(mod) not in SWAPPABLE_MODULES:
    365             convert(mod, mapping, inplace=True)
--> 366         reassign[name] = swap_module(mod, mapping)
    367 
    368     for key, value in reassign.items():

~/.virtualenvs/python3/lib64/python3.6/site-packages/torch/quantization/quantize.py in swap_module(mod, mapping)
    393             )
    394             device = next(iter(devices)) if len(devices) > 0 else None
--> 395             new_mod = mapping[type(mod)].from_float(mod)
    396             if device:
    397                 new_mod.to(device)

~/.virtualenvs/python3/lib64/python3.6/site-packages/torch/nn/quantized/dynamic/modules/linear.py in from_float(cls, mod)
    101         else:
    102             raise RuntimeError('Unsupported dtype specified for dynamic quantized Linear!')
--> 103         qlinear = Linear(mod.in_features, mod.out_features, dtype=dtype)
    104         qlinear.set_weight_bias(qweight, mod.bias)
    105         return qlinear

~/.virtualenvs/python3/lib64/python3.6/site-packages/torch/nn/quantized/dynamic/modules/linear.py in __init__(self, in_features, out_features, bias_, dtype)
     33 
     34     def __init__(self, in_features, out_features, bias_=True, dtype=torch.qint8):
---> 35         super(Linear, self).__init__(in_features, out_features, bias_, dtype=dtype)
     36         # We don't muck around with buffers or attributes or anything here
     37         # to keep the module simple. *everything* is simply a Python attribute.

~/.virtualenvs/python3/lib64/python3.6/site-packages/torch/nn/quantized/modules/linear.py in __init__(self, in_features, out_features, bias_, dtype)
    150             raise RuntimeError('Unsupported dtype specified for quantized Linear!')
    151 
--> 152         self._packed_params = LinearPackedParams(dtype)
    153         self._packed_params.set_weight_bias(qweight, bias)
    154         self.scale = 1.0

~/.virtualenvs/python3/lib64/python3.6/site-packages/torch/nn/quantized/modules/linear.py in __init__(self, dtype)
     18         elif self.dtype == torch.float16:
     19             wq = torch.zeros([1, 1], dtype=torch.float)
---> 20         self.set_weight_bias(wq, None)
     21 
     22     @torch.jit.export

~/.virtualenvs/python3/lib64/python3.6/site-packages/torch/nn/quantized/modules/linear.py in set_weight_bias(self, weight, bias)
     24         # type: (torch.Tensor, Optional[torch.Tensor]) -> None
     25         if self.dtype == torch.qint8:
---> 26             self._packed_params = torch.ops.quantized.linear_prepack(weight, bias)
     27         elif self.dtype == torch.float16:
     28             self._packed_params = torch.ops.quantized.linear_prepack_fp16(weight, bias)

RuntimeError: Didn't find engine for operation quantized::linear_prepack NoQEngine

Han*_*ton 3

Is qnnpack in the list when you run print(torch.backends.quantized.supported_engines)?
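
For example (the exact output depends on how your PyTorch wheel was built; fbgemm is the x86 server backend and qnnpack the ARM/mobile one):

import torch

# List the quantized backends compiled into this PyTorch build.
print(torch.backends.quantized.supported_engines)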

Does setting torch.backends.quantized.engine = 'qnnpack' work for you?
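
A minimal sketch of the full workaround, assuming 'qnnpack' appears in the supported-engines list above (use 'fbgemm' instead if that is what your build reports):

import torch
from transformers import AutoModel

# Select a quantized backend before converting the model; without this,
# the prepack ops can fail with NoQEngine on builds where no default is set.
torch.backends.quantized.engine = 'qnnpack'

model = AutoModel.from_pretrained(
    "microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext"
)
quantized_model = torch.quantization.quantize_dynamic(
    model, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8
)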