Sha*_*ran · 6 · tags: python, computer-vision, deep-learning, tensorflow, tensorrt
I trained with NVIDIA's Transfer Learning Toolkit (TLT) and then used tlt-converter to convert the .etlt model into a .engine file.
I want to run inference on this .engine file in Python. But because I trained with TLT, I don't have a frozen graph or a .pb file, which is what the TensorRT inference tutorials require.
I'd like to know whether Python inference is possible directly on a .engine file. If not, which conversions (UFF, ONNX) are supported to make it work?
小智 · 7
Python inference is indeed possible with a .engine file. The example below loads a .trt file (effectively identical to a .engine file) from disk and performs a single inference.
In this project I converted an ONNX model to a TRT model with the onnx2trt executable before using it. You can even convert a PyTorch model to TRT, using ONNX as the intermediate format; a sketch of that export step follows, and a Python-API alternative to onnx2trt is shown after the inference example.
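As a minimal sketch of that PyTorch-to-ONNX export step (the ResNet-18 stand-in, input shape, and file names are placeholders of mine, not part of the original answer):

import torch
import torchvision

# Placeholder model: any torch.nn.Module with a fixed input shape works the same way.
model = torchvision.models.resnet18().eval()
dummy_input = torch.randn(1, 3, 224, 224)  # one NCHW sample

torch.onnx.export(
    model,
    dummy_input,
    "model.onnx",              # output path (placeholder)
    input_names=["input"],
    output_names=["output"],
    opset_version=11,
)

# Then, from the shell, turn the ONNX file into a serialized TRT engine:
#   onnx2trt model.onnx -o model.trt

With model.trt in hand, the inference example below loads it and runs a single pass.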
import os

import numpy as np
import pycuda.autoinit  # noqa: F401 -- creates a CUDA context on import
import pycuda.driver as cuda
import tensorrt as trt

# Note: this uses the implicit-batch bindings API of TensorRT 7.x
# (get_binding_shape, binding_is_input, execute_async with batch_size).


class HostDeviceMem(object):
    """Pairs a page-locked host buffer with its device-side allocation."""

    def __init__(self, host_mem, device_mem):
        self.host = host_mem
        self.device = device_mem

    def __str__(self):
        return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)

    def __repr__(self):
        return self.__str__()


class TrtModel:
    def __init__(self, engine_path, max_batch_size=1, dtype=np.float32):
        self.engine_path = engine_path
        self.dtype = dtype
        self.logger = trt.Logger(trt.Logger.WARNING)
        self.runtime = trt.Runtime(self.logger)
        self.engine = self.load_engine(self.runtime, self.engine_path)
        self.max_batch_size = max_batch_size
        self.inputs, self.outputs, self.bindings, self.stream = self.allocate_buffers()
        self.context = self.engine.create_execution_context()

    @staticmethod
    def load_engine(trt_runtime, engine_path):
        # Register any plugins the engine was built with (e.g. custom TLT layers).
        trt.init_libnvinfer_plugins(None, "")
        with open(engine_path, 'rb') as f:
            engine_data = f.read()
        engine = trt_runtime.deserialize_cuda_engine(engine_data)
        return engine

    def allocate_buffers(self):
        inputs = []
        outputs = []
        bindings = []
        stream = cuda.Stream()

        for binding in self.engine:
            size = trt.volume(self.engine.get_binding_shape(binding)) * self.max_batch_size
            # Page-locked host memory allows asynchronous host<->device copies.
            host_mem = cuda.pagelocked_empty(size, self.dtype)
            device_mem = cuda.mem_alloc(host_mem.nbytes)
            bindings.append(int(device_mem))
            if self.engine.binding_is_input(binding):
                inputs.append(HostDeviceMem(host_mem, device_mem))
            else:
                outputs.append(HostDeviceMem(host_mem, device_mem))

        return inputs, outputs, bindings, stream

    def __call__(self, x: np.ndarray, batch_size=1):
        x = x.astype(self.dtype)
        np.copyto(self.inputs[0].host, x.ravel())

        # Enqueue input copies, inference, and output copies on one stream,
        # then synchronize once at the end.
        for inp in self.inputs:
            cuda.memcpy_htod_async(inp.device, inp.host, self.stream)

        self.context.execute_async(batch_size=batch_size, bindings=self.bindings, stream_handle=self.stream.handle)

        for out in self.outputs:
            cuda.memcpy_dtoh_async(out.host, out.device, self.stream)

        self.stream.synchronize()
        return [out.host.reshape(batch_size, -1) for out in self.outputs]


if __name__ == "__main__":
    batch_size = 1
    trt_engine_path = os.path.join("..", "models", "main.trt")
    model = TrtModel(trt_engine_path)
    shape = model.engine.get_binding_shape(0)  # input binding shape

    # Random input scaled to [0, 1) with the engine's expected dimensions.
    data = np.random.randint(0, 255, (batch_size, *shape[1:])) / 255
    result = model(data, batch_size)
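If you prefer to stay in Python rather than shell out to onnx2trt, the TensorRT builder can produce the same serialized engine from an ONNX file. A minimal sketch, assuming TensorRT 7.x (the builder API changed in later releases) and placeholder file names:

import tensorrt as trt

TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
# The ONNX parser requires an explicit-batch network definition.
EXPLICIT_BATCH = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)

with trt.Builder(TRT_LOGGER) as builder, \
     builder.create_network(EXPLICIT_BATCH) as network, \
     trt.OnnxParser(network, TRT_LOGGER) as parser:
    builder.max_workspace_size = 1 << 30  # 1 GiB of build workspace
    with open("model.onnx", "rb") as f:   # placeholder input path
        if not parser.parse(f.read()):
            for i in range(parser.num_errors):
                print(parser.get_error(i))
            raise RuntimeError("failed to parse the ONNX model")
    engine = builder.build_cuda_engine(network)
    with open("model.trt", "wb") as f:    # placeholder output path
        f.write(engine.serialize())

Either route ends in the same serialized engine bytes that TrtModel.load_engine above deserializes. One caveat: an engine built with the EXPLICIT_BATCH flag should be executed with context.execute_async_v2(bindings=..., stream_handle=...) rather than the implicit-batch execute_async call used in TrtModel above.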
Stay safe, everyone!