In order to learn how to write custom TensorFlow ops, I followed the Adding a New Op tutorial and made an "add_b" op that adds a scalar b to each input value.
add_b_op.cc:
#define EIGEN_USE_THREADS
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/shape_inference.h"
using namespace tensorflow;
REGISTER_OP("AddB")
.Attr("T: {float, double}")
.Input("input: T")
.Input("b: T")
.Output("output: T")
.SetShapeFn([] (shape_inference::InferenceContext* c) -> Status {
shape_inference::ShapeHandle out;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &out));
return shape_inference::UnchangedShape(c);
})
//----------------------------------------------------------------------
.Doc(R"doc(
Adds `b` to each input.
input: The input values.
b: A number to add to each input value.
)doc");
template <typename T>
class AddBCpuOp : public OpKernel {
public:
explicit AddBCpuOp(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& input_tensor = context->input(0);
const auto input = input_tensor.flat<T>();
Tensor* output_tensor = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(0, input_tensor.shape(),
&output_tensor));
auto output = output_tensor->flat<T>();
const Eigen::ThreadPoolDevice& d = context->eigen_device<Eigen::ThreadPoolDevice>();
// Note: The mistake of adding 1 instead of `b` is intentional to be able to distinguish
// the CPU and GPU implementations.
output.device(d) = input + static_cast<T>(1);
}
};
REGISTER_KERNEL_BUILDER(
Name("AddB")
.Device(DEVICE_CPU)
.TypeConstraint<float>("T"),
AddBCpuOp<float>);
REGISTER_KERNEL_BUILDER(
Name("AddB")
.Device(DEVICE_CPU)
.TypeConstraint<double>("T"),
AddBCpuOp<double>);
#if GOOGLE_CUDA
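// Forward declaration of the kernel launcher implemented in add_b_op.cu.cc.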
template <typename T>
bool LaunchAddBKernel(const T *__restrict__ d_input, int n, const T *__restrict__ d_b, T *__restrict__ d_output);
template <typename T>
class AddBGpuOp : public OpKernel {
public:
explicit AddBGpuOp(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& input_tensor = context->input(0);
const auto input = input_tensor.flat<T>();
const Tensor& b_tensor = context->input(1);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(b_tensor.shape()),
errors::InvalidArgument("add_b expects a scalar for `b`."));
const auto b = b_tensor.scalar<T>();
Tensor* output_tensor = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(0, input_tensor.shape(),
&output_tensor));
auto output = output_tensor->flat<T>();
OP_REQUIRES(context, LaunchAddBKernel(input.data(), input.dimension(0), b.data(), output.data()),
errors::Internal("add_b: LaunchAddBKernel() failed."));
}
};
REGISTER_KERNEL_BUILDER(
Name("AddB")
.Device(DEVICE_GPU)
.TypeConstraint<float>("T"),
AddBGpuOp<float>);
REGISTER_KERNEL_BUILDER(
Name("AddB")
.Device(DEVICE_GPU)
.TypeConstraint<double>("T"),
AddBGpuOp<double>);
#endif // if GOOGLE_CUDA
add_b_op.cu.cc:
template <typename T, int BLOCK_DIM_X>
__global__ void AddBKernel(const T *__restrict__ d_input, int n, const T *__restrict__ d_b, T *__restrict__ d_output) {
const int i = blockIdx.x * BLOCK_DIM_X + threadIdx.x;
if (i < n) {
d_output[i] = d_input[i] + *d_b;
}
}
template <typename T>
bool LaunchAddBKernel(const T *__restrict__ d_input, int n, const T *__restrict__ d_b, T *__restrict__ d_output) {
  if (n <= 0) return true;
  constexpr int BLOCK_DIM_X = 256;
  // Launch one thread per element, rounding the grid size up.
  AddBKernel<T, BLOCK_DIM_X><<<(n + BLOCK_DIM_X - 1) / BLOCK_DIM_X, BLOCK_DIM_X>>>(d_input, n, d_b, d_output);
  // Surface launch failures to the caller instead of always returning true,
  // so that the OP_REQUIRES check in add_b_op.cc is meaningful.
  return cudaGetLastError() == cudaSuccess;
}
// Explicit instantiations.
template bool LaunchAddBKernel<float>(const float *__restrict__, int, const float *__restrict__, float *__restrict__);
template bool LaunchAddBKernel<double>(const double *__restrict__, int, const double *__restrict__, double *__restrict__);
I intentionally introduced a mistake into the CPU implementation (adding 1 instead of b) so that I can tell whether the CPU or the GPU implementation is being used.
When I test my custom op with:
from __future__ import print_function
import tensorflow as tf

module = tf.load_op_library('custom_ops.so')
with tf.Session(config = tf.ConfigProto(log_device_placement = True)):
    print(module.add_b([5., 4., 3., 2., 1.], 8.).eval())
I get the following output:
I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:892] OS X does not support NUMA - returning NUMA node zero
I tensorflow/core/common_runtime/gpu/gpu_device.cc:951] Found device 0 with properties:
name: GeForce GT 750M
major: 3 minor: 0 memoryClockRate (GHz) 0.9255
pciBusID 0000:01:00.0
Total memory: 2.00GiB
Free memory: 1.80GiB
I tensorflow/core/common_runtime/gpu/gpu_device.cc:972] DMA: 0
I tensorflow/core/common_runtime/gpu/gpu_device.cc:982] 0: Y
I tensorflow/core/common_runtime/gpu/gpu_device.cc:1041] Creating TensorFlow device (/gpu:0) -> (device: 0, name: GeForce GT 750M, pci bus id: 0000:01:00.0)
Device mapping:
/job:localhost/replica:0/task:0/gpu:0 -> device: 0, name: GeForce GT 750M, pci bus id: 0000:01:00.0
I tensorflow/core/common_runtime/direct_session.cc:252] Device mapping:
/job:localhost/replica:0/task:0/gpu:0 -> device: 0, name: GeForce GT 750M, pci bus id: 0000:01:00.0
AddB: /job:localhost/replica:0/task:0/gpu:0
I tensorflow/core/common_runtime/simple_placer.cc:819] AddB: /job:localhost/replica:0/task:0/gpu:0
AddB/b: /job:localhost/replica:0/task:0/gpu:0
I tensorflow/core/common_runtime/simple_placer.cc:819] AddB/b: /job:localhost/replica:0/task:0/gpu:0
AddB/input: /job:localhost/replica:0/task:0/gpu:0
I tensorflow/core/common_runtime/simple_placer.cc:819] AddB/input: /job:localhost/replica:0/task:0/gpu:0
[ 6. 5. 4. 3. 2.]
"设备放置日志"似乎表示正在GPU上执行操作,但输出表明正在使用CPU实现.
当我注释掉实现的两个REGISTER_KERNEL_BUILDER()注册DEVICE_CPU,重新编译和重新测试时,我得到了预期的输出[ 13. 12. 11. 10. 9.],但是有一个错误:
E tensorflow/core/common_runtime/executor.cc:334] Executor failed to create kernel. Not found: No registered 'AddB' OpKernel for CPU devices compatible with node AddB = AddB[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/gpu:0"](AddB/input, AddB/b)
. Registered: device='GPU'; T in [DT_FLOAT]
device='GPU'; T in [DT_DOUBLE]
[[Node: AddB = AddB[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/gpu:0"](AddB/input, AddB/b)]]
That error message looks like a bug to me, because although it says "Executor failed to create kernel", a kernel was evidently created to run the op on the GPU.
Why is the CPU implementation being used rather than the GPU implementation?
In case it matters, here are the details of my development setup:
export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.11.0rc2-py2-none-any.whl

UPDATE

I have found that whether the CPU or the GPU implementation is selected depends on the size of the input. Using this test script:
from __future__ import print_function
import numpy as np
import tensorflow as tf
from time import time

NUM_VALUES = 1310720

input = np.arange(0, NUM_VALUES, dtype = float)

module = tf.load_op_library('custom_ops.so')
with tf.Session(config = tf.ConfigProto(log_device_placement = True)):
    start = time(); print(module.add_b(input, 8.).eval()); end = time(); print(end - start)
... when NUM_VALUES is 1310720 or less, the CPU implementation is used. When NUM_VALUES is 1310721 or more, the GPU implementation is used.
Is there a (1310720 * 8 bytes per double =) 10 MiB cutoff? If so, how do I override it? The AddB() op is simple, but for a more complex custom operation, 10 MiB might be too large a size threshold for its GPU implementation to be selected.
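For reference, the arithmetic behind the suspected cutoff (the 10 MiB figure is my own inference from the observed threshold, not a documented limit):

# 1310720 doubles at 8 bytes each is exactly 10 MiB.
assert 1310720 * 8 == 10 * 1024 * 1024  # 10485760 bytes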
I just read TensorFlow issue #2054 - Manual placement on GPU of a custom operator with both CPU and GPU implementation will always run the CPU version, and it appears that running the CPU implementation is a TensorFlow feature called "constant folding": when TensorFlow optimizes the graph before the first run, ops involving constants are generally evaluated on the CPU, the idea being that the CPU and GPU implementations should produce the same results. Makes sense.
Two ways of disabling this behavior are (a lighter-weight variant is sketched after the second example):
1. Disabling graph optimization:
from __future__ import print_function
import numpy as np
import tensorflow as tf
from time import time

NUM_VALUES = 10

input = np.arange(0, NUM_VALUES, dtype = float)

custom_ops_module = tf.load_op_library('custom_ops.so')

config = tf.ConfigProto(log_device_placement = True)
config.graph_options.optimizer_options.opt_level = -1

with tf.Session(config = config):
    start = time(); print(custom_ops_module.add_b(input, 8.).eval()); end = time(); print(end - start)
2. Not using constants, e.g., by feeding the values into placeholders:
from __future__ import print_function
import numpy as np
import tensorflow as tf
from time import time

NUM_VALUES = 10

custom_ops_module = tf.load_op_library('custom_ops.so')

graph = tf.Graph()
with graph.as_default():
    input = tf.placeholder(tf.float64, shape = (NUM_VALUES,))
    b = tf.placeholder(tf.float64, shape = ())
    result = custom_ops_module.add_b(input, b)

with tf.Session(graph = graph, config = tf.ConfigProto(log_device_placement = True)) as session:
    feed_dict = {
        input: np.arange(0, NUM_VALUES, dtype = float),
        b: 8.,
    }
    start = time(); print(session.run([result], feed_dict = feed_dict)); end = time(); print(end - start)
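A lighter-weight variant of the second workaround (my own sketch, not from the issue discussion): constant folding can only evaluate an op whose inputs are all constants, so feeding just one input through a placeholder should already keep AddB out of the folded subgraph:

from __future__ import print_function
import numpy as np
import tensorflow as tf

NUM_VALUES = 10

custom_ops_module = tf.load_op_library('custom_ops.so')

graph = tf.Graph()
with graph.as_default():
    input = tf.placeholder(tf.float64, shape = (NUM_VALUES,))
    # `b` stays a constant; the non-constant placeholder input alone
    # should prevent the op from being constant-folded on the CPU.
    result = custom_ops_module.add_b(input, 8.)

with tf.Session(graph = graph, config = tf.ConfigProto(log_device_placement = True)) as session:
    print(session.run(result, feed_dict = {input: np.arange(0, NUM_VALUES, dtype = float)}))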