#include <algorithm>
#include <iostream>
#include <string>
#include <vector>
#define BOOST_SPIRIT_UNICODE // We'll use unicode (UTF8) all throughout
#include <boost/spirit/include/qi.hpp>
#include <boost/spirit/include/qi_parse.hpp>
#include <boost/spirit/include/support_standard_wide.hpp>
void parse_simple_string()
{
    namespace qi = boost::spirit::qi;
    namespace encoding = boost::spirit::unicode;
    //namespace stw = boost::spirit::standard_wide;
    typedef std::wstring::const_iterator iterator_type;

    std::vector<std::wstring> result;
    std::wstring const input = LR"(12,3","ab,cd","G,G\"GG","kkk","10,\"0","99987","PPP","??)";
    qi::rule<iterator_type, std::wstring()> key = +(qi::unicode::char_ - qi::lit(L"\",\""));
    qi::phrase_parse(input.begin(), input.end(),
                     key % qi::lit(L"\",\""),
                     encoding::space,
                     result);
    //std::copy(result.rbegin(), result.rend(), std::ostream_iterator<std::wstring, wchar_t> (std::wcout, L"\n"));
    for(auto const &data : result) std::wcout<<data<<std::endl;
}
I studied the article "How to use Boost Spirit to parse Chinese (unicode utf-16)?" and followed its guide, but I still cannot parse "你好" …
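For comparison, here is a minimal sketch that uses the standard_wide encoding (the stw namespace commented out above) to split a wide string containing Chinese; this is an assumption about the intended usage, not a verified fix for the unicode-encoding version:

#include <boost/spirit/include/qi.hpp>
#include <iostream>
#include <string>
#include <vector>

int main()
{
    namespace qi = boost::spirit::qi;
    namespace sw = boost::spirit::standard_wide;

    std::wstring const input = L"你好,世界";
    std::vector<std::wstring> result;
    // one token is any run of wide characters up to the next comma
    qi::rule<std::wstring::const_iterator, std::wstring()> token = +(sw::char_ - qi::lit(L','));
    qi::parse(input.begin(), input.end(), token % qi::lit(L','), result);
    for(auto const &t : result) std::wcout << t << L"\n"; // wcout may need an imbued locale to display CJK
}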
Here is a QtQuick1 example that I cannot get to run on QtQuick2.
screenCapture.hpp
#include <QObject>
class QString;
class QQuickView;
class screenCapture : public QObject
{
    Q_OBJECT

public:
    explicit screenCapture(QQuickView *parent = 0);

public slots:
    void capture(QString const &path) const;

private:
    QQuickView *currentView_;
};
screenCapture.cpp
#include <QPixmap>
#include <QQuickView>
#include <QString>
#include "screenCapture.hpp"
screenCapture::screenCapture(QQuickView *currentView) :
    QObject(0), currentView_(currentView)
{
}

void screenCapture::capture(QString const &path) const
{
    QPixmap::grabWidget(currentView_).save(path);
}
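In Qt Quick 2 a QQuickView is a QWindow rather than a QWidget, so QPixmap::grabWidget no longer applies. A minimal sketch of the slot using the Qt 5 API (QQuickWindow::grabWindow):

#include <QImage>
#include <QQuickView>

void screenCapture::capture(QString const &path) const
{
    // QQuickView derives from QQuickWindow, whose grabWindow()
    // renders the scene into a QImage that can be saved directly
    QImage const img = currentView_->grabWindow();
    img.save(path);
}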
main.cpp
#include <QGuiApplication>
#include <QQuickPaintedItem>
#include <QQuickView>
#include <QQmlContext>
#include "screenCapture.hpp"
int main(int argc, char *argv[])
{
    QGuiApplication app(argc, argv);
    qmlRegisterType<screenCapture>("Image", 1, 0, …

I am using OpenGL for some image processing; the first experiment converts a color image to gray. Everything works fine, except that I do not want to show the widget.
If I do not call show(), the QGLWidget never starts rendering the texture. Can I render the texture without showing the widget? Is QGLWidget the right tool for this?
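As far as I know, one common route is to render offscreen instead of showing the widget at all. A minimal sketch, assuming Qt 4's QGLPixelBuffer (shader setup and the draw call are elided):

#include <QGLPixelBuffer>
#include <QImage>

QImage renderOffscreen(QImage const &src)
{
    // the pbuffer owns its own GL context, so no widget has to be shown
    QGLPixelBuffer pbuffer(src.width(), src.height());
    pbuffer.makeCurrent();
    // ... upload src as a texture, bind the gray-scale shader, draw the quad ...
    return pbuffer.toImage(); // read the rendered result back
}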
Part of the code:
#include <QDebug>
#include "toGray.hpp"
toGray::toGray(std::string const &vertex_file, std::string const &fragment_file, QWidget *parent)
    : basicGLWidget(vertex_file, fragment_file, parent) //read shaders, compile them, allocate VBO
{
}

void toGray::initializeVertexBuffer()
{
    std::vector<GLfloat> const vertex{
        -1.0f,  1.0f, 0.0f, 1.0f,
         1.0f,  1.0f, 0.0f, 1.0f,
        -1.0f, -1.0f, 0.0f, 1.0f,

         1.0f,  1.0f, 0.0f, 1.0f,
         1.0f, -1.0f, 0.0f, 1.0f,
        -1.0f, -1.0f, 0.0f, 1.0f,
    };
    initializeVertexBufferImpl(vertex); //copy the data into QOpenGLBuffer
    QImage img(":/simpleGPGPU/images/emili.jpg");
    texture_addr_ = bindTexture(img);
    resize(img.width(), img.height());
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, …

#include <atomic>
#include <iostream>
#include <thread>
class atomicAcquireRelease00
{
public:
    atomicAcquireRelease00() : x(false), y(false), z(0) {}

    void run()
    {
        std::thread a(&atomicAcquireRelease00::write_x, this);
        std::thread b(&atomicAcquireRelease00::write_y, this);
        std::thread c(&atomicAcquireRelease00::read_x_then_y, this);
        std::thread d(&atomicAcquireRelease00::read_y_then_x, this);
        a.join();
        b.join();
        c.join();
        d.join();
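        // With acquire/release only there is no single total order over x and y
        // (unlike seq_cst), so c and d may disagree and z == 0 is a legal result.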
        std::cout<<"z == "<<z.load()<<std::endl;
    }

private:
    void write_x()
    {
        x.store(true, std::memory_order_release); //(1)
    }

    void write_y()
    {
        y.store(true, std::memory_order_release); //(2)
    }

    void read_x_then_y()
    {
        while(!x.load(std::memory_order_acquire)); //(3)
        if(y.load(std::memory_order_acquire)){ //(4)
            ++z;
        }
    }

    void read_y_then_x()
    {
        while(!y.load(std::memory_order_acquire)); //(5)
        if(x.load(std::memory_order_acquire)){ //(6)
            ++z;
        }
    }

private:
    std::atomic<bool> x, y;
    std::atomic<int> z; …

What is the default pixel type created by imread? I tested different images and they all give me unsigned char with varying numbers of channels. If I do not ask for it explicitly, will imread ever create a pixel type of signed char?
cv::Mat img = cv::imread("lena.jpg", -1); //always give me unsigned char
I checked the documentation of cv::imread, but it does not say what default pixel type imread creates.
Documentation link: http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html (Mat imread(const string& filename, int flags))
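As far as I know, imread never produces signed char; it decodes to unsigned 8-bit (CV_8U) channels unless a flag such as IMREAD_ANYDEPTH (or flags = -1 with a 16-bit source) lets a deeper type through. A quick check of what was actually loaded:

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>

int main()
{
    cv::Mat const img = cv::imread("lena.jpg", -1);
    // depth() reports the per-channel type; a jpeg decodes to CV_8U
    std::cout << (img.depth() == CV_8U) << ", channels: " << img.channels() << std::endl;
}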
For some reason, I can't set up the parameters of the state machine through its constructor, so I would like to access the public member functions of the front-end state machine through the back end. That is, I want something like:
typedef msm::back::state_machine<player_> player;
player p;
p.get_front_end(); //get the address of the front end
p.get_front_end().set_param(34); //call the member function of the front end
Is this possible? Thanks.
The other solution is to pass the parameter into the state machine via an event:
p.process_event(open_theme(34));
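For what it's worth, msm::back::state_machine<player_> publicly derives from the front end player_, so front-end members should be reachable directly on the back end; a sketch, assuming set_param is a public member of player_:

// the back end *is-a* front end, so no get_front_end() is needed
player p;
p.set_param(34);                 // front-end member, called through the back end
p.process_event(open_theme(34)); // the event-based alternative from above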
Sharpening code written in GLSL (source):
uniform sampler2D sampler;
uniform vec2 offset[9];
uniform int kernel[9];
void main()
{
    vec4 sum = vec4(0.0);
    int i;
    for (i = 0; i < 9; i++) {
        vec4 color = texture2D(sampler, gl_TexCoord[0].st + offset[i]);
        sum += color * float(kernel[i]); // the int coefficient must be converted for vec4 math
    }
    gl_FragColor = sum;
}
It works on 4-channel images, but what if I want it to work on a single channel? I cannot find anything like vec1. Is there another solution besides converting the single-channel image into a 3-channel one?
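One possible approach, sketched under the assumption that the single-channel image is uploaded as a one-component texture (GL_LUMINANCE or GL_RED): accumulate a plain float, then splat it back into all output channels.

uniform sampler2D sampler;
uniform vec2 offset[9];
uniform int kernel[9];

void main()
{
    float sum = 0.0; // a scalar plays the role of "vec1"
    for (int i = 0; i < 9; i++) {
        // a one-component texture carries its value in .r
        sum += texture2D(sampler, gl_TexCoord[0].st + offset[i]).r * float(kernel[i]);
    }
    gl_FragColor = vec4(vec3(sum), 1.0); // splat the scalar back into RGB
}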
This site (tutorial) shows how to draw the lines detected by cv::HoughLines, but I don't understand how it finds the points lying on each line.
for( size_t i = 0; i < lines.size(); i++ )
{
    float rho = lines[i][0], theta = lines[i][1];
    Point pt1, pt2;
    double a = cos(theta), b = sin(theta);
    double x0 = a*rho, y0 = b*rho;
    pt1.x = cvRound(x0 + 1000*(-b)); //??
    pt1.y = cvRound(y0 + 1000*(a));  //??
    pt2.x = cvRound(x0 - 1000*(-b)); //??
    pt2.y = cvRound(y0 - 1000*(a));  //??
    line( cdst, pt1, pt2, Scalar(0,0,255), 3, CV_AA);
}
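For what it's worth, the geometry behind those four lines: cv::HoughLines returns each line in normal form x·cos(θ) + y·sin(θ) = ρ, so (x0, y0) = (ρ·cos θ, ρ·sin θ) is the foot of the perpendicular dropped from the origin onto the line, and (−sin θ, cos θ) = (−b, a) is a unit direction vector along the line. pt1 and pt2 are therefore just the points 1000 pixels away from (x0, y0) in the two opposite directions, far enough to span the whole image.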
The example from the OpenCV cookbook I can understand, but it is more verbose:
for(auto const &data : lines){
    float …

Below is some Octave code (part of a kmeans implementation):
centroidSum = zeros(K);
valueSum = zeros(K, n);
for i = 1 : m
    for j = 1 : K
        if(idx(i) == j)
            centroidSum(j) = centroidSum(j) + 1;
            valueSum(j, :) = valueSum(j, :) + X(i, :);
        end
    end
end
The code works. Is it possible to vectorize it? Without the if statement it would be easy, but how do I vectorize code that contains an if statement?
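One possible vectorization, sketched under the assumption that idx is an m-by-1 vector of cluster assignments: logical indexing removes the if, and a sparse indicator matrix removes the loops entirely.

% logical indexing: the if disappears, only the loop over clusters remains
for j = 1 : K
    sel = (idx == j);                 % mask of the points assigned to cluster j
    centroidSum(j) = sum(sel);        % how many points fell in cluster j
    valueSum(j, :) = sum(X(sel, :), 1);
end

% fully vectorized alternative using a sparse indicator matrix
S = sparse(1:m, idx, 1, m, K);        % S(i, idx(i)) = 1
centroidSum = full(sum(S, 1))';       % K-by-1 counts
valueSum = S' * X;                    % K-by-n per-cluster sums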
If I have a type like this:
std::vector<int> const value = ...
Which one is the better solution?
Style 1:

for(auto v : value){
    //do something
}

Style 2:

for(auto &&v : value){
    //do something
}

Style 3:

for(auto const v : value){
    //do something
}
All of them preserve the constness of the type.

Style 2 is the most generic solution.

From what I know, for primitive types like int and double, passing by value is preferred over passing by const reference, so I would think Style 1 and Style 3 beat Style 2 when we know the element type is primitive. Forgive me if this question sounds silly.
How can I make TensorFlow use a specific GPU for inference?
Part of the source code:
std::unique_ptr<tensorflow::Session> session;
Status const load_graph_status = LoadGraph(graph_path, &session);
if (!load_graph_status.ok()) {
    LOG(ERROR) << "LoadGraph ERROR!!!!" << load_graph_status;
    return -1;
}

std::vector<Tensor> resized_tensors;
Status const read_tensor_status = ReadTensorFromImageFile(image_path, &resized_tensors);
if (!read_tensor_status.ok()) {
    LOG(ERROR) << read_tensor_status;
    return -1;
}

std::vector<Tensor> outputs;
Status run_status = session->Run({{input_layer, resized_tensors[0]}},
                                 output_layer, {}, &outputs);
So far so good, but when I call Run, TensorFlow always picks the same GPU. Is there a way to specify which GPU to run on?
If you need the full source code, I put it on pastebin.
Edit: it looks like options.config.mutable_gpu_options()->set_visible_device_list("0") works, but I am not sure.
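A minimal sketch, assuming the TensorFlow C++ API and that the options can be threaded into LoadGraph, where the session is actually created:

// pin the session to one GPU by making only that device visible
tensorflow::SessionOptions options;
options.config.mutable_gpu_options()->set_visible_device_list("0"); // only GPU 0
options.config.mutable_gpu_options()->set_allow_growth(true);       // optional: allocate memory lazily
std::unique_ptr<tensorflow::Session> session(tensorflow::NewSession(options));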
I want to detect whether a picture has vignetting, but I cannot find a way to measure it. I searched with keywords like "vignetting metric", "vignetting detection" and "vignetting classification", and they all lead me to topics like "create a vignetting filter" or "vignetting correction". Is there any metric for this? Something like a score from 0 to 1: the lower the score, the less likely the image has a vignetting effect. One simple solution I came up with is measuring the lightness channel of the image.
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
using namespace cv;
using namespace std;
int main()
{
    auto img = imread("my_pic.jpg");
    cvtColor(img, img, cv::COLOR_BGR2LAB);
    vector<Mat> lab_img;
    split(img, lab_img);
    auto const sum_val = sum(lab_img[0])[0] / lab_img[0].total();
    //use sum_val as threshold
}
Another solution is to train a CNN classifier; I could use a vignetting filter to generate images with and without the vignetting effect. Please give me some suggestions, thanks.
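For what it's worth, a hypothetical refinement of the lightness idea (my own heuristic, not an established metric): compare the mean lightness of the corner patches against the centre patch, so the score is roughly exposure-independent and stays near 1 when there is no falloff.

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <vector>

double vignetteScore(cv::Mat const &bgr)
{
    cv::Mat lab;
    cv::cvtColor(bgr, lab, cv::COLOR_BGR2Lab);
    std::vector<cv::Mat> channels;
    cv::split(lab, channels);
    cv::Mat const L = channels[0];
    int const w = L.cols, h = L.rows;
    // mean lightness of the central third vs. the four w/6 x h/6 corner patches
    double const center = cv::mean(L(cv::Rect(w/3, h/3, w/3, h/3)))[0];
    double corners = 0.0;
    for(cv::Rect const r : {cv::Rect(0, 0, w/6, h/6), cv::Rect(w - w/6, 0, w/6, h/6),
                            cv::Rect(0, h - h/6, w/6, h/6), cv::Rect(w - w/6, h - h/6, w/6, h/6)})
        corners += cv::mean(L(r))[0] / 4.0;
    return center > 0.0 ? corners / center : 1.0; // near 0 => strong corner falloff
}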