|
|
572d9c |
/*
|
|
|
572d9c |
Copyright (c) 2015 Ivan Mahonin
|
|
|
572d9c |
|
|
|
572d9c |
This program is free software: you can redistribute it and/or modify
|
|
|
572d9c |
it under the terms of the GNU General Public License as published by
|
|
|
572d9c |
the Free Software Foundation, either version 3 of the License, or
|
|
|
572d9c |
(at your option) any later version.
|
|
|
572d9c |
|
|
|
572d9c |
This program is distributed in the hope that it will be useful,
|
|
|
572d9c |
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
572d9c |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
572d9c |
GNU General Public License for more details.
|
|
|
572d9c |
|
|
|
572d9c |
You should have received a copy of the GNU General Public License
|
|
|
572d9c |
along with this program.  If not, see <http://www.gnu.org/licenses/>.
|
|
|
572d9c |
*/
|
|
|
572d9c |
|
|
|
f83e6b |
#include <cassert>

#include <algorithm>
#include <iostream>

#include "clrender.h"
#include "measure.h"
|
|
|
572d9c |
|
|
|
572d9c |
|
|
|
572d9c |
using namespace std;
|
|
|
572d9c |
|
|
|
572d9c |
|
|
|
f83e6b |
// Simple single-kernel renderer ("full-screen" contour variant).
// Loads contour-fs.cl and caches the kernel's preferred work-group size,
// which draw() uses as both global and local NDRange size.
ClRender::ClRender(ClContext &cl):
	cl(cl),
	contour_program(),
	contour_draw_kernel(),
	contour_draw_workgroup_size(),
	surface(),
	paths_buffer(),
	mark_buffer(),
	surface_image(),
	prev_event()
{
	contour_program = cl.load_program("contour-fs.cl");
	assert(contour_program);

	// Pass &cl.err so a kernel-creation failure is actually reported,
	// consistent with the ClRender2/ClRender3 constructors below
	// (previously the error argument was NULL and the code was lost).
	contour_draw_kernel = clCreateKernel(contour_program, "draw", &cl.err);
	assert(!cl.err);
	assert(contour_draw_kernel);

	cl.err |= clGetKernelWorkGroupInfo(
		contour_draw_kernel,
		cl.device,
		CL_KERNEL_WORK_GROUP_SIZE,
		sizeof(contour_draw_workgroup_size),
		&contour_draw_workgroup_size,
		NULL );
	assert(!cl.err);
}
|
|
|
572d9c |
|
|
|
572d9c |
// Release GPU resources in dependency order: the surface buffers and the
// uploaded path data first (both helpers also drain the queue), then the
// kernel and program that reference them.
ClRender::~ClRender() {
	send_surface(NULL);
	send_paths(NULL, 0);
	clReleaseKernel(contour_draw_kernel);
	clReleaseProgram(contour_program);
}
|
|
|
f83e6b |
|
|
|
f83e6b |
// Bind (or unbind, by passing NULL) the host surface this renderer draws
// into. Allocates the per-pixel mark buffer and a device-side pixel buffer
// initialized from the host data, then binds both to the draw kernel.
void ClRender::send_surface(Surface *surface) {
	if (!surface && !this->surface) return;

	// Drain the queue before releasing buffers the GPU may still use.
	cl.err |= clFinish(cl.queue);
	assert(!cl.err);
	prev_event = NULL;

	if (this->surface) {
		clReleaseMemObject(mark_buffer);
		clReleaseMemObject(surface_image);
	}

	this->surface = surface;

	if (this->surface) {
		//Measure t("ClRender::send_surface");

		// One cl_int2 mark per pixel, plus two extra entries at the end.
		mark_buffer = clCreateBuffer(
			cl.context, CL_MEM_READ_WRITE,
			(surface->count() + 2)*sizeof(cl_int2), NULL,
			&cl.err );
		assert(!cl.err);
		assert(mark_buffer);

		// Zero-fill the per-pixel marks.
		// NOTE(review): only count()*sizeof(cl_int2) bytes are cleared —
		// the two trailing entries stay uninitialized; confirm the kernel
		// writes them before reading.
		char zero = 0;
		cl.err |= clEnqueueFillBuffer(
			cl.queue, mark_buffer,
			&zero, 1,
			0, surface->count()*sizeof(cl_int2),
			0, NULL, NULL );
		assert(!cl.err);

		// Device-side copy of the surface pixels, seeded from host memory.
		surface_image = clCreateBuffer(
			cl.context, CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR,
			surface->count()*sizeof(Color), surface->data,
			&cl.err );
		assert(!cl.err);
		assert(surface_image);

		cl.err |= clSetKernelArg(contour_draw_kernel, 0, sizeof(surface->width), &surface->width);
		// fix: the size for the height argument previously read
		// sizeof(surface->width) — same byte count, but misleading.
		cl.err |= clSetKernelArg(contour_draw_kernel, 1, sizeof(surface->height), &surface->height);
		cl.err |= clSetKernelArg(contour_draw_kernel, 2, sizeof(mark_buffer), &mark_buffer);
		cl.err |= clSetKernelArg(contour_draw_kernel, 3, sizeof(surface_image), &surface_image);
		assert(!cl.err);

		cl.err |= clFinish(cl.queue);
		assert(!cl.err);
	}
}
|
|
|
f83e6b |
|
|
|
f83e6b |
// Copy the rendered pixels back from the device into the bound host
// surface and return it (NULL when no surface is bound).
Surface* ClRender::receive_surface() {
	if (surface) {
		//Measure t("ClRender::receive_surface");

		// Non-blocking read, ordered after the last draw() via prev_event.
		cl.err |= clEnqueueReadBuffer(
			cl.queue, surface_image, CL_FALSE,
			0, surface->count()*sizeof(Color), surface->data,
			prev_event ? 1 : 0,
			prev_event ? &prev_event : NULL,
			NULL );
		assert(!cl.err);

		// Block until the read (and everything queued before it) finishes.
		cl.err |= clFinish(cl.queue);
		assert(!cl.err);
		prev_event = NULL;
	}
	return surface;
}
|
|
|
572d9c |
|
|
|
2517eb |
void ClRender::remove_paths() {
|
|
|
013f0c |
if (paths_buffer) {
|
|
|
2517eb |
cl.err |= clFinish(cl.queue);
|
|
|
2517eb |
assert(!cl.err);
|
|
|
2517eb |
prev_event = NULL;
|
|
|
2517eb |
|
|
|
013f0c |
clReleaseMemObject(paths_buffer);
|
|
|
013f0c |
paths_buffer = NULL;
|
|
|
f83e6b |
}
|
|
|
2517eb |
}
|
|
|
2517eb |
|
|
|
2517eb |
void ClRender::send_paths(const void *paths, int size) {
|
|
|
2517eb |
if (!paths_buffer && (!paths || size <= 0)) return;
|
|
|
2517eb |
|
|
|
2517eb |
remove_paths();
|
|
|
f83e6b |
|
|
|
013f0c |
if (paths && size > 0) {
|
|
|
c7fa36 |
//Measure t("ClRender::send_path");
|
|
|
f83e6b |
|
|
|
013f0c |
paths_buffer = clCreateBuffer(
|
|
|
2517eb |
cl.context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
|
|
|
2517eb |
size, const_cast<void*>(paths),</void*>
|
|
|
2517eb |
&cl.err );
|
|
|
5890eb |
assert(!cl.err);
|
|
|
2517eb |
assert(paths_buffer);
|
|
|
f83e6b |
|
|
|
2517eb |
cl.err |= clSetKernelArg(contour_draw_kernel, 4, sizeof(paths_buffer), &paths_buffer);
|
|
|
5890eb |
assert(!cl.err);
|
|
|
c7fa36 |
}
|
|
|
c7fa36 |
}
|
|
|
f83e6b |
|
|
|
013f0c |
// Enqueue one draw pass. Runs a single work-group of the kernel's
// preferred size; the output event is chained into prev_event so the next
// draw()/receive_surface() waits on it.
void ClRender::draw() {
	//Measure t("ClRender::contour");

	// Copy prev_event first: clEnqueueNDRangeKernel writes its output
	// event into prev_event, which would clobber the dependency we still
	// need to pass as the wait-list entry.
	cl_event event = prev_event;
	size_t count = contour_draw_workgroup_size;
	size_t group_size = count;
	cl.err |= clEnqueueNDRangeKernel(
		cl.queue,
		contour_draw_kernel,
		1,
		NULL,
		&count,
		&group_size,
		event ? 1 : 0,
		event ? &event : NULL,
		&prev_event );
	assert(!cl.err);
}
|
|
|
572d9c |
|
|
|
f29469 |
void ClRender::wait() {
|
|
|
f29469 |
if (prev_event) {
|
|
|
013f0c |
cl.err |= clWaitForEvents(1, &prev_event);
|
|
|
013f0c |
assert(!cl.err);
|
|
|
f29469 |
prev_event = NULL;
|
|
|
f29469 |
}
|
|
|
f29469 |
}
|
|
|
572d9c |
|
|
|
b09c5d |
|
|
|
b09c5d |
// ------------------------------------------------
|
|
|
b09c5d |
|
|
|
b09c5d |
|
|
|
b09c5d |
// Sort-based renderer: three kernels (reset / paths / draw) from
// contour-sort.cl sharing one large scratch "samples" buffer that is
// allocated once here and bound to all three kernels up front.
ClRender2::ClRender2(ClContext &cl):
	cl(cl),
	contour_program(),
	contour_reset_kernel(),
	contour_paths_kernel(),
	contour_draw_kernel(),
	surface(),
	points_count(),
	paths_buffer(),
	points_buffer(),
	samples_buffer(),
	surface_image(),
	prev_event()
{
	contour_program = cl.load_program("contour-sort.cl");
	assert(contour_program);

	contour_reset_kernel = clCreateKernel(contour_program, "reset", &cl.err);
	assert(!cl.err);
	assert(contour_reset_kernel);

	contour_paths_kernel = clCreateKernel(contour_program, "paths", &cl.err);
	assert(!cl.err);
	assert(contour_paths_kernel);

	contour_draw_kernel = clCreateKernel(contour_program, "draw", &cl.err);
	assert(!cl.err);
	assert(contour_draw_kernel);

	// NOTE(review): fixed 1 GiB allocation — will fail on devices with
	// less available memory; confirm the intended sizing policy.
	samples_buffer = clCreateBuffer(
		cl.context, CL_MEM_READ_WRITE,
		1024*1024*1024, NULL,
		&cl.err );
	assert(!cl.err);
	assert(samples_buffer);

	// Bind the shared scratch buffer to each kernel once.
	cl.err |= clSetKernelArg(contour_reset_kernel, 0, sizeof(samples_buffer), &samples_buffer);
	cl.err |= clSetKernelArg(contour_paths_kernel, 2, sizeof(samples_buffer), &samples_buffer);
	cl.err |= clSetKernelArg(contour_draw_kernel, 2, sizeof(samples_buffer), &samples_buffer);
	assert(!cl.err);
}
|
|
|
b09c5d |
|
|
|
b09c5d |
// Tear down in reverse order of construction: path/point buffers and the
// surface first (each helper waits for the queue), then the shared
// samples buffer, and finally the kernels and the program.
ClRender2::~ClRender2() {
	remove_paths();
	remove_surface();

	cl.err |= clReleaseMemObject(samples_buffer);
	assert(!cl.err);
	samples_buffer = NULL;

	clReleaseKernel(contour_reset_kernel);
	clReleaseKernel(contour_paths_kernel);
	clReleaseKernel(contour_draw_kernel);
	clReleaseProgram(contour_program);
}
|
|
|
b09c5d |
|
|
|
b09c5d |
void ClRender2::remove_surface() {
|
|
|
b09c5d |
wait();
|
|
|
b09c5d |
|
|
|
b09c5d |
if (surface) {
|
|
|
b09c5d |
cl.err |= clReleaseMemObject(surface_image);
|
|
|
b09c5d |
assert(!cl.err);
|
|
|
b09c5d |
surface = NULL;
|
|
|
b09c5d |
}
|
|
|
b09c5d |
}
|
|
|
b09c5d |
|
|
|
b09c5d |
// Bind a new host surface: allocate a device image and upload the host
// pixels, then wire width/height and the image into the kernels.
// Unlike ClRender::send_surface, passing NULL here is not a supported way
// to unbind (the assert below fires) — use remove_surface() instead.
void ClRender2::send_surface(Surface *surface) {
	if (!surface && !this->surface) return;

	remove_surface();

	assert(surface);
	this->surface = surface;

	//Measure t("ClRender::send_surface");

	surface_image = clCreateBuffer(
		cl.context, CL_MEM_READ_WRITE,
		surface->count()*sizeof(Color), NULL,
		&cl.err );
	assert(!cl.err);
	assert(surface_image);

	// Asynchronous upload of the host pixels into the device image.
	cl.err |= clEnqueueWriteBuffer(
		cl.queue, surface_image, false,
		0, surface->count()*sizeof(Color), surface->data,
		0, NULL, NULL );
	assert(!cl.err);

	cl.err |= clSetKernelArg(contour_paths_kernel, 0, sizeof(surface->width), &surface->width);
	cl.err |= clSetKernelArg(contour_paths_kernel, 1, sizeof(surface->height), &surface->height);
	cl.err |= clSetKernelArg(contour_draw_kernel, 0, sizeof(surface->width), &surface->width);
	cl.err |= clSetKernelArg(contour_draw_kernel, 1, sizeof(surface_image), &surface_image);
	assert(!cl.err);
}
|
|
|
b09c5d |
|
|
|
b09c5d |
// Read the rendered pixels back into the bound host surface and return
// it (NULL when no surface is bound).
Surface* ClRender2::receive_surface() {
	if (surface) {
		//Measure t("ClRender::receive_surface");

		// Non-blocking read, ordered after the last draw() via prev_event;
		// wait() below makes it effectively synchronous.
		cl.err |= clEnqueueReadBuffer(
			cl.queue, surface_image, CL_FALSE,
			0, surface->count()*sizeof(Color), surface->data,
			prev_event ? 1 : 0,
			prev_event ? &prev_event : NULL,
			NULL );
		assert(!cl.err);

		wait();
	}
	return surface;
}
|
|
|
b09c5d |
|
|
|
b09c5d |
void ClRender2::remove_paths() {
|
|
|
b09c5d |
wait();
|
|
|
b09c5d |
|
|
|
b09c5d |
if (paths_buffer) {
|
|
|
b09c5d |
cl.err |= clReleaseMemObject(paths_buffer);
|
|
|
b09c5d |
assert(!cl.err);
|
|
|
b09c5d |
paths_buffer = NULL;
|
|
|
b09c5d |
}
|
|
|
b09c5d |
|
|
|
b09c5d |
if (points_buffer) {
|
|
|
b09c5d |
cl.err |= clReleaseMemObject(points_buffer);
|
|
|
b09c5d |
assert(!cl.err);
|
|
|
b09c5d |
points_buffer = NULL;
|
|
|
b09c5d |
points_count = 0;
|
|
|
b09c5d |
}
|
|
|
b09c5d |
}
|
|
|
b09c5d |
|
|
|
b09c5d |
// Upload path headers and their point data to the device and bind them to
// the kernels. Both arrays are required and must be non-empty; any
// previously uploaded data is released first.
void ClRender2::send_paths(const Path *paths, int paths_count, const Point *points, int points_count) {
	remove_paths();

	assert(paths);
	assert(paths_count > 0);

	assert(points);
	assert(points_count > 0);

	paths_buffer = clCreateBuffer(
		cl.context, CL_MEM_READ_ONLY,
		paths_count*sizeof(Path), NULL,
		&cl.err );
	assert(!cl.err);
	assert(paths_buffer);

	// Asynchronous upload; the trailing wait() synchronizes everything.
	cl.err |= clEnqueueWriteBuffer(
		cl.queue, paths_buffer, false,
		0, paths_count*sizeof(Path), paths,
		0, NULL, NULL );
	assert(!cl.err);

	points_buffer = clCreateBuffer(
		cl.context, CL_MEM_READ_ONLY,
		points_count*sizeof(Point), NULL,
		&cl.err );
	assert(!cl.err);
	assert(points_buffer);
	this->points_count = points_count;

	cl.err |= clEnqueueWriteBuffer(
		cl.queue, points_buffer, false,
		0, points_count*sizeof(Point), points,
		0, NULL, NULL );
	assert(!cl.err);

	cl.err |= clSetKernelArg(contour_paths_kernel, 3, sizeof(points_buffer), &points_buffer);
	cl.err |= clSetKernelArg(contour_draw_kernel, 3, sizeof(paths_buffer), &paths_buffer);
	assert(!cl.err);

	// Ensure both uploads finished before the host arrays may be freed.
	wait();
}
|
|
|
b09c5d |
|
|
|
b09c5d |
// Enqueue one full render pass as a three-kernel pipeline:
//   reset (per scanline) -> paths (per point segment) -> draw (per
// scanline), each stage waiting on the previous one via events. The final
// event is stored in prev_event for receive_surface()/the next draw().
void ClRender2::draw() {
	//Measure t("ClRender::contour");

	cl_event prepare_event;
	cl_event paths_event;

	// Stage 1: reset per-scanline state.
	size_t count = surface->height;
	cl.err |= clEnqueueNDRangeKernel(
		cl.queue,
		contour_reset_kernel,
		1,
		NULL,
		&count,
		NULL,
		prev_event ? 1 : 0,
		prev_event ? &prev_event : NULL,
		&prepare_event );
	assert(!cl.err);

	// Stage 2: one work-item per point segment (points_count - 1 edges).
	count = points_count - 1;
	cl.err |= clEnqueueNDRangeKernel(
		cl.queue,
		contour_paths_kernel,
		1,
		NULL,
		&count,
		NULL,
		1,
		&prepare_event,
		&paths_event );
	assert(!cl.err);

	// Stage 3: compose the scanlines into the surface image.
	// NOTE(review): prepare_event/paths_event are never passed to
	// clReleaseEvent — consistent with the rest of this file, but worth
	// confirming against the implementation's event retention behavior.
	count = surface->height;
	cl.err |= clEnqueueNDRangeKernel(
		cl.queue,
		contour_draw_kernel,
		1,
		NULL,
		&count,
		NULL,
		1,
		&paths_event,
		&prev_event );
	assert(!cl.err);
}
|
|
|
b09c5d |
|
|
|
b09c5d |
void ClRender2::wait() {
|
|
|
b09c5d |
cl.err |= clFinish(cl.queue);
|
|
|
b09c5d |
assert(!cl.err);
|
|
|
b09c5d |
prev_event = NULL;
|
|
|
b09c5d |
}
|
|
|
b09c5d |
|
|
|
b09c5d |
|
|
|
105dfe |
// ------------------------------------------------
|
|
|
105dfe |
|
|
|
105dfe |
|
|
|
105dfe |
// Per-path renderer: three kernels (clear / path / fill) from
// contour-base.cl. Buffers are allocated lazily in send_surface() and
// send_points(); the constructor only builds the program and kernels.
ClRender3::ClRender3(ClContext &cl):
	cl(cl),
	contour_program(),
	contour_clear_kernel(),
	contour_path_kernel(),
	contour_fill_kernel(),
	surface(),
	points_buffer(),
	mark_buffer(),
	surface_image(),
	prev_event()
{
	contour_program = cl.load_program("contour-base.cl");
	assert(contour_program);

	contour_clear_kernel = clCreateKernel(contour_program, "clear", &cl.err);
	assert(!cl.err);
	assert(contour_clear_kernel);

	contour_path_kernel = clCreateKernel(contour_program, "path", &cl.err);
	assert(!cl.err);
	assert(contour_path_kernel);

	contour_fill_kernel = clCreateKernel(contour_program, "fill", &cl.err);
	assert(!cl.err);
	assert(contour_fill_kernel);
}
|
|
|
105dfe |
|
|
|
105dfe |
// Release points and surface buffers first (both helpers synchronize),
// then the kernels and program.
ClRender3::~ClRender3() {
	send_points(NULL, 0);
	send_surface(NULL);

	// fix: contour_clear_kernel is created in the constructor but was
	// never released here, leaking one kernel per ClRender3 instance.
	clReleaseKernel(contour_clear_kernel);
	clReleaseKernel(contour_path_kernel);
	clReleaseKernel(contour_fill_kernel);
	clReleaseProgram(contour_program);
}
|
|
|
105dfe |
|
|
|
105dfe |
// Bind (or unbind, by passing NULL) the host surface. Allocates the
// device image and the per-pixel mark buffer, uploads the host pixels,
// clears the marks via the clear kernel, and wires all static kernel
// arguments for later draw() calls.
void ClRender3::send_surface(Surface *surface) {
	if (this->surface) {
		wait();
		cl.err |= clReleaseMemObject(surface_image);
		assert(!cl.err);
		surface_image = NULL;
		// fix: mark_buffer is allocated per surface below but was never
		// released anywhere, leaking one buffer per send_surface() call.
		cl.err |= clReleaseMemObject(mark_buffer);
		assert(!cl.err);
		mark_buffer = NULL;
	}

	this->surface = surface;

	if (this->surface) {
		//Measure t("ClRender::send_surface");

		// Each pixel's mark is a 4-int record; used only for sizeof below.
		int zero_mark[4] = { };

		surface_image = clCreateBuffer(
			cl.context, CL_MEM_READ_WRITE,
			surface->count()*sizeof(Color), NULL,
			&cl.err );
		assert(!cl.err);
		assert(surface_image);

		mark_buffer = clCreateBuffer(
			cl.context, CL_MEM_READ_WRITE,
			surface->count()*sizeof(zero_mark), NULL,
			&cl.err );
		assert(!cl.err);
		assert(mark_buffer);

		// Asynchronous upload of the host pixels into the device image.
		cl.err |= clEnqueueWriteBuffer(
			cl.queue, surface_image, false,
			0, surface->count()*sizeof(Color), surface->data,
			0, NULL, NULL );
		assert(!cl.err);

		cl.err |= clSetKernelArg(contour_clear_kernel, 0, sizeof(surface->width), &surface->width);
		cl.err |= clSetKernelArg(contour_clear_kernel, 1, sizeof(surface->height), &surface->height);
		cl.err |= clSetKernelArg(contour_clear_kernel, 2, sizeof(mark_buffer), &mark_buffer);
		assert(!cl.err);

		// Clear all marks once; one work-item per pixel.
		size_t count = surface->count();
		cl.err |= clEnqueueNDRangeKernel(
			cl.queue, contour_clear_kernel,
			1, NULL, &count, NULL,
			0, NULL, NULL );
		assert(!cl.err);

		cl.err |= clSetKernelArg(contour_path_kernel, 0, sizeof(surface->width), &surface->width);
		cl.err |= clSetKernelArg(contour_path_kernel, 1, sizeof(surface->height), &surface->height);
		cl.err |= clSetKernelArg(contour_path_kernel, 2, sizeof(mark_buffer), &mark_buffer);
		assert(!cl.err);

		cl.err |= clSetKernelArg(contour_fill_kernel, 0, sizeof(surface->width), &surface->width);
		cl.err |= clSetKernelArg(contour_fill_kernel, 1, sizeof(surface->height), &surface->height);
		cl.err |= clSetKernelArg(contour_fill_kernel, 2, sizeof(mark_buffer), &mark_buffer);
		cl.err |= clSetKernelArg(contour_fill_kernel, 3, sizeof(surface_image), &surface_image);
		assert(!cl.err);

		wait();
	}
}
|
|
|
105dfe |
|
|
|
105dfe |
// Read the rendered pixels back into the bound host surface and return
// it (NULL when no surface is bound).
Surface* ClRender3::receive_surface() {
	if (surface) {
		//Measure t("ClRender::receive_surface");

		// Non-blocking read, ordered after the last draw() via prev_event;
		// wait() below makes it effectively synchronous.
		cl.err |= clEnqueueReadBuffer(
			cl.queue, surface_image, CL_FALSE,
			0, surface->count()*sizeof(Color), surface->data,
			prev_event ? 1 : 0,
			prev_event ? &prev_event : NULL,
			NULL );
		assert(!cl.err);

		wait();
	}
	return surface;
}
|
|
|
105dfe |
|
|
|
105dfe |
// Upload the point array shared by all subsequent draw(path) calls and
// bind it to the path kernel. Passing NULL/0 releases any previously
// uploaded points without uploading new ones.
void ClRender3::send_points(const vec2f *points, int count) {
	if (points_buffer) {
		// Synchronize before releasing a buffer the GPU may still read.
		wait();
		cl.err |= clReleaseMemObject(points_buffer);
		assert(!cl.err);
		points_buffer = NULL;
	}

	if (points && count > 0) {
		points_buffer = clCreateBuffer(
			cl.context, CL_MEM_READ_ONLY,
			count*sizeof(vec2f), NULL,
			&cl.err );
		assert(!cl.err);
		assert(points_buffer);

		// Asynchronous upload; the trailing wait() synchronizes it.
		cl.err |= clEnqueueWriteBuffer(
			cl.queue, points_buffer, false,
			0, count*sizeof(vec2f), points,
			0, NULL, NULL );
		assert(!cl.err);

		cl.err |= clSetKernelArg(contour_path_kernel, 3, sizeof(points_buffer), &points_buffer);
		assert(!cl.err);

		// Ensure the upload finished before the host array may be freed.
		wait();
	}
}
|
|
|
105dfe |
|
|
|
105dfe |
void ClRender3::draw(const Path &path) {
|
|
|
105dfe |
//Measure t("ClRender::contour");
|
|
|
105dfe |
|
|
|
105dfe |
assert(surface);
|
|
|
105dfe |
assert(points_buffer);
|
|
|
105dfe |
|
|
|
105dfe |
int miny = max(0, path.miny);
|
|
|
105dfe |
int maxy = min(surface->height, path.maxy);
|
|
|
105dfe |
int invert_int = path.invert ? 1 : 0;
|
|
|
105dfe |
int evenodd_int = path.evenodd ? 1 : 0;
|
|
|
105dfe |
if (miny >= maxy || path.begin >= path.end) return;
|
|
|
105dfe |
|
|
|
105dfe |
cl.err |= clSetKernelArg(contour_path_kernel, 4, sizeof(path.begin), &path.begin);
|
|
|
105dfe |
cl.err |= clSetKernelArg(contour_path_kernel, 5, sizeof(path.begin), &path.end);
|
|
|
105dfe |
assert(!cl.err);
|
|
|
105dfe |
|
|
|
105dfe |
cl.err |= clSetKernelArg(contour_fill_kernel, 1, sizeof(maxy), &maxy); // restrict height
|
|
|
105dfe |
cl.err |= clSetKernelArg(contour_fill_kernel, 4, sizeof(path.color), &path.color);
|
|
|
105dfe |
cl.err |= clSetKernelArg(contour_fill_kernel, 5, sizeof(invert_int), &invert_int);
|
|
|
105dfe |
cl.err |= clSetKernelArg(contour_fill_kernel, 6, sizeof(evenodd_int), &evenodd_int);
|
|
|
105dfe |
assert(!cl.err);
|
|
|
105dfe |
|
|
|
105dfe |
|
|
|
105dfe |
cl_event path_event;
|
|
|
105dfe |
|
|
|
105dfe |
size_t group_size = 1;
|
|
|
105dfe |
|
|
|
105dfe |
size_t offset = path.begin;
|
|
|
105dfe |
size_t count = ((path.end - path.begin - 1)/group_size + 1)*group_size;
|
|
|
105dfe |
cl.err |= clEnqueueNDRangeKernel(
|
|
|
105dfe |
cl.queue,
|
|
|
105dfe |
contour_path_kernel,
|
|
|
105dfe |
1,
|
|
|
105dfe |
&offset,
|
|
|
105dfe |
&count,
|
|
|
105dfe |
NULL,//&group_size,
|
|
|
105dfe |
prev_event ? 1 : 0,
|
|
|
105dfe |
prev_event ? &prev_event : NULL,
|
|
|
105dfe |
&path_event );
|
|
|
105dfe |
assert(!cl.err);
|
|
|
105dfe |
|
|
|
105dfe |
offset = miny;
|
|
|
105dfe |
count = ((maxy - miny - 1)/group_size + 1)*group_size;
|
|
|
105dfe |
cl.err |= clEnqueueNDRangeKernel(
|
|
|
105dfe |
cl.queue,
|
|
|
105dfe |
contour_fill_kernel,
|
|
|
105dfe |
1,
|
|
|
105dfe |
&offset,
|
|
|
105dfe |
&count,
|
|
|
105dfe |
NULL,//&group_size,
|
|
|
105dfe |
1,
|
|
|
105dfe |
&path_event,
|
|
|
105dfe |
&prev_event );
|
|
|
105dfe |
assert(!cl.err);
|
|
|
105dfe |
}
|
|
|
105dfe |
|
|
|
105dfe |
void ClRender3::wait() {
|
|
|
105dfe |
cl.err |= clFinish(cl.queue);
|
|
|
105dfe |
assert(!cl.err);
|
|
|
105dfe |
prev_event = NULL;
|
|
|
105dfe |
}
|
|
|
105dfe |
|