CG compiles. Untested.
Andreas Kloeckner committed Jul 30, 2009
1 parent 9aff29c commit 9e03ba8
Showing 6 changed files with 285 additions and 37 deletions.
28 changes: 25 additions & 3 deletions include/iterative-cuda.hpp
@@ -72,6 +72,8 @@ namespace iterative_cuda
gpu_vector(gpu_vector const &src);
~gpu_vector();

gpu_vector &operator=(gpu_vector const &src);

index_type size() const;

void from_cpu(value_type *cpu);
@@ -85,6 +87,14 @@
gpu_vector const &x,
value_type b,
gpu_vector const &y);

void set_to_linear_combination(
value_type a,
gpu_vector const &x,
value_type b0,
gpu_vector const &b1,
gpu_vector const &y);
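// (Presumably *this = a*x + b0*b1*y, where b1 is a one-element
// vector holding a scalar that stays on the GPU; cf. lc2p_kernel
// in src/elementwise.hpp below.)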

gpu_vector *dot(gpu_vector const &b) const;
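// (dot() allocates its one-element result on the GPU; callers such
// as src/cg.hpp take ownership, e.g. via std::auto_ptr.)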
};

@@ -166,13 +176,23 @@ namespace iterative_cuda



-void synchronize_gpu();
template <typename GpuVector>
class diagonal_preconditioner_pimpl;




template <class GpuVector>
class identity_preconditioner : private noncopyable
{
public:
typedef GpuVector gpu_vector_type;

void operator()(gpu_vector_type &result, gpu_vector_type const &op)
{
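// copy via the gpu_vector::operator= that this commit introduces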
result = op;
}
};



@@ -201,6 +221,8 @@ namespace iterative_cuda



+void synchronize_gpu();

template <typename ValueType, typename IndexType, typename Operator, typename Preconditioner>
void gpu_cg(
const Operator &a,
146 changes: 146 additions & 0 deletions src/cg.hpp
@@ -0,0 +1,146 @@
/*
Iterative CUDA is licensed to you under the MIT/X Consortium license:
Copyright (c) 2009 Andreas Kloeckner.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/



#ifndef AAFADFJ_ITERATIVE_CUDA_CG_HPP_SEEN
#define AAFADFJ_ITERATIVE_CUDA_CG_HPP_SEEN




#include <cmath>
#include <memory>
#include <stdexcept>

#include <iterative-cuda.hpp>
#include "reduction.hpp"




namespace iterative_cuda
{
template <typename ValueType, typename IndexType, typename Operator, typename Preconditioner>
inline void gpu_cg(
const Operator &a,
Preconditioner &m_inv, // preconditioners are noncopyable; operator() is non-const
gpu_vector<ValueType, IndexType> &x, // solution vector, updated in place
gpu_vector<ValueType, IndexType> const &b,
ValueType tol=1e-8,
unsigned max_iterations=0
)
{
typedef gpu_vector<ValueType, IndexType> vector_t;
typedef vector_t gpu_scalar_t; // such is life. :/
typedef typename vector_t::value_type scalar_t;

if (a.row_count() != a.column_count())
throw std::runtime_error("gpu_cg: A is not square");

unsigned n = a.row_count();

std::auto_ptr<vector_t> norm_b_squared_gpu(b.dot(b));
scalar_t norm_b_squared;
norm_b_squared_gpu->to_cpu(&norm_b_squared);

if (norm_b_squared == 0)
{
x = b;
return;
}

if (max_iterations == 0)
max_iterations = n;

// typed up from J.R. Shewchuk,
// An Introduction to the Conjugate Gradient Method
// Without the Agonizing Pain, Edition 1 1/4 [8/1994]
// Appendix B3
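//
// In outline, with M^-1 the preconditioner, each iteration computes:
//   q         = A d
//   alpha     = delta_new / (d . q)   (formed on the GPU by divide())
//   x         = x + alpha d
//   r         = r - alpha q           (recomputed as b - A x every 50 iterations)
//   s         = M^-1 r
//   delta_old = delta_new, delta_new = r . s
//   beta      = delta_new / delta_old
//   d         = s + beta d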

unsigned iterations = 0;
vector_t ax(n);
vector_t residual(n);
a(ax, x);
residual.set_to_linear_combination(1, b, -1, ax);

vector_t d(n);
m_inv(d, residual);

std::auto_ptr<gpu_scalar_t> delta_new(
residual.dot(d));

scalar_t delta_0;
delta_new->to_cpu(&delta_0);

while (iterations < max_iterations)
{
vector_t q(n);
a(q, d);

gpu_scalar_t alpha(1);
std::auto_ptr<gpu_scalar_t> d_dot_q(d.dot(q)); // take ownership: dot() allocates
divide(alpha, *delta_new, *d_dot_q);

x.set_to_linear_combination(1, x, 1, alpha, d);

bool calculate_real_residual = iterations % 50 == 0;

if (calculate_real_residual)
{
a(ax, x);
residual.set_to_linear_combination(1, b, -1, ax);
}
else
residual.set_to_linear_combination(1, residual, -1, alpha, q);

vector_t s(n);
m_inv(s, residual);

std::auto_ptr<gpu_scalar_t> delta_old(delta_new);
delta_new = std::auto_ptr<gpu_scalar_t>(
residual.dot(s));

if (calculate_real_residual)
{
// Only terminate the loop on the basis of a "real" residual.

scalar_t delta_new_host;
delta_new->to_cpu(&delta_new_host);
if (std::abs(delta_new_host) < tol*tol * std::abs(delta_0))
break;
}

gpu_scalar_t beta(1);
divide(beta, *delta_new, *delta_old);

d.set_to_linear_combination(1, s, 1, beta, d);

iterations++;
}

if (iterations == max_iterations)
throw std::runtime_error("cg failed to converge");
}
}




#endif
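Since the commit message flags the solver as untested, a minimal calling sketch may help. It uses only what is declared above (gpu_vector, identity_preconditioner, gpu_cg); the operator type csr_operator is a hypothetical stand-in for whatever matrix class is paired with this solver, and gpu_cg only requires of it row_count(), column_count(), and operator()(result, x) computing result = A x.

#include <iterative-cuda.hpp>
#include "cg.hpp"

using namespace iterative_cuda;

typedef gpu_vector<float, int> vec_t;

// csr_operator is a placeholder, not part of this commit: any type with
// row_count(), column_count(), and operator()(Ax, x) will do.
void solve(csr_operator const &a, float *b_host, float *x_host)
{
  unsigned n = a.row_count();

  vec_t b(n), x(n);
  b.from_cpu(b_host); // upload right-hand side
  x.from_cpu(x_host); // upload initial guess

  identity_preconditioner<vec_t> m_inv; // i.e. no preconditioning

  // solve A x = b to relative residual 1e-6, at most n iterations
  gpu_cg(a, m_inv, x, b, 1e-6f, 0);

  x.to_cpu(x_host); // fetch the solution
}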
2 changes: 1 addition & 1 deletion src/diag-preconditioner.hpp
@@ -63,7 +63,7 @@ namespace iterative_cuda
inline void diagonal_preconditioner<GpuVector>::operator()(
gpu_vector_type &result, gpu_vector_type const &op)
{
-product(op, *pimpl->vec, result);
+multiply(result, op, *pimpl->vec);
}
}

78 changes: 57 additions & 21 deletions src/elementwise.hpp
@@ -95,9 +95,10 @@

template <class ValueType>
__global__ void lc2_kernel(
+ValueType *z,
ValueType a, ValueType const *x,
ValueType b, ValueType const *y,
-ValueType *z, unsigned n)
+unsigned n)
{
unsigned tid = threadIdx.x;
unsigned total_threads = gridDim.x*blockDim.x;
@@ -113,58 +114,60 @@

template <class ValueType>
__global__ void lc2p_kernel(
-ValueType const *a_ptr, ValueType const *x,
-ValueType const *b_ptr, ValueType const *y,
-ValueType *z, unsigned n)
+ValueType *z,
+ValueType a, ValueType const *x,
+ValueType b0, ValueType const *b1_ptr, ValueType const *y,
+unsigned n)
{
unsigned tid = threadIdx.x;
unsigned total_threads = gridDim.x*blockDim.x;
unsigned cta_start = blockDim.x*blockIdx.x;
unsigned i;

-ValueType a = *a_ptr;
-ValueType b = *b_ptr;
+ValueType b1 = *b1_ptr;

for (i = cta_start + tid; i < n; i += total_threads)
-z[i] = a*x[i] + b*y[i];
+z[i] = a*x[i] + b0*b1*y[i];
}




template <class VT, class IT>
void lc2(
+gpu_vector<VT, IT> &z,
VT a, gpu_vector<VT, IT> const &x,
-VT b, gpu_vector<VT, IT> const &y,
-gpu_vector<VT, IT> &z)
+VT b, gpu_vector<VT, IT> const &y
+)
{
dim3 grid, block;
splay(x.size(), grid, block);
lc2_kernel<VT><<<grid, block>>>(
-a, x.ptr(), b, y.ptr(), z.ptr(), x.size());
+z.ptr(), a, x.ptr(), b, y.ptr(), x.size());
}




template <class VT, class IT>
void lc2(
-gpu_vector<VT, IT> const &a, gpu_vector<VT, IT> const &x,
-gpu_vector<VT, IT> const &b, gpu_vector<VT, IT> const &y,
-gpu_vector<VT, IT> &z)
+gpu_vector<VT, IT> &z,
+VT a, gpu_vector<VT, IT> const &x,
+VT b0, gpu_vector<VT, IT> const &b1, gpu_vector<VT, IT> const &y
+)
{
dim3 grid, block;
splay(x.size(), grid, block);
lc2p_kernel<VT><<<grid, block>>>(
-a.ptr(), x.ptr(), b.ptr(), y.ptr(), z.ptr(), x.size());
+z.ptr(), a, x.ptr(), b0, b1.ptr(), y.ptr(), x.size());
}




template <class ValueType>
-__global__ void product_kernel(
-ValueType const *x, ValueType const *y, ValueType *z, unsigned n)
+__global__ void multiply_kernel(
+ValueType *z, ValueType const *x, ValueType const *y, unsigned n)
{
unsigned tid = threadIdx.x;
unsigned total_threads = gridDim.x*blockDim.x;
@@ -179,15 +182,48 @@


template <class VT, class IT>
-void product(
+void multiply(
gpu_vector<VT, IT> &z,
gpu_vector<VT, IT> const &x,
gpu_vector<VT, IT> const &y
)
{
dim3 grid, block;
splay(x.size(), grid, block);
multiply_kernel<VT><<<grid, block>>>(
z.ptr(), x.ptr(), y.ptr(), x.size());
}




template <class ValueType>
__global__ void divide_kernel(
ValueType *z, ValueType const *x, ValueType const *y, unsigned n)
{
unsigned tid = threadIdx.x;
unsigned total_threads = gridDim.x*blockDim.x;
unsigned cta_start = blockDim.x*blockIdx.x;
unsigned i;

for (i = cta_start + tid; i < n; i += total_threads)
z[i] = x[i]/y[i];
}




template <class VT, class IT>
void divide(
gpu_vector<VT, IT> &z,
gpu_vector<VT, IT> const &x,
-gpu_vector<VT, IT> const &y,
-gpu_vector<VT, IT> &z)
+gpu_vector<VT, IT> const &y
+)
{
dim3 grid, block;
splay(x.size(), grid, block);
-product_kernel<VT><<<grid, block>>>(
-x.ptr(), y.ptr(), z.ptr(), x.size());
+divide_kernel<VT><<<grid, block>>>(
+z.ptr(), x.ptr(), y.ptr(), x.size());
}
}
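These helpers operate on one-element gpu_vectors precisely so that the CG scalars never leave the device: src/cg.hpp forms alpha and beta with divide() on size-1 vectors instead of copying the dot products back to the host each iteration. Condensed from the solver above:

gpu_scalar_t alpha(1);                            // one-element device vector
std::auto_ptr<gpu_scalar_t> d_dot_q(d.dot(q));    // d . q, computed on the GPU
divide(alpha, *delta_new, *d_dot_q);              // alpha = delta_new / (d . q)
x.set_to_linear_combination(1, x, 1, alpha, d);   // x += alpha d, via lc2p_kernel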

