#include "cs.h"
/* solve Lx=b where x and b are dense. x=b on input, solution on output. */
csi cs_lsolve (const cs *L, double *x)
{
    csi p, j, n, *Lp, *Li ;
    double *Lx ;
    if (!CS_CSC (L) || !x) return (0) ;    /* check inputs */
    n = L->n ; Lp = L->p ; Li = L->i ; Lx = L->x ;
    for (j = 0 ; j < n ; j++)
    {
        x [j] /= Lx [Lp [j]] ;             /* divide by the diagonal entry L(j,j) */
        for (p = Lp [j]+1 ; p < Lp [j+1] ; p++)
        {
            x [Li [p]] -= Lx [p] * x [j] ; /* scatter column j into the remaining rows */
        }
    }
    return (1) ;
}
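For reference, here is what the compressed-column (CSC) layout looks like in practice. Below is a hypothetical standalone test driver (my addition, not part of CSparse) that fills the cs struct's arrays by hand for the 2-by-2 lower-triangular system [1 0; 2 3] x = [1; 8] and calls cs_lsolve; the expected solution is x = [1, 2].

#include <stdio.h>
#include "cs.h"
int main (void)
{
    csi Lp [] = {0, 2, 3} ;            /* column pointers, size n+1 */
    csi Li [] = {0, 1, 1} ;            /* row indices of the stored entries */
    double Lx [] = {1, 2, 3} ;         /* L(0,0)=1, L(1,0)=2, L(1,1)=3 */
    cs L = {3, 2, 2, Lp, Li, Lx, -1} ; /* nzmax, m, n, p, i, x, nz=-1 (CSC) */
    double b [] = {1, 8} ;             /* right-hand side, overwritten below */
    cs_lsolve (&L, b) ;                /* forward solve: b now holds x */
    printf ("x = [%g, %g]\n", b [0], b [1]) ;   /* expect x = [1, 2] */
    return (0) ;
}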
/* solve Lx=b where x and b are dense. x=b on input, solution on output.
 * GPU port of cs_lsolve. The cs struct is unpacked into its raw CSC
 * arrays (n = number of columns, Lp = column pointers of size n+1,
 * Li = row indices, Lx = numerical values) because a host-side cs*
 * holds host pointers that the device cannot dereference. A __global__
 * kernel must return void, so the CPU version's return (1) is dropped.
 * The loop is inherently sequential across columns, so this naive port
 * is only correct when launched with one thread: cs_lsolve_gpu<<<1,1>>>. */
__global__
void cs_lsolve_gpu (int n, const int *Lp, const int *Li, const double *Lx, double *x)
{
    int p, j ;
    for (j = 0 ; j < n ; j++)
    {
        x [j] /= Lx [Lp [j]] ;             /* divide by the diagonal entry L(j,j) */
        for (p = Lp [j]+1 ; p < Lp [j+1] ; p++)
        {
            x [Li [p]] -= Lx [p] * x [j] ; /* scatter column j into the remaining rows */
        }
    }
}
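To actually run this kernel, the CSC arrays have to live in device-visible memory. Here is a minimal host-side wrapper sketch (the name lsolve_on_gpu is my own, not CSparse's), assuming unified memory and the single-thread launch noted above; it also narrows CSparse's csi indices to the kernel's int parameters.

#include <string.h>
int lsolve_on_gpu (const cs *L, double *x)   /* hypothetical helper */
{
    if (!CS_CSC (L) || !x) return (0) ;      /* check inputs */
    int n = (int) L->n ;
    int nz = (int) L->p [n] ;                /* entries actually stored */
    int *Lp, *Li ; double *Lx, *xd ;
    cudaMallocManaged (&Lp, (n+1) * sizeof (int)) ;
    cudaMallocManaged (&Li, nz * sizeof (int)) ;
    cudaMallocManaged (&Lx, nz * sizeof (double)) ;
    cudaMallocManaged (&xd, n * sizeof (double)) ;
    for (int k = 0 ; k <= n ; k++) Lp [k] = (int) L->p [k] ;   /* csi -> int */
    for (int k = 0 ; k < nz ; k++) { Li [k] = (int) L->i [k] ; Lx [k] = L->x [k] ; }
    memcpy (xd, x, n * sizeof (double)) ;
    cs_lsolve_gpu<<<1,1>>> (n, Lp, Li, Lx, xd) ;  /* sequential kernel: one thread */
    cudaDeviceSynchronize () ;
    memcpy (x, xd, n * sizeof (double)) ;         /* copy solution back */
    cudaFree (Lp) ; cudaFree (Li) ; cudaFree (Lx) ; cudaFree (xd) ;
    return (1) ;
}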
#include <iostream>
#include <math.h>

// CUDA kernel to add elements of two arrays
__global__
void add(int n, float *x, float *y)
{
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (int i = index; i < n; i += stride)
    y[i] = x[i] + y[i];
}

int main(void)
{
  int N = 1<<20;
  float *x, *y;

  // Allocate Unified Memory -- accessible from CPU or GPU
  cudaMallocManaged(&x, N*sizeof(float));
  cudaMallocManaged(&y, N*sizeof(float));

  // initialize x and y arrays on the host
  for (int i = 0; i < N; i++) {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }

  // Prefetch the data to the GPU
  int device = -1;
  cudaGetDevice(&device);
  cudaMemPrefetchAsync(x, N*sizeof(float), device, NULL);
  cudaMemPrefetchAsync(y, N*sizeof(float), device, NULL);

  // Launch kernel on 1M elements on the GPU
  int blockSize = 256;
  int numBlocks = (N + blockSize - 1) / blockSize;
  add<<<numBlocks, blockSize>>>(N, x, y);

  // Wait for GPU to finish before accessing on host
  cudaDeviceSynchronize();

  // Check for errors (all values should be 3.0f)
  float maxError = 0.0f;
  for (int i = 0; i < N; i++)
    maxError = fmax(maxError, fabs(y[i]-3.0f));
  std::cout << "Max error: " << maxError << std::endl;

  // Free memory
  cudaFree(x);
  cudaFree(y);

  return 0;
}
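Assuming the CUDA toolkit is installed and the file is saved as add.cu (the file name is my choice), it can be built and run with:

nvcc add.cu -o add
./add

On a working GPU this prints Max error: 0, since every element ends up as 1.0f + 2.0f = 3.0f. The grid-stride loop in add is what lets the same kernel handle any N regardless of how many blocks are launched.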