// cuSOLVER/cuSPARSE context and device-side CSR buffers.
// All raw pointers are owned by this object: the handles/stream/descriptor
// and the row-pointer buffer are created once at construction time, while
// the value/column buffers are (re)allocated per factorization and freed
// by the matching dealloc routine. No RAII wrappers are used here, so every
// pointer is nullptr-initialized and guarded before destruction.
cusolverSpHandle_t handle = nullptr; // cuSOLVER sparse solver handle
cudaStream_t stream = nullptr;       // stream all async copies/solves are queued on
cusparseMatDescr_t descr = nullptr;  // matrix descriptor (general type, zero-based indexing)

void* d_val_idx = nullptr; // device buffer: CSR non-zero values
void* d_col_idx = nullptr; // device buffer: CSR column indices (int)
void* d_row_ptr = nullptr; // device buffer: CSR row pointers, (n_rows + 1) ints
51 cusolverSpCreate(&handle);
52 cudaStreamCreate(&stream);
53 cusolverSpSetStream(handle, stream);
54 cusparseCreateMatDescr(&descr);
55 cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
56 cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
57 cudaMalloc(&d_row_ptr,
sizeof(
int) * (this->
n_rows + 1));
60 void release()
const {
61 if(handle) cusolverSpDestroy(handle);
62 if(stream) cudaStreamDestroy(stream);
63 if(descr) cusparseDestroyMatDescr(descr);
64 if(d_row_ptr) cudaFree(d_row_ptr);
68 const size_t n_val =
sizeof(ET) * csr_mat.n_elem;
69 const size_t n_col =
sizeof(int) * csr_mat.n_elem;
71 cudaMalloc(&d_val_idx, n_val);
72 cudaMalloc(&d_col_idx, n_col);
74 cudaMemcpyAsync(d_val_idx, csr_mat.val_mem(), n_val, cudaMemcpyHostToDevice, stream);
75 cudaMemcpyAsync(d_col_idx, csr_mat.col_mem(), n_col, cudaMemcpyHostToDevice, stream);
76 cudaMemcpyAsync(d_row_ptr, csr_mat.row_mem(),
sizeof(
int) * (csr_mat.n_rows + 1llu), cudaMemcpyHostToDevice, stream);
79 void device_dealloc()
const {
80 if(d_val_idx) cudaFree(d_val_idx);
81 if(d_col_idx) cudaFree(d_col_idx);
/**
 * Construct a CUDA-backed sparse matrix of the given shape.
 *
 * Forwards the dimensions to the SparseMat<T> base, then calls acquire()
 * to create the cuSOLVER handle, the cuSPARSE matrix descriptor, the CUDA
 * stream and the device-side row-pointer buffer of (n_rows + 1) ints.
 *
 * @param in_row  number of rows
 * @param in_col  number of columns
 * @param in_elem initial number of non-zero elements (default 0)
 */
SparseMatCUDA(const uword in_row, const uword in_col, const uword in_elem = 0)
    : SparseMat<T>(in_row, in_col, in_elem) { acquire(); }
90 SparseMatCUDA(
const uword in_row,
const uword in_col,
const uword in_elem = 0) {
…}
/**
 * Polymorphic clone: return a heap-allocated copy of this matrix, owned by
 * the caller through the returned unique_ptr.
 *
 * NOTE(review): this delegates to the copy constructor, which is not fully
 * visible in this chunk. That constructor must re-acquire (or deep-copy)
 * the CUDA handles and device buffers rather than copying the raw pointers;
 * a shallow copy would make release()/device_dealloc() double-free when both
 * the original and the clone are destroyed — confirm against the full file.
 */
unique_ptr<MetaMat<T>> make_copy() override { return std::make_unique<SparseMatCUDA>(*this); }
109 if(!this->factored) {
115 this->factored =
true;
118 const size_t n_rhs = (std::is_same_v<T, float> ||
Precision::MIXED == this->setting.precision ?
sizeof(float) :
sizeof(double)) * B.n_elem;
123 cudaMalloc(&d_b, n_rhs);
124 cudaMalloc(&d_x, n_rhs);
129 if constexpr(std::is_same_v<T, float>) {
130 cudaMemcpyAsync(d_b, B.memptr(), n_rhs, cudaMemcpyHostToDevice, stream);
132 for(
auto I = 0llu; I < B.n_elem; I += B.n_rows) code += cusolverSpScsrlsvqr(handle,
int(this->n_rows),
int(this->triplet_mat.n_elem), descr, (
float*)d_val_idx, (
int*)d_row_ptr, (
int*)d_col_idx, (
float*)d_b + I,
float(this->setting.tolerance), 3, (
float*)d_x + I, &singularity);
134 X.set_size(arma::size(B));
136 cudaMemcpyAsync(X.memptr(), d_x, n_rhs, cudaMemcpyDeviceToHost, stream);
138 cudaDeviceSynchronize();
141 cudaMemcpyAsync(d_b, B.memptr(), n_rhs, cudaMemcpyHostToDevice, stream);
143 for(
auto I = 0llu; I < B.n_elem; I += B.n_rows) code += cusolverSpDcsrlsvqr(handle,
int(this->n_rows),
int(this->triplet_mat.n_elem), descr, (
double*)d_val_idx, (
int*)d_row_ptr, (
int*)d_col_idx, (
double*)d_b + I, this->setting.tolerance, 3, (
double*)d_x + I, &singularity);
145 X.set_size(arma::size(B));
147 cudaMemcpyAsync(X.memptr(), d_x, n_rhs, cudaMemcpyDeviceToHost, stream);
149 cudaDeviceSynchronize();
152 X = arma::zeros(arma::size(B));
154 mat full_residual = B;
156 auto multiplier = norm(full_residual);
158 auto counter = std::uint8_t{0};
159 while(counter++ < this->setting.iterative_refinement) {
160 if(multiplier < this->setting.tolerance)
break;
162 auto residual = conv_to<fmat>::from(full_residual / multiplier);
164 cudaMemcpyAsync(d_b, residual.memptr(), n_rhs, cudaMemcpyHostToDevice, stream);
167 for(
auto I = 0llu; I < B.n_elem; I += B.n_rows) code += cusolverSpScsrlsvqr(handle,
int(this->n_rows),
int(this->triplet_mat.n_elem), descr, (
float*)d_val_idx, (
int*)d_row_ptr, (
int*)d_col_idx, (
float*)d_b + I,
float(this->setting.tolerance), 3, (
float*)d_x + I, &singularity);
170 cudaMemcpyAsync(residual.memptr(), d_x, n_rhs, cudaMemcpyDeviceToHost, stream);
172 cudaDeviceSynchronize();
174 const mat incre = multiplier * conv_to<mat>::from(residual);
178 suanpan_debug(
"Mixed precision algorithm multiplier: {:.5E}.\n", multiplier = arma::norm(full_residual -= this->
operator*(incre)));
182 if(d_b) cudaFree(d_b);
183 if(d_x) cudaFree(d_x);