Cleanup compiler warnings: unused variables/typedefs, uninitialised vars, etc. #2933

Merged
merged 9 commits into from Aug 29, 2023
3 changes: 0 additions & 3 deletions stan/math/fwd/fun/mdivide_left.hpp
@@ -60,9 +60,6 @@ template <typename T1, typename T2,
inline Eigen::Matrix<value_type_t<T2>, T1::RowsAtCompileTime,
T2::ColsAtCompileTime>
mdivide_left(const T1& A, const T2& b) {
constexpr int S1 = T1::RowsAtCompileTime;
constexpr int C2 = T2::ColsAtCompileTime;

check_square("mdivide_left", "A", A);
check_multiplicable("mdivide_left", "A", A, "b", b);
if (A.size() == 0) {
1 change: 0 additions & 1 deletion stan/math/fwd/fun/mdivide_left_tri_low.hpp
@@ -56,7 +56,6 @@ inline Eigen::Matrix<value_type_t<T2>, T1::RowsAtCompileTime,
T2::ColsAtCompileTime>
mdivide_left_tri_low(const T1& A, const T2& b) {
constexpr int S1 = T1::RowsAtCompileTime;
constexpr int C2 = T2::ColsAtCompileTime;

check_square("mdivide_left_tri_low", "A", A);
check_multiplicable("mdivide_left_tri_low", "A", A, "b", b);
1 change: 0 additions & 1 deletion stan/math/fwd/fun/mdivide_right.hpp
@@ -97,7 +97,6 @@ inline Eigen::Matrix<value_type_t<EigMat2>, EigMat1::RowsAtCompileTime,
mdivide_right(const EigMat1& A, const EigMat2& b) {
using T = typename value_type_t<EigMat2>::Scalar;
constexpr int R1 = EigMat1::RowsAtCompileTime;
constexpr int C1 = EigMat1::ColsAtCompileTime;
constexpr int R2 = EigMat2::RowsAtCompileTime;
constexpr int C2 = EigMat2::ColsAtCompileTime;

1 change: 0 additions & 1 deletion stan/math/fwd/fun/mdivide_right_tri_low.hpp
@@ -98,7 +98,6 @@ inline Eigen::Matrix<value_type_t<EigMat2>, EigMat1::RowsAtCompileTime,
mdivide_right_tri_low(const EigMat1& A, const EigMat2& b) {
using T = typename value_type_t<EigMat2>::Scalar;
constexpr int R1 = EigMat1::RowsAtCompileTime;
constexpr int C1 = EigMat1::ColsAtCompileTime;
constexpr int R2 = EigMat2::RowsAtCompileTime;
constexpr int C2 = EigMat2::ColsAtCompileTime;
check_square("mdivide_right_tri_low", "b", b);
1 change: 0 additions & 1 deletion stan/math/opencl/kernel_generator/broadcast.hpp
@@ -98,7 +98,6 @@ class broadcast_
* @return pair of indices - bottom and top diagonal
*/
inline std::pair<int, int> extreme_diagonals() const {
int bottom, top;
std::pair<int, int> arg_diags
= this->template get_arg<0>().extreme_diagonals();
return {Colwise ? std::numeric_limits<int>::min() : arg_diags.first,
2 changes: 0 additions & 2 deletions stan/math/opencl/mrrr.hpp
@@ -373,7 +373,6 @@ inline void mrrr_cl(const Eigen::Ref<const Eigen::VectorXd> diagonal,
cluster_end--; // now this is the index of the last element of the
// cluster
if (cluster_end > i) { // cluster
double_d a = high[cluster_end - 1], b = low[cluster_end];
double_d max_shift
= (high[cluster_end - 1] - low[cluster_end]) / min_rel_sep;
double_d next_shift;
@@ -396,7 +395,6 @@ inline void mrrr_cl(const Eigen::Ref<const Eigen::VectorXd> diagonal,

i = cluster_end;
} else { // isolated eigenvalue
int twist_idx;
const double_d low_gap
= i == block.start
? double_d(std::numeric_limits<double>::infinity())
1 change: 0 additions & 1 deletion stan/math/opencl/prim/bernoulli_cdf.hpp
@@ -33,7 +33,6 @@ return_type_t<T_prob_cl> bernoulli_cdf(const T_n_cl& n,
using T_partials_return = partials_return_t<T_prob_cl>;
using std::isnan;
constexpr bool is_n_vector = !is_stan_scalar<T_n_cl>::value;
constexpr bool is_theta_vector = !is_stan_scalar<T_prob_cl>::value;

check_consistent_sizes(function, "Random variable", n,
"Probability parameter", theta);
1 change: 0 additions & 1 deletion stan/math/opencl/prim/bernoulli_lccdf.hpp
@@ -34,7 +34,6 @@ return_type_t<T_prob_cl> bernoulli_lccdf(const T_n_cl& n,
using T_partials_return = partials_return_t<T_prob_cl>;
using std::isnan;
constexpr bool is_n_vector = !is_stan_scalar<T_n_cl>::value;
constexpr bool is_theta_vector = !is_stan_scalar<T_prob_cl>::value;

check_consistent_sizes(function, "Random variable", n,
"Probability parameter", theta);
1 change: 0 additions & 1 deletion stan/math/opencl/prim/bernoulli_lcdf.hpp
@@ -34,7 +34,6 @@ return_type_t<T_prob_cl> bernoulli_lcdf(const T_n_cl& n,
using T_partials_return = partials_return_t<T_prob_cl>;
using std::isnan;
constexpr bool is_n_vector = !is_stan_scalar<T_n_cl>::value;
constexpr bool is_theta_vector = !is_stan_scalar<T_prob_cl>::value;

check_consistent_sizes(function, "Random variable", n,
"Probability parameter", theta);
1 change: 0 additions & 1 deletion stan/math/opencl/prim/bernoulli_logit_lpmf.hpp
@@ -32,7 +32,6 @@ return_type_t<T_prob_cl> bernoulli_logit_lpmf(const T_n_cl& n,
using T_partials_return = partials_return_t<T_prob_cl>;
using std::isnan;
constexpr bool is_n_vector = !is_stan_scalar<T_n_cl>::value;
constexpr bool is_theta_vector = !is_stan_scalar<T_prob_cl>::value;

check_consistent_sizes(function, "Random variable", n,
"Probability parameter", theta);
1 change: 0 additions & 1 deletion stan/math/opencl/prim/dot_self.hpp
@@ -19,7 +19,6 @@ namespace math {
template <typename T,
require_all_kernel_expressions_and_none_scalar_t<T>* = nullptr>
inline auto dot_self(const T& a) {
const char* function = "dot_self(OpenCL)";
return sum(elt_multiply(a, a));
}

1 change: 0 additions & 1 deletion stan/math/opencl/prim/poisson_log_lpmf.hpp
@@ -35,7 +35,6 @@ return_type_t<T_log_rate_cl> poisson_log_lpmf(const T_n_cl& n,
using std::isinf;
using std::isnan;
constexpr bool is_n_vector = !is_stan_scalar<T_n_cl>::value;
constexpr bool is_alpha_vector = !is_stan_scalar<T_log_rate_cl>::value;

check_consistent_sizes(function, "Random variable", n, "Log rate parameter",
alpha);
1 change: 0 additions & 1 deletion stan/math/opencl/prim/poisson_lpmf.hpp
@@ -34,7 +34,6 @@ return_type_t<T_rate_cl> poisson_lpmf(const T_n_cl& n,
using T_partials_return = partials_return_t<T_rate_cl>;
using std::isinf;
constexpr bool is_n_vector = !is_stan_scalar<T_n_cl>::value;
constexpr bool is_lambda_vector = !is_stan_scalar<T_rate_cl>::value;

check_consistent_sizes(function, "Random variable", n, "Rate parameter",
lambda);
7 changes: 4 additions & 3 deletions stan/math/opencl/rev/matrix_power.hpp
@@ -34,16 +34,17 @@ inline var_value<matrix_cl<double>> matrix_power(const var_value<T>& M,
if (M.size() == 0)
return M;

size_t N = M.rows();
andrjohns marked this conversation as resolved.
auto N = M.rows();

if (n == 0) {
return diag_matrix(constant(1.0, M.rows(), 1));
return diag_matrix(constant(1.0, N, 1));
}
if (n == 1) {
return M;
}

arena_t<std::vector<matrix_cl<double>>> arena_powers(n + 1);
arena_powers[0] = diag_matrix(constant(1.0, M.rows(), 1));
arena_powers[0] = diag_matrix(constant(1.0, N, 1));
arena_powers[1] = M.val();
for (size_t i = 2; i <= n; ++i) {
arena_powers[i] = arena_powers[1] * arena_powers[i - 1];
3 changes: 2 additions & 1 deletion stan/math/opencl/rev/softmax.hpp
@@ -2,7 +2,8 @@
#define STAN_MATH_OPENCL_REV_SOFTMAX_HPP
#ifdef STAN_OPENCL

#include <stan/math/opencl/prim/log_sum_exp.hpp>
#include <stan/math/opencl/prim/dot_product.hpp>
#include <stan/math/opencl/prim/softmax.hpp>
#include <stan/math/opencl/kernel_generator.hpp>
#include <stan/math/rev/core.hpp>
#include <stan/math/rev/fun/value_of.hpp>
1 change: 0 additions & 1 deletion stan/math/prim/fun/cholesky_factor_constrain.hpp
@@ -37,7 +37,6 @@ cholesky_factor_constrain(const T& x, int M, int N) {
"((N * (N + 1)) / 2 + (M - N) * N)",
((N * (N + 1)) / 2 + (M - N) * N));
Eigen::Matrix<T_scalar, Eigen::Dynamic, Eigen::Dynamic> y(M, N);
T_scalar zero(0);
int pos = 0;

const auto& x_ref = to_ref(x);
1 change: 0 additions & 1 deletion stan/math/prim/fun/gp_exponential_cov.hpp
@@ -282,7 +282,6 @@ gp_exponential_cov(const std::vector<Eigen::Matrix<T_x1, -1, 1>> &x1,
}

T_s sigma_sq = square(sigma);
T_l temp;

std::vector<Eigen::Matrix<return_type_t<T_x1, T_l>, -1, 1>> x1_new
= divide_columns(x1, length_scale);
2 changes: 0 additions & 2 deletions stan/math/prim/fun/grad_2F1.hpp
@@ -65,7 +65,6 @@ TupleT grad_2F1_impl_ab(const T1& a1, const T2& a2, const T3& b1, const T_z& z,
int sign_z = sign(z);
auto log_z = log(abs(z));

double log_precision = log(precision);
int log_t_new_sign = 1.0;
int log_t_old_sign = 1.0;

@@ -190,7 +189,6 @@ TupleT grad_2F1_impl(const T1& a1, const T2& a2, const T3& b1, const T_z& z,
if (calc_z) {
auto hyper1 = hypergeometric_2F1(a1_euler, a2_euler, b1, z_euler);
auto hyper2 = hypergeometric_2F1(1 + a2, 1 - a1 + b1, 1 + b1, z_euler);
auto pre_mult = a2 * pow(1 - z, -1 - a2);
std::get<3>(grad_tuple_rtn)
= a2 * pow(1 - z, -1 - a2) * hyper1
+ (a2 * (b1 - a1) * pow(1 - z, -a2)
4 changes: 2 additions & 2 deletions stan/math/prim/fun/lb_free.hpp
@@ -83,9 +83,9 @@ inline auto lb_free(T&& y, L&& lb) {
template <typename T, typename L, require_not_std_vector_t<L>* = nullptr>
inline auto lb_free(const std::vector<T> y, const L& lb) {
auto&& lb_ref = to_ref(lb);
andrjohns marked this conversation as resolved.
std::vector<decltype(lb_free(y[0], lb))> ret(y.size());
std::vector<decltype(lb_free(y[0], lb_ref))> ret(y.size());
std::transform(y.begin(), y.end(), ret.begin(),
[&lb](auto&& yy) { return lb_free(yy, lb); });
[&lb_ref](auto&& yy) { return lb_free(yy, lb_ref); });
return ret;
}

1 change: 0 additions & 1 deletion stan/math/prim/fun/stan_print.hpp
@@ -71,7 +71,6 @@ void stan_print(std::ostream* o, const T& x) {
template <typename T, require_tuple_t<T>*>
void stan_print(std::ostream* o, const T& x) {
*o << '(';
constexpr auto tuple_size = std::tuple_size<std::decay_t<T>>::value;
size_t i = 0;
stan::math::for_each(
[&i, o](auto&& elt) {
5 changes: 3 additions & 2 deletions stan/math/prim/functor/hcubature.hpp
@@ -465,7 +465,6 @@ double hcubature(const F& integrand, const T_pars& pars, const int& dim,
int numevals
= (dim == 1) ? 15 : 1 + 4 * dim + 2 * dim * (dim - 1) + std::pow(2, dim);
int evals_per_box = numevals;
int kdiv = kdivide;
double error = err;
double val = result;

@@ -491,7 +490,9 @@ double hcubature(const F& integrand, const T_pars& pars, const int& dim,
std::vector<double> mb(box.b);
mb[box.kdiv] -= w;

double result_1, result_2, err_1, err_2, kdivide_1, kdivide_2;
double result_1, result_2, err_1, err_2;
double kdivide_1 = math::NOT_A_NUMBER;
double kdivide_2 = math::NOT_A_NUMBER;

if (dim == 1) {
std::tie(result_1, err_1)
3 changes: 2 additions & 1 deletion stan/math/prim/functor/operands_and_partials.hpp
@@ -54,7 +54,8 @@ class ops_partials_edge;
* for this specialization must be a `Arithmetic`
*/
template <typename ViewElt, typename Op>
struct ops_partials_edge<ViewElt, Op, require_st_arithmetic<Op>> {
class ops_partials_edge<ViewElt, Op, require_st_arithmetic<Op>> {
public:
using inner_op = std::conditional_t<is_eigen<value_type_t<Op>>::value,
value_type_t<Op>, Op>;
using partials_t = empty_broadcast_array<ViewElt, inner_op>;
2 changes: 1 addition & 1 deletion stan/math/prim/functor/partials_propagator.hpp
@@ -16,7 +16,7 @@ namespace math {
namespace internal {

template <typename ReturnType, typename Enable, typename... Ops>
struct partials_propagator;
class partials_propagator;

/** \ingroup type_trait
* \callergraph
12 changes: 6 additions & 6 deletions stan/math/prim/prob/gaussian_dlm_obs_rng.hpp
@@ -97,22 +97,22 @@ inline Eigen::MatrixXd gaussian_dlm_obs_rng(const Eigen::MatrixXd &F,
int r = F.cols(); // number of variables
int n = G.rows(); // number of states
andrjohns marked this conversation as resolved.

check_size_match(function, "rows of F", F.rows(), "rows of G", G.rows());
check_size_match(function, "rows of F", F.rows(), "rows of G", n);
check_finite(function, "F", F);
check_square(function, "G", G);
check_finite(function, "G", G);
check_size_match(function, "rows of V", V.rows(), "cols of F", F.cols());
check_size_match(function, "rows of V", V.rows(), "cols of F", r);
check_finite(function, "V", V);
check_positive(function, "V rows", V.rows());
check_symmetric(function, "V", V);
check_size_match(function, "rows of W", W.rows(), "rows of G", G.rows());
check_size_match(function, "rows of W", W.rows(), "rows of G", n);
check_finite(function, "W", W);
check_positive(function, "W rows", W.rows());
check_symmetric(function, "W", W);
check_size_match(function, "rows of W", W.rows(), "rows of G", G.rows());
check_size_match(function, "size of m0", m0.size(), "rows of G", G.rows());
check_size_match(function, "rows of W", W.rows(), "rows of G", n);
check_size_match(function, "size of m0", m0.size(), "rows of G", n);
check_finite(function, "m0", m0);
check_size_match(function, "rows of C0", C0.rows(), "rows of G", G.rows());
check_size_match(function, "rows of C0", C0.rows(), "rows of G", n);
check_finite(function, "C0", C0);
check_positive(function, "C0 rows", C0.rows());
check_symmetric(function, "C0", C0);
2 changes: 0 additions & 2 deletions stan/math/prim/prob/multi_student_t_lpdf.hpp
@@ -53,8 +53,6 @@ return_type_t<T_y, T_dof, T_loc, T_scale> multi_student_t_lpdf(
check_positive(function, "Degrees of freedom parameter", nu);
check_finite(function, "Degrees of freedom parameter", nu);

size_t num_y = size_mvt(y);
size_t num_mu = size_mvt(mu);
check_consistent_sizes_mvt(function, "y", y, "mu", mu);

vector_seq_view<T_y> y_vec(y);
1 change: 0 additions & 1 deletion stan/math/prim/prob/skew_double_exponential_lccdf.hpp
@@ -71,7 +71,6 @@ return_type_t<T_y, T_loc, T_scale, T_skewness> skew_double_exponential_lccdf(
scalar_seq_view<std::decay_t<decltype(sigma_val)>> sigma_vec(sigma_val);
scalar_seq_view<std::decay_t<decltype(tau_val)>> tau_vec(tau_val);

const int size_sigma = stan::math::size(sigma);
const auto N = max_size(y, mu, sigma, tau);
auto inv_sigma_val = to_ref(inv(sigma_val));
scalar_seq_view<decltype(inv_sigma_val)> inv_sigma(inv_sigma_val);
2 changes: 1 addition & 1 deletion stan/math/rev/core/grad.hpp
@@ -23,7 +23,7 @@ namespace math {
* <p>This function does not recover any memory from the computation.
*
*/
static void grad() {
static inline void grad() {
Member:

This and the similar change in set_zero_all_adjoints_nested seem like they're different from the rest of the changes here - why do these functions become inline?

Collaborator Author:

Static functions that aren't used in a given translation unit give an 'unused-function' warning, but this is suppressed if the static function is declared inline: https://sourceware.org/legacy-ml/gdb/2015-02/msg00064.html

These are small enough functions that I think inlining is fine imo
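
For reference, a minimal sketch of the warning being silenced here, assuming GCC or Clang with -Wall; the header and helper names below are hypothetical and not part of this PR:

```cpp
// unused_demo.hpp -- hypothetical header included from several translation units
#ifndef UNUSED_DEMO_HPP
#define UNUSED_DEMO_HPP

// A translation unit that includes this header but never calls helper()
// gets "'void helper()' defined but not used [-Wunused-function]".
static void helper() {}

// Marking the definition inline suppresses that warning while keeping
// internal linkage, which is the change applied to grad() and
// set_zero_all_adjoints_nested() in this PR.
static inline void helper_inline() {}

#endif
```

Either way the function has internal linkage; only the diagnostic behaviour changes.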

size_t end = ChainableStack::instance_->var_stack_.size();
size_t beginning = empty_nested() ? 0 : end - nested_size();
for (size_t i = end; i-- > beginning;) {
2 changes: 1 addition & 1 deletion stan/math/rev/core/operator_subtraction.hpp
@@ -101,7 +101,7 @@ template <typename Arith, require_arithmetic_t<Arith>* = nullptr>
inline var operator-(Arith a, const var& b) {
return make_callback_vari(
a - b.vi_->val_,
[bvi = b.vi_, a](const auto& vi) mutable { bvi->adj_ -= vi.adj_; });
[bvi = b.vi_](const auto& vi) mutable { bvi->adj_ -= vi.adj_; });
}

/**
8 changes: 4 additions & 4 deletions stan/math/rev/core/precomputed_gradients.hpp
@@ -80,10 +80,10 @@ class precomputed_gradients_vari_template : public vari {
size_(size),
varis_(varis),
gradients_(gradients),
container_operands_(index_apply<N_containers>([&, this](auto... Is) {
container_operands_(index_apply<N_containers>([&](auto... Is) {
return std::make_tuple(to_arena(std::get<Is>(container_operands))...);
})),
container_gradients_(index_apply<N_containers>([&, this](auto... Is) {
container_gradients_(index_apply<N_containers>([&](auto... Is) {
return std::make_tuple(
to_arena(std::get<Is>(container_gradients))...);
})) {
@@ -123,10 +123,10 @@ class precomputed_gradients_vari_template : public vari {
vars.size())),
gradients_(ChainableStack::instance_->memalloc_.alloc_array<double>(
vars.size())),
container_operands_(index_apply<N_containers>([&, this](auto... Is) {
container_operands_(index_apply<N_containers>([&](auto... Is) {
return std::make_tuple(to_arena(std::get<Is>(container_operands))...);
})),
container_gradients_(index_apply<N_containers>([&, this](auto... Is) {
container_gradients_(index_apply<N_containers>([&](auto... Is) {
return std::make_tuple(
to_arena(std::get<Is>(container_gradients))...);
})) {
2 changes: 1 addition & 1 deletion stan/math/rev/core/set_zero_all_adjoints_nested.hpp
@@ -17,7 +17,7 @@ namespace math {
* It is preferred to use the <code>nested_rev_autodiff</code> class for
* nested autodiff class as it handles recovery of memory automatically.
*/
static void set_zero_all_adjoints_nested() {
static inline void set_zero_all_adjoints_nested() {
if (empty_nested()) {
throw std::logic_error(
"empty_nested() must be false before calling"
4 changes: 2 additions & 2 deletions stan/math/rev/core/vari.hpp
@@ -848,7 +848,7 @@ class vari_value<T, require_eigen_sparse_base_t<T>> : public vari_base,
*/
template <typename S, require_convertible_t<S&, T>* = nullptr>
explicit vari_value(S&& x)
: adj_(x), val_(std::forward<S>(x)), chainable_alloc() {
: chainable_alloc(), adj_(x), val_(std::forward<S>(x)) {
this->set_zero_adjoint();
ChainableStack::instance_->var_stack_.push_back(this);
}
@@ -871,7 +871,7 @@
*/
template <typename S, require_convertible_t<S&, T>* = nullptr>
vari_value(S&& x, bool stacked)
: adj_(x), val_(std::forward<S>(x)), chainable_alloc() {
: chainable_alloc(), adj_(x), val_(std::forward<S>(x)) {
this->set_zero_adjoint();
if (stacked) {
ChainableStack::instance_->var_stack_.push_back(this);
25 changes: 11 additions & 14 deletions stan/math/rev/fun/beta.hpp
@@ -66,10 +66,9 @@ inline var beta(const var& a, const var& b) {
*/
inline var beta(const var& a, double b) {
auto digamma_ab = digamma(a.val()) - digamma(a.val() + b);
return make_callback_var(beta(a.val(), b),
[a, b, digamma_ab](auto& vi) mutable {
a.adj() += vi.adj() * digamma_ab * vi.val();
});
return make_callback_var(beta(a.val(), b), [a, digamma_ab](auto& vi) mutable {
a.adj() += vi.adj() * digamma_ab * vi.val();
});
}

/**
@@ -180,10 +179,9 @@ inline auto beta(const Scalar& a, const VarMat& b) {
auto digamma_ab = to_arena((digamma(arena_b.val()).array()
- digamma(arena_a + arena_b.val().array()))
* beta_val.array());
return make_callback_var(
beta_val, [arena_a, arena_b, digamma_ab](auto& vi) mutable {
arena_b.adj().array() += vi.adj().array() * digamma_ab.array();
});
return make_callback_var(beta_val, [arena_b, digamma_ab](auto& vi) mutable {
arena_b.adj().array() += vi.adj().array() * digamma_ab.array();
});
}
}

@@ -210,12 +208,11 @@ inline auto beta(const VarMat& a, const Scalar& b) {
double arena_b = value_of(b);
auto digamma_ab = to_arena(digamma(arena_a.val()).array()
- digamma(arena_a.val().array() + arena_b));
return make_callback_var(beta(arena_a.val(), arena_b),
[arena_a, arena_b, digamma_ab](auto& vi) mutable {
arena_a.adj().array() += vi.adj().array()
* digamma_ab
* vi.val().array();
});
return make_callback_var(
beta(arena_a.val(), arena_b), [arena_a, digamma_ab](auto& vi) mutable {
arena_a.adj().array()
+= vi.adj().array() * digamma_ab * vi.val().array();
});
} else if (!is_constant<Scalar>::value) {
arena_t<promote_scalar_t<double, VarMat>> arena_a = value_of(a);
var arena_b = b;