Automatic merge of math/develop #1

Merged 60 commits on Jul 12, 2023

Commits
7a6efab
First draft of promotion
mborland Apr 24, 2023
5dcf5a9
Test beta dist for F64, F32, and F16
mborland Apr 24, 2023
4c753c2
Reverse 128bits and prefer F64 over long double when it's 64bits
mborland Apr 25, 2023
2f0b0ed
Add more testing
mborland Apr 25, 2023
bc79263
Add compile tests for F32 and F64
mborland May 2, 2023
2b1d5b6
Cast assignment of promoted type
mborland May 2, 2023
be37704
Explicit casting of promoted types
mborland May 2, 2023
5d044d5
Remove cast for output iterator
mborland May 9, 2023
6a3a89b
Add testing to quadrature
mborland May 11, 2023
3c2a04e
Add gcc-13 to drone config
mborland May 11, 2023
ac4605b
Fix MinGW-64 C++23 compile failures.
jzmaddock May 13, 2023
b3f361c
Fix the promote_args<float32_t, float32_t> case.
jzmaddock May 14, 2023
58264c0
Add test case for promote_args.
jzmaddock May 14, 2023
c5b4d28
Suppress lots of warnings for std::float32_t.
jzmaddock May 15, 2023
ac8765b
Update drone to use Ubuntu 23.04 for g++13
mborland May 15, 2023
fa8a83f
Enable testing of new floats
mborland May 15, 2023
d217813
Fix stack overflow on test_finite_singular_boundary with _Float64
mborland May 16, 2023
8433636
Fix remaining warnings from float32.cpp.
jzmaddock May 16, 2023
6104363
Add float128 testing
mborland May 16, 2023
5045047
Use charconv instead of streaming operator for std::float128_t
mborland May 16, 2023
0c1920b
Move macro definitions to config
mborland May 16, 2023
a0360d8
Use charconv in convert_from_string for arithmetic types
mborland May 16, 2023
edf6597
inline template specialization
mborland May 16, 2023
55eaf05
Add additional conversion function for C++23 types
mborland May 16, 2023
00facdc
Fix cardinal cubic b spline for std::float32_t
mborland May 17, 2023
f86ea8e
Begin adding interpolator tests
mborland May 17, 2023
41be4fb
Improve to/from_chars configuration.
jzmaddock May 21, 2023
51d7011
Fix formatting and missing header
mborland May 22, 2023
c4c8c69
Add wavelet transform test with additional casts
mborland May 22, 2023
aef5713
Add rsqrt test and update type traits for control path
mborland May 22, 2023
ca31dcf
Add AGM test
mborland May 22, 2023
faaf475
Fix stack overflow in cohen acceleration from GCC bug
mborland May 22, 2023
ebcc430
Add casting to whittaker shannon for conversion rank errors
mborland May 22, 2023
d1bd7b6
Add casting and tests
mborland May 22, 2023
823fcd4
Add test and cast to finite differences
mborland May 30, 2023
c249bfe
Fix -Wreturn-type
mborland May 30, 2023
6d37555
Collected autodiff fixes
mborland May 30, 2023
ae56ab2
Fix conversion errors in digamma
mborland May 31, 2023
7a66a98
Fix for autodiff 3
mborland May 31, 2023
b66264f
Collected special functions warning fixes
mborland May 31, 2023
a7f98db
Add casts to all two argument cmath functions to work around GCC bug
mborland May 31, 2023
ff1a265
Add to test_constants
mborland May 31, 2023
a6bc6c7
Add <stdfloat> constructor to real_concept
mborland Jun 5, 2023
bc9d4b1
Fix failures and warnings in test_autodiff_6
mborland Jun 5, 2023
677f3b6
Collected fixes for test_autodiff_8
mborland Jun 5, 2023
f51e1b9
Disable numeric_limits specialization for GCC-14
mborland Jun 7, 2023
26de5b9
Merge pull request #993 from boostorg/float128
mborland Jun 8, 2023
7887d43
Numerical evaluation of Fourier transform of Daubechies scaling funct…
NAThompson Jun 13, 2023
7ce0570
Small github actions updates (#995)
sdarwin Jun 15, 2023
7a0e8e0
More casting of pow
mborland Jun 16, 2023
37df734
Add integer exponent function to chebyshev detail
mborland Jun 16, 2023
0117f4a
Fix multiprecision concept failure
mborland Jun 16, 2023
ef423e8
Restore drone config
mborland Jun 16, 2023
0852c16
median_absolute_deviation bug fix for non-zero center (#997)
rasmushenningsson Jun 19, 2023
e62a284
Fix multiprecision failures
mborland Jun 19, 2023
b57749d
Fix casting errors
mborland Jun 20, 2023
0a014fd
Fix for expression template use in chebyshev.hpp.
jzmaddock Jun 21, 2023
481ce0d
Update libraries.json to only include 1 library key (#994)
mborland Jun 26, 2023
851b357
Fix pessimization on unaffected platforms
mborland Jun 27, 2023
8bb0d16
Merge pull request #978 from boostorg/cpp23-float
mborland Jun 28, 2023
5 changes: 5 additions & 0 deletions .drone.star
@@ -16,13 +16,15 @@ windowsglobalimage="cppalliance/dronevs2019"
def main(ctx):

things_to_test = [ "special_fun", "distribution_tests", "mp", "misc", "interpolators", "quadrature", "autodiff", "long-running-tests", "float128_tests" ]
gcc13_things_to_test = [ "special_fun", "distribution_tests", "mp", "misc", "interpolators", "quadrature", "autodiff", "long-running-tests", "float128_tests", "new_floats" ]
sanitizer_test = [ "special_fun", "distribution_tests", "misc", "interpolators", "quadrature", "float128_tests" ]
gnu_5_stds = [ "gnu++14", "c++14" ]
gnu_6_stds = [ "gnu++14", "c++14", "gnu++17", "c++17" ]
clang_6_stds = [ "c++14", "c++17" ]
gnu_9_stds = [ "gnu++14", "c++14", "gnu++17", "c++17", "gnu++2a", "c++2a" ]
clang_10_stds = [ "c++14", "c++17", "c++2a" ]
gnu_non_native = [ "gnu++17" ]
gcc13_stds = [ "c++23" ]

result = []

@@ -59,6 +61,9 @@ def main(ctx):
result.append(linux_cxx("Ubuntu g++ ARM64" + cxx + " " + suite, "g++", packages="g++", buildtype="boost", image="cppalliance/droneubuntu2204:multiarch", arch="arm64", environment={'TOOLSET': 'gcc', 'COMPILER': 'g++', 'CXXSTD': cxx, 'TEST_SUITE': suite, }, globalenv=globalenv))
for cxx in gnu_non_native:
result.append(osx_cxx("M1 Clang " + cxx + " " + suite, "clang++", buildscript="drone", buildtype="boost", xcode_version="14.1", environment={'TOOLSET': 'clang', 'CXXSTD': cxx, 'TEST_SUITE': suite, 'DEFINE': 'BOOST_MATH_NO_REAL_CONCEPT_TESTS,BOOST_MATH_NO_LONG_DOUBLE_MATH_FUNCTIONS,BOOST_MATH_MULTI_ARCH_CI_RUN', }, globalenv=globalenv))
for suite in gcc13_things_to_test:
for cxx in gcc13_stds:
result.append(linux_cxx("Ubuntu g++-13 " + cxx + " " + suite, "g++-13", packages="g++-13", buildtype="boost", image="cppalliance/droneubuntu2304:1", environment={'TOOLSET': 'gcc', 'COMPILER': 'g++-13', 'CXXSTD': cxx, 'TEST_SUITE': suite, }, globalenv=globalenv))

return result

11 changes: 6 additions & 5 deletions .github/workflows/ci.yml
@@ -9,6 +9,7 @@ on:
branches:
- master
- develop
- feature/**
pull_request:
release:
types: [published, created, edited]
@@ -47,7 +48,7 @@ jobs:
if: steps.retry1.outcome=='failure'
run: sudo apt-add-repository -y "ppa:ubuntu-toolchain-r/test"
- name: Install packages
run: sudo apt install g++-12 clang-14 libgmp-dev libmpfr-dev libfftw3-dev
run: sudo apt-get install -y g++-12 clang-14 libgmp-dev libmpfr-dev libfftw3-dev
- name: Checkout main boost
run: git clone -b develop --depth 1 https://github.com/boostorg/boost.git ../boost-root
- name: Update tools/boostdep
@@ -105,7 +106,7 @@ jobs:
if: steps.retry1.outcome=='failure'
run: sudo apt-add-repository -y "ppa:ubuntu-toolchain-r/test"
- name: Install packages
run: sudo apt install g++-9 g++-11 clang-9 clang-10 libgmp-dev libmpfr-dev libfftw3-dev
run: sudo apt-get install -y g++-9 g++-11 clang-9 clang-10 libgmp-dev libmpfr-dev libfftw3-dev
- name: Checkout main boost
run: git clone -b develop --depth 1 https://github.com/boostorg/boost.git ../boost-root
- name: Update tools/boostdep
@@ -364,7 +365,7 @@ jobs:
if: steps.retry1.outcome=='failure'
run: sudo apt-add-repository -y "ppa:ubuntu-toolchain-r/test"
- name: Install packages
run: sudo apt install g++-10 libgmp-dev libmpfr-dev libfftw3-dev
run: sudo apt-get install -y g++-10 libgmp-dev libmpfr-dev libfftw3-dev
- name: Checkout main boost
run: git clone -b develop --depth 1 https://github.com/boostorg/boost.git ../boost-root
- name: Update tools/boostdep
@@ -404,7 +405,7 @@ jobs:
if: steps.retry1.outcome=='failure'
run: sudo apt-add-repository -y "ppa:ubuntu-toolchain-r/test"
- name: Install packages
run: sudo apt install clang-10 libgmp-dev libmpfr-dev libfftw3-dev
run: sudo apt-get install -y clang-10 libgmp-dev libmpfr-dev libfftw3-dev libtbb-dev
- name: Checkout main boost
run: git clone -b develop --depth 1 https://github.com/boostorg/boost.git ../boost-root
- name: Update tools/boostdep
@@ -448,7 +449,7 @@ jobs:
if: steps.retry1.outcome=='failure'
run: sudo apt-add-repository -y "ppa:ubuntu-toolchain-r/test"
- name: Install packages
run: sudo apt install g++-10 libgmp-dev libmpfr-dev libfftw3-dev
run: sudo apt-get install -y g++-10 libgmp-dev libmpfr-dev libfftw3-dev
- name: Checkout main boost
run: git clone -b develop --depth 1 https://github.com/boostorg/boost.git ../boost-root
- name: Update tools/boostdep
Binary file added doc/graphs/fourier_transform_daubechies.png
42 changes: 42 additions & 0 deletions doc/sf/daubechies.qbk
@@ -127,6 +127,48 @@ The 2 vanishing moment scaling function.
[$../graphs/daubechies_8_scaling.svg]
The 8 vanishing moment scaling function.

Boost.Math also provides numerical evaluation of the Fourier transform of these functions.
This is useful in sparse recovery problems where the measurements are taken in the Fourier basis.
The usage is exhibited below:

#include <boost/math/special_functions/fourier_transform_daubechies_scaling.hpp>
using boost::math::fourier_transform_daubechies_scaling;
// Evaluate the Fourier transform of the 4-vanishing moment Daubechies scaling function at ω=1.8:
std::complex<float> hat_phi = fourier_transform_daubechies_scaling<float, 4>(1.8f);

The Fourier transform convention is unitary, with the sign of the imaginary unit as given in Daubechies' ['Ten Lectures on Wavelets].
In particular, this means that `fourier_transform_daubechies_scaling<float, p>(0.0)` returns 1/sqrt(2π).

The implementation evaluates an infinite product of trigonometric polynomials, obtained by recursively applying the identity 𝓕[φ](ω) = m(ω/2)𝓕[φ](ω/2).
This is neither particularly fast nor accurate, but there appears to be no literature on this extremely useful topic, and hence the naive method must suffice.
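
A rough sketch of that naive approach is shown below. This is illustrative only and not the library's internal code: the filter taps are the standard 2-vanishing-moment Daubechies coefficients quoted to a few digits, the function names are made up for this example, and the truncation depth of 40 factors is an arbitrary choice. Anchoring the truncated product at 𝓕[φ](0) = 1/sqrt(2π) reproduces the normalization noted above.

    #include <array>
    #include <cmath>
    #include <complex>
    #include <cstddef>
    #include <iostream>

    // Scaling filter h for p = 2 vanishing moments, normalized so the taps sum to sqrt(2).
    constexpr std::array<double, 4> h{0.4829629131445341, 0.8365163037378079,
                                      0.2241438680420134, -0.1294095225512604};

    // Low-pass trigonometric polynomial m(xi) = 2^{-1/2} * sum_k h_k exp(-i k xi), so m(0) = 1.
    std::complex<double> m(double xi) {
        std::complex<double> s{0.0, 0.0};
        for (std::size_t k = 0; k < h.size(); ++k) {
            s += h[k] * std::polar(1.0, -static_cast<double>(k) * xi);
        }
        return s / std::sqrt(2.0);
    }

    // F[phi](omega) ~= (1/sqrt(2*pi)) * prod_{j=1}^{J} m(omega / 2^j);
    // the truncated product converges because omega/2^j -> 0 and m(0) = 1.
    std::complex<double> naive_ft_scaling(double omega, int J = 40) {
        std::complex<double> prod{1.0, 0.0};
        for (int j = 1; j <= J; ++j) {
            prod *= m(std::ldexp(omega, -j));
        }
        return prod / std::sqrt(2.0 * std::acos(-1.0));
    }

    int main() {
        std::cout << naive_ft_scaling(0.0) << '\n'; // ~(0.398942, 0), i.e. 1/sqrt(2*pi)
        std::cout << naive_ft_scaling(1.8) << '\n';
    }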

[$../graphs/fourier_transform_daubechies.png]

A benchmark can be found in `reporting/performance/fourier_transform_daubechies_performance.cpp`; the results on a ~2021 M1 MacBook Pro are presented below:


Run on (10 X 24.1212 MHz CPU s)
CPU Caches:
L1 Data 64 KiB (x10)
L1 Instruction 128 KiB (x10)
L2 Unified 4096 KiB (x5)
Load Average: 1.33, 1.52, 1.62
-----------------------------------------------------------
Benchmark Time
-----------------------------------------------------------
FourierTransformDaubechiesScaling<double, 1> 70.3 ns
FourierTransformDaubechiesScaling<double, 2> 330 ns
FourierTransformDaubechiesScaling<double, 3> 335 ns
FourierTransformDaubechiesScaling<double, 4> 364 ns
FourierTransformDaubechiesScaling<double, 5> 386 ns
FourierTransformDaubechiesScaling<double, 6> 436 ns
FourierTransformDaubechiesScaling<double, 7> 447 ns
FourierTransformDaubechiesScaling<double, 8> 473 ns
FourierTransformDaubechiesScaling<double, 9> 503 ns
FourierTransformDaubechiesScaling<double, 10> 554 ns

Due to the low accuracy of this method, `float` arguments are promoted to `double` internally, and hence a `float` evaluation takes just as long as a `double` evaluation.
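
A minimal sketch of that arg-promotion pattern follows; this is an assumed illustration of the general technique, not the library's actual implementation, and both function names are hypothetical. The `float` entry point widens its argument, evaluates in `double`, and narrows the result, so both overloads do essentially the same work.

    #include <complex>
    #include <iostream>

    // Stand-in for the real double-precision evaluation.
    std::complex<double> ft_scaling_core(double omega) {
        return {omega, -omega};
    }

    // The float overload promotes the argument, computes in double,
    // and narrows the result, so it costs about the same as the double call.
    std::complex<float> ft_scaling(float omega) {
        const std::complex<double> z = ft_scaling_core(static_cast<double>(omega));
        return {static_cast<float>(z.real()), static_cast<float>(z.imag())};
    }

    int main() {
        std::cout << ft_scaling(1.8f) << '\n';
    }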

[heading References]

* Daubechies, Ingrid. ['Ten Lectures on Wavelets.] Vol. 61. SIAM, 1992.
43 changes: 43 additions & 0 deletions example/calculate_fourier_transform_daubechies_constants.cpp
@@ -0,0 +1,43 @@
// (C) Copyright Nick Thompson 2023.
// Use, modification and distribution are subject to the
// Boost Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

#include <utility>
#include <boost/math/filters/daubechies.hpp>
#include <boost/math/tools/polynomial.hpp>
#include <boost/multiprecision/cpp_bin_float.hpp>
#include <boost/math/constants/constants.hpp>

using std::pow;
using boost::multiprecision::cpp_bin_float_100;
using boost::math::filters::daubechies_scaling_filter;
using boost::math::tools::polynomial;
using boost::math::constants::half;
using boost::math::constants::root_two;

template<typename Real, size_t N>
std::vector<Real> get_constants() {
auto h = daubechies_scaling_filter<cpp_bin_float_100, N>();
auto p = polynomial<cpp_bin_float_100>(h.begin(), h.end());

auto q = polynomial({half<cpp_bin_float_100>(), half<cpp_bin_float_100>()});
q = pow(q, N);
auto l = p/q;
return l.data();
}

template<typename Real>
void print_constants(std::vector<Real> const & l) {
std::cout << std::setprecision(std::numeric_limits<Real>::digits10 -10);
std::cout << "return std::array<Real, " << l.size() << ">{";
for (size_t i = 0; i < l.size() - 1; ++i) {
std::cout << "BOOST_MATH_BIG_CONSTANT(Real, std::numeric_limits<Real>::digits, " << l[i]/root_two<Real>() << "), ";
}
std::cout << "BOOST_MATH_BIG_CONSTANT(Real, std::numeric_limits<Real>::digits, " << l.back()/root_two<Real>() << ")};\n";
}

int main() {
auto constants = get_constants<cpp_bin_float_100, 1>();
print_constants(constants);
}
60 changes: 60 additions & 0 deletions example/fourier_transform_daubechies_ulp_plot.cpp
@@ -0,0 +1,60 @@
// boost-no-inspect
// (C) Copyright Nick Thompson 2023.
// Use, modification and distribution are subject to the
// Boost Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

#include <boost/math/special_functions/fourier_transform_daubechies.hpp>
#include <boost/math/tools/ulps_plot.hpp>

using boost::math::fourier_transform_daubechies_scaling;
using boost::math::tools::ulps_plot;

template<int p>
void real_part() {
auto phi_real_hi_acc = [](double omega) {
auto z = fourier_transform_daubechies_scaling<double, p>(omega);
return z.real();
};

auto phi_real_lo_acc = [](float omega) {
auto z = fourier_transform_daubechies_scaling<float, p>(omega);
return z.real();
};
auto plot = ulps_plot<decltype(phi_real_hi_acc), double, float>(phi_real_hi_acc, float(0.0), float(100.0), 20000);
plot.ulp_envelope(false);
plot.add_fn(phi_real_lo_acc);
plot.clip(100);
plot.title("Accuracy of 𝔑(𝓕[𝜙](ω)) with " + std::to_string(p) + " vanishing moments.");
plot.write("real_ft_daub_scaling_" + std::to_string(p) + ".svg");

}

template<int p>
void imaginary_part() {
auto phi_imag_hi_acc = [](double omega) {
auto z = fourier_transform_daubechies_scaling<double, p>(omega);
return z.imag();
};

auto phi_imag_lo_acc = [](float omega) {
auto z = fourier_transform_daubechies_scaling<float, p>(omega);
return z.imag();
};
auto plot = ulps_plot<decltype(phi_imag_hi_acc), double, float>(phi_imag_hi_acc, float(0.0), float(100.0), 20000);
plot.ulp_envelope(false);
plot.add_fn(phi_imag_lo_acc);
plot.clip(100);
plot.title("Accuracy of 𝕴(𝓕[𝜙](ω)) with " + std::to_string(p) + " vanishing moments.");
plot.write("imag_ft_daub_scaling_" + std::to_string(p) + ".svg");

}


int main() {
real_part<3>();
imaginary_part<3>();
real_part<6>();
imaginary_part<6>();
return 0;
}
16 changes: 16 additions & 0 deletions include/boost/math/concepts/real_concept.hpp
@@ -45,6 +45,10 @@
# include <cstdio>
#endif

#if __has_include(<stdfloat>)
# include <stdfloat>
#endif

namespace boost{ namespace math{

namespace concepts
@@ -79,6 +83,12 @@ class real_concept
#ifdef BOOST_MATH_USE_FLOAT128
real_concept(BOOST_MATH_FLOAT128_TYPE c) : m_value(c){}
#endif
#ifdef __STDCPP_FLOAT32_T__
real_concept(std::float32_t c) : m_value(static_cast<real_concept_base_type>(c)){}
#endif
#ifdef __STDCPP_FLOAT64_T__
real_concept(std::float64_t c) : m_value(static_cast<real_concept_base_type>(c)){}
#endif

// Assignment:
real_concept& operator=(char c) { m_value = c; return *this; }
@@ -96,6 +106,12 @@ class real_concept
real_concept& operator=(float c) { m_value = c; return *this; }
real_concept& operator=(double c) { m_value = c; return *this; }
real_concept& operator=(long double c) { m_value = c; return *this; }
#ifdef __STDCPP_FLOAT32_T__
real_concept& operator=(std::float32_t c) { m_value = c; return *this; }
#endif
#ifdef __STDCPP_FLOAT64_T__
real_concept& operator=(std::float64_t c) { m_value = c; return *this; }
#endif

// Access:
real_concept_base_type value()const{ return m_value; }
2 changes: 1 addition & 1 deletion include/boost/math/cstdfloat/cstdfloat_limits.hpp
@@ -24,7 +24,7 @@
#pragma GCC system_header
#endif

#if defined(BOOST_CSTDFLOAT_HAS_INTERNAL_FLOAT128_T) && defined(BOOST_MATH_USE_FLOAT128) && !defined(BOOST_CSTDFLOAT_NO_LIBQUADMATH_SUPPORT)
#if defined(BOOST_CSTDFLOAT_HAS_INTERNAL_FLOAT128_T) && defined(BOOST_MATH_USE_FLOAT128) && !defined(BOOST_CSTDFLOAT_NO_LIBQUADMATH_SUPPORT) && (!defined(__GNUC__) || (defined(__GNUC__) && __GNUC__ < 14))

#include <limits>
#include <boost/math/tools/nothrow.hpp>
2 changes: 1 addition & 1 deletion include/boost/math/differentiation/autodiff.hpp
@@ -1491,7 +1491,7 @@ fvar<RealType, Order> sqrt(fvar<RealType, Order> const& cr) {
BOOST_IF_CONSTEXPR (order == 0)
return fvar<RealType, Order>(*derivatives);
else {
root_type numerator = 0.5;
root_type numerator = root_type(0.5);
root_type powers = 1;
#ifndef BOOST_NO_CXX17_IF_CONSTEXPR
derivatives[1] = numerator / *derivatives;
4 changes: 2 additions & 2 deletions include/boost/math/differentiation/finite_difference.hpp
@@ -153,7 +153,7 @@ namespace detail {

const Real eps = (numeric_limits<Real>::epsilon)();
// Error bound ~eps^4/5
Real h = pow(11.25*eps, static_cast<Real>(1) / static_cast<Real>(5));
Real h = pow(Real(11.25)*eps, static_cast<Real>(1) / static_cast<Real>(5));
h = detail::make_xph_representable(x, h);
Real ymth = f(x - 2 * h);
Real yth = f(x + 2 * h);
@@ -222,7 +222,7 @@ namespace detail {
// Mathematica code to get the error:
// Series[(f[x+h]-f[x-h])*(4/5) + (1/5)*(f[x-2*h] - f[x+2*h]) + (4/105)*(f[x+3*h] - f[x-3*h]) + (1/280)*(f[x-4*h] - f[x+4*h]), {h, 0, 9}]
// If we used Kahan summation, we could get the max error down to h^8|f^(9)(x)|/630 + |f(x)|eps/h.
Real h = pow(551.25*eps, static_cast<Real>(1) / static_cast<Real>(9));
Real h = pow(Real(551.25)*eps, static_cast<Real>(1) / static_cast<Real>(9));
h = detail::make_xph_representable(x, h);

Real yh = f(x + h);
1 change: 1 addition & 0 deletions include/boost/math/differentiation/lanczos_smoothing.hpp
@@ -10,6 +10,7 @@
#include <limits> // to nan initialize
#include <vector>
#include <string>
#include <cstdint>
#include <stdexcept>
#include <type_traits>
#include <boost/math/tools/assert.hpp>