![]() | Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved MIT license. (c) Kyle T. Mandli |
from __future__ import print_function
%matplotlib inline
import warnings
import matplotlib.pyplot as plt
import numpy

A more practical example: The Error function¶
The error function:
is related to the cumulative probability distribution of a Gaussian. It is useful in probability and PDEs, but has no closed-form solution.
from scipy.special import erf
# Plot the Gaussian integrand alongside its antiderivative erf(x)
x = numpy.linspace(0.0, 3.0, 100)
fig = plt.figure(figsize=(8, 6))
axes = fig.add_subplot(1, 1, 1)
# Integrand: the derivative of erf, (2 / sqrt(pi)) * exp(-x^2)
axes.plot(
x, 2 / numpy.sqrt(numpy.pi) * numpy.exp(-(x**2)), label="Gaussian", linewidth=2
)
axes.plot(x, erf(x), label="erf(x)", linewidth=2)
axes.grid()
axes.set_xlabel("x")
axes.legend(loc="best")
plt.show()

Solution to even simple non-linear ordinary differential equations
with initial condition
Can always be solved by “reduction to quadrature” i.e. the solution is given implicitly by
which are two, definite integrals that may, or may not have closed forms (but are essentially “the area under the curve”)
Solution to ordinary differential equations
Defining then leads to
which can be solved by integration
Solving partial differential equations such as “Poisson’s equation”
using the finite element method, will use quadrature to reduce the PDE to a linear algebraic problem
![]() |
Solution of poisson’s equation on an irregular domain using TerraFERMA
Basics of Quadrature¶
We want to approximate an integral with some approximation such that
where the are the quadrature points or nodes and the are the weights. Usually a particular quadrature rule specifies the points resulting in a particular set of weights .
Convergence requires that
Riemann Sums¶
Given and a partition of the interval with and and we define the Riemann integral as
This is a general definition and leads to a number of quadrature approaches that depend on how we pick .
Example: Integrate using midpoint rule¶
Calculate and illustrate the midpoint rule. Note that we are computing the cumulative integral here:
# Note that this calculates the cumulative integral from 0.0

# Integrand and its exact antiderivative (cumulative integral from 0)
def f(x):
    return numpy.sin(x)


def I(x):
    return 1.0 - numpy.cos(x)


# Fine grid for plotting, coarse partition for the quadrature
x = numpy.linspace(0.0, 2.0 * numpy.pi, 100)
num_partitions = 10
x_hat = numpy.linspace(0.0, 2.0 * numpy.pi, num_partitions + 1)
# Panel midpoints, used as the quadrature evaluation points
x_star = (x_hat[:-1] + x_hat[1:]) / 2.0
delta_x = x_hat[1] - x_hat[0]

fig = plt.figure(figsize=(8, 6))
fig.subplots_adjust(hspace=0.5)

# Top panel: the integrand with the midpoint-rule rectangles overlaid
axes = fig.add_subplot(2, 1, 1)
axes.plot(x, numpy.zeros(x.shape), "k--")
axes.plot(x, f(x), "b")
for i in range(num_partitions):
    # Rectangle of height f(midpoint) over panel i
    axes.plot([x_hat[i], x_hat[i]], [0.0, f(x_star[i])], "k--")
    axes.plot([x_hat[i + 1], x_hat[i + 1]], [0.0, f(x_star[i])], "k--")
    axes.plot([x_hat[i], x_hat[i + 1]], [f(x_star[i]), f(x_star[i])], "k--")
axes.set_xlabel("x")
axes.set_ylabel("$f(x)$")
axes.set_title("Partition and $f(x)$")
axes.set_xlim((0.0, 2.0 * numpy.pi))
axes.set_ylim((-1.1, 1.1))

# Running (cumulative) midpoint-rule approximation of the integral
I_hat = numpy.zeros(num_partitions)
I_hat[0] = f(x_star[0]) * delta_x
for i in range(1, num_partitions):
    I_hat[i] = I_hat[i - 1] + f(x_star[i]) * delta_x

# Bottom panel: exact cumulative integral vs. the approximation
axes = fig.add_subplot(2, 1, 2)
axes.plot(x, I(x), "r")
# Offset due to indexing above
axes.plot(x_star + delta_x / 2.0, I_hat, "ko")
err = numpy.abs(I(x_hat[1:]) - I_hat)
axes.set_xlabel("x")
axes.set_ylabel("$f(x)$")
axes.set_title("Integral and Approximated Integral, Max Err = {}".format(err.max()))
axes.set_xlim((0.0, 2.0 * numpy.pi))
axes.set_ylim((-0.1, 2.5))
axes.grid()
plt.show()

General Newton-Cotes Quadrature¶
Using equally spaced points on a single interval , evaluate at these points and exactly integrate the interpolating polynomial:
# Illustrate the 1-, 2-, and 3-point Newton-Cotes rules on a single interval
func = lambda x: 1 - x**2 + numpy.sin(5 * x)
x0 = 0.0
x1 = 0.5
x_buf = 0.2
x = numpy.linspace(x0 - x_buf, x1 + x_buf, 100)
x_interval = numpy.linspace(x0, x1, 100)
# Single sample at the interval midpoint
x_samp = numpy.array([0.5 * (x0 + x1)])
f_samp = func(x_samp)
fig = plt.figure(figsize=(15, 5))
# --- Panel 1: midpoint rule (constant interpolant) ---
axes = fig.add_subplot(1, 3, 1)
axes.plot(x, func(x), linewidth=3)
axes.plot(x_samp, f_samp, "ro")
axes.plot(x_interval, f_samp * numpy.ones(x_interval.shape), "k--")
axes.plot(x0 * numpy.ones(2), [0.0, f_samp[0]], "k--")
axes.plot(x1 * numpy.ones(2), [0.0, f_samp[0]], "k--")
axes.plot(x0 * numpy.ones(2), [0.0, func(x0)], "b")
axes.plot(x1 * numpy.ones(2), [0.0, func(x1)], "b")
axes.plot(x, numpy.zeros(x.shape), "k")
axes.text(x0 - 0.02, -0.2, "$x_i$", fontsize=16)
axes.text(x1 - 0.02, -0.2, "$x_{i+1}$", fontsize=16)
axes.set_title("Midpoint Rule", fontsize=16)
axes.set_xlabel("x", fontsize=16)
axes.set_ylabel("f(x)", fontsize=16)
axes.set_ylim((-0.25, 2.0))
axes.fill_between(x_interval, 0.0, func(x_interval))
axes.fill_between(
x_interval, 0, f_samp * numpy.ones(x_interval.shape), hatch="/", alpha=0
)
axes.grid()
# --- Panel 2: trapezoidal rule (linear interpolant through the endpoints) ---
axes = fig.add_subplot(1, 3, 2)
x_samp = numpy.array([x0, x1])
f_samp = func(x_samp)
axes.plot(x, func(x), linewidth=3)
axes.plot(x_samp, f_samp, "ro")
axes.plot(x_samp, f_samp, "k--")
axes.plot(x0 * numpy.ones(2), [0.0, f_samp[0]], "k--")
axes.plot(x1 * numpy.ones(2), [0.0, f_samp[1]], "k--")
axes.plot(x0 * numpy.ones(2), [0.0, func(x0)], "b")
axes.plot(x1 * numpy.ones(2), [0.0, func(x1)], "b")
axes.plot(x, numpy.zeros(x.shape), "k")
axes.text(x0 - 0.02, -0.2, "$x_i$", fontsize=16)
axes.text(x1 - 0.02, -0.2, "$x_{i+1}$", fontsize=16)
axes.set_title("Trapezoidal Rule", fontsize=16)
axes.set_xlabel("x", fontsize=16)
axes.set_ylabel("f(x)", fontsize=16)
axes.set_ylim((-0.25, 2.0))
axes.fill_between(x_interval, 0.0, func(x_interval))
axes.fill_between(x_samp, 0.0, f_samp, hatch="/", alpha=0)
axes.grid()
# --- Panel 3: Simpson's rule (quadratic interpolant through 3 points) ---
axes = fig.add_subplot(1, 3, 3)
x_samp = numpy.array([x0, (x0 + x1) / 2.0, x1])
f_samp = func(x_samp)
# Quadratic fit through the two endpoints and the midpoint
p = numpy.polyfit(x_samp, f_samp, 2)
f_p2 = numpy.polyval(p, x_interval)
axes.plot(x, func(x), linewidth=3)
axes.plot(x_samp, f_samp, "ro")
axes.plot(x_interval, f_p2, "k--")
axes.plot(x0 * numpy.ones(2), [0.0, f_samp[0]], "k--")
axes.plot(x1 * numpy.ones(2), [0.0, f_samp[-1]], "k--")
axes.plot(x0 * numpy.ones(2), [0.0, func(x0)], "b")
axes.plot(x1 * numpy.ones(2), [0.0, func(x1)], "b")
axes.plot(x, numpy.zeros(x.shape), "k")
axes.text(x0 - 0.02, -0.2, "$x_i$", fontsize=16)
axes.text(x1 - 0.02, -0.2, "$x_{i+1}$", fontsize=16)
axes.set_title("Simpsons Rule", fontsize=16)
axes.set_xlabel("x", fontsize=16)
axes.set_ylabel("f(x)", fontsize=16)
axes.set_ylim((-0.25, 2.0))
axes.fill_between(x_interval, 0.0, func(x_interval))
axes.fill_between(x_interval, 0.0, f_p2, hatch="/", alpha=0)
axes.grid()
plt.show()

Trapezoidal Rule¶
Use polynomial to derive the trapezoidal rule.
Trapezoidal rule uses order polynomials between each point (i.e. piece-wise defined linear polynomials). Using the Linear Lagrange basis we can write the interpolating polynomial as
where is the width of the interval.
Integrating this polynomial we have for a single interval
\begin{aligned} I_T[f] &= \int^{x_{i+1}}{x_i} f(x_i)\ell_0(x) + f(x{i+1})\ell_1(x) dx \ &=\frac{1}{\Delta x} \left. \left[ f(x_i)\left(x_{i+1}x - \frac{x^2}{2}\right) + f(x_{i+1})\left(\frac{x^2}{2}-xx_i\right) \right] \right|{x_i}^{x{i+1}} \ &= \frac{1}{\Delta x} \left[ f(x_i)\left(\frac{x_{i+1}^2}{2} -x_ix_{i+1} + \frac{x_i^2}{2}\right) + f(x_{i+1})\left(\frac{x_{i+1}^2}{2} -x_ix_{i+1} + \frac{x_i^2}{2}\right) \right]\ &= \frac{(x_{i+1} - x_i)^2}{2\Delta x}\left[ f(x_i) + f(x_{i+1})\right]\ & = \frac{\Delta x}{2}\left[ f(x_i) + f(x_{i+1}) \right] \ & = \Delta x\left[ \frac{1}{2} f(x_i) + \frac{1}{2} f(x_{i+1})\right] \end{aligned}
Transformation of the interval¶
For quadrature, it is often more convenient to transform the interval from to the unit interval using the transformation
where is just the fractional distance across the element and .
As before, in this frame
Under this transformation, the original integral
Becomes much simpler
or
Extended Trapezoidal rule¶
If we now panel a larger interval with panels of size , we can calculate the Extended Trapezoidal rule, by summing the individual contributions of each Trapezoidal panel.
We can also simplify the sum over all the intervals by noting that all but the end points will have total contribution of to the entire sum such that
# Note that this calculates the cumulative integral from 0.0

# Integrand and its exact cumulative integral from x = 0
def f(x):
    return numpy.sin(x)


def I(x):
    return 1.0 - numpy.cos(x)


x = numpy.linspace(0.0, 2.0 * numpy.pi, 100)  # fine grid for plotting
num_partitions = 10
# Panel edges of the uniform partition of [0, 2*pi]
x_hat = numpy.linspace(0.0, 2.0 * numpy.pi, num_partitions + 1)
delta_x = x_hat[1] - x_hat[0]

fig = plt.figure(figsize=(8, 6))
fig.subplots_adjust(hspace=0.5)

# Top panel: integrand with the trapezoid (piecewise-linear) panels
axes = fig.add_subplot(2, 1, 1)
axes.plot(x, numpy.zeros(x.shape), "k--")
axes.plot(x, f(x), "b")
for i in range(num_partitions):
    # Vertical edges and the connecting chord for panel i
    axes.plot([x_hat[i], x_hat[i]], [0.0, f(x_hat[i])], "k--")
    axes.plot([x_hat[i + 1], x_hat[i + 1]], [0.0, f(x_hat[i + 1])], "k--")
    axes.plot([x_hat[i], x_hat[i + 1]], [f(x_hat[i]), f(x_hat[i + 1])], "k--")
axes.set_xlabel("x")
axes.set_ylabel("$f(x)$")
axes.set_title("Partition and $f(x)$")
axes.set_xlim((0.0, 2.0 * numpy.pi))
axes.set_ylim((-1.1, 1.1))

# Cumulative trapezoidal approximation, accumulated panel by panel
I_hat = numpy.zeros(num_partitions)
I_hat[0] = (f(x_hat[1]) + f(x_hat[0])) * delta_x / 2.0
for i in range(1, num_partitions):
    I_hat[i] = I_hat[i - 1] + (f(x_hat[i + 1]) + f(x_hat[i])) * delta_x / 2.0
err = numpy.abs(I(x_hat[1:]) - I_hat)

# Bottom panel: exact cumulative integral vs. the approximation
axes = fig.add_subplot(2, 1, 2)
axes.plot(x, I(x), "r")
# Offset due to indexing above
axes.plot(x_hat[1:], I_hat, "ko")
axes.set_xlabel("x")
axes.set_ylabel("$f(x)$")
axes.set_title("Integral and Approximated Integral, Max Err = {}".format(err.max()))
axes.set_xlim((0.0, 2.0 * numpy.pi))
axes.set_ylim((-0.1, 2.5))
axes.grid()
plt.show()

Simpson’s Rule¶
Simpson’s rule uses order polynomials between each point (i.e. piece-wise defined quadratic polynomials at points where ).
The polynomial in the Lagrange basis has the form
Which we can just integrate to find
For this problem, transformation of the interval makes this much easier, and the Quadrature rule becomes
after a bit of work we get
Note: Like all quadrature rules, the integral over a single interval of size is always
Derivation of Newton Cotes formulas using the method of undetermined coefficients.¶
Use the general form of the quadrature rule and determine weights by using functions we know the solution to. These functions can be any representation of polynomials up to order however the monomials 1, , are the easiest in this case.
What are the corresponding systems of equations?
We then have the system of equations:
or in Matrix-vector form
or after some manipulation (and elimination) gives
Leading to
Another way to write Simpson’s rule is to use intervals of three points (similar to one of the ways we did this last time). The formulation here effectively has a half of what the intervals show but is easier to program.
# Note that this calculates the cumulative integral from 0.0

# Integrand sin(x) and its cumulative integral 1 - cos(x)
def f(x):
    return numpy.sin(x)


def I(x):
    return 1.0 - numpy.cos(x)


# Plotting grid and a 10-panel uniform partition of [0, 2*pi]
x = numpy.linspace(0.0, 2.0 * numpy.pi, 100)
num_partitions = 10
x_hat = numpy.linspace(0.0, 2.0 * numpy.pi, num_partitions + 1)
delta_x = x_hat[1] - x_hat[0]

fig = plt.figure(figsize=(8, 6))
fig.subplots_adjust(hspace=0.5)

# Top panel: integrand with the per-panel quadratic interpolants overlaid
axes = fig.add_subplot(2, 1, 1)
axes.plot(x, numpy.zeros(x.shape), "k--")
axes.plot(x, f(x), "b")
for i in range(num_partitions):
    axes.plot([x_hat[i], x_hat[i]], [0.0, f(x_hat[i])], "k--")
    axes.plot([x_hat[i + 1], x_hat[i + 1]], [0.0, f(x_hat[i + 1])], "k--")
    # Quadratic through the panel's endpoints and midpoint
    coeff = numpy.polyfit(
        (x_hat[i], x_hat[i] + delta_x / 2.0, x_hat[i + 1]),
        (f(x_hat[i]), f(x_hat[i] + delta_x / 2.0), f(x_hat[i + 1])),
        2,
    )
    x_star = numpy.linspace(x_hat[i], x_hat[i + 1], 10)
    axes.plot(x_star, numpy.polyval(coeff, x_star), "k--")
axes.set_xlabel("x")
axes.set_ylabel("$f(x)$")
axes.set_title("Partition and $f(x)$")
axes.set_xlim((0.0, 2.0 * numpy.pi))
axes.set_ylim((-1.1, 1.1))
# axes.grid()

# Cumulative Simpson's rule: weights (1/6, 2/3, 1/6) on endpoints/midpoint
I_hat = numpy.zeros(num_partitions)
I_hat[0] = delta_x * (
    1.0 / 6.0 * (f(x_hat[0]) + f(x_hat[1])) + 2.0 / 3.0 * f(x_hat[0] + delta_x / 2.0)
)
for i in range(1, num_partitions):
    I_hat[i] = I_hat[i - 1] + delta_x * (
        1.0 / 6.0 * (f(x_hat[i]) + f(x_hat[i + 1]))
        + 2.0 / 3.0 * f(x_hat[i] + delta_x / 2.0)
    )
err = numpy.abs(I(x_hat[1:]) - I_hat)

# Bottom panel: exact cumulative integral vs. the approximation
axes = fig.add_subplot(2, 1, 2)
axes.plot(x, I(x), "r")
# Offset due to indexing above
axes.plot(x_hat[1:], I_hat, "ko")
axes.set_xlabel("x")
axes.set_ylabel("$f(x)$")
axes.set_title("Integral and Approximated Integral, Max Err = {}".format(err.max()))
axes.set_xlim((0.0, 2.0 * numpy.pi))
axes.set_ylim((-0.1, 2.5))
axes.grid()
plt.show()

Error Analysis¶
From before we have a particular -point quadrature scheme for a single interval which we can also write as
Define the error such that
The degree of is the integer such that and such that . In other words, it is the maximum degree polynomial that is integrated exactly using . As we will show
Mid-point rule: p=1 -- exact for all linear polynomials
Trapezoidal rule: p=1 -- also exact for all linear polynomials
Simpson’s rule: p=3 -- exact for all cubic polynomials
There are multiple (related) ways to estimate Quadrature error that either use the Taylor series or the Lagrange Remainder theorem.
Mid-Point error¶
For a 1-Point quadrature rule like the Mid-point rule, Taylor’s theorem is easiest to use as we can just expand around the midpoint such that
where is the midpoint of the interval
Integrating over one element Gives
With a bit of work (or some clear thinking) you can show that the second term on the RHS is exactly zero and that the leading order error term comes from the third term which evaluates to
As the mid-point rule is exact for degree 1 polynomials (i.e. straight lines)
Error Estimates using Polynomial interpolation¶
We can also use our polynomial analysis to analyze errors. From Lagrange’s theorem we have the remainder term as before which we can use to look at the error
and integrate it to find the form and magnitude of the error on a single interval.
Note:
The single-interval error for the Trapezoidal rule is of the same order as the Mid-point rule
Surprisingly, the magnitude of error for the Trapezoidal rule is twice that of the Mid-point rule!
Simpson’s Rule Error¶
We could apply the same approach to Simpson’s rule using the remainder term for quadratic functions
However we would actually find that this term cancels exactly (in the same way that the mid-point scheme has a cancellation in the Taylor series expansion). A more detailed Taylor’s series analysis shows that the leading error term for Simpson’s rule is
which is exact for cubic polynomials (). Interestingly we have gained two orders of accuracy by increasing the polynomial order by only 1!
Review: Newton-Cotes Formulas and errors for a single interval¶
given and , all of the Newton-Cotes quadrature formula’s can be derived by integrating the interpolating polynomial through equi-spaced points across the interval.
Mid-point: 1-point quadrature rule
Trapezoidal Rule: 2-point quadrature rule
Simpson’s Rule: 3-point quadrature rule
# Review figure: the 1-, 2-, and 3-point Newton-Cotes rules on one interval
func = lambda x: 1 - x**2 + numpy.sin(5 * x)
x0 = 0.0
x1 = 0.5
x_buf = 0.2
x = numpy.linspace(x0 - x_buf, x1 + x_buf, 100)
x_interval = numpy.linspace(x0, x1, 100)
# Single sample at the interval midpoint
x_samp = numpy.array([0.5 * (x0 + x1)])
f_samp = func(x_samp)
fig = plt.figure(figsize=(15, 5))
# --- Panel 1: midpoint rule (constant interpolant) ---
axes = fig.add_subplot(1, 3, 1)
axes.plot(x, func(x), linewidth=3)
axes.plot(x_samp, f_samp, "ro")
axes.plot(x_interval, f_samp * numpy.ones(x_interval.shape), "k--")
axes.plot(x0 * numpy.ones(2), [0.0, f_samp[0]], "k--")
axes.plot(x1 * numpy.ones(2), [0.0, f_samp[0]], "k--")
axes.plot(x0 * numpy.ones(2), [0.0, func(x0)], "b")
axes.plot(x1 * numpy.ones(2), [0.0, func(x1)], "b")
axes.plot(x, numpy.zeros(x.shape), "k")
axes.text(x0 - 0.02, -0.2, "$x_i$", fontsize=16)
axes.text(x1 - 0.02, -0.2, "$x_{i+1}$", fontsize=16)
axes.set_title("Midpoint Rule", fontsize=16)
axes.set_xlabel("x", fontsize=16)
axes.set_ylabel("f(x)", fontsize=16)
axes.set_ylim((-0.25, 2.0))
axes.fill_between(x_interval, 0.0, func(x_interval))
axes.fill_between(
x_interval, 0, f_samp * numpy.ones(x_interval.shape), hatch="/", alpha=0
)
axes.grid()
# --- Panel 2: trapezoidal rule (linear interpolant through the endpoints) ---
axes = fig.add_subplot(1, 3, 2)
x_samp = numpy.array([x0, x1])
f_samp = func(x_samp)
axes.plot(x, func(x), linewidth=3)
axes.plot(x_samp, f_samp, "ro")
axes.plot(x_samp, f_samp, "k--")
axes.plot(x0 * numpy.ones(2), [0.0, f_samp[0]], "k--")
axes.plot(x1 * numpy.ones(2), [0.0, f_samp[1]], "k--")
axes.plot(x0 * numpy.ones(2), [0.0, func(x0)], "b")
axes.plot(x1 * numpy.ones(2), [0.0, func(x1)], "b")
axes.plot(x, numpy.zeros(x.shape), "k")
axes.text(x0 - 0.02, -0.2, "$x_i$", fontsize=16)
axes.text(x1 - 0.02, -0.2, "$x_{i+1}$", fontsize=16)
axes.set_title("Trapezoidal Rule", fontsize=16)
axes.set_xlabel("x", fontsize=16)
axes.set_ylabel("f(x)", fontsize=16)
axes.set_ylim((-0.25, 2.0))
axes.fill_between(x_interval, 0.0, func(x_interval))
axes.fill_between(x_samp, 0.0, f_samp, hatch="/", alpha=0)
axes.grid()
# --- Panel 3: Simpson's rule (quadratic interpolant through 3 points) ---
axes = fig.add_subplot(1, 3, 3)
x_samp = numpy.array([x0, (x0 + x1) / 2.0, x1])
f_samp = func(x_samp)
# Quadratic fit through the two endpoints and the midpoint
p = numpy.polyfit(x_samp, f_samp, 2)
f_p2 = numpy.polyval(p, x_interval)
axes.plot(x, func(x), linewidth=3)
axes.plot(x_samp, f_samp, "ro")
axes.plot(x_interval, f_p2, "k--")
axes.plot(x0 * numpy.ones(2), [0.0, f_samp[0]], "k--")
axes.plot(x1 * numpy.ones(2), [0.0, f_samp[-1]], "k--")
axes.plot(x0 * numpy.ones(2), [0.0, func(x0)], "b")
axes.plot(x1 * numpy.ones(2), [0.0, func(x1)], "b")
axes.plot(x, numpy.zeros(x.shape), "k")
axes.text(x0 - 0.02, -0.2, "$x_i$", fontsize=16)
axes.text(x1 - 0.02, -0.2, "$x_{i+1}$", fontsize=16)
axes.set_title("Simpsons Rule", fontsize=16)
axes.set_xlabel("x", fontsize=16)
axes.set_ylabel("f(x)", fontsize=16)
axes.set_ylim((-0.25, 2.0))
axes.fill_between(x_interval, 0.0, func(x_interval))
axes.fill_between(x_interval, 0.0, f_p2, hatch="/", alpha=0)
axes.grid()
plt.show()

Example 1:¶
Given . Let’s consider the relative accuracy of midpoint, trapezoidal and simpson’s rules for a single interval .
Calculate the relative error for each of these estimates¶
# Absolute error of the estimate 2/3 against the exact value 2/pi
# NOTE(review): presumably Simpson's rule applied to the example above -- confirm with the text
err = numpy.abs(2 / numpy.pi - 2 / 3)
err

Error in extended Newton-Cotes formulas¶
To find the total error we must sum the error over all the intervals:
as we defined before.
Extended Midpoint (and Trapezoidal) rule error¶
If we sum up across all the intervals the total error for the mid-point rule is
or noting that we can write this a bit more physically as
such that expression in the brackets is the mean value of the second derivative over the interval . This also shows that the extended trapezoidal rule converges quadratically as (or ).
In general, for panels of any given Newton-Cotes quadrature formula over the interval , the approximate integral and error formulas are
where
Convergence of Extended N-C quadrature¶
Here we plot the relative error for various extended Newton-Cotes quadrature formulas for the test case
# Compute the error as a function of delta_x for each method
f = lambda x: numpy.sin(numpy.pi * x)

# Panel counts 1, 2, 4, ..., 2**15
num_partitions = numpy.array([2**n for n in range(0, 16)])
delta_x = numpy.empty(len(num_partitions))
error_mid = numpy.empty(len(num_partitions))
error_trap = numpy.empty(len(num_partitions))
error_simpson = numpy.empty(len(num_partitions))
# Exact value of int_0^1 sin(pi * x) dx
I_true = 2.0 / numpy.pi

for j, N in enumerate(num_partitions):
    x_hat = numpy.linspace(0.0, 1.0, N + 1)
    delta_x[j] = x_hat[1] - x_hat[0]

    # Compute Midpoint
    x_star = 0.5 * (x_hat[1:] + x_hat[:-1])
    I_hat = 0.0
    for i in range(0, N):
        I_hat += f(x_star[i]) * delta_x[j]
    error_mid[j] = numpy.abs(I_hat - I_true) / I_true

    # Compute trapezoid
    # BUG FIX: the sum previously started at i = 1, silently dropping the
    # first panel [x_0, x_1]; all N panels must be included.
    I_hat = 0.0
    for i in range(0, N):
        I_hat += (f(x_hat[i + 1]) + f(x_hat[i])) * delta_x[j] / 2.0
    error_trap[j] = numpy.abs(I_hat - I_true) / I_true

    # Compute simpson's
    I_hat = 0.0
    for i in range(0, N):
        I_hat += delta_x[j] * (
            1.0 / 6.0 * (f(x_hat[i]) + f(x_hat[i + 1]))
            + 2.0 / 3.0 * f(x_hat[i] + delta_x[j] / 2.0)
        )
    error_simpson[j] = numpy.abs(I_hat - I_true) / I_true
fig = plt.figure(figsize=(8, 6))
axes = fig.add_subplot(1, 1, 1)

# Constant chosen so a reference line of the given order passes through a
# chosen data point: C = error / delta_x**order (computed in log space)
order_C = lambda delta_x, error, order: numpy.exp(
    numpy.log(error) - order * numpy.log(delta_x)
)

axes.loglog(delta_x, error_mid, "ro", label="Midpoint")
axes.loglog(delta_x, error_trap, "bo", label="Trapezoid")
axes.loglog(delta_x, error_simpson, "go", label="Simpson's")
axes.loglog(
    delta_x,
    order_C(delta_x[0], error_trap[0], 2.0) * delta_x**2.0,
    "b--",
    label="2nd Order",
)
axes.loglog(
    delta_x,
    order_C(delta_x[0], error_simpson[0], 4.0) * delta_x**4.0,
    "g--",
    label="4th Order",
)
axes.legend(loc="best")
# Raw string avoids the invalid "\D" escape-sequence warning
axes.set_xlabel(r"$\Delta x$", fontsize=16)
axes.set_ylabel("Relative Error", fontsize=16)
axes.grid()
plt.show()

Recursive Improvement of Accuracy¶
Say we ran the extended trapezoidal rule with step size , we then will have
Now compare the two rules for and :
Here we see we can actually reuse the work we did to calculate to refine the integral.
Arbitrary Intervals (Affine Transforms)¶
A lot of quadrature rules are defined on specific intervals, e.g or . However any two intervals can always be mapped onto each other through an affine transform or affine map which is a linear transformation.
where is a shift and is a scaling.
Given and , the inverse map is simply
Example:¶
Map
# Illustrate the affine map between physical coords [a, b] and reference [-1, 1]
fig = plt.figure(figsize=(5, 5))
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, func(x), linewidth=3)
axes.fill_between(x_interval, 0.0, func(x_interval))
axes.plot(x, numpy.zeros(x.shape), "k")
# Physical coordinates a, x, b labeled above ...
axes.text(x0 - 0.02, -0.2, "$a$", fontsize=16)
axes.text((x0 + x1) / 2.0 - 0.02, -0.2, "$x$", fontsize=16)
axes.text(x1 - 0.02, -0.2, "$b$", fontsize=16)
axes.set_xlabel("x", fontsize=16)
axes.set_ylabel("f(x)", fontsize=16)
# ... and the corresponding reference coordinates -1, xi, 1 below
axes.text(x0 - 0.06, -0.4, "$-1$", fontsize=16)
axes.text((x0 + x1) / 2.0 - 0.02, -0.4, "$\\xi$", fontsize=16)
axes.text(x1 - 0.02, -0.4, "$1$", fontsize=16)
axes.set_ylim((-0.5, 2.0))
axes.grid()

Given an Affine mapping, it is easy to transform any integral from one coordinate system to another
and the point quadrature rule for a single interval is
where
is the point quadrature rule on the interval
and the cumulative integral over panels
Example: Newton-Cotes Rules¶
We can rewrite our previous quadrature rules so that they are given on the interval instead of . Recall that a general quadrature rule can be written as
where are the weights and are the points specified to evaluate the function at.
For Newton-Cotes rules we know that the points are uniformly distributed on but we still need to define the weights. For a 2-Point trapezoid rule we can do this to find that
so the quadrature points are at with weights
Note that if we map this using our affine transform we would get back the original trapezoid rule:
Similarly for Simpson’s rule we have
def ncintegration(f, a, b, N):
    r"""Approximate \int_a^b f(x) dx with an N-point Newton-Cotes rule.

    Parameters:
        f: vectorized integrand, called with a numpy array of nodes
        a, b: integration limits
        N: number of quadrature points; 1 (midpoint), 2 (trapezoidal),
           or 3 (Simpson's rule)

    Returns the quadrature approximation of the integral.
    """
    assert N > 0
    assert N < 4
    # Build up a nested list of integration points and weights for the
    # reference interval [-1, 1]:
    xNC = []
    wNC = []
    # 1 point Newton-Cotes (Mid-point)
    xNC.append(numpy.array([0.0]))
    wNC.append(numpy.array([2.0]))
    # 2 point Newton-Cotes quadrature (Trapezoidal)
    xNC.append(numpy.array([-1.0, 1.0]))
    wNC.append(numpy.array([1.0, 1.0]))
    # 3 point Newton-Cotes quadrature (Simpson's rule)
    xNC.append(numpy.array([-1.0, 0.0, 1.0]))
    wNC.append(numpy.array([1.0, 4.0, 1.0]) / 3.0)
    # Affine map from the reference coordinate xi in [-1, 1] to x in [a, b]
    xi_map = lambda xi: (b - a) / 2.0 * xi + (a + b) / 2.0
    # (b - a) / 2 is the Jacobian of the affine map
    I = (b - a) / 2.0 * wNC[N - 1].dot(f(xi_map(xNC[N - 1])))
    return I


f = lambda x: numpy.sin(x)
# Antiderivative of sin(x), used to compute the exact integral
def I(x):
    return -numpy.cos(x)


# Integration limits: the middle half-period of sin
a = numpy.pi / 4.0
b = 3 * numpy.pi / 4.0
# b = numpy.pi/2
I_true = I(b) - I(a)
print(I_true)

x = numpy.linspace(0.0, 2.0 * numpy.pi, 100)
# Shade the region [a, b] whose area is being approximated
x_interval = numpy.linspace(a, b, 100)
fig = plt.figure(figsize=(5, 5))
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, f(x), linewidth=3)
axes.fill_between(x_interval, 0.0, f(x_interval))
axes.plot(x, numpy.zeros(x.shape), "k")
axes.grid()

# Calculate Newton-Cotes Quadrature and relative errors for 1,2, and 3 point NC quadrature schemes
# Relative error of each N-point Newton-Cotes approximation
for N in range(1, 4):
    I = ncintegration(f, a, b, N)
    err = numpy.abs(I - I_true) / numpy.abs(I_true)
print("N = {}, I_N = {}, err = {}".format(N, I, err))Optimal Quadrature Methods¶
Newton-Cotes formulas assume a fixed, regularly spaced set of quadrature points and calculate quadrature rules based on integrating the interpolating polynomial.
Can we do better?
Actually, if we allow both the points and weights to be chosen in an optimal way, we can generate optimal quadrature rules with points that can exactly integrate polynomials up to order
A Naive argument for for an point quadrature scheme¶
Given an point quadrature scheme
If we allow both and to be variables there are unknowns and we can use the method of undetermined coefficients to generate non-linear equations that exactly integrate all monomials up to degree .
Example: 2-Point Gauss-Legendre Quadrature¶
Consider on
using the method of undetermined coefficients we can solve for the four unknowns that exactly integrate the first 4 monomials, .
Or as a system of non-linear equations
Note that we need to solve for 4 unknowns , , , and . If we guess by symmetry, it will just fall out that and
However, there turns out to be some much deeper math based on orthogonal polynomials that can generate both weights and quadrature points in a much more rigorous and well defined manner.
# Cubic test integrand; 2-point Gauss-Legendre integrates cubics exactly
f = lambda x: x**3 - 3 * x**2 + 2 * x - 1.0
If = lambda x: x**4 / 4 - x**3 + x**2 - x

x = numpy.linspace(-1, 1, 100)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
# 2-point Gauss-Legendre nodes at +/- sqrt(1/3)
xi = numpy.array([-numpy.sqrt(1.0 / 3.0), numpy.sqrt(1.0 / 3.0)])
ax.plot(x, f(x), "b", linewidth=4)
ax.plot(xi, f(xi), "bo", markersize=10)
ax.grid()
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_ylim(-7, 0)
ax.fill_between(x, 0.0, f(x), hatch="/", alpha=0.2)
# Raw string so the LaTeX \int is not treated as an (invalid) escape sequence
ax.set_title(r"$f(x) = x^3-3x^2+2x-1$, $\int_{-1}^{1}f = -4$")
plt.show()

print("Exact integral = {}".format(If(1.0) - If(-1.0)))
# Magnitude of the 2-point Gauss-Legendre nodes: sqrt(1/3)
xi = numpy.sqrt(1.0 / 3)
print("Gaussian 2-pt Quadrature = {}".format(f(-xi) + f(xi)))Generalized Gaussian Quadrature¶
As it turns out, both the weights and quadrature points for optimal Gaussian quadrature can be derived from the properties of families of orthogonal polynomials such as Chebyshev or Legendre polynomials.
In general, all orthogonal polynomials form a family of polynomials of all orders with the property that
where , are the order of the polynomial and is a weighting function.
We say that the polynomials are orthogonal to each other under a weighted inner product. And because they are orthogonal, they are automatically linearly independent and can form a basis for the space of all polynomials up to degree .
Recall something similar for vectors with the standard inner product (dot product)
If then the vectors and are orthogonal.
Example: The Legendre Polynomials¶
The Legendre polynomials form one family that are orthogonal with over the interval .
The first few Legendre Polynomials are
from scipy.special import legendre
x = numpy.linspace(-1, 1, 100)
fig = plt.figure(figsize=(8, 6))
axes = fig.add_subplot(1, 1, 1)
# Plot the first five Legendre polynomials P_0 through P_4
for N in range(5):
    Pn = legendre(N)
    axes.plot(x, Pn(x), label="N={}".format(N))
axes.grid()
axes.set_title("Legendre Polynomials", fontsize=18)
axes.set_xlabel("x", fontsize=16)
axes.legend(loc="best")
plt.show()

Big idea¶
Let be an orthogonal polynomial of degree with weighting function on some interval it follows that
i.e. is orthogonal to all monomials () with respect to the weight function .
This follows from the fact that the orthogonal polynomials form a basis and all monomials , can be written as a linear combination of for all .
Moreover, any general polynomial up to order can be written as
where is a quotient polynomial of degree and a remainder polynomial also of order strictly . (Remember synthetic division?)
and given are the roots of
Therefore we have a relationship between the weighted integral of and a discrete quadrature scheme with undetermined weights . All that remains is to find a set of weights that integrate all polynomials up to the order of exactly.
Given that we know the quadrature points as the roots of the Legendre polynomials, we could compute the weights by the method of undetermined coefficients.
Alternatively, we can use the lagrange basis sampled at the roots of to expand exactly as
and therefore $$
$$
or
However, with a bit of work, the weights can can be calculated as functions of (This is the tricky part) For a proof and considerably more detail see Gaussian quadrature.
But given a formula for the weights the general quadrature scheme becomes
Choosing the correct weighting function and basis leads to a number of useful quadrature approaches:
Gauss-Legendre Quadrature¶
General Gauss-Legendre quadrature uses and the Legendre Polynomials, which can be shown to have weights
and is the th root of .
from scipy.special import legendre
x = numpy.linspace(-1, 1, 100)
fig = plt.figure(figsize=(8, 6))
axes = fig.add_subplot(1, 1, 1)
# Plot the first five Legendre polynomials; their roots are the GL nodes
for N in range(5):
    Pn = legendre(N)
    axes.plot(x, Pn(x), label="N={}".format(N))
axes.grid()
axes.set_title("Legendre Polynomials", fontsize=18)
axes.set_xlabel("x", fontsize=16)
axes.legend(loc="best")
plt.show()

Which lead to the quadrature points and weights...
| $$N$$ | $$x_i$$ | $$w_i$$ |
|---|---|---|
| $$1$$ | $$0$$ | $$2$$ |
| $$2$$ | $$\pm \sqrt{\frac{1}{3}}$$ | $$1$$ |
| $$3$$ | $$0$$ | $$8/9$$ |
| $$\pm \sqrt{\frac{3}{5}}$$ | $$5/9$$ | |
| $$4$$ | $$\pm \sqrt{\frac{3}{7} - \frac{2}{7} \sqrt{\frac{6}{5}}}$$ | $$\frac{18 + \sqrt{30}}{36}$$ |
| $$\pm \sqrt{\frac{3}{7} + \frac{2}{7} \sqrt{\frac{6}{5}}}$$ | $$\frac{18 - \sqrt{30}}{36}$$ | |
| $$5$$ | $$0$$ | $$\frac{128}{225}$$ |
| $$\pm \frac{1}{3} \sqrt{5 - 2 \sqrt{\frac{10}{7}}}$$ | $$\frac{322 + 13\sqrt{70}}{900}$$ | |
| $$\pm \frac{1}{3} \sqrt{5 + 2 \sqrt{\frac{10}{7}}}$$ | $$\frac{322 - 13\sqrt{70}}{900}$$ |
Note the weights and quadrature points for the 2-Point Gauss-Legendre quadrature and compare to what we found by undetermined coefficients.
Again, all of the weights and quadrature points are defined on the interval however these can be transferred to any arbitrary interval by an affine transformation.
def glintegration(f, a, b, N):
    r"""Approximate \int_a^b f(x)dx using Gauss-Legendre quadrature rules with N<=4 quadrature points

    Parameters
    ----------
    f : callable
        Vectorized integrand; called once with a numpy array of nodes.
    a, b : float
        Limits of integration.
    N : int
        Number of quadrature points, 1 <= N <= 4.  An N-point rule is
        exact for polynomials up to degree 2N - 1.

    Returns
    -------
    float
        The Gauss-Legendre approximation of the integral.

    Raises
    ------
    ValueError
        If N is outside the tabulated range [1, 4].
    """
    if not 1 <= N <= 4:
        raise ValueError(
            "glintegration only tabulates 1 <= N <= 4 quadrature points, got N = {}".format(N)
        )
    # Build up a nested list of Gauss points and weights on the reference
    # interval [-1, 1]; entry N - 1 holds the N-point rule.
    xGl = []
    wGl = []
    # 1 point Gauss-Legendre quadrature (exact for linears)
    xGl.append(numpy.array([0.0]))
    wGl.append(numpy.array([2.0]))
    # 2 point Gauss-Legendre quadrature (exact for cubics)
    xGl.append(numpy.array([-1.0 / numpy.sqrt(3.0), 1.0 / numpy.sqrt(3)]))
    wGl.append(numpy.array([1.0, 1.0]))
    # 3 point Gauss-Legendre quadrature (exact for quintics)
    xGl.append(numpy.array([-numpy.sqrt(3.0 / 5.0), 0.0, numpy.sqrt(3.0 / 5.0)]))
    wGl.append(numpy.array([5.0, 8.0, 5.0]) / 9.0)
    # 4 point Gauss-Legendre quadrature (exact for degree-7 polynomials)
    xGl.append(
        numpy.array(
            [
                -numpy.sqrt(3.0 / 7.0 - 2.0 / 7.0 * numpy.sqrt(6.0 / 5.0)),
                numpy.sqrt(3.0 / 7.0 - 2.0 / 7.0 * numpy.sqrt(6.0 / 5.0)),
                -numpy.sqrt(3.0 / 7.0 + 2.0 / 7.0 * numpy.sqrt(6.0 / 5.0)),
                numpy.sqrt(3.0 / 7.0 + 2.0 / 7.0 * numpy.sqrt(6.0 / 5.0)),
            ]
        )
    )
    wGl.append(
        numpy.array(
            [
                (18.0 + numpy.sqrt(30.0)) / 36.0,
                (18.0 + numpy.sqrt(30.0)) / 36.0,
                (18.0 - numpy.sqrt(30.0)) / 36.0,
                (18.0 - numpy.sqrt(30.0)) / 36.0,
            ]
        )
    )
    # Affine map from the reference interval [-1, 1] onto [a, b]
    xi_map = lambda xi: (b - a) / 2.0 * xi + (a + b) / 2.0
    # The Jacobian of the map, (b - a) / 2, scales the weighted sum
    I = (b - a) / 2.0 * (wGl[N - 1].dot(f(xi_map(xGl[N - 1]))))
    return I

# Note:
From a programming perspective, this would make more sense written as a Python class that calculates and stores the quadrature weights and points once.
# Demo integrand: f(x) = cos(x), whose integral over [0, pi/2] is exactly 1
f = lambda x: numpy.cos(x)
I = 1.0

# Shade the integration interval inside a wider plotting window
x_interval = numpy.linspace(0.0, numpy.pi / 2, 100)
x = numpy.linspace(-1.0, 2, 100)
fig = plt.figure(figsize=(5, 5))
ax = fig.add_subplot(1, 1, 1)
ax.plot(x, f(x), linewidth=3)
ax.fill_between(x_interval, 0.0, f(x_interval))
ax.plot(x, numpy.zeros(x.shape), "k")
ax.grid()
plt.show()

# Lower integration limit for the convergence test below
a = 0.0
b = numpy.pi / 2
# Exact value of \int_0^{pi/2} cos(x) dx
I_true = 1.0

# Tabulate the Gauss-Legendre error for N = 1..4 quadrature points
err = numpy.zeros(4)
print("Gauss-Legendre N-point quadrature formulas\n")
for idx, N in enumerate(range(1, 5)):
    I = glintegration(f, a, b, N)
    err[idx] = numpy.abs(I_true - I)
    print("N = {}, I = {}, err = {}".format(N, I, err[idx]))

# Storage for the Newton-Cotes comparison below
errNC = numpy.zeros(4)
# Newton-Cotes comparison (ncintegration is defined earlier in the notebook).
# NOTE(review): only N = 1..3 are filled here, so errNC[3] stays zero and is
# silently dropped from the log-scale plot below — confirm this is intended.
print("Newton-Cotes N-point quadrature formulas\n")
for idx, N in enumerate(range(1, 4)):
    I = ncintegration(f, a, b, N)
    errNC[idx] = numpy.abs(I_true - I)
    print("N = {}, I = {}, err = {}".format(N, I, errNC[idx]))

fig = plt.figure(figsize=(8, 6))
axes = fig.add_subplot(1, 1, 1)
axes.semilogy(range(1, 5), errNC, "bs", markersize=10, label="Newton-Cotes")
axes.semilogy(range(1, 5), err, "ro", markersize=10, label="Gauss-Legendre")
axes.set_ylabel("Error")
axes.set_xlabel("quadrature points (N)")
axes.legend(loc="best")
axes.grid()
plt.show()Check polynomial degree¶
by integrating arbitrary polynomials of given order
# 3rd order polynomial
f = lambda x: -4 * x**3 - 3.0 * x**2 + 2.0 * x + 300.0
int_f = lambda x: -(x**4) - x**3.0 + x**2 + x * 300.0
# 5th order polynomial
# f = lambda x: x**5 - 4*x**3 - 3.*x**2 + 2.*x +300.
# int_f = lambda x: x**6/6. - x**4 - x**3. + x**2 + x*300.
a = 1.0
b = 4.0
I_true = int_f(b) - int_f(a)
print("I_true = {}\n".format(I_true))
for N in range(1, 5):
I = glintegration(f, a, b, N)
err = numpy.abs(I - I_true) / numpy.abs(I_true)
print("N = {}, I = {:5.6g}, rel_err = {} ".format(N, I, err))Same problem for Newton-Cotes quadrature¶
# Repeat the polynomial test with the Newton-Cotes rules for comparison
print("Newton-Cotes N-point quadrature formulas\n")
for N in range(1, 4):
    I = ncintegration(f, a, b, N)
    errNC = numpy.abs(I - I_true) / numpy.abs(I_true)
    print("N = {}, I = {}, err = {}".format(N, I, errNC))

# Plotting window slightly wider than the integration interval
x = numpy.linspace(a - 1, b + 1, 100)
x_interval = numpy.linspace(a, b, 100)
fig = plt.figure(figsize=(8, 6))
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, f(x), linewidth=3)
axes.fill_between(x_interval, 0.0, f(x_interval), hatch="/", alpha=0.2)
axes.plot(x, numpy.zeros(x.shape), "k")
axes.grid()
plt.show()Example 3:¶
We now look at the relative accuracy of the composite midpoint, trapezoidal, Simpson's, and Gauss-Legendre quadrature rules as the number of subintervals is increased.
# Compute the error as a function of delta_x for each method
# Test integrand: sin(pi x) on [0, 1], exact integral 2 / pi
f = lambda x: numpy.sin(numpy.pi * x)
I = 2.0 / numpy.pi

# Number of grid points per trial (coarse sweep; finer sweep commented out)
# num_partitions = range(50, 1000, 50)
num_partitions = range(5, 50, 5)

# Pre-allocate one slot per trial for the grid spacing and each rule's error
n_cases = len(num_partitions)
delta_x = numpy.empty(n_cases)
error_trap = numpy.empty(n_cases)
error_simpson = numpy.empty(n_cases)
error_2 = numpy.empty(n_cases)
error_3 = numpy.empty(n_cases)
error_4 = numpy.empty(n_cases)
# Sweep over grid resolutions and record every rule's error
for j, N in enumerate(num_partitions):
    x_hat = numpy.linspace(0.0, 1.0, N)
    delta_x[j] = x_hat[1] - x_hat[0]

    # Composite trapezoidal rule
    I_hat = 0.0
    for seg in range(0, N - 1):
        I_hat += (f(x_hat[seg + 1]) + f(x_hat[seg])) * delta_x[j] / 2.0
    error_trap[j] = numpy.abs(I_hat - I)

    # Composite Simpson's rule (midpoint of each segment is the interior node)
    I_hat = 0.0
    for seg in range(0, N - 1):
        I_hat += delta_x[j] * (
            1.0 / 6.0 * (f(x_hat[seg]) + f(x_hat[seg + 1]))
            + 2.0 / 3.0 * f(x_hat[seg] + delta_x[j] / 2.0)
        )
    error_simpson[j] = numpy.abs(I_hat - I)

    # Affine map from the reference element [-1, 1] onto [a, b];
    # shared by all three Gauss-Legendre rules below
    xi_map = lambda a, b, xi: (b - a) / 2.0 * xi + (a + b) / 2.0

    # Composite Gauss-Legendre 2-point rule
    xi = [-numpy.sqrt(1.0 / 3.0), numpy.sqrt(1.0 / 3.0)]
    w = [1.0, 1.0]
    I_hat = 0.0
    for seg in range(0, N - 1):
        for q in range(len(xi)):
            I_hat += f(xi_map(x_hat[seg], x_hat[seg + 1], xi[q])) * w[q]
    I_hat *= delta_x[j] / 2.0
    error_2[j] = numpy.abs(I_hat - I)

    # Composite Gauss-Legendre 3-point rule
    xi = [-numpy.sqrt(3.0 / 5.0), 0.0, numpy.sqrt(3.0 / 5.0)]
    w = [5.0 / 9.0, 8.0 / 9.0, 5.0 / 9.0]
    I_hat = 0.0
    for seg in range(0, N - 1):
        for q in range(len(xi)):
            I_hat += f(xi_map(x_hat[seg], x_hat[seg + 1], xi[q])) * w[q]
    I_hat *= delta_x[j] / 2.0
    error_3[j] = numpy.abs(I_hat - I)

    # Composite Gauss-Legendre 4-point rule
    xi = [
        -numpy.sqrt(3.0 / 7.0 - 2.0 / 7.0 * numpy.sqrt(6.0 / 5.0)),
        numpy.sqrt(3.0 / 7.0 - 2.0 / 7.0 * numpy.sqrt(6.0 / 5.0)),
        -numpy.sqrt(3.0 / 7.0 + 2.0 / 7.0 * numpy.sqrt(6.0 / 5.0)),
        numpy.sqrt(3.0 / 7.0 + 2.0 / 7.0 * numpy.sqrt(6.0 / 5.0)),
    ]
    w = [
        (18.0 + numpy.sqrt(30.0)) / 36.0,
        (18.0 + numpy.sqrt(30.0)) / 36.0,
        (18.0 - numpy.sqrt(30.0)) / 36.0,
        (18.0 - numpy.sqrt(30.0)) / 36.0,
    ]
    I_hat = 0.0
    for seg in range(0, N - 1):
        for q in range(len(xi)):
            I_hat += f(xi_map(x_hat[seg], x_hat[seg + 1], xi[q])) * w[q]
    I_hat *= delta_x[j] / 2.0
    error_4[j] = numpy.abs(I_hat - I)
fig = plt.figure(figsize=(8, 6))
axes = fig.add_subplot(1, 1, 1)
# axes.plot(delta_x, error)
axes.loglog(delta_x, error_trap, "o", label="Trapezoid")
axes.loglog(delta_x, error_simpson, "o", label="Simpson's")
axes.loglog(delta_x, error_2, "o", label="G-L 2-point")
axes.loglog(delta_x, error_3, "o", label="G-L 3-point")
axes.loglog(delta_x, error_4, "o", label="G-L 4-point")
order_C = lambda delta_x, error, order: numpy.exp(
numpy.log(error) - order * numpy.log(delta_x)
)
axes.loglog(
delta_x,
order_C(delta_x[0], error_trap[0], 2.0) * delta_x**2.0,
"r--",
label="2nd Order",
)
axes.loglog(
delta_x,
order_C(delta_x[0], error_simpson[0], 4.0) * delta_x**4.0,
"g--",
label="4th Order",
)
axes.loglog(
delta_x, order_C(delta_x[1], error_3[1], 6) * delta_x**6, "b--", label="6th Order"
)
axes.loglog(
delta_x,
order_C(delta_x[1], error_4[1], 8.0) * delta_x**8.0,
"k--",
label="8th Order",
)
axes.legend(loc="best", fontsize=14)
axes.set_xlabel("$\Delta x$", fontsize=18)
axes.set_ylabel("error", fontsize=18)
axes.set_xlim((5e-3, delta_x[0]))
axes.grid()
plt.show()Other Quadrature Families¶
Gauss-Chebyshev (closely related to Clenshaw-Curtis): If the weight function and basis are those of the Chebyshev polynomials, then the quadrature points are the known roots of the polynomials (the Chebyshev nodes) and the weights can be derived in closed form.
Gauss-Hermite: If and are Hermite polynomials then
SciPy Integration Routines¶
SciPy has a number of integration routines that we have derived here including general purpose integrators that can control error. For more information see scipy.integrate
import scipy.integrate as integrate
integrate?
