import numpy as np
import matplotlib.pyplot as plt
import random
from matplotlib import colors, ticker, cm
from scipy.optimize import curve_fit
import utils as u
from uncertainties import ufloat
def power_law_fit_model(x, a, c):
    """Power-law model c * x**(-a) used by every curve_fit call in this script.

    Parameters
    ----------
    x : array_like
        Sample positions (scalar, list, or ndarray). BUGFIX: converted to a
        float ndarray because several call sites pass a plain Python list,
        and `list ** float` raises TypeError.
    a : float
        Decay exponent (fits below constrain it to be non-negative).
    c : float
        Overall normalisation.

    Returns
    -------
    numpy scalar or ndarray with the same shape as ``x``.
    """
    return c * np.power(np.asarray(x, dtype=float), -a)
def f(x):
    """1-D integrand: square-root envelope times a sine burst near the origin.

    NOTE(review): immediately shadowed by the redefinition just below (which
    adds an extra x**0.7 modulation); kept here for reference.
    """
    shifted = x + 1.0 / np.power(np.pi, 2.0)
    return np.sqrt(shifted) * np.sin(1.0 / shifted)
def f(x):
    """1-D integrand actually used below: the sqrt/sine burst additionally
    modulated by x**0.7. Shadows the definition above."""
    shifted = x + 1.0 / np.power(np.pi, 2.0)
    return x**0.7 * np.sqrt(shifted) * np.sin(1.0 / shifted)
# Quick visual check of f on (0.01, 1].
f(0.12)  # throw-away evaluation (notebook-style spot check)
xs = np.linspace(0.01, 1, 10000)
ys = f(xs)
plt.plot(xs, ys, color="red", linewidth=2.0, linestyle="--")
plt.title("A sinusoidally modulated square root")
plt.show()
def quadIntegral(nP):
    """Quadrature of f over [0.01, 1]: mean of f at nP equispaced samples,
    evaluated one point at a time (slow, pure-Python loop version)."""
    grid = np.linspace(0.01, 1, nP)
    samples = [f(point) for point in grid]
    return np.sum(samples) / np.size(samples)
def quadIntegralFast(nP):
    """Vectorised twin of quadIntegral: one array call to f instead of a
    per-point loop; same numerical result."""
    grid = np.linspace(0.01, 1, nP)
    samples = f(grid)
    return np.sum(samples) / np.size(samples)
# --- 1-D quadrature convergence study ----------------------------------------
# Spot-check both implementations once (notebook-style bare expressions).
quadIntegral(100000)
quadIntegralFast(100000)
# Sample counts to probe; the densest run (10^6) serves as the reference value.
_x=[ 1000, 3000, 10000, 30000, 100000, 300000, 1000000 ]
quadIntegrals = np.array([ quadIntegralFast(k) for k in _x ])
# Convergence proxy: distance from the best (densest) estimate.
_y = np.abs(quadIntegrals - quadIntegrals[-1])
_dy = 40/np.power(_x,2)  # assumed ~1/n^2 error bars -- TODO confirm prefactor 40
_y
fig, ax = plt.subplots()
last=-1  # drop the final point: it is identically zero by construction
ax.errorbar(_x[0:last], _y[0:last], yerr=_dy[0:last])
ax.set_yscale('log')
ax.set_xscale('log')
plt.show()
last=-1
# Power-law fit of |error| vs n. NOTE(review): sigma is set to _y**2, which
# weights points with larger spread less -- presumably intentional; confirm.
popt, pcov = curve_fit(power_law_fit_model, _x[0:last], _y[0:last],sigma=np.power(_y[0:last],2),p0=[2.9,1.11],bounds=(0,np.inf))
popt, pcov
_fx=power_law_fit_model(_x,popt[0],popt[1])
fig, ax = plt.subplots();
ax.plot(_x[0:last], _fx[0:last], color="red", linewidth=2.0, linestyle="--");
# NOTE(review): error bars use 2/n^2 here, not the _dy defined above -- confirm.
ax.errorbar(_x[0:last], _y[0:last], yerr=2/np.power(_x[0:last],2));
ax.set_title("Power Law best fit");
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_ylabel('spread')
ax.set_xlabel('points evaluated')
#plt.ylabel('some numbers')
plt.show()
def mcIntegral(nP):
    """Monte Carlo estimate of the integral of f over [0, 1): mean of f at
    nP uniform random points, evaluated in a per-point Python loop."""
    draws = np.random.rand(nP)
    samples = [f(point) for point in draws]
    return np.sum(samples) / np.size(samples)
def mcIntegralFast(nP):
    """Vectorised twin of mcIntegral: one array call to f over all nP
    uniform random points."""
    draws = np.random.rand(nP)
    samples = f(draws)
    return np.sum(samples) / np.size(samples)
# Spot-check both MC estimators once.
mcIntegral(100000)
mcIntegralFast(100000)
# Mini-demo: np.var equals the mean squared deviation from the mean.
a = np.array([[1, 2, 3, 4]])
print(np.mean(np.power(a-a.mean(),2)))
np.var(a)
def averageMCintegral(nP,trials=100):
    """Run mcIntegralFast `trials` times at nP points each and return the
    pair (mean of the estimates, std-deviation of the estimates)."""
    estimates = [mcIntegralFast(nP) for _ in range(trials)]
    return np.sum(estimates) / trials, np.sqrt(np.var(estimates))
# --- 1-D Monte Carlo convergence study ---------------------------------------
averageMCintegral(100)
MCintegrals=np.array([ averageMCintegral(k) for k in [10, 100, 1000, 3000, 10000, 30000 ] ])
MCintegrals
MCintegrals[:,0]  # column 0: mean estimates; column 1: std-deviations
_x = [ 1000, 3000, 10000, 30000, 300000 ]
MCintegrals=np.array([ averageMCintegral(k) for k in _x ])
# Offset by a hard-coded reference -- presumably a previously computed
# high-accuracy value of the integral; TODO confirm its provenance.
_y = MCintegrals[:,0] -0.53054788312233625
_dy = MCintegrals[:,1]
fig, ax = plt.subplots()
ax.errorbar(_x, _y, yerr=_dy)
plt.show()
# Larger sample sizes; the first _x assignment below is immediately overwritten.
_x = np.power(np.array([ 30, 50, 75, 100, 150 ]),3)
_x = np.array([ 1000, 10000, 100000, 10**6,5*10**6, 8*10**6 ,10**7 ])
#_x = np.power(np.array([ 30, 50, 60, 70, 80, 100 ]),3)
#_x = [10,30,50,100,300,1000]
mcIntegrals=np.array([ averageMCintegral(k,trials=10) for k in _x ])
_y = np.abs(mcIntegrals[:,0] - mcIntegrals[:,0][-1])  # spread vs densest run
_dy = mcIntegrals[:,1]
_y, _y[0:-1]
_dy, _dy[0:-1]
fig, ax = plt.subplots();
ax.set_yscale('log');
ax.set_xscale('log');
ax.errorbar(_x[0:-1], _y[0:-1], yerr=_dy[0:-1])
plt.show()
last=-1
# Power-law fit; expected MC exponent is around 1/2 (hence p0=[0.5, ...]).
popt, pcov = curve_fit(power_law_fit_model, _x[0:last], _y[0:last],sigma=np.power(_y[0:last],2),p0=[0.5,1.11],bounds=(0,np.inf))
# NOTE(review): the 1/3 power applied to popt[0] looks copied from the 3-D
# study below -- confirm it is intended for this 1-D case.
popt, np.sqrt(pcov), np.power(popt[0] ,1/3)
_fx=power_law_fit_model(_x,popt[0],popt[1])
fig, ax = plt.subplots();
ax.plot(_x[0:last], _fx[0:last], color="red", linewidth=2.0, linestyle="--");
ax.errorbar(_x[0:last], _y[0:last], yerr=2/np.power(_x[0:last],2));
ax.set_title("Power Law best fit");
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_ylabel('spread')
ax.set_xlabel('total points in all dimension')
#plt.ylabel('some numbers')
plt.show()
import ghalton
#The last code will produce a sequence in five dimension. To get the points use
# BUGFIX: `sequencer` was used below without ever being created (NameError --
# the construction line was evidently lost). The comment above mentions a 5-D
# sequence, but the rest of this file uses 1-D Halton sequences (see
# HaltonMCintegral), so a 1-D sequencer is created here -- TODO confirm dim.
sequencer = ghalton.Halton(1)
points = sequencer.get(1000000)
list_points=np.array(u.flattenOnce(points))
u.write_lines_to_file_newline(list_points,'numbers.txt')
plt.hist(list_points)
plt.show()
def averageHaltonMCintegral(nP,trials=4):
    """Repeat HaltonMCintegral `trials` times at nP points and return the
    pair (mean of the estimates, std-deviation of the estimates)."""
    runs = [HaltonMCintegral(int(nP)) for _ in range(trials)]
    return np.sum(runs) / trials, np.sqrt(np.var(runs))
def HaltonMCintegral(k):
    """Quasi-Monte-Carlo estimate of the integral of f over [0, 1): mean of
    f at the first k points of a fresh 1-D Halton sequence."""
    halton = ghalton.Halton(1)
    sample_points = np.array(u.flattenOnce(halton.get(k)))
    sampled = f(sample_points)
    return np.sum(sampled) / np.size(sampled)
# --- Halton (quasi-MC) convergence study -------------------------------------
# First _x assignment is immediately overwritten by the explicit list below.
_x = np.power(np.array([ 10, 30, 50, 70, 100 ]),3)
_x = np.array([ 1000, 10000, 100000, 10**6,5*10**6, 8*10**6 ])
#_x = [10,30,50,100,300,1000]
mcIntegrals=np.array([ averageHaltonMCintegral(k,trials=4) for k in _x ])
_y = np.abs(mcIntegrals[:,0] - mcIntegrals[:,0][-1])  # spread vs densest run
_dy = mcIntegrals[:,1]
_y, _y[0:-1]
_dy, _dy[0:-1]
fig, ax = plt.subplots();
ax.set_yscale('log');
ax.set_xscale('log');
ax.errorbar(_x[0:-1], _y[0:-1], yerr=_dy[0:-1])
plt.show()
last=-1
popt, pcov = curve_fit(power_law_fit_model, _x[0:last], _y[0:last],sigma=np.power(_y[0:last],2),p0=[2.9,1.11],bounds=(0,np.inf))
# NOTE(review): the 1/3 power looks copied from the 3-D study -- confirm.
popt, np.sqrt(pcov), np.power(popt[0] ,1/3)
_fx=power_law_fit_model(_x,popt[0],popt[1])
fig, ax = plt.subplots();
ax.plot(_x[0:last], _fx[0:last], color="red", linewidth=2.0, linestyle="--");
ax.errorbar(_x[0:last], _y[0:last], yerr=2/np.power(_x[0:last],2));
ax.set_title("Power Law best fit");
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_ylabel('spread')
ax.set_xlabel('total points in all dimension')
#plt.ylabel('some numbers')
plt.show()
def f2d(x,y):
    """2-D integrand: the 1-D sqrt/sine burst in x, modulated by sin(y)."""
    shifted = x + 1.0 / np.power(np.pi, 2.0)
    return np.sin(y) * np.sqrt(shifted) * np.sin(1.0 / shifted)
# Contour plot of f2d over (0.01, 1]^2.
x = np.linspace(0.01,1,100);
y = np.linspace(0.01,1,100);
X, Y = np.meshgrid(x, y)
z = f2d(X,Y);
fig, ax = plt.subplots()
cs=ax.contourf(X, Y, z,cmap=cm.PuBu_r)#, color="red", linewidth=2.0, linestyle="--")
plt.title("A sinusoidally modulated square root")
#plt.ylabel('some numbers')
cbar = fig.colorbar(cs)
plt.show()
np.size(Y)  # 100 x 100 grid -> 10000 points
def quadIntegral2d(nP):
    """2-D quadrature of f2d on [0.01, 1]^2: mean of f2d over an nP x nP
    mesh grid, fully vectorised (no Python loops)."""
    axis = np.linspace(0.01, 1, nP)
    grid_x, grid_y = np.meshgrid(axis, axis)
    values = f2d(grid_x, grid_y)
    return np.sum(values) / np.size(values)
quadIntegral2d(100)
# Grid sizes (points per dimension) for the 2-D convergence study.
_x = [ 30, 100, 300, 1000, 3000, 10000 ]
QuadIntegrals=np.array([ quadIntegral2d(k) for k in _x ])
# BUGFIX: the next line was a bare prose sentence (SyntaxError); kept as a comment.
# 10K in each dimension makes already 10^8 points => 3 Gb RAM occupation
_y = np.abs(QuadIntegrals - QuadIntegrals[-1])  # spread vs densest grid
#_dy = QuadIntegrals[:,1]
_y, _y[0:-1]
fig, ax = plt.subplots();
ax.set_yscale('log')
ax.set_xscale('log')
# Assumed ~1/n^2 error bars -- TODO confirm prefactor.
ax.errorbar(_x[0:-1], _y[0:-1], yerr=2/np.power(_x[0:-1],2))
plt.show()
last=-1  # exclude the identically-zero final point from the fit
popt, pcov = curve_fit(power_law_fit_model, _x[0:last], _y[0:last],sigma=np.power(_y[0:last],2),p0=[2.9,1.11],bounds=(0,np.inf))
# The 1/2 power presumably converts the per-axis exponent to one in the total
# number of points for 2 dimensions -- TODO confirm.
popt, np.sqrt(pcov), np.power(popt[0], 1/2)
_fx=power_law_fit_model(_x,popt[0],popt[1])
fig, ax = plt.subplots();
ax.plot(_x[0:last], _fx[0:last], color="red", linewidth=2.0, linestyle="--");
ax.errorbar(_x[0:last], _y[0:last], yerr=2/np.power(_x[0:last],2));
ax.set_title("Power Law best fit");
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_ylabel('spread')
ax.set_xlabel('points in each dimension')
#plt.ylabel('some numbers')
plt.show()
def f3d(x,y,z):
    """3-D integrand: the f2d-style x/y dependence with an additional
    cos(z / (1/pi + y)) modulation in the third coordinate."""
    shifted = x + 1.0 / np.power(np.pi, 2.0)
    return np.cos(z / (1 / np.pi + y)) * np.sin(y) * np.sqrt(shifted) * np.sin(1.0 / shifted)
# Contour plot of a z = 0.3 slice of f3d over the unit (x, y) square.
x = np.linspace(0.01,1,100);
y = np.linspace(0.01,1,100);
z=0.3
X, Y = np.meshgrid(x, y)
t = f3d(X,Y,z);
fig, ax = plt.subplots()
cs=ax.contourf(X, Y, t,cmap=cm.PuBu_r)#, color="red", linewidth=2.0, linestyle="--")
plt.title("A sinusoidally modulated square root")
#plt.ylabel('some numbers')
cbar = fig.colorbar(cs)
plt.show()
np.size(Y)  # 100 x 100 grid
def quadIntegral3d(nP):
    """3-D quadrature of f3d on [0.01, 1]^3: mean of f3d over an nP^3 mesh
    grid, fully vectorised."""
    axis = np.linspace(0.01, 1, nP)
    gx, gy, gz = np.meshgrid(axis, axis, axis)
    values = f3d(gx, gy, gz)
    return np.sum(values) / np.size(values)
# Spot-check the 3-D quadrature at two grid densities.
quadIntegral3d(10)
quadIntegral3d(100)
_x = [ 30, 60, 100, 200, 300, 500 ]#, 3000, 10000 ]
QuadIntegrals=np.array([ quadIntegral3d(k) for k in _x ])
# BUGFIX: the next line was a bare prose sentence (SyntaxError); kept as a comment.
# 300 in each dimension makes already 10^9 points >> 3 Gb RAM occupation
_y = np.abs(QuadIntegrals - QuadIntegrals[-1])  # spread vs densest grid
#_dy = QuadIntegrals[:,1]
_y, _y[0:-1]
fig, ax = plt.subplots();
ax.set_yscale('log')
ax.set_xscale('log')
ax.errorbar(_x[0:-1], _y[0:-1], yerr=2/np.power(_x[0:-1],2))
plt.show()
last=-1  # exclude the identically-zero final point from the fit
popt, pcov = curve_fit(power_law_fit_model, _x[0:last], _y[0:last],sigma=np.power(_y[0:last],2),p0=[2.9,1.11],bounds=(0,np.inf))
# The 1/3 power presumably converts the per-axis exponent to one in the total
# number of points for 3 dimensions -- TODO confirm.
popt, np.sqrt(pcov), np.power(popt[0] ,1/3)
_fx=power_law_fit_model(_x,popt[0],popt[1])
fig, ax = plt.subplots();
ax.plot(_x[0:last], _fx[0:last], color="red", linewidth=2.0, linestyle="--");
ax.errorbar(_x[0:last], _y[0:last], yerr=2/np.power(_x[0:last],2));
ax.set_title("Power Law best fit");
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_ylabel('spread')
ax.set_xlabel('points in each dimension')
#plt.ylabel('some numbers')
plt.show()
np.log10(500**3)  # order of magnitude of the largest 3-D grid above
# Peek at the array shape np.random.rand(nP, dims) produces.
nP=10
dims=3
_values=np.random.rand(nP,dims)
_values
def mcIntegralFast3d(nP,dims=3):
    """Monte Carlo estimate of the 3-D integral: mean of f3d at nP uniform
    random points in [0, 1)^dims; the `dims` rows are unpacked as the
    positional arguments of f3d."""
    coords = np.random.rand(dims, nP)
    values = f3d(*coords)
    return np.sum(values) / np.size(values)
mcIntegralFast3d(1000)
def averageMCintegral(nP,trials=100):
    # NOTE(review): redefines the 1-D helper of the same name above, now
    # delegating to mcIntegralFast3d; every later call gets this 3-D version.
    _arr=np.array([ mcIntegralFast3d(nP) for k in range(trials) ])
    return np.sum(_arr )/trials, np.sqrt(np.var(_arr)) #returns the mean and the std-deviation
averageMCintegral(1000)
averageMCintegral(3000)
# Total sample counts chosen as cubes of per-axis counts.
_x = np.power(np.array([ 10, 30, 50, 100, 150, 200 ]),3)
#_x = [10,30,50,100,300,1000]
mcIntegrals=np.array([ averageMCintegral(k) for k in _x ])
_y = np.abs(mcIntegrals[:,0] - mcIntegrals[:,0][-1])  # spread vs densest run
_dy = mcIntegrals[:,1]
_y, _y[0:-1]
_dy, _dy[0:-1]
fig, ax = plt.subplots();
ax.set_yscale('log');
ax.set_xscale('log');
ax.errorbar(_x[0:-1], _y[0:-1], yerr=_dy[0:-1])
plt.show()
last=-1  # exclude the identically-zero final point from the fit
popt, pcov = curve_fit(power_law_fit_model, _x[0:last], _y[0:last],sigma=np.power(_y[0:last],2),p0=[2.9,1.11],bounds=(0,np.inf))
# 1/3 power: per-axis vs total-points exponent in 3-D -- TODO confirm intent.
popt, np.sqrt(pcov), np.power(popt[0] ,1/3)
_fx=power_law_fit_model(_x,popt[0],popt[1])
fig, ax = plt.subplots();
ax.plot(_x[0:last], _fx[0:last], color="red", linewidth=2.0, linestyle="--");
ax.errorbar(_x[0:last], _y[0:last], yerr=2/np.power(_x[0:last],2));
ax.set_title("Power Law best fit");
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_ylabel('spread')
ax.set_xlabel('total points in all dimension')
#plt.ylabel('some numbers')
plt.show()
np.log10(150**3)
# BUGFIX: np.product is deprecated and was removed in NumPy 2.0 -- use np.prod.
np.prod( [1,2,5] )
def fNd(*argv):
    """N-dimensional integrand.

    The first three positional arguments are the x, y, z of f3d; any extra
    arguments contribute a damping factor: the product of their square roots
    is subtracted from 1 and multiplies the 3-D integrand. With exactly three
    arguments this reduces to f3d.

    Raises IndexError if fewer than three arguments are supplied.
    """
    x = argv[0]
    y = argv[1]
    z = argv[2]
    fED = 0
    if len(argv) > 3:
        rED = np.array([np.sqrt(argv[M + 3]) for M in range(len(argv) - 3)])
        # BUGFIX: np.product is deprecated and was removed in NumPy 2.0.
        fED = np.prod(rED)
    return (1 - fED) \
        * np.cos(z / (1 / np.pi + y)) * np.sin(y) * np.sqrt(x + 1.0 / np.power(np.pi, 2.0)) * np.sin(1.0 / (x + 1.0 / np.power(np.pi, 2.0)))
# Sanity checks: with exactly three arguments fNd must agree with f3d.
fNd(1,1,1,0.3,0.5,0.6)
fNd(1,1,1)
f3d(1,1,1)
# Contour of a slice of the 5-argument fNd at fixed z, u, v.
x = np.linspace(0.01,1,100);
y = np.linspace(0.01,1,100);
z=0.3
# NOTE(review): `u=0.9` shadows the `import utils as u` alias from the top of
# the file; no utils call occurs after this point, but beware when extending.
u=0.9
v=0.1
X, Y = np.meshgrid(x, y)
t = fNd(X,Y,z,u,v);
fig, ax = plt.subplots()
cs=ax.contourf(X, Y, t,cmap=cm.PuBu_r)#, color="red", linewidth=2.0, linestyle="--")
plt.title("A sinusoidally modulated square root")
#plt.ylabel('some numbers')
cbar = fig.colorbar(cs)
plt.show()
np.size(Y)
def quadIntegralNd(nP,ndims=3):
    """N-dimensional quadrature of fNd on [0.01, 1]^ndims.

    Averages fNd over an nP**ndims mesh grid. Prints a warning first when the
    grid would exceed ~10^7 points, since the meshgrid arrays can exhaust RAM.
    """
    nPoints=nP**ndims;
    # Typo fixed in the warning message ("bilion" -> "billion").
    if nPoints > 10**7-1: print("more than 0.01 billion points, may kill kernel")
    values=np.array( [ np.linspace(0.01,1,nP) for k in range(ndims) ]);
    _A = np.meshgrid( *values )
    _t = fNd(*_A);
    return np.sum(_t)/np.size(_t)
# --- 5-D quadrature convergence study ----------------------------------------
quadIntegral3d(30)
quadIntegralNd(30,ndims=5)  # cross-check the N-D version
_x = [ 10, 15, 20, 25, 30, 35, 40 ]#, 3000, 10000 ]
QuadIntegrals=np.array([ quadIntegralNd(k,ndims=5) for k in _x ])
_y = np.abs(QuadIntegrals - QuadIntegrals[-1])  # spread vs densest grid
#_dy = QuadIntegrals[:,1]
_y, _y[0:-1]
fig, ax = plt.subplots();
ax.set_yscale('log')
ax.set_xscale('log')
ax.errorbar(_x[0:-1], _y[0:-1], yerr=2/np.power(_x[0:-1],2))
plt.show()
last=-1
popt, pcov = curve_fit(power_law_fit_model, _x[0:last], _y[0:last],sigma=np.power(_y[0:last],2),p0=[2.9,1.11],bounds=(0,np.inf))
# NOTE(review): the 1/3 power is kept from the 3-D study although this run is
# 5-D -- 1/5 may be intended; confirm.
popt, np.sqrt(pcov), np.power(popt[0] ,1/3)
_fx=power_law_fit_model(_x,popt[0],popt[1])
fig, ax = plt.subplots();
ax.plot(_x[0:last], _fx[0:last], color="red", linewidth=2.0, linestyle="--");
ax.errorbar(_x[0:last], _y[0:last], yerr=2/np.power(_x[0:last],2));
ax.set_title("Power Law best fit");
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_ylabel('spread')
ax.set_xlabel('points in each dimension')
#plt.ylabel('some numbers')
plt.show()
np.log10(40**5)  # largest 5-D grid: ~10^8 points
def mcIntegralFastNd(nP,dims=3):
    """Monte Carlo estimate of the dims-dimensional integral of fNd: mean of
    fNd at nP uniform random points in [0, 1)^dims."""
    coords = np.random.rand(dims, nP)
    values = fNd(*coords)
    return np.sum(values) / np.size(values)
# --- 5-D Monte Carlo convergence study ---------------------------------------
mcIntegralFastNd(1000,dims=3)
mcIntegralFastNd(10000,dims=7)
def averageMCintegralNd(nP,trials=100,dims=3):
    # Mean and std-deviation of `trials` independent N-D MC estimates.
    _arr=np.array([ mcIntegralFastNd(nP,dims=dims) for k in range(trials) ])
    return np.sum(_arr )/trials, np.sqrt(np.var(_arr)) #returns the mean and the std-deviation
averageMCintegralNd(10**6,dims=5)
averageMCintegral(3000)
# Total point counts: fifth powers scaled down by 100 to stay tractable.
_x = np.power(np.array( [ 10, 15, 20, 25, 30, 35, 40 ] ),5)/100
print(np.log10(_x))
#_x = [10,30,50,100,300,1000]
mcIntegrals=np.array([ averageMCintegralNd(int(k),dims=5) for k in _x ])
_y = np.abs(mcIntegrals[:,0] - mcIntegrals[:,0][-1])  # spread vs densest run
_dy = mcIntegrals[:,1]
_y, _y[0:-1]
_dy, _dy[0:-1]
fig, ax = plt.subplots();
ax.set_yscale('log');
ax.set_xscale('log');
ax.errorbar(_x[0:-1], _y[0:-1], yerr=_dy[0:-1])
plt.show()
last=-1  # exclude the identically-zero final point from the fit
popt, pcov = curve_fit(power_law_fit_model, _x[0:last], _y[0:last],sigma=np.power(_y[0:last],2),p0=[2.9,1.11],bounds=(0,np.inf))
popt, np.sqrt(pcov), np.power(popt[0] ,1/3)
_fx=power_law_fit_model(_x,popt[0],popt[1])
np.sqrt(0.347)  # scratch evaluation -- presumably checking a fitted exponent
fig, ax = plt.subplots();
ax.plot(_x[0:last], _fx[0:last], color="red", linewidth=2.0, linestyle="--");
ax.errorbar(_x[0:last], _y[0:last], yerr=2/np.power(_x[0:last],2));
ax.set_title("Power Law best fit");
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_ylabel('spread')
ax.set_xlabel('total points in all dimension')
#plt.ylabel('some numbers')
plt.show()
np.log10(150**3)
def MCintegralsNd(nP,trials=100,dims=3):
    """Collect `trials` raw mcIntegralFastNd estimates as an array
    (no averaging -- used for the violin plots below)."""
    return np.array([mcIntegralFastNd(nP, dims=dims) for _ in range(trials)])
# Distribution of raw MC estimates at each sample size, shown as violins.
_x = np.power(np.array( [ 10, 15, 20, 25, 30, 35, 40 ] ),5)/100
print(np.log10(_x))
mc_int_results=np.array([ MCintegralsNd(int(k),dims=5) for k in _x ])
fig, ax = plt.subplots();
#ax.plot(_x[0:last], _fx[0:last], color="red", linewidth=2.0, linestyle="--");
ax.violinplot(mc_int_results.T);  # transpose: one violin per sample size
ax.set_title("MC integral");
#ax.set_yscale('log')
#ax.set_xscale('log')
ax.set_ylabel('result')
ax.set_xlabel('total points in all dimension')
#plt.ylabel('some numbers')
plt.show()
# Same violins, recentred on the mean of the largest-sample batch.
y=np.array(mc_int_results)-np.mean(mc_int_results[-1])
fig, ax = plt.subplots();
#ax.plot(_x[0:last], _fx[0:last], color="red", linewidth=2.0, linestyle="--");
ax.violinplot(y.T);
ax.set_title("MC integral - Best Estimate");
#ax.set_yscale('log')
#ax.set_xscale('log')
ax.set_ylabel('result')
ax.set_xlabel('total points in all dimension')
#plt.ylabel('some numbers')
plt.show()