import numpy as np
import matplotlib.pyplot as plt
from matplotlib import interactive, cm
from time import time

interactive(True)

'''
Find the planetary distance law of the form d(n) = x^n
where d is in AU, and n is the number -2,-1,0,...,6 corresponding to:
Mercury, Venus, Earth, Mars, Ceres, Jupiter, Saturn, Uranus, Neptune.

In wikipedia, find the observed mean distances from the sun
(semi-major axes = a) in astronomical units (AU) for all these objects,
and place them in a numpy array of length 9, called d_obs.
Place theoretical predictions in a numpy array called d, of the same
length. If you keep the object numbers in a numpy array called n, then
d = x**n will calculate all 9 predicted distances for a given value of
parameter x.

Let E be the standard deviation of relative error of predictions vs.
observations. Write a function for E(x), calculating it as the square
root of the arithmetic average of relative deviations:
    E = np.sum( ((d - d_obs)/d_obs)**2 )/len(d)
where len(d) will return number 9. Again, you need to minimize the sum
of squares of the relative, not of the absolute deviations of theory
from observations, which would be coded without division by d_obs.

For some x between x=1.5 and x=2, there is a best fit to observational
data, or a minimum error E. Compute that x as a root finding problem,
using the best method you know (using the smallest number of function
evaluations). Function f(x) which satisfies f(x) = 0 at the optimum x,
is the slope of the previously discussed standard deviation E(x).
To the left of the minimum of E(x) the slope will be negative, and to
the right positive. If you decide to use Newton's method, notice that
you can calculate the f'(x) as the second derivative of E over x:
    f = dE/dx,   f' = d^2(E)/dx^2
You can evaluate f' using three-point stencil, and f using two-point
stencil, both derived from three computed nearby values of E(x-dx),
E(x), and E(x+dx), where dx <<< 1. Methods other than Newton's will
require one less, i.e. two instead of three, model evaluations to
provide E(x-dx) and E(x+dx).
'''

# find x in this range
xL, xR = 1.7, 1.8
dx = 1e-6          # default stencil half-width (rebound to 1e-4 by the driver)


def err(x, mode):
    """Merit function E(x) for the candidate power law d(n) = x**n.

    Parameters:
        x    -- base of the power law being tested
        mode -- 0: return E only; anything else: return (E, d_obs, d)

    Returns E, or the tuple (E, d_obs, d).

    NOTE(review): divides by len(n)-1 (sample-variance convention) while
    the docstring above says /len(d), and the final sqrt (**0.5) is left
    commented out.  Neither choice moves the minimizing x, so the
    original behavior is kept.
    """
    n = np.array([-2, -1, 0, 1, 2, 3, 4, 5, 6])
    d_obs = np.array([0.387, 0.723, 1, 1.524, 2.769,
                      5.203, 9.539, 19.218, 30.11])
    d = x**n
    std = (np.sum(((d - d_obs) / d_obs)**2) / (len(n) - 1))  # **0.5
    if mode == 0:
        return std
    return std, d_obs, d


def f_and_dfdx(x, dx):
    """Finite-difference derivatives of E at x from three evaluations.

    Returns (f, dfdx, f/dfdx) where
        f    = dE/dx      (two-point central stencil)
        dfdx = d^2E/dx^2  (three-point stencil)
    and f/dfdx is the Newton step for solving f(x) = 0.
    """
    e_m, e_0, e_p = err(x - dx, 0), err(x, 0), err(x + dx, 0)
    f = (e_p - e_m) / (2 * dx)
    dfdx = (e_p - 2 * e_0 + e_m) / (dx * dx)
    return f, dfdx, f / dfdx


def bisect(a, b):
    """Bisection root finding for f = dE/dx on [a, b].

    Returns (root, count); count is negated if an exact zero was hit,
    and (-99., count) signals that f does not change sign on [a, b].
    Plots log10 of the bracket error vs. iteration as it goes.
    """
    ya, dy, ydy = f_and_dfdx(a, dx)
    yb, dy, ydy = f_and_dfdx(b, dx)
    count = 0
    if ya * yb > 0:
        print(' same signs of function at both limits')
        return (-99., count)     # fixed: was undefined name 'cout'
    while b - a > 1e-5:
        x = (a + b) / 2
        y, dy, y_dy = f_and_dfdx(x, dx)
        count += 1
        if abs(y) == 0.:
            return (x, -count)
        # keep the half-interval on which f changes sign
        if y * ya > 0:
            ya = y
            a = x
        else:
            yb = y
            b = x
        print(count, 'x', x, ' y', y)
        if b - a < 1e-14:
            logerr = -16.5       # floor for the log plot
        else:
            logerr = np.log10(abs(x - (a + b) / 2))
        plt.scatter(count, logerr, s=13, color=(0, .3, 1))
    return ((a + b) / 2, count)


'''
Secant method of root finding.
Numerical f and f' evaluation.
'''


def secant(a, b):
    """Secant iteration for f = dE/dx starting from the pair (a, b).

    Returns (root, count).  Stops when successive iterates agree to
    1e-12, after 11 iterations, or on stagnation below 1e-16.
    Plots log10|f| vs. iteration as it goes.
    """
    tol = 1e-12
    xp = a                                     # previous point
    f, dfdx, f_dfdx = f_and_dfdx(xp, dx)       # f at previous point
    x = b                                      # current point
    count = 0
    while abs(x - xp) > tol:
        # general secant method: slope from the last two f values
        fp = f                                 # previous f
        f, dfdx, f_dfdx = f_and_dfdx(x, dx)    # current f etc.
        f_prime = (f - fp) / (x - xp)          # secant estimate of f'
        rhs = x - f / f_prime                  # next x
        xp = x
        x = rhs
        count += 1
        print(count, 'x', x, ' x-xp ', x - xp)
        if abs(f) == 0:
            logerr = -16.5
        else:
            logerr = np.log10(abs(f))
        plt.scatter(count, logerr, s=24, color=(.5, .7, .2))
        if count > 10 or abs(x - xp) < 1e-16:
            break
    return (x, count)


'''
Newton's method for root finding.
f' either known analytically, or numerically:
    0 == f(x0) + f'(x0) (x-x0)  ==>  x = x0 - f(x0)/f'(x0)
'''


def newton(a, b):
    """Newton iteration for f = dE/dx, started from the midpoint of [a, b].

    Returns (root, count); count is negated if an exact zero was hit,
    and (-99., count) signals that f does not change sign on [a, b].
    Plots log10|x - x_prev| vs. iteration as it goes.
    """
    dx_ = 1e-5 * (b - a)
    ya, dfa, fdf = f_and_dfdx(a, dx_)
    yb, dfb, fdf = f_and_dfdx(b, dx_)
    count = 0
    if dfa * dfb < 0:
        print('different signs of slope at two ends')
        print(dfa, dfb)
    if ya * yb > 0:
        print('same signs of function at both limits')
        return (-99., count)     # fixed: was undefined name 'cout'
    x = (a + b) / 2              # zero-th guess
    x_prev = b
    while count < 13:
        count += 1
        delta = 1e-6             # a guess about the best stencil width
        y, dydx, y_dydx = f_and_dfdx(x, delta)
        if abs(y) < 2e-16:
            return (x, -count)
        if dydx == 0.:
            # fixed: original fell through and applied the step f/0;
            # nudge x off the flat spot and retry the iteration
            x += 1e-4
            continue
        x_prev = x
        x = x - y_dydx           # Newton update
        print('%d x %13.9f delta %7.4e y %12.9e dydx %7.3e '
              % (count, x, delta, y, dydx))
        if abs(x - x_prev) < 1e-15:
            logerr = -16.5
        else:
            logerr = np.log10(abs(x - x_prev))
        plt.scatter(count, logerr, s=22, color=(1, 0, 0))
        if abs(x - x_prev) < 3e-16:
            break
    return (x, count)


# variable order polynomial fitting method
#
# Lagrange interpolating polynomial
# N points given as x_j, y_j, j = 0..N-1.
# P(x) = Sigma_j=0^N-1 f(x_j) Prod_{k!=j} (x-x_k)/(x_j-x_k)
# input:  module-level arrays x, y of length N (all x's must differ)
# output: value P(xx)
# NOTE(review): both Lagrange routines read module-level x and y, which
# the driver below never defines as arrays — confirm intended data
# source before calling them.
def Lagrange_poly(N, xx):
    """Evaluate the Lagrange interpolating polynomial through the first
    N module-level points (x[j], y[j]) at xx."""
    p = xx * 0
    for j in range(N):
        val = y[j]
        for k in range(N):
            if j != k:
                val *= (xx - x[k]) / (x[j] - x[k])
        p += val
    return p


# derivative of Lagrange interpolating polynomial (product rule):
# P'(x) = Sum_j y_j Sum_{l!=j} 1/(x_j-x_l) Prod_{k!=j,l} (x-x_k)/(x_j-x_k)
# input:  module-level arrays x, y of length N (all x's must differ)
# output: value P'(xx)
def Lagrange_deriv(N, xx):
    """Evaluate the derivative of the Lagrange interpolating polynomial
    through the first N module-level points (x[j], y[j]) at xx.

    Fixed: the original body was non-functional (used the loop variable
    k before its loop, an undefined dval, and a nonsense index
    expression); replaced with the standard product-rule expansion.
    """
    dp = xx * 0
    for j in range(N):
        for l in range(N):
            if l == j:
                continue
            # term where the (x - x_l) factor has been differentiated
            term = y[j] / (x[j] - x[l])
            for k in range(N):
                if k != j and k != l:
                    term *= (xx - x[k]) / (x[j] - x[k])
            dp += term
    return dp


# -----------------------------------------------------------
# main (driver) program
# fit power-law planetary distance law to Solar System data
if __name__ == "__main__":
    plt.figure(dpi=140)

    # quick look at the function, very telling!
    x_ = np.zeros((100), dtype=float)
    y_ = np.zeros((100), dtype=float)
    d_ = np.zeros((100), dtype=float)
    dx = 1e-4          # rebinds the module-level dx used by bisect/secant
    for i in range(100):
        x = 1.4 + i / 100 * 0.6
        y, dy, ydy = f_and_dfdx(x, dx)
        x_[i] = x
        y_[i] = y
        d_[i] = dy
        # fixed: original printed x_[i], y_[i] before assigning them
        print(i, 'x', np.around(x, 4), 'y', y, ' dy', np.around(dy, 4))
    plt.plot(x_, y_)
    plt.plot(x_, d_)
    plt.xlabel('X')
    plt.ylabel('f')
    plt.title(' testing f,df/dx')
    plt.grid()
    plt.show()
    input(' ok? ')

    plt.figure(dpi=136)

    # test bisection
    x0, n = bisect(xL, xR)
    plt.title("Minimum via f=dE/dx=0. Planetary distance law.")
    plt.xlabel('iteration number')
    plt.text(10.1, -6.9, 'blue = bisection')
    plt.text(10.1, -7.9, 'green = secant')
    plt.text(10.1, -8.9, "red = Newton, f' numer.")
    # plt.text(10.1,-14.9,"black = Newton, f' anal.")
    plt.ylabel('log$_{10}$ |x - x$_p$|')
    plt.grid()
    plt.show()
    input('bisected. go on? ')

    # test secant
    t0 = time()
    x0, n = secant(xL, xR)
    plt.show()
    print('x0 ', x0, ' count=', n)
    t1 = time() - t0
    print('t=', t1)
    input("secant method ok?")

    # Newton's method, any function
    x0, n = newton(xL, xR)
    print(' x0 = ', x0, ' count=', n)
    plt.show()
    input(" newton done. go on? ")

    # use the fit to plot results
    plt.figure(dpi=130)
    plt.title('Old and newer forms of Titius rule')
    plt.xlabel('n')
    plt.ylabel(' distance')
    n = np.array([-2, -1, 0, 1, 2, 3, 4, 5, 6])
    d_obs = np.array([0.387, 0.723, 1, 1.524, 2.769,
                      5.203, 9.539, 19.218, 30.11])
    # classic Titius-Bode: first entry uses 2**-50 ~ 0 so d ~ 0.4 (Mercury)
    n0 = np.array([-50, 0, 1, 2, 3, 4, 5, 6, 7])
    d_ori = 0.4 + 0.3 * 2.**n0
    d = x0**n
    plt.semilogy(n, d_obs)
    plt.scatter(n, d_obs)
    plt.semilogy(n, d, color=(1, 0, 0), lw=0.7)
    plt.scatter(n, d, color=(1, 0, 0), lw=0.7)
    plt.semilogy(n, d_ori, color=(.3, .8, 0), lw=0.9)
    plt.scatter(n, d_ori, color=(.3, .8, 0), s=12, lw=0.9)
    plt.text(-1.8, 21, 'Johann Daniel Titius (1729-1796) $d_n=0.4+0.3*2^k$')
    plt.text(-1.8, 15, 'Mary Blagg (1858-1944) $d_n=1.728^n$')
    plt.text(-1.8, 10, 'Our fit (2019) $d_n=1.7506^n$')
    plt.show()
    input(" done. go on? ")

    input(' end')