## Python Code for nonlinear optimization

import numpy as np
import matplotlib.pyplot as plt
from numpy.linalg import solve
from scipy.optimize import minimize
import random
from sklearn.linear_model import LinearRegression

# Simulate regression data: y = 1.2 + 0.5*x + N(0, 0.4) noise, then inject
# a single outlier so the loss functions below can be compared.
# BUG FIX: the original called random.seed(10), which seeds the *stdlib* RNG;
# every draw here uses np.random, so the data was not reproducible.
np.random.seed(10)
x = np.random.normal(0, 1, 100)
y = 1.2 + .5*x + np.random.normal(0, .4, 100)
plt.plot(x, y, 'bo')
plt.show()
plt.close()
y[1] = -2  # inject one outlier into the response
plt.plot(x, y, 'bo')
plt.show()

# Baseline fit with scikit-learn's closed-form linear regression.
mod = LinearRegression().fit(x.reshape(-1, 1), y)
beta_est = np.append(mod.intercept_, mod.coef_)
print(beta_est)  # [intercept, slope]
predictions = mod.predict(x.reshape(-1, 1))
plt.plot(x, predictions, 'r-')
plt.show()

# Optimize: ordinary least squares recovered by numerical minimization.
def target(beta):
  """Sum of squared residuals for the line y = beta[0] + beta[1]*x.

  Reads the module-level arrays x and y; beta is [intercept, slope].
  """
  return np.sum((y - beta[0] - beta[1]*x)**2)

inits = np.zeros(2)
opt1 = minimize(target, inits, method="BFGS")
# BUG FIX: the bare expression `opt1.x` was a no-op when run as a script
# (it only displays in a REPL/notebook); print the estimate explicitly.
print(opt1.x)

# Minimize absolute residuals (L1 loss) — less sensitive to the injected outlier.
def target(beta):
  """Sum of absolute residuals for the line y = beta[0] + beta[1]*x.

  Reads the module-level arrays x and y; beta is [intercept, slope].
  """
  return np.sum(np.abs(y - beta[0] - beta[1]*x))

inits = np.zeros(2)
# NOTE(review): the L1 objective is non-differentiable at zero residuals, so
# gradient-based BFGS can stall near the optimum; a derivative-free method
# (e.g. method="Nelder-Mead") is usually safer here — confirm before changing.
opt2 = minimize(target, inits, method="BFGS")
# BUG FIX: bare `opt2.x` was a no-op in a script; print the estimate.
print(opt2.x)

pred_abs = opt2.x[0] + opt2.x[1]*x
plt.plot(x, pred_abs, 'g-')
plt.show()

### Nonlinear Example: y = -3 + sin(1.5*x) + x/4 + N(0, 0.4) noise
# BUG FIX: the original called random.seed(10), which seeds the *stdlib* RNG;
# the draws below use np.random, so this example was not reproducible.
np.random.seed(10)
x = np.random.uniform(0, 6, 100)
y = -3 + np.sin(1.5*x) + x/4 + np.random.normal(0, .4, 100)
plt.plot(x, y, 'bo')
plt.show()

def target(beta):
  """Sum of squared residuals for y = beta[0] + sin(beta[1]*x) + beta[2]*x.

  Reads the module-level arrays x and y; beta is [intercept, frequency, slope].
  """
  return np.sum((y - beta[0] - np.sin(beta[1]*x) - beta[2]*x)**2)

# Start beta[1] near the true frequency (1.5): sine-frequency objectives are
# multimodal, and a poor start converges to a wrong local minimum.
opt = minimize(target, [0, .75, 0])
beta_est = opt.x
newx = np.linspace(0, 6, 200)
preds = beta_est[0] + np.sin(beta_est[1]*newx) + beta_est[2]*newx
plt.plot(newx, preds, 'r-')
plt.show()
