## Code written by Robert Richardson for Stat 624
## Maximum likelihood estimation for the gamma distribution (shape a, rate b),
## via Newton-Raphson and scipy.optimize.minimize

import numpy as np
from scipy.special import loggamma, digamma, polygamma
from numpy.random import gamma
from numpy.linalg import solve
from scipy.optimize import minimize

# Simulate 100 draws from a Gamma(shape=2, scale=1/3); numpy parameterizes
# by scale, so in the rate parameterization used below the truth is (a,b) = (2,3)
dat = gamma(2, 1/3, 100)

def tr(x):
  # trigamma function, the second derivative of log-gamma
  return polygamma(1, x)

# Sufficient statistics: sample size, sum, and sum of logs
n = dat.size
r = np.sum(dat)
lr = np.sum(np.log(dat))
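
# Method-of-moments estimates as a quick sanity check (an added aside, not
# part of the original script): for a Gamma(a, rate=b), E[X] = a/b and
# Var[X] = a/b^2, so a_mom = mean^2/var and b_mom = mean/var.
m = np.mean(dat)
v = np.var(dat)
print("Method-of-moments estimates: (%4.3f,%4.3f)" % (m*m/v, m/v))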

def log_lik(theta):
  # Gamma log-likelihood with shape a and rate b:
  # l(a,b) = n*a*log(b) - n*log(Gamma(a)) + (a-1)*sum(log x) - b*sum(x)
  a = theta[0]
  b = theta[1]
  return n*a*np.log(b) - n*loggamma(a) + (a-1)*lr - b*r

def gradient(theta):
  # Score vector: partial derivatives of log_lik with respect to a and b
  a = theta[0]
  b = theta[1]
  return np.array([n*np.log(b) - n*digamma(a) + lr, n*a/b - r])
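
# Optional 1-D reduction (an added sketch, not in the original script):
# setting the second score component n*a/b - r to zero gives b_hat(a) = n*a/r,
# so the MLE of a could also be found by maximizing this profile in one dimension.
def profile_log_lik(a):
  return log_lik(np.array([a, n*a/r]))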

def hessian(theta):
  # Matrix of second derivatives of log_lik
  a = theta[0]
  b = theta[1]
  return np.array([[-n*tr(a), n/b], [n/b, -n*a/(b*b)]])
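
# Derivative check (an added check, not in the original script): compare the
# analytic gradient to a finite-difference approximation; check_grad returns
# the norm of the difference, which should be small relative to the gradient.
from scipy.optimize import check_grad
print("Gradient check at (1,2): %g" % check_grad(log_lik, gradient, np.array([1.0, 2.0])))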

# Newton-Raphson: step by the solution of hessian(theta) @ step = gradient(theta)
# until the norm of the gradient falls below epsilon
theta = np.array([1.0, 2.0])
epsilon = 1e-4
count = 0
while np.sqrt(np.sum(gradient(theta)**2)) > epsilon:
  theta = theta - solve(hessian(theta), gradient(theta))
  count += 1

print("In %d steps the MLE estimate is (%4.3f,%4.3f)" % (count,theta[0],theta[1]))
 
# minimize() minimizes, so hand it the negated log-likelihood and gradient
def neg_log_lik(theta):
  return -log_lik(theta)

def neg_gradient(theta):
  return -gradient(theta)

# Same problem via quasi-Newton (BFGS, the minimize() default for an
# unconstrained problem), started from the same initial point
theta0 = np.array([1.0, 2.0])
opt1 = minimize(neg_log_lik, theta0, jac=neg_gradient, options={'disp': True})
print("minimize() estimate:", opt1.x)
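
# The quasi-Newton answer should agree with Newton-Raphson to within the
# optimizer's tolerance (an added check; theta still holds the Newton-Raphson
# solution, since minimize() copies its starting point theta0).
print("Difference from Newton-Raphson:", opt1.x - theta)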