Module uci_apc.controller

Source code
from simglucose.controller.base import Controller, Action
import pkg_resources
import pandas as pd
import logging

logger = logging.getLogger(__name__)

# Patient Data
CONTROL_QUEST = pkg_resources.resource_filename(
    'simglucose', 'params/Quest.csv')
PATIENT_PARA_FILE = pkg_resources.resource_filename(
    'simglucose', 'params/vpatient_params.csv')

class BlankController(Controller):
    def __init__(self, init_state):
        self.init_state = init_state
        self.state = init_state
    def policy(self, observation, reward, done, **info):
        self.state = observation
        action = Action(basal=.03, bolus=0)  # constant basal, no bolus
        return action
    def reset(self):
        self.state = self.init_state

class PIDController(Controller):
    def __init__(self, controller_params, name):
        # patient params, for setting basal
        self.quest = pd.read_csv(CONTROL_QUEST)
        self.patient_params = pd.read_csv(
            PATIENT_PARA_FILE)
        
        self.target = controller_params[0]       # target BG (mg/dL)
        self.lower_bound = controller_params[1]  # lower BG bound (not used in this controller)
        self.tau_c = controller_params[2]        # desired closed-loop time constant

        # previous CGM values used for the derivative term start at the target BG
        self.prev1 = self.target
        self.prev2 = self.target

        # basal and PID gains are set patient-to-patient
        if any(self.quest.Name.str.match(name)):
            params = self.patient_params[self.patient_params.Name.str.match(name)]
            quest = self.quest[self.quest.Name.str.match(name)]
            # np.asscalar was removed from NumPy; .item() is the direct replacement
            self.patient_BW = params.BW.values.item()
            self.patient_basal = params.u2ss.values.item() * self.patient_BW / 6000  # U/min
            self.patient_TDI = quest.TDI.values.item()

            # Reference: J. B. Lee, E. Dassau, D. E. Seborg, and F. J. Doyle III,
            # "Model-Based Personalization Scheme of an Artificial Pancreas for
            # Type 1 Diabetes Applications," 2013 American Control Conference
            # (ACC), Washington, DC, USA, June 17-19, 2013.
            s_fb = 0.5 * self.patient_TDI / 24  # (6)
            c = .0664                           # (5)
            # Reference: J. Walsh, R. Roberts, and T. Bailey, "Guidelines for
            # Optimal Bolus Calculator Settings in Adults," J Diabetes Sci
            # Technol. 2011 Jan; 5(1): 129-135.
            k_i = 1960 / self.patient_TDI       # (3),(4) correction factor (Walsh's 1960 rule)
            K = k_i * c * s_fb                  # (2) model gain (not used directly below)
            tau_1 = 247                         # (13) model time constant (min)
            tau_2 = 210                         # (14) model time constant (min)
            theta = 93.5                        # (12) model time delay (min)

            self.k_c = 2 * self.patient_basal * 298 / ((self.tau_c + theta) * 1960 * .5)  # (22) --> proportional gain
            logger.info('k_c: %s', self.k_c)
            self.tau_i = 458               # (20) --> integral time constant
            self.tau_d = 113               # (21) --> derivative time constant
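            # Note: the fixed values above are consistent with IMC tuning for the
            # second-order-plus-dead-time model in (12)-(14): tau_1 + tau_2 = 457
            # (integral time) and tau_1 * tau_2 / (tau_1 + tau_2) ~= 113.5
            # (derivative time).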

        else:
            raise LookupError("Invalid patient name: {}".format(name))

        self.ierror = 0  # accumulated integral error


    def policy(self, observation, reward, done, **kwargs):
        sample_time = kwargs.get('sample_time', 1)
        pname = kwargs.get('patient_name')
        action = self._policy(
            pname,
            observation.CGM,
            self.prev1, 
            sample_time)

        # for the derivative
        self.prev1 = observation.CGM
        return action


    def _policy(self, pname, glucose, prev1, env_sample_time):
        # pname is currently unused; kept for symmetry with policy()
        error = float(glucose - self.target)            # proportional error
        self.ierror += error                            # accumulated integral error (not scaled by sample time)
        deriv = (glucose - prev1) / env_sample_time     # derivative estimate from the last CGM reading

        pterm = self.k_c * error
        iterm = self.k_c / self.tau_i * self.ierror
        dterm = self.k_c * self.tau_d * deriv

        bolus = pterm + iterm + dterm
        basal = self.patient_basal
        if bolus + basal < 0:
            bolus = -basal  # clamp so the total commanded insulin is never negative
        return Action(basal=basal, bolus=bolus)
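
Usage

The controller is meant to be driven by simglucose's simulation loop, which passes sample_time and patient_name to policy() on every step. Below is a minimal closed-loop sketch; the patient, sensor, and pump names and the controller parameters [target, lower_bound, tau_c] are illustrative choices, not values prescribed by this module.

from datetime import datetime, timedelta
from simglucose.simulation.env import T1DSimEnv
from simglucose.simulation.sim_engine import SimObj, sim
from simglucose.simulation.scenario_gen import RandomScenario
from simglucose.patient.t1dpatient import T1DPatient
from simglucose.sensor.cgm import CGMSensor
from simglucose.actuator.pump import InsulinPump
from uci_apc.controller import PIDController

patient = T1DPatient.withName('adult#001')
sensor = CGMSensor.withName('Dexcom', seed=1)
pump = InsulinPump.withName('Insulet')
scenario = RandomScenario(start_time=datetime(2025, 1, 1), seed=1)
env = T1DSimEnv(patient, sensor, pump, scenario)

# controller_params = [target BG, lower bound, tau_c]; illustrative values
controller = PIDController([140, 70, 60], 'adult#001')

s = SimObj(env, controller, timedelta(days=1), animate=False, path='./results')
results = sim(s)  # returns a DataFrame of the simulation trace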

Classes

class BlankController (init_state)

Ancestors

  • simglucose.controller.base.Controller

Methods

def policy(self, observation, reward, done, **info)

Every controller must have this implementation!

Inputs:

  • observation - a namedtuple defined in simglucose.simulation.env; it has two entries, CHO and CGM
  • reward - the current reward returned by the environment
  • done - True if the simulation is over, False if it continues
  • info - additional information passed as keyword arguments; simglucose.simulation.env.T1DSimEnv passes patient_name and sample_time

Output:

  • action - an Action namedtuple imported from simglucose.controller.base; it contains two entries, basal and bolus
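
For illustration, the controller can also be stepped by hand with a stub observation; the Observation namedtuple and the CGM value below are made up for the example:

from collections import namedtuple
from uci_apc.controller import BlankController

Observation = namedtuple('Observation', ['CGM'])  # stand-in for simglucose's observation

ctrl = BlankController(init_state=None)
obs = Observation(CGM=150)  # hypothetical CGM reading (mg/dL)
action = ctrl.policy(obs, reward=0, done=False)
print(action)  # -> basal=0.03, bolus=0
ctrl.reset()   # restore the initial state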

def reset(self)

Reset the controller state to its initial state; must be implemented.

class PIDController (controller_params, name)

Ancestors

  • simglucose.controller.base.Controller

Instance variables

var prev2

Previous CGM value from two steps back, initialized to the target BG. Currently unused; only prev1 feeds the derivative term.

Methods

def policy(self, observation, reward, done, **kwargs)

Every controller must have this implementation!

Inputs:

  • observation - a namedtuple defined in simglucose.simulation.env; it has two entries, CHO and CGM
  • reward - the current reward returned by the environment
  • done - True if the simulation is over, False if it continues
  • info - additional information passed as keyword arguments; simglucose.simulation.env.T1DSimEnv passes patient_name and sample_time

Output:

  • action - an Action namedtuple imported from simglucose.controller.base; it contains two entries, basal and bolus
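
A worked single step of the PID update, with made-up numbers (a proportional gain of 0.001, the fixed tau_i = 458 and tau_d = 113, a 140 mg/dL target, previous CGM 150, current CGM 180, and a 3-minute sample time):

error = 180 - 140              # 40 mg/dL above target
ierror = 0 + error             # 40 on the first step
deriv = (180 - 150) / 3        # 10.0 mg/dL per minute
pterm = 0.001 * error          # 0.04
iterm = 0.001 / 458 * ierror   # ~0.000087
dterm = 0.001 * 113 * deriv    # 1.13
bolus = pterm + iterm + dterm  # ~1.17, dominated by the derivative term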
