Example of a Mamdani-type fuzzy model

!pip install ipywidgets
!pip install ipympl
import os
import numpy as np

import ipywidgets as widgets

%matplotlib widget
import matplotlib.pyplot as plt

def gauss_mf(x, a, b):
  # Gaussian-shaped membership function centered at a with width parameter b
  return np.exp( -np.power(x-a,2)/(b**2) )

def singleton_mf(x, a):
  # Singleton membership function: 1 at the grid point closest to a, 0 elsewhere
  y = np.zeros_like(x)
  y[np.argmin( np.abs(x-a) )] = 1
  return y

def tri_mf(x, a, b, c):
  # Triangular membership function with feet at a and c and peak at b
  t1 = (x-a) / (b-a)
  t2 = (c-x) / (c-b)
  if hasattr(x, "__len__"):
    return np.fmax( np.fmin( t1, t2 ), np.zeros_like(t1) )
  else:
    y = min( t1, t2 )
    return max( y, 0 )
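
As a quick check, the membership functions above can be evaluated on a small grid. This is a minimal sketch; the grid and the parameters (centers and widths) are arbitrary values chosen only for illustration:

x_demo = np.linspace(0, 10, 11)
print(gauss_mf(x_demo, 5, 2))      # bell-shaped curve centered at 5, width 2
print(tri_mf(x_demo, 2, 5, 8))     # triangle with support [2, 8] and peak at 5
print(singleton_mf(x_demo, 5))     # 1 only at the grid point closest to 5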

def T_min(a, b):
  # minimum t-norm (fuzzy AND)
  if hasattr(a, "__len__"):
    return np.fmin( a, b )
  else:
    return min( a, b )

def S_max(a, b):
  # maximum t-conorm (fuzzy OR)
  if hasattr(a, "__len__"):
    return np.fmax( a, b )
  else:
    return max( a, b )

def T_proba(a, b):
  # product (probabilistic) t-norm
  return np.multiply(a, b)

def S_proba(a, b):
  # probabilistic sum t-conorm
  return a + b - np.multiply(a, b)

def I_mamdani(a, b):
  # Mamdani implication: clips the consequent at the activation level (minimum)
  if hasattr(a, "__len__"):
    return np.fmin( a, b )
  else:
    return min( a, b )
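
The two t-norm / t-conorm pairs can be compared directly on membership degrees. A minimal sketch with made-up degrees:

a_demo = np.array([0.2, 0.7, 1.0])
b_demo = np.array([0.5, 0.5, 0.5])
print(T_min(a_demo, b_demo), T_proba(a_demo, b_demo))   # [0.2 0.5 0.5] vs [0.1 0.35 0.5]
print(S_max(a_demo, b_demo), S_proba(a_demo, b_demo))   # [0.5 0.7 1. ] vs [0.6 0.85 1. ]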
  

def plot_rules(reglas, ps, mfs, U, show=True):
    # Plots the premise (red) and consequent (blue) MFs of every rule in a grid of subplots
    n_rules = len(reglas)
    n_lterms_premisa = len(reglas[0]['premisa'])
    n_lterms_consecuente = len(reglas[0]['consecuente'])
    n_lterms = n_lterms_premisa + n_lterms_consecuente

    fig, axs = plt.subplots(figsize=(10,8), nrows=n_rules, ncols=n_lterms)
    axs_dict = {}

    for r in range(n_rules):
      
      col = 0
      
      for c, lterms in [('r', reglas[r]['premisa']), ('b', reglas[r]['consecuente'])]:
        for lt, (lvar, lterm) in enumerate(lterms):
          if n_rules == 1:
            ax = axs[col]
          else:
            ax = axs[r, col]
          
          if (r+1, lvar) not in axs_dict:
              axs_dict[(r+1, lvar)] = ax

          mf_func = mfs[lvar][lterm]
          p = ps[lvar][lterm]
          x = U[lvar]
          mu = mf_func(x, *p)

          axs_dict[(r+1, lvar)].plot(x, mu, linewidth=4, color=c, label=lterm)
          axs_dict[(r+1, lvar)].set_xlabel(lvar)
          axs_dict[(r+1, lvar)].set_ylabel('Membership degree')
          axs_dict[(r+1, lvar)].set_xlim(np.min(x), np.max(x))
          axs_dict[(r+1, lvar)].set_ylim(0, 1)
          col += 1

    fig.tight_layout()
    
    if not show:
        return axs_dict


def get_U(lvars, N = 1000):
    # Builds the universe of discourse of each linguistic variable as an
    # N-point grid spanning the range of the MF centers found in lvars
    U = {}
    for lvar in lvars:
        if lvar not in U:
            U[lvar] = []
        for tl in lvars[lvar]:
            U[lvar] += [lvars[lvar][tl][0]]
    for lvar in U:
        U[lvar] = [min(U[lvar]), max(U[lvar])]
        U[lvar] = np.linspace(U[lvar][0], U[lvar][1], N)
    return U, N
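
For example, get_U applied to a small parameter dictionary builds one grid per variable spanning the MF centers. The variable name and values below are hypothetical, chosen only for illustration:

p_demo = {'temperatura': {'bajo': [10, 5], 'alto': [40, 5]}}
U_demo, _ = get_U(p_demo, N=5)
print(U_demo['temperatura'])   # [10.  17.5 25.  32.5 40. ]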

def mk_axes(reglas, ps, mfs, U):
    # Creates one subplot per (rule, linguistic variable) and returns the figure,
    # a dict mapping (rule number, variable) -> axis, and the first premise variable

    n_rules = len(reglas)
    n_lterms_premisa = len(reglas[0]['premisa'])
    n_lterms_consecuente = len(reglas[0]['consecuente'])
    n_lterms = n_lterms_premisa + n_lterms_consecuente

    fig, axs = plt.subplots(figsize=(5,4), nrows=n_rules, ncols=n_lterms)
    axs_dict = {}

    fcol = ''

    for r in range(n_rules):      
      col = 0
      for lterms in [reglas[r]['premisa'], reglas[r]['consecuente']]:
        for lt, (lvar, lterm) in enumerate(lterms):

          if len(fcol) == 0:
            fcol = lvar   # remember the first column's variable (used for shared y-ticks and titles)

          if n_rules == 1:
            ax = axs[col]
          else:
            ax = axs[r, col]

          if (r+1, lvar) not in axs_dict:
              axs_dict[(r+1, lvar)] = ax
          col += 1
    
    return fig, axs_dict, fcol

Mamdani model

# membership functions (MFs) of the linguistic terms
mf_premisa = {'suciedad': {'bajo': gauss_mf, 'medio': gauss_mf, 'alto': gauss_mf},
              'masa': {'bajo': gauss_mf, 'medio': gauss_mf, 'alto': gauss_mf}}
mf_consecuente = {'rpm': {'bajo': gauss_mf, 'medio': gauss_mf, 'alto': gauss_mf},
                  'tiempo secado': {'bajo': gauss_mf, 'medio': gauss_mf, 'alto': gauss_mf}}
mfs = {**mf_premisa, **mf_consecuente} # merge premise and consequent MFs

# parameters of the MFs
p_premisa = {'suciedad': {'bajo': [0,15], 'medio': [25, 15], 'alto': [50, 15]},
             'masa': {'bajo': [0,2], 'medio': [4.5, 2], 'alto': [9, 2]}}
p_consecuente = {'rpm': {'bajo': [1000,100], 'medio': [1500, 100], 'alto': [2000, 100]},
                 'tiempo secado': {'bajo': [0,0.5], 'medio': [1.5, 0.5], 'alto': [3, 0.5]}}
p = {**p_premisa, **p_consecuente} # merge premise and consequent MF parameters


U, N = get_U( p ) # derive the universes of discourse from the MF parameters

reglas = [
            {'premisa': [('suciedad', 'bajo'), ('masa', 'bajo')],
                    'consecuente': [('rpm', 'bajo'), ('tiempo secado', 'bajo')]},
            {'premisa': [('suciedad', 'medio'), ('masa', 'medio')],
                    'consecuente': [('rpm', 'medio'), ('tiempo secado', 'medio')]},
            {'premisa': [('suciedad', 'alto'), ('masa', 'alto')],
                    'consecuente': [('rpm', 'alto'), ('tiempo secado', 'alto')]}
          ]
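
Each rule pairs premise terms with consequent terms. For a crisp fact, the activation level of a rule is the t-norm of how well the fact matches each premise term. A minimal sketch for the first rule, using an arbitrary fact (suciedad = 20, masa = 3); evaluating the Gaussian MFs directly at the crisp values is equivalent, up to grid resolution, to the singleton fuzzification used further below:

hecho_demo = {'suciedad': 20, 'masa': 3}
alpha_demo = 1
for lvar, lterm in reglas[0]['premisa']:
    mu_lvar = mfs[lvar][lterm](hecho_demo[lvar], *p[lvar][lterm])  # membership of the crisp value
    alpha_demo = T_min(alpha_demo, mu_lvar)
print(f'activation of rule 1: {alpha_demo:.3f}')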

U_p, _ = get_U( p_premisa ) # universes of discourse of the premise variables only
tnorm = T_min       # t-norm used for the premises and the rule activation levels
tconorm = S_max     # t-conorm used to aggregate the rule outputs
imp = I_mamdani     # implication operator


fig, ax, fcol = mk_axes(reglas, p, mfs, U)

# (min, max, step, random initial value) for each premise variable's slider
hecho_0 = lambda r: ( np.min(r),
                     np.max(r),
                      (r.max() - r.min())/20,
                       (np.max(r)-np.min(r))*np.random.rand() + np.min(r))

slider_data = {lvar:hecho_0(U[lvar]) for lvar in U_p}

hecho = {lvar:slider_data[lvar][3] for lvar in U_p} # initial crisp facts (the sliders' starting values)
sliders = {lvar:widgets.FloatSlider(min=r[0],
                                    max=r[1],
                                    step=r[2],
                                    value=r[3],
                                    description=lvar) for lvar, r in slider_data.items()}


def update(**U_p):
  # Recomputes rule activations, implications and the aggregated output MFs
  # each time a slider moves, then refreshes the plotted lines
  global mu_agg
  hecho = dict(U_p)   # crisp facts, one value per premise variable

  alphas = {}  # rule activation levels
  mu_agg = {}  # aggregated output MF per consequent variable
  n_rules = len(reglas)

  for r in range(n_rules):
    for lvar, lterm in reglas[r]['premisa']:

      mf_func = mfs[lvar][lterm]
      p_ = p[lvar][lterm]
      x = U[lvar]
      mu_ = mf_func(x, *p_)

      # fuzzify the crisp input as a singleton and intersect it with the premise term
      mu_hecho = singleton_mf(x, hecho[lvar])
      mu_hecho_inter = tnorm(mu_hecho, mu_)

      # rule activation: AND (t-norm) of the premise matching degrees
      if r not in alphas: alphas[r] = 1
      alphas[r] = tnorm(alphas[r], np.max(mu_hecho_inter))

      line1[(r+1, lvar)].set_ydata(mu_hecho)
      line2[(r+1, lvar)].set_ydata(mu_hecho_inter)

    text[(r+1, fcol)].set_text(f'alpha {r+1}: {alphas[r]:.2f}')

    for lvar, lterm in reglas[r]['consecuente']:

      p_ = p[lvar][lterm]
      y = U[lvar]
      mu_consecuente = mfs[lvar][lterm](y, *p_)
      # Mamdani implication clipped at the activation level, then max-aggregation
      mu_implicacion = imp(alphas[r] * np.ones_like(y), mu_consecuente)
      if lvar not in mu_agg:
          mu_agg[lvar] = np.zeros_like( y )
      mu_agg[lvar] = tconorm(mu_agg[lvar], mu_implicacion)

      line1[(r+1, lvar)].set_ydata(mu_implicacion)

    fig.canvas.draw()

# Note: update refers to the artists line1, line2 and text, which are created
# by the figure-setup cell below, so that cell must be run before interacting.
widgets.interact(update, **sliders)


n_rules = len(reglas)

line1 = {}   # plot artists for the fuzzified facts / rule outputs
line2 = {}   # plot artists for the intersections fact AND premise
text = {}    # title artists showing the rule activation levels
alphas = {}  # rule activation levels
mu_agg = {}  # aggregated output MF per consequent variable

for r in range(n_rules):

    for lvar, lterm in reglas[r]['premisa']:

        mf_func = mfs[lvar][lterm]
        p_ = p[lvar][lterm]
        x = U[lvar]
        mu_ = mf_func(x, *p_)

        # fuzzify the crisp input as a singleton and intersect it with the premise term
        mu_hecho = singleton_mf(x, hecho[lvar])
        mu_hecho_inter = tnorm(mu_hecho, mu_)

        # rule activation: AND (t-norm) of the premise matching degrees
        if r not in alphas:
          alphas[r] = 1
        alphas[r] = tnorm(alphas[r], np.max(mu_hecho_inter))

        c = 'r'
        ax[(r+1, lvar)].plot(x, mu_, linewidth=4, color=c, label=lterm)
        if r != n_rules-1: ax[(r+1, lvar)].set_xticks([])
        if lvar != fcol: ax[(r+1, lvar)].set_yticks([])
        if r == n_rules-1: ax[(r+1, lvar)].set_xlabel(lvar)
        #ax[(r+1, lvar)].set_ylabel('Grado de pertenencia')
        ax[(r+1, lvar)].set_xlim(np.min(x), np.max(x))
        ax[(r+1, lvar)].set_ylim(0, 1)
        line1[(r+1, lvar)], = ax[(r+1, lvar)].plot(x, mu_hecho, linewidth=4, color='c', label='fact')
        line2[(r+1, lvar)], = ax[(r+1, lvar)].plot(x, mu_hecho_inter, linewidth=4, color='y', label='fact AND premise')

    text[(r+1, fcol)] = ax[(r+1, fcol)].set_title( f'alpha {r+1}: {alphas[r]:.2f}' )

    for lvar, lterm in reglas[r]['consecuente']:

        p_ = p[lvar][lterm]
        y = U[lvar]
        mu_consecuente = mfs[lvar][lterm](y, *p_)
        # Mamdani implication clipped at the activation level, then max-aggregation
        mu_implicacion = imp(alphas[r] * np.ones_like(y), mu_consecuente)
        if lvar not in mu_agg: mu_agg[lvar] = np.zeros_like( y )
        mu_agg[lvar] = tconorm(mu_agg[lvar], mu_implicacion)

        c = 'b'
        ax[(r+1, lvar)].plot(y, mu_consecuente, linewidth=4, color=c, label=lterm)

        if r != n_rules-1: ax[(r+1, lvar)].set_xticks([])
        if lvar != fcol: ax[(r+1, lvar)].set_yticks([])
        if r == n_rules-1: ax[(r+1, lvar)].set_xlabel(lvar)

        ax[(r+1, lvar)].set_xlim(np.min(y), np.max(y))
        ax[(r+1, lvar)].set_ylim(0, 1)
        line1[(r+1, lvar)], = ax[(r+1, lvar)].plot(y, mu_implicacion, linewidth=4, color='c', label=f'rule {r+1} result')

fig.tight_layout()

# Plot the aggregated output MFs together with the reference consequent terms
fig, axs = plt.subplots(figsize=(6,5), nrows=len(mu_agg), ncols=1)

for i, lvar in enumerate(mu_agg):
  for lterm in p_consecuente[lvar]:
    ax = axs[i]
    p_ = p[lvar][lterm]
    y = U[lvar]
    mu_consecuente = mfs[lvar][lterm](y, *p_)

    ax.plot(y, mu_consecuente, linewidth=4, color='b', alpha=0.3, label=lterm)

for i, lvar in enumerate(mu_agg):
  ax = axs[i]
  y = U[lvar]
  # defuzzification by centroid (center of gravity) of the aggregated MF
  y_cgrav = np.trapz( np.multiply(mu_agg[lvar], y), y ) / np.trapz( mu_agg[lvar], y )

  ax.fill_between(y, mu_agg[lvar], color='c', alpha=0.5)
  ax.plot(y, mu_agg[lvar], linewidth=4, color='c', label=lvar)
  ax.set_xlim(y[0], y[-1])
  ax.set_ylim(0, 1)
  ax.set_title( f'{lvar} = {y_cgrav: .2f}' )

fig.tight_layout()
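
Finally, the whole pipeline above (singleton fuzzification, min activation, Mamdani implication, max aggregation and centroid defuzzification) can be collected into one reusable function. This is a minimal sketch that reuses the objects already defined in this notebook (reglas, mfs, p, U, singleton_mf, T_min, I_mamdani); the function name inferir_mamdani and the example fact are hypothetical:

def inferir_mamdani(hecho):
    # hecho: dict mapping each premise variable to a crisp value
    mu_agg_ = {}
    for regla in reglas:
        # rule activation: t-norm over the premise matching degrees
        alpha = 1
        for lvar, lterm in regla['premisa']:
            mu_ = mfs[lvar][lterm](U[lvar], *p[lvar][lterm])
            mu_hecho = singleton_mf(U[lvar], hecho[lvar])
            alpha = T_min(alpha, np.max(T_min(mu_hecho, mu_)))
        # implication and aggregation per consequent variable
        for lvar, lterm in regla['consecuente']:
            y = U[lvar]
            mu_c = mfs[lvar][lterm](y, *p[lvar][lterm])
            mu_imp = I_mamdani(alpha * np.ones_like(y), mu_c)
            mu_agg_[lvar] = np.fmax(mu_agg_.get(lvar, np.zeros_like(y)), mu_imp)
    # centroid defuzzification of each aggregated output MF
    salida = {}
    for lvar in mu_agg_:
        y = U[lvar]
        salida[lvar] = np.trapz(mu_agg_[lvar] * y, y) / np.trapz(mu_agg_[lvar], y)
    return salida

print(inferir_mamdani({'suciedad': 30, 'masa': 5}))   # arbitrary fact chosen only for illustration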