TENSORFLOW

TensorFlow is an open-source software library for numerical computation using data flow graphs. Nodes in the graph represent mathematical operations, while the graph edges represent the multidimensional data arrays (tensors) communicated between them.

What happens inside machine-learning code is mathematics; TensorFlow helps organize it in a way that simplifies things and keeps the computational flow organized.

We will use tflearn, a layer on top of TensorFlow, and, of course, Python. As always, we will use an IPython notebook as a tool to make our work easier.
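To make the data-flow-graph idea concrete, here is a minimal sketch using the TensorFlow 1.x API (the generation of the API that tflearn builds on); the node names and values are purely illustrative:

import tensorflow as tf

# two scalar input nodes (operations that produce tensors)
a = tf.placeholder(tf.float32, name='a')
b = tf.placeholder(tf.float32, name='b')

# a multiply node; the edges feeding it carry the tensors a and b
c = tf.multiply(a, b, name='c')

# building the graph computes nothing; running it in a session does
with tf.Session() as sess:
    print(sess.run(c, feed_dict={a: 3.0, b: 4.0}))  # prints 12.0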

TENSORFLOW CHATBOT CODE


# things we need for NLP
import nltk
from nltk.stem.lancaster import LancasterStemmer
stemmer = LancasterStemmer()

# things we need for TensorFlow
import numpy as np
import tflearn
import tensorflow as tf
import random

A chatbot framework needs a structure in which conversational intents are defined. A clean way to do this is with a JSON file.

{"intents": [

{"tag": "greeting",

"patterns": ["Hi", "How are you", "Is anyone there?", "Hello", "Good day"],

"responses": ["Hello, thanks for visiting", "Good to see you again", "Hi there, how can I help?"],

"context_set": ""

},

{"tag": "goodbye",
"patterns": ["Bye", "See you later", "Goodbye"],

"responses": ["See you later, thanks for visiting", "Have a nice day", "Bye! Come back again
soon."]

},

{"tag": "thanks",

"patterns": ["Thanks", "Thank you", "That's helpful"],

"responses": ["Happy to help!", "Any time!", "My pleasure"]

},

{"tag": "hours",

"patterns": ["What hours are you open?", "What are your hours?", "When are you open?" ],

"responses": ["We're open every day 9am-9pm", "Our hours are 9am-9pm every day"]

},

{"tag": "mopeds",

"patterns": ["Which mopeds do you have?", "What kinds of mopeds are there?", "What do you
rent?" ],

"responses": ["We rent Yamaha, Piaggio and Vespa mopeds", "We have Piaggio, Vespa and
Yamaha mopeds"]

},

{"tag": "payments",

"patterns": ["Do you take credit cards?", "Do you accept Mastercard?", "Are you cash only?" ],

"responses": ["We accept VISA, Mastercard and AMEX", "We accept most major credit cards"]

},

{"tag": "opentoday",

"patterns": ["Are you open today?", "When do you open today?", "What are your hours
today?"],

"responses": ["We're open every day from 9am-9pm", "Our hours are 9am-9pm every day"]

},

{"tag": "rental",

"patterns": ["Can we rent a moped?", "I'd like to rent a moped", "How does this work?" ],

"responses": ["Are you looking to rent today or later this week?"],


"context_set": "rentalday"

},

{"tag": "today",

"patterns": ["today"],

"responses": ["For rentals today please call 1-800-MYMOPED", "Same-day rentals please call
1-800-MYMOPED"],

"context_filter": "rentalday"

words = []
classes = []
documents = []
ignore_words = ['?']

# loop through each sentence in our intents patterns
for intent in intents['intents']:
    for pattern in intent['patterns']:
        # tokenize each word in the sentence
        w = nltk.word_tokenize(pattern)
        # add to our words list
        words.extend(w)
        # add to documents in our corpus
        documents.append((w, intent['tag']))
        # add to our classes list
        if intent['tag'] not in classes:
            classes.append(intent['tag'])

# stem and lowercase each word and remove duplicates
words = [stemmer.stem(w.lower()) for w in words if w not in ignore_words]
words = sorted(list(set(words)))

# remove duplicates
classes = sorted(list(set(classes)))

print(len(documents), "documents")
print(len(classes), "classes", classes)
print(len(words), "unique stemmed words", words)
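For example, the pattern "How are you" under the "greeting" tag contributes the entry (['How', 'are', 'you'], 'greeting') to documents, adds 'greeting' to classes once, and contributes its lowercased stems to words.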

# create our training data
training = []
output = []

# create an empty array for our output
output_empty = [0] * len(classes)

# training set, bag of words for each sentence
for doc in documents:
    # initialize our bag of words
    bag = []
    # list of tokenized words for the pattern
    pattern_words = doc[0]
    # stem each word
    pattern_words = [stemmer.stem(word.lower()) for word in pattern_words]
    # create our bag-of-words array
    for w in words:
        bag.append(1 if w in pattern_words else 0)

    # output is a '0' for each tag and '1' for the current tag
    output_row = list(output_empty)
    output_row[classes.index(doc[1])] = 1

    training.append([bag, output_row])

# shuffle our features and turn into np.array
# (dtype=object is needed on newer numpy, since bag and output_row have different lengths)
random.shuffle(training)
training = np.array(training, dtype=object)

# create train and test lists
train_x = list(training[:,0])
train_y = list(training[:,1])
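For intuition, each training row pairs a binary vector over the sorted vocabulary with a one-hot class vector. A toy illustration with a made-up three-word vocabulary (hypothetical values, not the real lists built above):

# words   = ['goodby', 'hello', 'thank']        # hypothetical stemmed vocabulary
# classes = ['goodbye', 'greeting', 'thanks']
# the pattern "Hello" (tag 'greeting') then becomes:
#   bag        = [0, 1, 0]
#   output_row = [0, 1, 0]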

# reset underlying graph data
tf.reset_default_graph()

# build neural network
net = tflearn.input_data(shape=[None, len(train_x[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(train_y[0]), activation='softmax')
net = tflearn.regression(net)

# define model and set up tensorboard
model = tflearn.DNN(net, tensorboard_dir='tflearn_logs')

# start training (apply gradient descent algorithm)
model.fit(train_x, train_y, n_epoch=1000, batch_size=8, show_metric=True)
model.save('model.tflearn')
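To reuse the trained model later without retraining, tflearn can restore the saved weights (a sketch; the identical `net` definition must be rebuilt first, since only the weights are saved):

# rebuild the same `net` definition as above, then:
model = tflearn.DNN(net, tensorboard_dir='tflearn_logs')
model.load('model.tflearn')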

def clean_up_sentence(sentence):
    # tokenize the pattern
    sentence_words = nltk.word_tokenize(sentence)
    # stem each word
    sentence_words = [stemmer.stem(word.lower()) for word in sentence_words]
    return sentence_words

# return bag-of-words array: 0 or 1 for each word in the bag that exists in the sentence
def bow(sentence, words, show_details=False):
    # tokenize the pattern
    sentence_words = clean_up_sentence(sentence)
    # bag of words
    bag = [0] * len(words)
    for s in sentence_words:
        for i, w in enumerate(words):
            if w == s:
                bag[i] = 1
                if show_details:
                    print("found in bag: %s" % w)
    return np.array(bag)

p = bow("is your shop open today?", words)
print(p)
print(classes)
print(model.predict([p]))
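model.predict returns one probability per class, in the order of the classes list. Turning that into a reply means picking the most likely tag; a minimal sketch using numpy's argmax (variable names are illustrative):

results = model.predict([p])[0]
# index of the highest-probability class
top_tag = classes[np.argmax(results)]
print(top_tag)  # e.g. 'opentoday', if the model has learned the pattern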

# save all of our data structures
import pickle
pickle.dump({'words': words, 'classes': classes, 'train_x': train_x, 'train_y': train_y},
            open("training_data", "wb"))
BIBLIOGRAPHY

WEB

https://chatbotsmagazine.com/contextual-chat-bots-with-tensorflow-4391749d0077
