





Prepara tus exámenes y mejora tus resultados gracias a la gran cantidad de recursos disponibles en Docsity
Gana puntos ayudando a otros estudiantes o consíguelos activando un Plan Premium
Prepara tus exámenes
Prepara tus exámenes y mejora tus resultados gracias a la gran cantidad de recursos disponibles en Docsity
Prepara tus exámenes con los documentos que comparten otros estudiantes como tú en Docsity
Los mejores documentos en venta realizados por estudiantes que han terminado sus estudios
Estudia con lecciones y exámenes resueltos basados en los programas académicos de las mejores universidades
Responde a preguntas de exámenes reales y pon a prueba tu preparación
Consigue puntos base para descargar
Gana puntos ayudando a otros estudiantes o consíguelos activando un Plan Premium
Comunidad
Pide ayuda a la comunidad y resuelve tus dudas de estudio
Descubre las mejores universidades de tu país según los usuarios de Docsity
Ebooks gratuitos
Descarga nuestras guías gratuitas sobre técnicas de estudio, métodos para controlar la ansiedad y consejos para la tesis preparadas por los tutores de Docsity
Informe de laboratorio 7 de la materia Procesamiento Digital de Imágenes.
Tipo: Ejercicios
1 / 9
Esta página no es visible en la vista previa
¡No te pierdas las partes importantes!
def train(self, X_train, y_train, X_val, y_val, batch_size=32, num_epochs=5, learning_rate=5e-3):
    """Train the network with mini-batch gradient descent.

    Runs ``num_epochs`` passes over ``X_train`` in batches of ``batch_size``,
    back-propagating the loss gradient and updating the weights after every
    batch, then records loss/accuracy on both the training and validation
    sets and prints a per-epoch report.

    Args:
        X_train, y_train: training samples and integer class labels.
        X_val, y_val: validation samples and integer class labels.
        batch_size: samples per gradient update (trailing partial batch is dropped).
        num_epochs: full passes over the training set.
        learning_rate: step size forwarded to ``self.optimize``.
    """
    num_batches_per_epoch = len(X_train) // batch_size

    accuracy_val, accuracy_train = [], []
    loss_val, loss_train = [], []

    # One-hot encode the integer labels via identity-matrix row indexing.
    # NOTE(review): `num_classes` is a module-level global defined elsewhere
    # in the file -- confirm it covers the label range.
    y_train_OH = np.eye(num_classes)[y_train]
    y_val_OH = np.eye(num_classes)[y_val]

    for i in range(num_epochs):
        epoch_loss = 0  # the OCR'd listing had lost this `0` initializer
        for b in range(num_batches_per_epoch):
            b_idx = b * batch_size
            b_idx_e = b_idx + batch_size

            x, y_true = X_train[b_idx:b_idx_e], y_train_OH[b_idx:b_idx_e]
            y = self.forward(x)
            epoch_loss += self.loss_fn(y, y_true)

            # Backward pass and parameter update for this batch.
            dL_dy = self.d_loss_fn(y, y_true)
            self.backward(dL_dy)
            self.optimize(learning_rate)

        # Validation loss on the full validation set once per epoch.
        y_results_val = self.forward(X_val)
        epoch_loss_val = self.loss_fn(y_results_val, y_val_OH)

        loss_train.append(epoch_loss / num_batches_per_epoch)
        loss_val.append(epoch_loss_val)

        accuracy_train.append(self.evaluate_accuracy(X_train, y_train))
        accuracy_val.append(self.evaluate_accuracy(X_val, y_val))

    for i in range(num_epochs):
        # `* 100` converts the accuracy fraction to a percentage
        # (the listing had lost the multiplication operator).
        print("Epoch {:4d}: Loss_train = {:.6f} | Accuracy_train {:.2f}%".format(
            i, loss_train[i], accuracy_train[i] * 100))
        print("Epoch {:4d}: Loss_val = {:.6f} | Accuracy_val {:.2f}%".format(
            i, loss_val[i], accuracy_val[i] * 100))
        print('-' * 50)
Z K
def softmax(x):
    """Softmax along the last axis.

    Shifting by the row-wise max keeps ``np.exp`` from overflowing; the
    shift cancels in the ratio. Normalizing along the LAST axis (instead
    of the builtin ``sum``, which reduces over the first axis) makes the
    function correct for batched 2-D input as well as a single 1-D vector.
    """
    x = np.asarray(x, dtype=float)
    e = np.exp(x - np.max(x, axis=-1, keepdims=True))
    return e / np.sum(e, axis=-1, keepdims=True)


def derivate_softmax(y):
    """Element-wise derivative y * (1 - y) of the softmax output ``y``."""
    y = np.asarray(y)
    return y * (1.0 - y)
class SimpleNetwork(object):
    """Sequential fully connected network: sigmoid hidden layers, softmax output."""

    def __init__(self, num_inputs, num_outputs, hidden_layers_sizes=(64, 32),
                 loss_fn=loss_L2, d_loss_fn=derivated_loss_L2):
        """Build the layer stack.

        Args:
            num_inputs: size of the input vector.
            num_outputs: number of output classes.
            hidden_layers_sizes: widths of the hidden layers, in order.
            loss_fn, d_loss_fn: loss function and its derivative.

        The OCR'd listing had `init` without dunder underscores, a
        `size`/`sizes` NameError, an unreachable last-layer check
        (`i == len(sizes) - 1` never holds inside `range(len(sizes) - 1)`)
        and a nonsensical `sizes[10]`; this follows the commented intent:
        sigmoid on hidden layers, softmax (with derivative) on the output.
        """
        super().__init__()
        sizes = [num_inputs, *hidden_layers_sizes, num_outputs]
        self.loss_fn, self.d_loss_fn = loss_fn, d_loss_fn
        self.layers = []
        last = len(sizes) - 2  # index of the output layer in the loop below
        for i in range(len(sizes) - 1):
            if i == last:
                self.layers.append(
                    FullyConnectedLayer(sizes[i], sizes[i + 1], softmax, derivate_softmax))
            else:
                self.layers.append(
                    FullyConnectedLayer(sizes[i], sizes[i + 1], sigmoid))
class SimpleNetwork(object):
    """Network variant that carries a registry of activation functions.

    Each activation is stored alongside its derivative. The derivatives
    take the activation OUTPUT ``y`` (not the pre-activation input),
    the cheap form used during backpropagation -- see
    ``derivated_sigmoid``, which is written in terms of its own output.
    """

    # -- Activation functions and their derivatives --------------------

    def sigmoid(x):
        return 1 / (1 + np.exp(-x))

    def derivated_sigmoid(y):
        # y is sigmoid(x); d/dx sigmoid = y * (1 - y)
        return y * (1 - y)

    def tanh(x):
        return np.tanh(x)

    def derivated_tanh(y):
        # y is tanh(x); d/dx tanh = 1 - y**2.  The listing had lost the
        # exponent and re-applied tanh to y, which is wrong under the
        # "derivative of the output" convention used above.
        return 1 - y ** 2

    def relu(x):
        return np.maximum(0, x)

    def derivated_relu(y):
        # Vectorized subgradient: 1 where y >= 0, else 0 (the original
        # nested list comprehension, done in one NumPy pass).
        return np.where(np.asarray(y) >= 0, 1, 0)

    # Registry of available activations, searchable by name.
    sigmoid_fn = dict(name="sigmoid", fn=sigmoid, dv=derivated_sigmoid)
    tanh_fn = dict(name="tanh", fn=tanh, dv=derivated_tanh)
    relu_fn = dict(name="relu", fn=relu, dv=derivated_relu)

    functions = [sigmoid_fn, tanh_fn, relu_fn]

    # -- Lookup helper -------------------------------------------------

    def get_functions(name):
        """Return [activation, derivative] for ``name``; sigmoid pair if unknown."""
        found = next((d for d in SimpleNetwork.functions if d["name"] == name), None)
        if found is None:
            # Fall back to the first (sigmoid) entry.  Referencing the bare
            # names `sigmoid`/`derivated_sigmoid` here would raise NameError
            # at call time: they live in the class namespace, not in the
            # module globals this function searches.
            found = SimpleNetwork.functions[0]
        return [found["fn"], found["dv"]]
1 2 def init ( self , 3 num_inputs ,
4 num_outputs ,
5 hidden_layers_sizes ,
6 layers_activation_functions ,
7 loss_fn = loss_L2 ,
8 d_loss_fn = derivated_loss_L2 ,
9 ) :
def optimize(self, epsilon):
    """Apply one optimization step of size ``epsilon`` to each trainable layer.

    Layers whose index appears in ``self.not_w`` are frozen and skipped.
    (Idiom fix: `enumerate` instead of `range(len(...))` indexing, and
    `not in` instead of `not (x in y)`.)
    """
    for idx, layer in enumerate(self.layers):
        if idx not in self.not_w:
            layer.optimize(epsilon)
# Instantiate the MNIST classifier.  NOTE(review): `capas` presumably holds
# the layer-size configuration and `not_pesos` the indices of frozen layers
# (none here) -- both are defined elsewhere in the notebook; confirm there.
mnist_classifier = SimpleNetwork(capas, not_pesos=[])
def train(self, X_train, y_train, X_val, y_val, batch_size=32, num_epochs=5, learning_rate=5e-3):
    """Mini-batch gradient-descent training loop, metrics kept on ``self``.

    Same loop as the earlier standalone version, but the per-epoch losses
    and accuracies are stored as instance attributes so ``showMetrics``
    can report them after training finishes.

    Args:
        X_train, y_train: training samples and integer class labels.
        X_val, y_val: validation samples and integer class labels.
        batch_size: samples per gradient update (trailing partial batch is dropped).
        num_epochs: full passes over the training set.
        learning_rate: step size forwarded to ``self.optimize``.
    """
    num_batches_per_epoch = len(X_train) // batch_size
    self.num_epochs = num_epochs

    self.accuracy_val, self.accuracy_train = [], []
    self.loss_val, self.loss_train = [], []

    # One-hot labels.  NOTE(review): `num_classes` is a module-level global
    # defined elsewhere in the file -- confirm it covers the label range.
    y_train_OH = np.eye(num_classes)[y_train]
    y_val_OH = np.eye(num_classes)[y_val]

    for i in range(num_epochs):
        epoch_loss = 0  # the OCR'd listing had lost this `0` initializer
        for b in range(num_batches_per_epoch):
            b_idx = b * batch_size
            b_idx_e = b_idx + batch_size

            x, y_true = X_train[b_idx:b_idx_e], y_train_OH[b_idx:b_idx_e]
            y = self.forward(x)
            epoch_loss += self.loss_fn(y, y_true)

            # Backward pass and parameter update for this batch.
            dL_dy = self.d_loss_fn(y, y_true)
            self.backward(dL_dy)
            self.optimize(learning_rate)

        # Validation loss on the full validation set once per epoch.
        y_results_val = self.forward(X_val)
        epoch_loss_val = self.loss_fn(y_results_val, y_val_OH)

        self.loss_train.append(epoch_loss / num_batches_per_epoch)
        self.loss_val.append(epoch_loss_val)

        self.accuracy_train.append(self.evaluate_accuracy(X_train, y_train))
        self.accuracy_val.append(self.evaluate_accuracy(X_val, y_val))

    self.showMetrics()

def showMetrics(self):
    """Print per-epoch train/val loss and accuracy (accuracy as a percent).

    The `* 100` converts the stored accuracy fraction to a percentage;
    the OCR'd listing had lost the multiplication operator and mangled
    the format specs (`{:4 d }` etc.).
    """
    for i in range(self.num_epochs):
        print("Epoch {:4d}: Loss_train = {:.6f} | Accuracy_train {:.2f}%".format(
            i, self.loss_train[i], self.accuracy_train[i] * 100))
        print("Epoch {:4d}: Loss_val = {:.6f} | Accuracy_val {:.2f}%".format(
            i, self.loss_val[i], self.accuracy_val[i] * 100))
        print('-' * 50)