Pre-training an ANN with RBMs in pylearn2

I am trying to build a layered ANN in pylearn2, pre-trained with RBMs. I slightly modified the script run_deep_trainer, which lives in pylearn2/pylearn2/scripts/tutorials/deep_trainer. I want a 4-layer network: the first three layers are GaussianBinaryRBMs with 500 hidden units each, and the last one is an mlp.Softmax layer.

Here's the script I created:

    from pylearn2.models.rbm import GaussianBinaryRBM
    from pylearn2.models.softmax_regression import SoftmaxRegression
    from pylearn2.models.mlp import Softmax
    from pylearn2.training_algorithms.sgd import SGD
    from pylearn2.costs.autoencoder import MeanSquaredReconstructionError
    from pylearn2.termination_criteria import EpochCounter
    from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
    from pylearn2.energy_functions.rbm_energy import GRBM_Type_1
    from pylearn2.blocks import StackedBlocks
    from pylearn2.datasets.transformer_dataset import TransformerDataset
    from pylearn2.costs.ebm_estimation import SMD
    from pylearn2.training_algorithms.sgd import MonitorBasedLRAdjuster
    from pylearn2.train import Train
    from optparse import OptionParser
    import numpy

    # (These imports and constants were omitted from the paste but are needed
    # to run the script; the import paths and epoch limits are assumptions.)
    from pylearn2.config import yaml_parse
    from pylearn2.corruption import GaussianCorruptor
    from pylearn2.costs.cost import Default

    MAX_EPOCHS_UNSUPERVISED = 6
    MAX_EPOCHS_SUPERVISED = 2


    def get_dataset_timitConsSmall():
        print('loading timitConsSmall dataset...')
        template = \
            """!obj:pylearn2.datasets.timitConsSmall.timit.TIMIT {
                classes_number: 32,
                which_set: %s,
            }"""
        trainset = yaml_parse.load(template % "train")
        # testset = yaml_parse.load(template % "test")
        print('...done loading timitConsSmall.')
        return trainset


    def get_grbm(structure):
        n_input, n_output = structure
        config = {
            'nvis': n_input,
            'nhid': n_output,
            'irange': 0.05,
            'energy_function_class': GRBM_Type_1,
            'learn_sigma': True,
            'init_sigma': .4,
            'init_bias_hid': -2.,
            'mean_vis': False,
            'sigma_lr_scale': 1e-3
        }
        return GaussianBinaryRBM(**config)


    def get_logistic_regressor(structure):
        n_input, n_output = structure
        layer = SoftmaxRegression(n_classes=n_output, irange=0.02, nvis=n_input)
        return layer


    def get_mlp_softmax(structure):
        n_input, n_output = structure
        layer = Softmax(n_classes=n_output, irange=0.02, layer_name='y')
        return layer


    def get_layer_trainer_softmax(layer, trainset):
        # configs on sgd
        config = {'learning_rate': 0.1,
                  'cost': Default(),
                  'batch_size': 100,
                  'monitoring_batches': 10,
                  'monitoring_dataset': trainset,
                  'termination_criterion': EpochCounter(max_epochs=MAX_EPOCHS_SUPERVISED),
                  'update_callbacks': None}
        train_algo = SGD(**config)
        model = layer
        return Train(model=model,
                     dataset=trainset,
                     algorithm=train_algo,
                     extensions=None)


    def get_layer_trainer_logistic(layer, trainset):
        # configs on sgd
        config = {'learning_rate': 0.1,
                  'cost': Default(),
                  'batch_size': 10,
                  'monitoring_batches': 10,
                  'monitoring_dataset': trainset,
                  'termination_criterion': EpochCounter(max_epochs=MAX_EPOCHS_SUPERVISED),
                  'update_callbacks': None}
        train_algo = SGD(**config)
        model = layer
        return Train(model=model,
                     dataset=trainset,
                     algorithm=train_algo,
                     extensions=None)


    def get_layer_trainer_sgd_rbm(layer, trainset):
        train_algo = SGD(
            learning_rate=1e-2,
            batch_size=100,
            # "batches_per_iter" : 2000,
            monitoring_batches=20,
            monitoring_dataset=trainset,
            cost=SMD(corruptor=GaussianCorruptor(stdev=0.4)),
            termination_criterion=EpochCounter(max_epochs=MAX_EPOCHS_UNSUPERVISED),
        )
        model = layer
        extensions = [MonitorBasedLRAdjuster()]
        return Train(model=model, algorithm=train_algo,
                     save_path='grbm.pkl', save_freq=1,
                     extensions=extensions, dataset=trainset)


    def main(args=None):
        trainset = get_dataset_timitConsSmall()
        n_output = 32
        design_matrix = trainset.get_design_matrix()
        n_input = design_matrix.shape[1]

        # build layers
        layers = []
        structure = [[n_input, 500], [500, 500], [500, 500], [500, n_output]]
        # layer 0: gaussianRBM
        layers.append(get_grbm(structure[0]))
        # # layer 1: denoising AE
        # layers.append(get_denoising_autoencoder(structure[1]))
        # # layer 2: AE
        # layers.append(get_autoencoder(structure[2]))
        # # layer 3: logistic regression used in supervised training
        # layers.append(get_logistic_regressor(structure[3]))
        # layer 1: gaussianRBM
        layers.append(get_grbm(structure[1]))
        # layer 2: gaussianRBM
        layers.append(get_grbm(structure[2]))
        # layer 3: logistic regression used in supervised training
        # layers.append(get_logistic_regressor(structure[3]))
        layers.append(get_mlp_softmax(structure[3]))

        # construct training sets for the different layers
        trainset = [trainset,
                    TransformerDataset(raw=trainset, transformer=layers[0]),
                    TransformerDataset(raw=trainset, transformer=StackedBlocks(layers[0:2])),
                    TransformerDataset(raw=trainset, transformer=StackedBlocks(layers[0:3]))]

        # construct layer trainers
        layer_trainers = []
        layer_trainers.append(get_layer_trainer_sgd_rbm(layers[0], trainset[0]))
        # layer_trainers.append(get_layer_trainer_sgd_autoencoder(layers[1], trainset[1]))
        # layer_trainers.append(get_layer_trainer_sgd_autoencoder(layers[2], trainset[2]))
        layer_trainers.append(get_layer_trainer_sgd_rbm(layers[1], trainset[1]))
        layer_trainers.append(get_layer_trainer_sgd_rbm(layers[2], trainset[2]))
        # layer_trainers.append(get_layer_trainer_logistic(layers[3], trainset[3]))
        layer_trainers.append(get_layer_trainer_softmax(layers[3], trainset[3]))

        # unsupervised pretraining
        for i, layer_trainer in enumerate(layer_trainers[0:3]):
            print('-----------------------------------')
            print(' Unsupervised training layer %d, %s' % (i, layers[i].__class__))
            print('-----------------------------------')
            layer_trainer.main_loop()

        print('\n')
        print('------------------------------------------------------')
        print(' Unsupervised training done! Start supervised training...')
        print('------------------------------------------------------')
        print('\n')

        # supervised training
        layer_trainers[-1].main_loop()


    if __name__ == '__main__':
        main()

The script performs the unsupervised part of the pre-training correctly, but fails in the supervised part:

    Traceback (most recent call last):
      File "run_deep_trainer.py", line 404, in <module>
        main()
      File "run_deep_trainer.py", line 400, in main
        layer_trainers[-1].main_loop()
      File "/home/gortolan/pylearn2/pylearn2/train.py", line 141, in main_loop
        self.setup()
      File "/home/gortolan/pylearn2/pylearn2/train.py", line 121, in setup
        self.algorithm.setup(model=self.model, dataset=self.dataset)
      File "/home/gortolan/pylearn2/pylearn2/training_algorithms/sgd.py", line 243, in setup
        inf_params = [param for param in model.get_params()
      File "/home/gortolan/pylearn2/pylearn2/models/model.py", line 503, in get_params
        return list(self._params)
    AttributeError: 'Softmax' object has no attribute '_params'

If I use SoftmaxRegression (as the model) for the last layer, i.e. replace get_mlp_softmax() and get_layer_trainer_softmax() with get_logistic_regressor() and get_layer_trainer_logistic(), everything works fine.

It seems that the mlp.Softmax model does not return its parameters (_params) through get_params().
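In fact, the failure can be reproduced in isolation. A minimal sketch (this assumes the same pylearn2 checkout as above, where Model.get_params() falls back to self._params, as the traceback shows):

    from pylearn2.models.mlp import Softmax

    layer = Softmax(n_classes=32, irange=0.02, layer_name='y')
    layer.get_params()
    # AttributeError: 'Softmax' object has no attribute '_params'
    # Nothing has allocated the layer's weights yet: that normally happens
    # when set_input_space() is called on the layer by its container.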

Does anyone know how to fix this?

1 answer

The problem is that SoftmaxRegression is a model, but Softmax is a layer meant to be used inside an MLP. The way to fix this would be something like:

    def get_mlp_softmax(structure):
        n_input, n_output = structure
        layer = MLP(nvis=500,
                    layers=[Softmax(n_classes=n_output, irange=0.02, layer_name='y')])
        return layer

where MLP is pylearn2.models.mlp.MLP (from pylearn2.models.mlp import MLP). Here nvis=500 matches the number of hidden units of the last RBM; since structure[3] is [500, n_output], passing nvis=n_input would be equivalent and more general.
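Incidentally, SoftmaxRegression is itself implemented as an MLP wrapping a single Softmax layer, which is why the get_logistic_regressor() variant works: the MLP container allocates the layer's parameters and exposes them through get_params(). With the wrapped model above, the rest of the question's script should run unchanged. A sketch, reusing the question's helper names:

    layers.append(get_mlp_softmax(structure[3]))  # now an MLP, so get_params() works
    layer_trainers.append(get_layer_trainer_softmax(layers[3], trainset[3]))
    layer_trainers[-1].main_loop()  # the supervised phase now starts correctly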


Source: https://habr.com/ru/post/1236079/

