diff --git a/machine_learning/course2/assignment4/a4_init.m b/machine_learning/course2/assignment4/a4_init.m
new file mode 100644
index 0000000..1555639
--- /dev/null
+++ b/machine_learning/course2/assignment4/a4_init.m
@@ -0,0 +1,27 @@
+global randomness_source
+load a4_randomness_source
+
+global data_sets
+temp = load('data_set'); % same as in PA3
+data_sets = temp.data;
+
+global report_calls_to_sample_bernoulli
+report_calls_to_sample_bernoulli = false;
+
+test_rbm_w = a4_rand([100, 256], 0) * 2 - 1;
+small_test_rbm_w = a4_rand([10, 256], 0) * 2 - 1;
+
+temp = extract_mini_batch(data_sets.training, 1, 1);
+data_1_case = sample_bernoulli(temp.inputs);
+temp = extract_mini_batch(data_sets.training, 100, 10);
+data_10_cases = sample_bernoulli(temp.inputs);
+temp = extract_mini_batch(data_sets.training, 200, 37);
+data_37_cases = sample_bernoulli(temp.inputs);
+
+test_hidden_state_1_case = sample_bernoulli(a4_rand([100, 1], 0));
+test_hidden_state_10_cases = sample_bernoulli(a4_rand([100, 10], 1));
+test_hidden_state_37_cases = sample_bernoulli(a4_rand([100, 37], 2));
+
+report_calls_to_sample_bernoulli = true;
+
+clear temp;
diff --git a/machine_learning/course2/assignment4/a4_main.m b/machine_learning/course2/assignment4/a4_main.m
new file mode 100644
index 0000000..ce0cb26
--- /dev/null
+++ b/machine_learning/course2/assignment4/a4_main.m
@@ -0,0 +1,67 @@
+% This file was published on Wed Nov 14 20:48:30 2012, UTC.
+
+function a4_main(n_hid, lr_rbm, lr_classification, n_iterations)
+% first, train the rbm
+    global report_calls_to_sample_bernoulli
+    report_calls_to_sample_bernoulli = false;
+    global data_sets
+    if prod(size(data_sets)) ~= 1,
+        error('You must run a4_init before you do anything else.');
+    end
+    rbm_w = optimize([n_hid, 256], ...
+                     @(rbm_w, data) cd1(rbm_w, data.inputs), ... % discard labels
+                     data_sets.training, ...
+                     lr_rbm, ...
+                     n_iterations);
+    % rbm_w is now a weight matrix of <n_hid> by <number of visible units, i.e. 256>
+    show_rbm(rbm_w);
+    input_to_hid = rbm_w;
+    % calculate the hidden layer representation of the labeled data
+    hidden_representation = logistic(input_to_hid * data_sets.training.inputs);
+    % train hid_to_class
+    data_2.inputs = hidden_representation;
+    data_2.targets = data_sets.training.targets;
+    hid_to_class = optimize([10, n_hid], @(model, data) classification_phi_gradient(model, data), data_2, lr_classification, n_iterations);
+    % report results
+    for data_details = reshape({'training', data_sets.training, 'validation', data_sets.validation, 'test', data_sets.test}, [2, 3]),
+        data_name = data_details{1};
+        data = data_details{2};
+        hid_input = input_to_hid * data.inputs; % size: <number of hidden units> by <number of data cases>
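
The patch calls several helpers that are not part of this diff (cd1, optimize, logistic, sample_bernoulli, show_rbm, classification_phi_gradient). As orientation only, here is a minimal sketch of what a CD-1 gradient estimator behind cd1 could look like for this bias-free binary RBM, reusing the logistic and sample_bernoulli helpers the patch already references; it is an illustration under those assumptions, not the assignment's own implementation.

% Sketch only, not part of the patch: CD-1 gradient estimate for a bias-free RBM.
% Assumes logistic and sample_bernoulli behave as used in a4_init.m / a4_main.m.
function ret = cd1(rbm_w, visible_data)
% rbm_w:        <n_hid> by <n_vis> weight matrix
% visible_data: binary <n_vis> by <n_cases> matrix
    n_cases = size(visible_data, 2);
    h0 = sample_bernoulli(logistic(rbm_w * visible_data)); % hidden sample driven by the data
    v1 = sample_bernoulli(logistic(rbm_w' * h0));          % one-step reconstruction of the visibles
    h1 = logistic(rbm_w * v1);                             % hidden probabilities from the reconstruction
    % positive statistics minus negative statistics, averaged over the mini-batch
    ret = (h0 * visible_data' - h1 * v1') / n_cases;
end

With such a cd1 in place, a4_main is called with the number of hidden units, the two learning rates, and the iteration count, for instance a4_main(300, 0.02, 0.005, 1000) (illustrative values, not prescribed by the patch).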