Commit 219efbe4 authored by MARMORET Axel

Adding a parameter for returning errors (optional now)

parent f1c328f3
@@ -10,7 +10,7 @@
 import nn_fac.multilayer_nmf as multi_nmf
 import nn_fac.utils.beta_divergence as beta_div
 from nn_fac.utils.normalize_wh import normalize_WH
 
-def deep_KL_NMF(data, all_ranks, n_iter_max_each_nmf = 100, n_iter_max_deep_loop = 100, init = "multilayer_nmf", init_multi_layer = "random", W_0 = None, H_0 = None, delta = 1e-6, tol = 1e-6, verbose = False):
+def deep_KL_NMF(data, all_ranks, n_iter_max_each_nmf = 100, n_iter_max_deep_loop = 100, init = "multilayer_nmf", init_multi_layer = "random", W_0 = None, H_0 = None, delta = 1e-6, tol = 1e-6, return_errors = False, verbose = False):
     L = len(all_ranks)
     assert L > 1, "The number of layers must be at least 2. Otherwise, you should just use NMF."
@@ -23,7 +23,7 @@ def deep_KL_NMF(data, all_ranks, n_iter_max_each_nmf = 100, n_iter_max_deep_loop
         #warnings.warn("Warning: The ranks of deep NMF should be decreasing.")
 
     if init == "multilayer_nmf":
-        W, H, e = multi_nmf.multilayer_beta_NMF(data, all_ranks, n_iter_max_each_nmf = n_iter_max_each_nmf, init_each_nmf = init_multi_layer, delta = delta, verbose = False)
+        W, H, e = multi_nmf.multilayer_beta_NMF(data, all_ranks, n_iter_max_each_nmf = n_iter_max_each_nmf, init_each_nmf = init_multi_layer, delta = delta, return_errors = True, verbose = False)
         all_errors[0] = e
 
     elif init == "custom":
@@ -61,10 +61,13 @@ def deep_KL_NMF(data, all_ranks, n_iter_max_each_nmf = 100, n_iter_max_deep_loop
             print(f'Converged in {deep_iteration} iterations.')
             break
 
-    return W, H, all_errors, toc
+    if return_errors:
+        return W, H, all_errors, toc
+    else:
+        return W, H
 
 
 def one_step_deep_KL_nmf(data, W, H, all_ranks, lambda_, delta):
     # delta is useless here, because we use our own beta_nmf.
     L = len(all_ranks)
     errors = []
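The net effect on the deep solver's API: the error history and timings are now opt-in rather than always returned. A minimal usage sketch, assuming the function is importable as below (the exact module path is not shown in this diff) and using random nonnegative stand-in data:

import numpy as np
from nn_fac.deep_nmf import deep_KL_NMF  # assumed module path, not shown in this diff

data = np.random.rand(50, 40)  # stand-in nonnegative data matrix
all_ranks = [8, 4, 2]          # one rank per layer, decreasing, at least 2 layers

# Default call: only the factors come back.
W, H = deep_KL_NMF(data, all_ranks)

# Opting in restores the full diagnostic output (per-layer errors and timings).
W, H, all_errors, toc = deep_KL_NMF(data, all_ranks, return_errors = True)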
@@ -3,8 +3,8 @@
 import numpy as np
 from nn_fac.nmf import nmf
 from nn_fac.utils.normalize_wh import normalize_WH
-def multilayer_beta_NMF(data, all_ranks, beta = 1, delta = 1e-6, n_iter_max_each_nmf = 100, init_each_nmf = "nndsvd", verbose = False):
+def multilayer_beta_NMF(data, all_ranks, beta = 1, delta = 1e-6, n_iter_max_each_nmf = 100, init_each_nmf = "nndsvd", return_errors = False, verbose = False):
     # delta is useless here, because we use our own beta_nmf.
     L = len(all_ranks)
     assert L > 1, "The number of layers must be at least 2. Otherwise, you should just use NMF"
     if sorted(all_ranks, reverse=True) != all_ranks:
@@ -22,12 +22,15 @@ def multilayer_beta_NMF(data, all_ranks, beta = 1, delta = 1e-6, n_iter_max_each
         if verbose:
             print(f'Layer {i} done.')
 
-    return W, H, reconstruction_errors
+    if return_errors:
+        return W, H, reconstruction_errors
+    else:
+        return W, H
 
 def one_layer_update(data, rank, beta, delta, init_each_nmf, n_iter_max_each_nmf, verbose):
     W, H, cost_fct_vals, times = nmf(data, rank, init = init_each_nmf, U_0 = None, V_0 = None, n_iter_max=n_iter_max_each_nmf, tol=1e-8,
                                      update_rule = "mu", beta = beta,
-                                     sparsity_coefficients = [None, None], fixed_modes = [], normalize = [False, False],
+                                     sparsity_coefficients = [None, None], fixed_modes = [], normalize = [False, True],
                                      verbose=verbose, return_costs=True, deterministic=False)
     W_normalized, H_normalized = normalize_WH(W, H, matrix="H")
     reconstruction_error = cost_fct_vals[-1]
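Besides the return_errors plumbing, this hunk also flips the inner nmf call from normalize = [False, False] to normalize = [False, True], which by the position of the flag appears to normalize the second factor (H) during the updates. This is sound because the product WH is invariant under a diagonal rescaling of the factors, the same property normalize_WH relies on. A small self-contained numpy check of that invariance (a sketch, not the library's own normalize_WH):

import numpy as np

rng = np.random.default_rng(0)
W = rng.random((6, 3))
H = rng.random((3, 5))

# Rescale: divide each row of H by its L1 norm, and multiply the
# matching column of W by the same amount.
scale = H.sum(axis=1)             # one positive scalar per row of H
H_normalized = H / scale[:, None]
W_compensated = W * scale[None, :]

# The reconstruction is unchanged up to floating-point error.
print(np.allclose(W @ H, W_compensated @ H_normalized))  # True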