
Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Project: a23marmo/nonnegative-factorization
Available revisions: master, v0.2.0, v0.2.1
Commits on Source (2)
@@ -10,7 +10,7 @@ import nn_fac.multilayer_nmf as multi_nmf
 import nn_fac.utils.beta_divergence as beta_div
 from nn_fac.utils.normalize_wh import normalize_WH


-def deep_KL_NMF(data, all_ranks, n_iter_max_each_nmf = 100, n_iter_max_deep_loop = 100, init = "multilayer_nmf", init_multi_layer = "random", W_0 = None, H_0 = None, delta = 1e-6, tol = 1e-6, verbose = False):
+def deep_KL_NMF(data, all_ranks, n_iter_max_each_nmf = 100, n_iter_max_deep_loop = 100, init = "multilayer_nmf", init_multi_layer = "random", W_0 = None, H_0 = None, delta = 1e-6, tol = 1e-6, return_errors = False, verbose = False):
     L = len(all_ranks)
     assert L > 1, "The number of layers must be at least 2. Otherwise, you should just use NMF."
@@ -23,7 +23,7 @@ def deep_KL_NMF(data, all_ranks, n_iter_max_each_nmf = 100, n_iter_max_deep_loop
         #warnings.warn("Warning: The ranks of deep NMF should be decreasing.")

     if init == "multilayer_nmf":
-        W, H, e = multi_nmf.multilayer_beta_NMF(data, all_ranks, n_iter_max_each_nmf = n_iter_max_each_nmf, init_each_nmf = init_multi_layer, delta = delta, verbose = False)
+        W, H, e = multi_nmf.multilayer_beta_NMF(data, all_ranks, n_iter_max_each_nmf = n_iter_max_each_nmf, init_each_nmf = init_multi_layer, delta = delta, return_errors = True, verbose = False)
         all_errors[0] = e

     elif init == "custom":
@@ -60,11 +60,14 @@ def deep_KL_NMF(data, all_ranks, n_iter_max_each_nmf = 100, n_iter_max_deep_loop
             if verbose:
                 print(f'Converged in {deep_iteration} iterations.')
             break

-    return W, H, all_errors, toc
+    if return_errors:
+        return W, H, all_errors, toc
+    else:
+        return W, H


 def one_step_deep_KL_nmf(data, W, H, all_ranks, lambda_, delta):
     # delta is useless here, because we use our own beta_nmf.
     L = len(all_ranks)
     errors = []
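Note on this change set: deep_KL_NMF now only returns the error history and timing when asked for them. A minimal usage sketch follows, assuming the module path nn_fac.deep_nmf (not shown in this diff) and purely illustrative data and ranks:

import numpy as np
from nn_fac.deep_nmf import deep_KL_NMF  # module path assumed, not visible in this diff

data = np.abs(np.random.rand(100, 50))  # nonnegative toy matrix
all_ranks = [16, 8, 4]                  # decreasing ranks, one per layer

# Default behaviour after this change: factors only.
W, H = deep_KL_NMF(data, all_ranks)

# Opt in to the per-layer errors and timing with return_errors=True.
W, H, all_errors, toc = deep_KL_NMF(data, all_ranks, return_errors=True)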
@@ -3,8 +3,8 @@ import numpy as np
 from nn_fac.nmf import nmf
 from nn_fac.utils.normalize_wh import normalize_WH

-def multilayer_beta_NMF(data, all_ranks, beta = 1, delta = 1e-6, n_iter_max_each_nmf = 100, init_each_nmf = "nndsvd", verbose = False):
+def multilayer_beta_NMF(data, all_ranks, beta = 1, delta = 1e-6, n_iter_max_each_nmf = 100, init_each_nmf = "nndsvd", return_errors = False, verbose = False):
     # delta is useless here, because we use our own beta_nmf.
     L = len(all_ranks)
     assert L > 1, "The number of layers must be at least 2. Otherwise, you should just use NMF"
     if sorted(all_ranks, reverse=True) != all_ranks:
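The same opt-in flag is added to multilayer_beta_NMF; a short sketch under the same illustrative assumptions:

import numpy as np
import nn_fac.multilayer_nmf as multi_nmf

data = np.abs(np.random.rand(100, 50))  # nonnegative toy matrix
all_ranks = [16, 8, 4]                  # decreasing ranks, one per layer

W, H = multi_nmf.multilayer_beta_NMF(data, all_ranks)  # factors only (new default)
W, H, errors = multi_nmf.multilayer_beta_NMF(data, all_ranks, return_errors=True)  # with per-layer reconstruction errors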
@@ -22,12 +22,15 @@ def multilayer_beta_NMF(data, all_ranks, beta = 1, delta = 1e-6, n_iter_max_each
         if verbose:
             print(f'Layer {i} done.')

-    return W, H, reconstruction_errors
+    if return_errors:
+        return W, H, reconstruction_errors
+    else:
+        return W, H

 def one_layer_update(data, rank, beta, delta, init_each_nmf, n_iter_max_each_nmf, verbose):
     W, H, cost_fct_vals, times = nmf(data, rank, init = init_each_nmf, U_0 = None, V_0 = None, n_iter_max=n_iter_max_each_nmf, tol=1e-8,
                                      update_rule = "mu", beta = beta,
-                                     sparsity_coefficients = [None, None], fixed_modes = [], normalize = [False, False],
+                                     sparsity_coefficients = [None, None], fixed_modes = [], normalize = [False, True],
                                      verbose=verbose, return_costs=True, deterministic=False)
     W_normalized, H_normalized = normalize_WH(W, H, matrix="H")
     reconstruction_error = cost_fct_vals[-1]
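Both the new normalize = [False, True] option and the existing normalize_WH(W, H, matrix="H") call concern rescaling H while compensating in W. The exact convention used inside nn_fac is not visible in this diff; the sketch below, with a hypothetical helper name, only illustrates the usual scale-compensation idea:

import numpy as np

def rescale_H_rows(W, H, eps=1e-12):
    # Hypothetical illustration: give each row of H unit l1 norm and push the
    # scale into the matching column of W, so the product W @ H is unchanged.
    scales = H.sum(axis=1) + eps
    return W * scales[np.newaxis, :], H / scales[:, np.newaxis]

W = np.abs(np.random.rand(6, 3))
H = np.abs(np.random.rand(3, 8))
W2, H2 = rescale_H_rows(W, H)
assert np.allclose(W @ H, W2 @ H2)  # reconstruction is preserved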
@@ -7,7 +7,8 @@ def deep_KL_mu(W_Lm1, W_L, H_L, WH_Lp1, lambda_):

     ONES = np.ones_like(W_Lm1)
     a = ONES @ H_L.T - lambda_ * np.log(WH_Lp1)
     b = W_L * ((W_Lm1 / (W_L @ H_L)) @ H_L.T)
-    W_L = np.maximum(eps, (1/lambda_ * b) / (lambertw(b * np.exp(a/lambda_) / lambda_, k=0).real))
+    lambert = lambertw(b * np.exp(a/lambda_) / lambda_, k=0).real
+    W_L = np.maximum(eps, (1/lambda_ * b) / (lambert + eps))

     return W_L
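This last change also guards the division: scipy.special.lambertw returns exactly 0 where its argument is 0, which happens wherever b has zero entries, so the old expression could produce 0/0. A standalone sanity check of the + eps guard (not library code):

import numpy as np
from scipy.special import lambertw

eps = 1e-12
lambda_ = 0.5
a = np.array([1.0, 1.0, 1.0])
b = np.array([0.0, 0.5, 2.0])  # the zero entry makes the Lambert W term exactly 0

lambert = lambertw(b * np.exp(a / lambda_) / lambda_, k=0).real
with np.errstate(invalid="ignore"):
    old_style = (1 / lambda_ * b) / lambert                       # nan at the zero entry (0/0)
new_style = np.maximum(eps, (1 / lambda_ * b) / (lambert + eps))  # finite everywhere

print(old_style)
print(new_style)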