MARMORET Axel / Nonnegative Factorization
Compare revisions: f1c328f35792f41d7d05470679fea860a61264b8 (target) to 418f71405012743cbe9ac6327b98474778359815 (source), in project a23marmo/nonnegative-factorization
Commits on Source (2)

219efbe4 · Adding a parameter for returning errors (optional now) · MARMORET Axel authored 1 year ago
418f7140 · Adding a small parameter to the divide in deep mu to avoid numerical instability. · MARMORET Axel authored 1 year ago
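Taken together, the two commits make the per-layer error history an opt-in output of deep_KL_NMF and guard the multiplicative update in deep_mu against division by zero. A minimal usage sketch of the new keyword, assuming the module layout shown in the diffs below; the data, shapes, and ranks are illustrative only:

    import numpy as np
    from nn_fac.deep_nmf import deep_KL_NMF

    # Illustrative nonnegative data and decreasing layer ranks (assumed values).
    data = np.abs(np.random.rand(100, 80))
    all_ranks = [16, 8, 4]

    # Default after this change: only the factors are returned.
    W, H = deep_KL_NMF(data, all_ranks)

    # Opt in to the error history and timings with the new keyword.
    W, H, all_errors, toc = deep_KL_NMF(data, all_ranks, return_errors=True)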
Showing 3 changed files with 17 additions and 10 deletions:

nn_fac/deep_nmf.py              +8 -5
nn_fac/multilayer_nmf.py        +7 -4
nn_fac/update_rules/deep_mu.py  +2 -1
nn_fac/deep_nmf.py (view file @ 418f7140)

@@ -10,7 +10,7 @@ import nn_fac.multilayer_nmf as multi_nmf
 import nn_fac.utils.beta_divergence as beta_div
 from nn_fac.utils.normalize_wh import normalize_WH
 
-def deep_KL_NMF(data, all_ranks, n_iter_max_each_nmf = 100, n_iter_max_deep_loop = 100, init = "multilayer_nmf", init_multi_layer = "random", W_0 = None, H_0 = None, delta = 1e-6, tol = 1e-6, verbose = False):
+def deep_KL_NMF(data, all_ranks, n_iter_max_each_nmf = 100, n_iter_max_deep_loop = 100, init = "multilayer_nmf", init_multi_layer = "random", W_0 = None, H_0 = None, delta = 1e-6, tol = 1e-6, return_errors = False, verbose = False):
     L = len(all_ranks)
     assert L > 1, "The number of layers must be at least 2. Otherwise, ou should just use NMF."
@@ -23,7 +23,7 @@ def deep_KL_NMF(data, all_ranks, n_iter_max_each_nmf = 100, n_iter_max_deep_loop
         #warnings.warn("Warning: The ranks of deep NMF should be decreasing.")
 
     if init == "multilayer_nmf":
-        W, H, e = multi_nmf.multilayer_beta_NMF(data, all_ranks, n_iter_max_each_nmf = n_iter_max_each_nmf, init_each_nmf = init_multi_layer, delta = delta, verbose = False)
+        W, H, e = multi_nmf.multilayer_beta_NMF(data, all_ranks, n_iter_max_each_nmf = n_iter_max_each_nmf, init_each_nmf = init_multi_layer, delta = delta, return_errors = True, verbose = False)
         all_errors[0] = e
 
     elif init == "custom":
@@ -60,11 +60,14 @@ def deep_KL_NMF(data, all_ranks, n_iter_max_each_nmf = 100, n_iter_max_deep_loop
             if verbose:
                 print(f'Converged in {deep_iteration} iterations.')
             break
 
-    return W, H, all_errors, toc
+    if return_errors:
+        return W, H, all_errors, toc
+    else:
+        return W, H
 
 def one_step_deep_KL_nmf(data, W, H, all_ranks, lambda_, delta): # delta is useless here, because we use our own beta_nmf.
     L = len(all_ranks)
     errors = []
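Because return_errors defaults to False, existing callers that unpacked the previous four-value result need to pass the flag explicitly. A one-line migration sketch, using the names from the diff above:

    # Previously: W, H, all_errors, toc = deep_KL_NMF(data, all_ranks)
    W, H, all_errors, toc = deep_KL_NMF(data, all_ranks, return_errors=True)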
nn_fac/multilayer_nmf.py (view file @ 418f7140)

@@ -3,8 +3,8 @@ import numpy as np
 from nn_fac.nmf import nmf
 from nn_fac.utils.normalize_wh import normalize_WH
 
-def multilayer_beta_NMF(data, all_ranks, beta = 1, delta = 1e-6, n_iter_max_each_nmf = 100, init_each_nmf = "nndsvd", verbose = False):
+def multilayer_beta_NMF(data, all_ranks, beta = 1, delta = 1e-6, n_iter_max_each_nmf = 100, init_each_nmf = "nndsvd", return_errors = False, verbose = False):
     # delta is useless here, because we use our own beta_nmf.
     L = len(all_ranks)
     assert L > 1, "The number of layers must be at least 2. Otherwise, ou should just use NMF"
     if sorted(all_ranks, reverse = True) != all_ranks:
@@ -22,12 +22,15 @@ def multilayer_beta_NMF(data, all_ranks, beta = 1, delta = 1e-6, n_iter_max_each
         if verbose:
             print(f'Layer {i} done.')
 
-    return W, H, reconstruction_errors
+    if return_errors:
+        return W, H, reconstruction_errors
+    else:
+        return W, H
 
 def one_layer_update(data, rank, beta, delta, init_each_nmf, n_iter_max_each_nmf, verbose):
     W, H, cost_fct_vals, times = nmf(data, rank, init = init_each_nmf, U_0 = None, V_0 = None, n_iter_max = n_iter_max_each_nmf, tol = 1e-8,
                                      update_rule = "mu", beta = beta,
-                                     sparsity_coefficients = [None, None], fixed_modes = [], normalize = [False, False],
+                                     sparsity_coefficients = [None, None], fixed_modes = [], normalize = [False, True],
                                      verbose = verbose, return_costs = True, deterministic = False)
     W_normalized, H_normalized = normalize_WH(W, H, matrix = "H")
     reconstruction_error = cost_fct_vals[-1]
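The same opt-in pattern is applied to the multilayer routine, and the inner nmf call now normalizes the second factor (normalize = [False, True]). A minimal sketch of the updated call, assuming the module path shown above; data and ranks are illustrative:

    import numpy as np
    from nn_fac.multilayer_nmf import multilayer_beta_NMF

    data = np.abs(np.random.rand(100, 80))  # illustrative nonnegative matrix
    all_ranks = [16, 8, 4]                  # decreasing ranks, as the code expects

    # With the new flag, per-layer reconstruction errors are returned as well.
    W, H, reconstruction_errors = multilayer_beta_NMF(data, all_ranks, return_errors=True)

    # Without it, only the factors come back.
    W, H = multilayer_beta_NMF(data, all_ranks)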
nn_fac/update_rules/deep_mu.py (view file @ 418f7140)

@@ -7,7 +7,8 @@ def deep_KL_mu(W_Lm1, W_L, H_L, WH_Lp1, lambda_):
     ONES = np.ones_like(W_Lm1)
     a = ONES @ H_L.T - lambda_ * np.log(WH_Lp1)
     b = W_L * ((W_Lm1 / (W_L @ H_L)) @ H_L.T)
 
-    W_L = np.maximum(eps, (1 / lambda_ * b) / (lambertw(b * np.exp(a / lambda_) / lambda_, k = 0).real))
+    lambert = lambertw(b * np.exp(a / lambda_) / lambda_, k = 0).real
+    W_L = np.maximum(eps, (1 / lambda_ * b) / (lambert + eps))
 
     return W_L
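The extra eps in the denominator guards against entries where the Lambert W term evaluates to zero (for instance where b has zero entries), which previously produced a division by zero. A standalone sketch of the effect, assuming lambertw is scipy.special.lambertw as the update rule suggests; eps is illustrative and the lambda_ scaling of the full update is omitted for brevity:

    import numpy as np
    from scipy.special import lambertw

    eps = 1e-12                       # illustrative; deep_mu.py uses its own eps
    b = np.array([0.0, 0.5])          # a zero entry drives lambertw(...) to zero
    a_over_lambda = np.array([1.0, 1.0])

    lambert = lambertw(b * np.exp(a_over_lambda), k=0).real

    # Old form: b / lambert divides by zero where lambert == 0.
    # New form: the additive eps keeps the division finite before np.maximum clips it.
    updated = np.maximum(eps, b / (lambert + eps))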