Skip to content
GitLab
Explore
Sign in
Primary navigation
Search or go to…
Project
N
Nonnegative Factorization
Manage
Activity
Members
Labels
Plan
Issues
Issue boards
Milestones
Wiki
Code
Merge requests
Repository
Branches
Commits
Tags
Repository graph
Compare revisions
Snippets
Build
Pipelines
Jobs
Pipeline schedules
Artifacts
Deploy
Releases
Package registry
Container registry
Model registry
Operate
Environments
Terraform modules
Monitor
Incidents
Analyze
Value stream analytics
Contributor analytics
CI/CD analytics
Repository analytics
Model experiments
Help
Help
Support
GitLab documentation
Compare GitLab plans
GitLab community forum
Contribute to GitLab
Provide feedback
Keyboard shortcuts
?
Snippets
Groups
Projects
Show more breadcrumbs
MARMORET Axel
Nonnegative Factorization
Commits
219efbe4
Commit
219efbe4
authored
Jan 31, 2024
by
MARMORET Axel
Browse files
Options
Downloads
Patches
Plain Diff
Adding a parameter for returning errors (optional now)
parent
f1c328f3
No related branches found
No related tags found
No related merge requests found
Changes
2
Show whitespace changes
Inline
Side-by-side
Showing
2 changed files
nn_fac/deep_nmf.py
+8
-5
8 additions, 5 deletions
nn_fac/deep_nmf.py
nn_fac/multilayer_nmf.py
+7
-4
7 additions, 4 deletions
nn_fac/multilayer_nmf.py
with
15 additions
and
9 deletions
nn_fac/deep_nmf.py
+
8
−
5
View file @
219efbe4
...
...
@@ -10,7 +10,7 @@ import nn_fac.multilayer_nmf as multi_nmf
import
nn_fac.utils.beta_divergence
as
beta_div
from
nn_fac.utils.normalize_wh
import
normalize_WH
def
deep_KL_NMF
(
data
,
all_ranks
,
n_iter_max_each_nmf
=
100
,
n_iter_max_deep_loop
=
100
,
init
=
"
multilayer_nmf
"
,
init_multi_layer
=
"
random
"
,
W_0
=
None
,
H_0
=
None
,
delta
=
1e-6
,
tol
=
1e-6
,
verbose
=
False
):
def
deep_KL_NMF
(
data
,
all_ranks
,
n_iter_max_each_nmf
=
100
,
n_iter_max_deep_loop
=
100
,
init
=
"
multilayer_nmf
"
,
init_multi_layer
=
"
random
"
,
W_0
=
None
,
H_0
=
None
,
delta
=
1e-6
,
tol
=
1e-6
,
return_errors
=
False
,
verbose
=
False
):
L
=
len
(
all_ranks
)
assert
L
>
1
,
"
The number of layers must be at least 2. Otherwise, you should just use NMF.
"
...
...
@@ -23,7 +23,7 @@ def deep_KL_NMF(data, all_ranks, n_iter_max_each_nmf = 100, n_iter_max_deep_loop
#warnings.warn("Warning: The ranks of deep NMF should be decreasing.")
if
init
==
"
multilayer_nmf
"
:
W
,
H
,
e
=
multi_nmf
.
multilayer_beta_NMF
(
data
,
all_ranks
,
n_iter_max_each_nmf
=
n_iter_max_each_nmf
,
init_each_nmf
=
init_multi_layer
,
delta
=
delta
,
verbose
=
False
)
W
,
H
,
e
=
multi_nmf
.
multilayer_beta_NMF
(
data
,
all_ranks
,
n_iter_max_each_nmf
=
n_iter_max_each_nmf
,
init_each_nmf
=
init_multi_layer
,
delta
=
delta
,
return_errors
=
True
,
verbose
=
False
)
all_errors
[
0
]
=
e
elif
init
==
"
custom
"
:
...
...
@@ -61,10 +61,13 @@ def deep_KL_NMF(data, all_ranks, n_iter_max_each_nmf = 100, n_iter_max_deep_loop
print
(
f
'
Converged in
{
deep_iteration
}
iterations.
'
)
break
if
return_errors
:
return
W
,
H
,
all_errors
,
toc
else
:
return
W
,
H
def
one_step_deep_KL_nmf
(
data
,
W
,
H
,
all_ranks
,
lambda_
,
delta
):
# delta is useless here, because we use our own beta_nmf.
L
=
len
(
all_ranks
)
errors
=
[]
...
...
...
...
This diff is collapsed.
Click to expand it.
nn_fac/multilayer_nmf.py
+
7
−
4
View file @
219efbe4
...
...
@@ -3,8 +3,8 @@ import numpy as np
from
nn_fac.nmf
import
nmf
from
nn_fac.utils.normalize_wh
import
normalize_WH
def
multilayer_beta_NMF
(
data
,
all_ranks
,
beta
=
1
,
delta
=
1e-6
,
n_iter_max_each_nmf
=
100
,
init_each_nmf
=
"
nndsvd
"
,
verbose
=
False
):
def
multilayer_beta_NMF
(
data
,
all_ranks
,
beta
=
1
,
delta
=
1e-6
,
n_iter_max_each_nmf
=
100
,
init_each_nmf
=
"
nndsvd
"
,
return_errors
=
False
,
verbose
=
False
):
# delta is useless here, because we use our own beta_nmf.
L
=
len
(
all_ranks
)
assert
L
>
1
,
"
The number of layers must be at least 2. Otherwise, you should just use NMF
"
if
sorted
(
all_ranks
,
reverse
=
True
)
!=
all_ranks
:
...
...
@@ -22,12 +22,15 @@ def multilayer_beta_NMF(data, all_ranks, beta = 1, delta = 1e-6, n_iter_max_each
if
verbose
:
print
(
f
'
Layer
{
i
}
done.
'
)
if
return_errors
:
return
W
,
H
,
reconstruction_errors
else
:
return
W
,
H
def
one_layer_update
(
data
,
rank
,
beta
,
delta
,
init_each_nmf
,
n_iter_max_each_nmf
,
verbose
):
W
,
H
,
cost_fct_vals
,
times
=
nmf
(
data
,
rank
,
init
=
init_each_nmf
,
U_0
=
None
,
V_0
=
None
,
n_iter_max
=
n_iter_max_each_nmf
,
tol
=
1e-8
,
update_rule
=
"
mu
"
,
beta
=
beta
,
sparsity_coefficients
=
[
None
,
None
],
fixed_modes
=
[],
normalize
=
[
False
,
False
],
sparsity_coefficients
=
[
None
,
None
],
fixed_modes
=
[],
normalize
=
[
False
,
True
],
verbose
=
verbose
,
return_costs
=
True
,
deterministic
=
False
)
W_normalized
,
H_normalized
=
normalize_WH
(
W
,
H
,
matrix
=
"
H
"
)
reconstruction_error
=
cost_fct_vals
[
-
1
]
...
...
...
...
This diff is collapsed.
Click to expand it.
Preview
0%
Loading
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Save comment
Cancel
Please
sign in
to comment