# U.S. Government retains certain rights in this software.

import pyttb as ttb
-from .pyttb_utils import *
+from pyttb.pyttb_utils import *
import numpy as np

def cp_als(tensor, rank, stoptol=1e-4, maxiters=1000, dimorder=None,
@@ -51,54 +51,56 @@ def cp_als(tensor, rank, stoptol=1e-4, maxiters=1000, dimorder=None,

    Example
    -------
+    Random initialization causes slight perturbation in intermediate results.
+    `...` is our placeholder for these numeric values.
    Example using default values ("random" initialization):

    >>> weights = np.array([1., 2.])
    >>> fm0 = np.array([[1., 2.], [3., 4.]])
    >>> fm1 = np.array([[5., 6.], [7., 8.]])
    >>> K = ttb.ktensor.from_data(weights, [fm0, fm1])
    >>> np.random.seed(1)
-    >>> M, Minit, output = ttb.cp_als(K.full(), 2)
+    >>> M, Minit, output = ttb.cp_als(K.full(), 2) # doctest: +ELLIPSIS
    CP_ALS:
-     Iter 0: f = 0.9999999836180988 f-delta = 0.9999999836180988
-     Iter 1: f = 0.9999999836180988 f-delta = 0.0
-     Final f = 0.9999999836180988
-    >>> print(M)
+     Iter 0: f = ... f-delta = ...
+     Iter 1: f = ... f-delta = ...
+     Final f = ...
+    >>> print(M) # doctest: +ELLIPSIS
    ktensor of shape 2 x 2
-    weights=[108.47158396 8.61141076]
+    weights=[108.4715... 8.6114...]
    factor_matrices[0] =
-    [[0.41877462 0.39899343]
-     [0.9080902 0.91695378]]
+    [[0.4187... 0.3989...]
+     [0.9080... 0.9169...]]
    factor_matrices[1] =
-    [[0.61888633 0.25815611]
-     [0.78548056 0.96610322]]
-    >>> print(Minit)
+    [[0.6188... 0.2581...]
+     [0.7854... 0.9661...]]
+    >>> print(Minit) # doctest: +ELLIPSIS
    ktensor of shape 2 x 2
    weights=[1. 1.]
    factor_matrices[0] =
-    [[4.17022005e-01 7.20324493e-01]
-     [1.14374817e-04 3.02332573e-01]]
+    [[4.1702...e-01 7.2032...e-01]
+     [1.1437...e-04 3.0233...e-01]]
    factor_matrices[1] =
-    [[0.14675589 0.09233859]
-     [0.18626021 0.34556073]]
+    [[0.1467... 0.0923...]
+     [0.1862... 0.3455...]]
    >>> print(output)
-    {'params': (0.0001, 1000, 1, [0, 1]), 'iters': 1, 'normresidual': 1.9073486328125e-06, 'fit': 0.9999999836180988}
+    {'params': (0.0001, 1000, 1, [0, 1]), 'iters': 1, 'normresidual': ..., 'fit': ...}

    Example using "nvecs" initialization:

-    >>> M, Minit, output = ttb.cp_als(K.full(), 2, init="nvecs")
+    >>> M, Minit, output = ttb.cp_als(K.full(), 2, init="nvecs") # doctest: +ELLIPSIS
    CP_ALS:
-     Iter 0: f = 1.0 f-delta = 1.0
-     Iter 1: f = 1.0 f-delta = 0.0
-     Final f = 1.0
+     Iter 0: f = ... f-delta = ...
+     Iter 1: f = ... f-delta = ...
+     Final f = ...

    Example using :class:`pyttb.ktensor` initialization:

-    >>> M, Minit, output = ttb.cp_als(K.full(), 2, init=K)
+    >>> M, Minit, output = ttb.cp_als(K.full(), 2, init=K) # doctest: +ELLIPSIS
    CP_ALS:
-     Iter 0: f = 0.9999999836180988 f-delta = 0.9999999836180988
-     Iter 1: f = 0.9999999836180988 f-delta = 0.0
-     Final f = 0.9999999836180988
+     Iter 0: f = ... f-delta = ...
+     Iter 1: f = ... f-delta = ...
+     Final f = ...
    """

    # Extract number of dimensions and norm of tensor
@@ -246,5 +248,5 @@ def cp_als(tensor, rank, stoptol=1e-4, maxiters=1000, dimorder=None,

if __name__ == "__main__":
    import doctest  # pragma: no cover
-    import pyttb as ttb  # pragma: no cover
+
    doctest.testmod()  # pragma: no cover
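For context on the change above: the new `# doctest: +ELLIPSIS` directives are what let the literal `...` placeholders in the expected output match whatever digits a particular run produces. Below is a minimal, self-contained sketch of that mechanism, separate from the pyttb code; the `noisy` function is a hypothetical example, not part of pyttb.

# Minimal sketch of the doctest ELLIPSIS mechanism used in the diff above.
# The `noisy` helper is hypothetical; only doctest.testmod() mirrors the
# pattern in cp_als.py's __main__ block.
import doctest
import random


def noisy():
    """Return a random float; the exact digits differ from run to run.

    >>> noisy()  # doctest: +ELLIPSIS
    0...
    """
    return random.random()


if __name__ == "__main__":
    # Collect and run every docstring example in this module, letting `...`
    # in the expected output stand in for the run-specific digits.
    doctest.testmod()

Running the file directly prints nothing when the example passes; `doctest.testmod(verbose=True)` shows each check as it runs.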