Skip to content

[MRG] Cleanup test warnings #242

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 4 commits into from
Apr 19, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 6 additions & 6 deletions ot/da.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@
def sinkhorn_lpl1_mm(a, labels_a, b, M, reg, eta=0.1, numItermax=10,
numInnerItermax=200, stopInnerThr=1e-9, verbose=False,
log=False):
"""
r"""
Solve the entropic regularization optimal transport problem with nonconvex
group lasso regularization
Expand Down Expand Up @@ -137,7 +137,7 @@ def sinkhorn_lpl1_mm(a, labels_a, b, M, reg, eta=0.1, numItermax=10,
def sinkhorn_l1l2_gl(a, labels_a, b, M, reg, eta=0.1, numItermax=10,
numInnerItermax=200, stopInnerThr=1e-9, verbose=False,
log=False):
"""
r"""
Solve the entropic regularization optimal transport problem with group
lasso regularization
Expand Down Expand Up @@ -245,7 +245,7 @@ def joint_OT_mapping_linear(xs, xt, mu=1, eta=0.001, bias=False, verbose=False,
verbose2=False, numItermax=100, numInnerItermax=10,
stopInnerThr=1e-6, stopThr=1e-5, log=False,
**kwargs):
"""Joint OT and linear mapping estimation as proposed in [8]
r"""Joint OT and linear mapping estimation as proposed in [8]
The function solves the following optimization problem:
Expand Down Expand Up @@ -434,7 +434,7 @@ def joint_OT_mapping_kernel(xs, xt, mu=1, eta=0.001, kerneltype='gaussian',
numItermax=100, numInnerItermax=10,
stopInnerThr=1e-6, stopThr=1e-5, log=False,
**kwargs):
"""Joint OT and nonlinear mapping estimation with kernels as proposed in [8]
r"""Joint OT and nonlinear mapping estimation with kernels as proposed in [8]
The function solves the following optimization problem:
Expand Down Expand Up @@ -645,7 +645,7 @@ def df(G):

def OT_mapping_linear(xs, xt, reg=1e-6, ws=None,
wt=None, bias=True, log=False):
""" return OT linear operator between samples
r""" return OT linear operator between samples
The function estimates the optimal linear operator that aligns the two
empirical distributions. This is equivalent to estimating the closed
Expand Down Expand Up @@ -1228,7 +1228,7 @@ def inverse_transform_labels(self, yt=None):


class LinearTransport(BaseTransport):
""" OT linear operator between empirical distributions
r""" OT linear operator between empirical distributions
The function estimates the optimal linear operator that aligns the two
empirical distributions. This is equivalent to estimating the closed
Expand Down
2 changes: 1 addition & 1 deletion ot/dr.py
Original file line number Diff line number Diff line change
Expand Up @@ -109,7 +109,7 @@ def proj(X):


def wda(X, y, p=2, reg=1, k=10, solver=None, maxiter=100, verbose=0, P0=None):
"""
r"""
Wasserstein Discriminant Analysis [11]_

The function solves the following optimization problem:
Expand Down
2 changes: 1 addition & 1 deletion ot/gpu/bregman.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@

def sinkhorn_knopp(a, b, M, reg, numItermax=1000, stopThr=1e-9,
verbose=False, log=False, to_numpy=True, **kwargs):
"""
r"""
Solve the entropic regularization optimal transport on GPU

If the input matrix are in numpy format, they will be uploaded to the
Expand Down
20 changes: 10 additions & 10 deletions ot/gromov.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@


def init_matrix(C1, C2, p, q, loss_fun='square_loss'):
"""Return loss matrices and tensors for Gromov-Wasserstein fast computation
r"""Return loss matrices and tensors for Gromov-Wasserstein fast computation

Returns the value of \mathcal{L}(C1,C2) \otimes T with the selected loss
function as the loss function of Gromov-Wasserstein discrepancy.
Expand Down Expand Up @@ -109,7 +109,7 @@ def h2(b):


def tensor_product(constC, hC1, hC2, T):
"""Return the tensor for Gromov-Wasserstein fast computation
r"""Return the tensor for Gromov-Wasserstein fast computation

The tensor is computed as described in Proposition 1 Eq. (6) in [12].

Expand Down Expand Up @@ -262,7 +262,7 @@ def update_kl_loss(p, lambdas, T, Cs):


def gromov_wasserstein(C1, C2, p, q, loss_fun, log=False, armijo=False, **kwargs):
"""
r"""
Returns the gromov-wasserstein transport between (C1,p) and (C2,q)

The function solves the following optimization problem:
Expand Down Expand Up @@ -343,7 +343,7 @@ def df(G):


def gromov_wasserstein2(C1, C2, p, q, loss_fun, log=False, armijo=False, **kwargs):
"""
r"""
Returns the gromov-wasserstein discrepancy between (C1,p) and (C2,q)

The function solves the following optimization problem:
Expand Down Expand Up @@ -420,7 +420,7 @@ def df(G):


def fused_gromov_wasserstein(M, C1, C2, p, q, loss_fun='square_loss', alpha=0.5, armijo=False, log=False, **kwargs):
"""
r"""
Computes the FGW transport between two graphs see [24]

.. math::
Expand Down Expand Up @@ -496,7 +496,7 @@ def df(G):


def fused_gromov_wasserstein2(M, C1, C2, p, q, loss_fun='square_loss', alpha=0.5, armijo=False, log=False, **kwargs):
"""
r"""
Computes the FGW distance between two graphs see [24]

.. math::
Expand Down Expand Up @@ -574,7 +574,7 @@ def df(G):

def entropic_gromov_wasserstein(C1, C2, p, q, loss_fun, epsilon,
max_iter=1000, tol=1e-9, verbose=False, log=False):
"""
r"""
Returns the gromov-wasserstein transport between (C1,p) and (C2,q)

(C1,p) and (C2,q)
Expand Down Expand Up @@ -681,7 +681,7 @@ def entropic_gromov_wasserstein(C1, C2, p, q, loss_fun, epsilon,

def entropic_gromov_wasserstein2(C1, C2, p, q, loss_fun, epsilon,
max_iter=1000, tol=1e-9, verbose=False, log=False):
"""
r"""
Returns the entropic gromov-wasserstein discrepancy between the two measured similarity matrices

(C1,p) and (C2,q)
Expand Down Expand Up @@ -747,7 +747,7 @@ def entropic_gromov_wasserstein2(C1, C2, p, q, loss_fun, epsilon,

def entropic_gromov_barycenters(N, Cs, ps, p, lambdas, loss_fun, epsilon,
max_iter=1000, tol=1e-9, verbose=False, log=False, init_C=None):
"""
r"""
Returns the gromov-wasserstein barycenters of S measured similarity matrices

(Cs)_{s=1}^{s=S}
Expand Down Expand Up @@ -857,7 +857,7 @@ def entropic_gromov_barycenters(N, Cs, ps, p, lambdas, loss_fun, epsilon,

def gromov_barycenters(N, Cs, ps, p, lambdas, loss_fun,
max_iter=1000, tol=1e-9, verbose=False, log=False, init_C=None):
"""
r"""
Returns the gromov-wasserstein barycenters of S measured similarity matrices

(Cs)_{s=1}^{s=S}
Expand Down
3 changes: 1 addition & 2 deletions ot/lp/cvx.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ def scipy_sparse_to_spmatrix(A):


def barycenter(A, M, weights=None, verbose=False, log=False, solver='interior-point'):
"""Compute the Wasserstein barycenter of distributions A
r"""Compute the Wasserstein barycenter of distributions A

The function solves the following optimization problem [16]:

Expand Down Expand Up @@ -76,7 +76,6 @@ def barycenter(A, M, weights=None, verbose=False, log=False, solver='interior-po
.. [16] Agueh, M., & Carlier, G. (2011). Barycenters in the Wasserstein space. SIAM Journal on Mathematical Analysis, 43(2), 904-924.



"""

if weights is None:
Expand Down
4 changes: 2 additions & 2 deletions ot/optim.py
Original file line number Diff line number Diff line change
Expand Up @@ -139,7 +139,7 @@ def solve_linesearch(cost, G, deltaG, Mi, f_val,

def cg(a, b, M, reg, f, df, G0=None, numItermax=200, numItermaxEmd=100000,
stopThr=1e-9, stopThr2=1e-9, verbose=False, log=False, **kwargs):
"""
r"""
Solve the general regularized OT problem with conditional gradient

The function solves the following optimization problem:
Expand Down Expand Up @@ -278,7 +278,7 @@ def cost(G):

def gcg(a, b, M, reg1, reg2, f, df, G0=None, numItermax=10,
numInnerItermax=200, stopThr=1e-9, stopThr2=1e-9, verbose=False, log=False):
"""
r"""
Solve the general regularized OT problem with the generalized conditional gradient

The function solves the following optimization problem:
Expand Down
3 changes: 2 additions & 1 deletion test/test_bregman.py
Original file line number Diff line number Diff line change
Expand Up @@ -321,8 +321,9 @@ def test_implemented_methods():
# make dists unbalanced
b = ot.utils.unif(n)
A = rng.rand(n, 2)
A /= A.sum(0, keepdims=True)
M = ot.dist(x, x)
epsilon = 1.
epsilon = 1.0

for method in IMPLEMENTED_METHODS:
ot.bregman.sinkhorn(a, b, M, epsilon, method=method)
Expand Down