Dual OT solvers for entropic and quadratic regularized OT with PyTorch

# Author: Remi Flamary <remi.flamary@polytechnique.edu>
#
# License: MIT License

# sphinx_gallery_thumbnail_number = 3

import numpy as np
import matplotlib.pyplot as pl
import torch
import ot
import ot.plot

Data generation

# Fix the RNG seed so the example is reproducible
torch.manual_seed(1)

n_source_samples = 100
n_target_samples = 100
noise_level = 0.1
theta = 2 * np.pi / 20  # rotation applied when drawing the target data

# Draw rotated-Gaussian classification data for the source and the target
Xs, ys = ot.datasets.make_data_classif("gaussrot", n_source_samples, nz=noise_level)
Xt, yt = ot.datasets.make_data_classif(
    "gaussrot", n_target_samples, theta=theta, nz=noise_level
)

# one of the target modes changes its variance and all points are shifted,
# so no linear mapping can align source and target
Xt[yt == 2] *= 3
Xt = Xt + 4

Plot data

# Visualize the raw source and target samples
pl.figure(1, (10, 5))
pl.clf()
for pts, mk, lbl in ((Xs, "+", "Source samples"), (Xt, "o", "Target samples")):
    pl.scatter(pts[:, 0], pts[:, 1], marker=mk, label=lbl)
pl.legend(loc=0)
pl.title("Source and target distributions")
Source and target distributions
Text(0.5, 1.0, 'Source and target distributions')

Convert data to torch tensors

Estimating dual variables for entropic OT

# Convert the numpy data to torch tensors so the dual loss is differentiable
# (this step was referenced above but its code was missing, leaving xs/xt
# undefined for the training loops below)
xs = torch.tensor(Xs)
xt = torch.tensor(Xt)

# Dual potentials: u has one entry per source sample, v one per target sample.
# The original sized v with n_source_samples; that only worked because the
# two sample counts happen to be equal in this example.
u = torch.randn(n_source_samples, requires_grad=True)
v = torch.randn(n_target_samples, requires_grad=True)

reg = 0.5  # entropic regularization strength

optimizer = torch.optim.Adam([u, v], lr=1)

# number of iterations
n_iter = 200


losses = []

for it in range(n_iter):
    # negate: we maximize the dual objective by minimizing its opposite
    loss = -ot.stochastic.loss_dual_entropic(u, v, xs, xt, reg=reg)
    losses.append(float(loss.detach()))

    # log progress every 10 iterations
    if it % 10 == 0:
        print("Iter: {:3d}, loss={}".format(it, losses[-1]))

    # standard gradient step on the dual variables
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()


# Convergence curve of the (negated) entropic dual objective
pl.figure(2)
pl.plot(losses)
pl.grid()
pl.xlabel("Iterations")
pl.title("Dual objective (negative)")

# Recover the entropic OT plan from the learned dual potentials
Ge = ot.stochastic.plan_dual_entropic(u, v, xs, xt, reg=reg)
Dual objective (negative)
Iter:   0, loss=0.20204949002247324
Iter:  10, loss=-19.512574806268116
Iter:  20, loss=-31.073153911808895
Iter:  30, loss=-35.55871444660664
Iter:  40, loss=-37.77794926082943
Iter:  50, loss=-39.171990557878146
Iter:  60, loss=-39.57328776878489
Iter:  70, loss=-39.793972029937365
Iter:  80, loss=-39.850890559048295
Iter:  90, loss=-39.87255757269692
Iter: 100, loss=-39.88304150470942
Iter: 110, loss=-39.88960049003411
Iter: 120, loss=-39.892327735172024
Iter: 130, loss=-39.893702718640725
Iter: 140, loss=-39.89456221670055
Iter: 150, loss=-39.895122495511174
Iter: 160, loss=-39.895508126256615
Iter: 170, loss=-39.89578213031158
Iter: 180, loss=-39.8959816276472
Iter: 190, loss=-39.896130626454514

Plot the estimated entropic OT plan

# Overlay the estimated entropic transport plan on the samples
pl.figure(3, (10, 5))
pl.clf()
ot.plot.plot2D_samples_mat(Xs, Xt, Ge.detach().numpy(), alpha=0.1)
for pts, mk, lbl in ((Xs, "+", "Source samples"), (Xt, "o", "Target samples")):
    pl.scatter(pts[:, 0], pts[:, 1], marker=mk, label=lbl, zorder=2)
pl.legend(loc=0)
pl.title("Source and target distributions")
Source and target distributions
Text(0.5, 1.0, 'Source and target distributions')

Estimating dual variables for quadratic OT

# Re-initialize the dual potentials for the quadratic-regularized problem:
# u has one entry per source sample, v one per target sample.
# The original sized v with n_source_samples; that only worked because the
# two sample counts happen to be equal in this example.
u = torch.randn(n_source_samples, requires_grad=True)
v = torch.randn(n_target_samples, requires_grad=True)

reg = 0.01  # quadratic regularization strength

optimizer = torch.optim.Adam([u, v], lr=1)

# number of iterations
n_iter = 200


losses = []


for it in range(n_iter):
    # negate: we maximize the dual objective by minimizing its opposite
    loss = -ot.stochastic.loss_dual_quadratic(u, v, xs, xt, reg=reg)
    losses.append(float(loss.detach()))

    # log progress every 10 iterations
    if it % 10 == 0:
        print("Iter: {:3d}, loss={}".format(it, losses[-1]))

    # standard gradient step on the dual variables
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()


# Convergence curve of the (negated) quadratic dual objective
pl.figure(4)
pl.plot(losses)
pl.grid()
pl.xlabel("Iterations")
pl.title("Dual objective (negative)")

# Recover the quadratic-regularized OT plan from the learned dual potentials
Gq = ot.stochastic.plan_dual_quadratic(u, v, xs, xt, reg=reg)
Dual objective (negative)
Iter:   0, loss=-0.0018442196020623663
Iter:  10, loss=-19.560225704371927
Iter:  20, loss=-30.85163720601527
Iter:  30, loss=-35.18881564110298
Iter:  40, loss=-37.60196016486029
Iter:  50, loss=-38.8845853328738
Iter:  60, loss=-39.38947131192644
Iter:  70, loss=-39.63493665058151
Iter:  80, loss=-39.70912671034389
Iter:  90, loss=-39.743498101022084
Iter: 100, loss=-39.755642555545954
Iter: 110, loss=-39.76252448160897
Iter: 120, loss=-39.766049757495686
Iter: 130, loss=-39.76780557310809
Iter: 140, loss=-39.76868145683973
Iter: 150, loss=-39.769121027001134
Iter: 160, loss=-39.76936764927778
Iter: 170, loss=-39.76952207145638
Iter: 180, loss=-39.76962536927158
Iter: 190, loss=-39.76969638269907

Plot the estimated quadratic OT plan

# Overlay the estimated quadratic-regularized transport plan on the samples
pl.figure(5, (10, 5))
pl.clf()
ot.plot.plot2D_samples_mat(Xs, Xt, Gq.detach().numpy(), alpha=0.1)
for pts, mk, lbl in ((Xs, "+", "Source samples"), (Xt, "o", "Target samples")):
    pl.scatter(pts[:, 0], pts[:, 1], marker=mk, label=lbl, zorder=2)
pl.legend(loc=0)
pl.title("OT plan with quadratic regularization")
OT plan with quadratic regularization
Text(0.5, 1.0, 'OT plan with quadratic regularization')

Total running time of the script: (0 minutes 9.927 seconds)

Gallery generated by Sphinx-Gallery