author     Vaggelis Papoutsellis <22398586+epapoutsellis@users.noreply.github.com>  2019-10-29 16:18:53 +0000
committer  Edoardo Pasca <edo.paskino@gmail.com>  2019-10-29 16:18:53 +0000
commit     ed8a87b02df761cbaec71ecd790a647c3ef49c48 (patch)
tree       3ddc39c3119d08329ba1a2d65a05628492c4184a
parent     e00b88c681f0c906576c4cff0ac7db872ce5ff59 (diff)
fix ROF_TV (#28)
* fix ROF_TV
* fix ROF_TV
* add TGV proximal
* add new regs
* added FGP_dTV
* fix out
* remove test to wip
-rw-r--r--  Wrappers/Python/ccpi/plugins/regularisers.py  267
-rw-r--r--  Wrappers/Python/wip/test_LLT_ROF_framework_ccpi_regulariser.py  179
-rw-r--r--  Wrappers/Python/wip/test_TGV_framework_ccpi_regulariser.py  173
-rw-r--r--  Wrappers/Python/wip/test_TVN_framework_ccpi_regulariser.py  147
4 files changed, 739 insertions(+), 27 deletions(-)
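The diff below exposes several CCPi-Regularisation Toolkit methods as framework Function objects whose proximal(x, tau, out=None) method calls the toolkit denoiser. A minimal usage sketch, assuming the toolkit is installed and noisy_data is a 2D ImageData as in the wip scripts added further down (parameter values are illustrative, not taken from the commit):

from ccpi.plugins.regularisers import FGP_TV

# FGP_TV(lambdaReg, iterationsTV, tolerance, methodTV, nonnegativity, printing, device)
# -- signature as shown in the diff; 0/0/0 is intended as isotropic TV,
#    no non-negativity constraint, no printing
g = FGP_TV(0.1, 500, 1e-6, 0, 0, 0, 'cpu')

denoised = g.proximal(noisy_data, tau=1.0)   # fills and returns a container

out = noisy_data.copy()                      # in-place variant, assuming a
g.proximal(noisy_data, tau=1.0, out=out)     # pre-allocated container

Note that in the TV-type classes (ROF_TV, SB_TV, FGP_dTV) proximal multiplies the regularisation parameter by tau, as can be seen in the diff below, so the same object can be handed to the framework's proximal splitting algorithms.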
diff --git a/Wrappers/Python/ccpi/plugins/regularisers.py b/Wrappers/Python/ccpi/plugins/regularisers.py
index 6ed9fb2..4770593 100644
--- a/Wrappers/Python/ccpi/plugins/regularisers.py
+++ b/Wrappers/Python/ccpi/plugins/regularisers.py
@@ -1,28 +1,28 @@
# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
+# Copyright 2019 Science Technology Facilities Council
+# Copyright 2019 University of Manchester
+#
+# This work is part of the Core Imaging Library developed by Science Technology
+# Facilities Council and University of Manchester
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0.txt
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
-# Copyright 2018 Jakob Jorgensen, Daniil Kazantsev and Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This requires CCPi-Regularisation toolbox to be installed
from ccpi.filters import regularisers
from ccpi.filters.cpu_regularisers import TV_ENERGY
from ccpi.framework import DataContainer
from ccpi.optimisation.functions import Function
import numpy as np
+import warnings
class ROF_TV(Function):
@@ -32,28 +32,34 @@ class ROF_TV(Function):
self.iterationsTV = iterationsTV
self.time_marchstep = time_marchstep
self.device = device # string for 'cpu' or 'gpu'
+ self.tolerance = tolerance
+
def __call__(self,x):
# evaluate objective function of TV gradient
EnergyValTV = TV_ENERGY(np.asarray(x.as_array(), dtype=np.float32), np.asarray(x.as_array(), dtype=np.float32), self.lambdaReg, 2)
return 0.5*EnergyValTV[0]
- def prox(self,x,tau):
+
+ def proximal(self,x,tau, out = None):
pars = {'algorithm' : ROF_TV, \
'input' : np.asarray(x.as_array(), dtype=np.float32),\
'regularization_parameter':self.lambdaReg*tau, \
'number_of_iterations' :self.iterationsTV ,\
- 'time_marching_parameter':self.time_marchstep}
+ 'time_marching_parameter':self.time_marchstep,\
+ 'tolerance':self.tolerance}
res , info = regularisers.ROF_TV(pars['input'],
pars['regularization_parameter'],
pars['number_of_iterations'],
- pars['time_marching_parameter'], self.device)
+ pars['time_marching_parameter'], pars['tolerance'], self.device)
+
+ self.info = info
+
if out is not None:
out.fill(res)
else:
out = x.copy()
out.fill(res)
return out
-
class FGP_TV(Function):
def __init__(self,lambdaReg,iterationsTV,tolerance,methodTV,nonnegativity,printing,device):
# set parameters
@@ -91,7 +97,171 @@ class FGP_TV(Function):
out = x.copy()
out.fill(res)
return out
+
+ def convex_conjugate(self,x):
+ return 0.0
+
+
+class TGV(Function):
+
+    def __init__(self, regularisation_parameter, alpha1, alpha2, iter_TGV, LipshitzConstant, tolerance, device):
+
+ self.regularisation_parameter = regularisation_parameter
+ self.alpha1 = alpha1
+ self.alpha2 = alpha2
+ self.iter_TGV = iter_TGV
+ self.LipshitzConstant = LipshitzConstant
+        self.tolerance = tolerance
+ self.device = device
+
+ def __call__(self,x):
+ warnings.warn("{}: the __call__ method is not currently implemented. Returning 0.".format(self.__class__.__name__))
+
+ # TODO this is not correct, need a TGV energy same as TV
+ return 0.0
+
+ def proximal(self, x, tau, out=None):
+
+ pars = {'algorithm' : TGV, \
+ 'input' : np.asarray(x.as_array(), dtype=np.float32),\
+ 'regularisation_parameter':self.regularisation_parameter, \
+ 'alpha1':self.alpha1,\
+ 'alpha0':self.alpha2,\
+ 'number_of_iterations' :self.iter_TGV ,\
+ 'LipshitzConstant' :self.LipshitzConstant ,\
+                'tolerance_constant':self.tolerance}
+
+ res , info = regularisers.TGV(pars['input'],
+ pars['regularisation_parameter'],
+ pars['alpha1'],
+ pars['alpha0'],
+ pars['number_of_iterations'],
+ pars['LipshitzConstant'],
+ pars['tolerance_constant'],self.device)
+
+        # info: returns the number of iterations performed and the reached tolerance
+        # https://github.com/vais-ral/CCPi-Regularisation-Toolkit/blob/master/src/Core/regularisers_CPU/TGV_core.c#L168
+        # Stopping criterion: || u^k - u^(k-1) ||_{2} / || u^{k} ||_{2}
+
+ self.info = info
+
+ if out is not None:
+ out.fill(res)
+ else:
+ out = x.copy()
+ out.fill(res)
+ return out
+
+ def convex_conjugate(self, x):
+ # TODO this is not correct
+ return 0.0
+
+
+class LLT_ROF(Function):
+
+# pars = {'algorithm' : LLT_ROF, \
+# 'input' : noisyVol,\
+# 'regularisation_parameterROF':0.01, \
+# 'regularisation_parameterLLT':0.008, \
+# 'number_of_iterations' : 500 ,\
+# 'time_marching_parameter' :0.001 ,\
+# 'tolerance_constant':1e-06}
+
+ def __init__(self, regularisation_parameterROF,
+ regularisation_parameterLLT,
+                 iter_LLT_ROF, time_marching_parameter, tolerance, device):
+
+ self.regularisation_parameterROF = regularisation_parameterROF
+ self.regularisation_parameterLLT = regularisation_parameterLLT
+ self.iter_LLT_ROF = iter_LLT_ROF
+ self.time_marching_parameter = time_marching_parameter
+        self.tolerance = tolerance
+ self.device = device
+
+ def __call__(self,x):
+ warnings.warn("{}: the __call__ method is not currently implemented. Returning 0.".format(self.__class__.__name__))
+
+        # TODO this is not correct, need an LLT_ROF energy analogous to TV_ENERGY
+ return 0.0
+
+ def proximal(self, x, tau, out=None):
+
+ pars = {'algorithm' : LLT_ROF, \
+ 'input' : np.asarray(x.as_array(), dtype=np.float32),\
+ 'regularisation_parameterROF':self.regularisation_parameterROF, \
+ 'regularisation_parameterLLT':self.regularisation_parameterLLT,
+ 'number_of_iterations' :self.iter_LLT_ROF ,\
+ 'time_marching_parameter': self.time_marching_parameter,\
+                'tolerance_constant':self.tolerance}
+
+
+
+ res , info = regularisers.LLT_ROF(pars['input'],
+ pars['regularisation_parameterROF'],
+ pars['regularisation_parameterLLT'],
+ pars['number_of_iterations'],
+ pars['time_marching_parameter'],
+ pars['tolerance_constant'],self.device)
+
+        # info: returns the number of iterations performed and the reached tolerance
+        # (same stopping criterion as TGV, see
+        # https://github.com/vais-ral/CCPi-Regularisation-Toolkit/blob/master/src/Core/regularisers_CPU/TGV_core.c#L168)
+        # Stopping criterion: || u^k - u^(k-1) ||_{2} / || u^{k} ||_{2}
+
+        self.info = info
+
+        if out is not None:
+            out.fill(res)
+        else:
+            out = x.copy()
+            out.fill(res)
+        return out
+
+
+class FGP_dTV(Function):
+ def __init__(self, refdata, regularisation_parameter, iterations,
+ tolerance, eta_const, methodTV, nonneg, device='cpu'):
+ # set parameters
+ self.lambdaReg = regularisation_parameter
+ self.iterationsTV = iterations
+ self.tolerance = tolerance
+ self.methodTV = methodTV
+ self.nonnegativity = nonneg
+ self.device = device # string for 'cpu' or 'gpu'
+ self.refData = np.asarray(refdata.as_array(), dtype=np.float32)
+ self.eta = eta_const
+
+ def __call__(self,x):
+ # evaluate objective function of TV gradient
+ EnergyValTV = TV_ENERGY(np.asarray(x.as_array(), dtype=np.float32), np.asarray(x.as_array(), dtype=np.float32), self.lambdaReg, 2)
+ return 0.5*EnergyValTV[0]
+ def proximal(self,x,tau, out=None):
+ pars = {'algorithm' : FGP_dTV, \
+ 'input' : np.asarray(x.as_array(), dtype=np.float32),\
+ 'regularization_parameter':self.lambdaReg*tau, \
+ 'number_of_iterations' :self.iterationsTV ,\
+ 'tolerance_constant':self.tolerance,\
+ 'methodTV': self.methodTV ,\
+ 'nonneg': self.nonnegativity ,\
+ 'eta_const' : self.eta,\
+ 'refdata':self.refData}
+ #inputData, refdata, regularisation_parameter, iterations,
+ # tolerance_param, eta_const, methodTV, nonneg, device='cpu'
+ res , info = regularisers.FGP_dTV(pars['input'],
+ pars['refdata'],
+ pars['regularization_parameter'],
+ pars['number_of_iterations'],
+ pars['tolerance_constant'],
+ pars['eta_const'],
+ pars['methodTV'],
+ pars['nonneg'],
+ self.device)
+ if out is not None:
+ out.fill(res)
+ else:
+ out = x.copy()
+ out.fill(res)
+ return out
+
+ def convex_conjugate(self, x):
+ # TODO this is not correct
+ return 0.0
+
+
+
class SB_TV(Function):
def __init__(self,lambdaReg,iterationsTV,tolerance,methodTV,printing,device):
# set parameters
@@ -101,28 +271,71 @@ class SB_TV(Function):
self.methodTV = methodTV
self.printing = printing
self.device = device # string for 'cpu' or 'gpu'
+
def __call__(self,x):
+
# evaluate objective function of TV gradient
EnergyValTV = TV_ENERGY(np.asarray(x.as_array(), dtype=np.float32), np.asarray(x.as_array(), dtype=np.float32), self.lambdaReg, 2)
return 0.5*EnergyValTV[0]
+
def proximal(self,x,tau, out=None):
pars = {'algorithm' : SB_TV, \
'input' : np.asarray(x.as_array(), dtype=np.float32),\
'regularization_parameter':self.lambdaReg*tau, \
'number_of_iterations' :self.iterationsTV ,\
'tolerance_constant':self.tolerance,\
- 'methodTV': self.methodTV ,\
- 'printingOut': self.printing}
+ 'methodTV': self.methodTV}
res , info = regularisers.SB_TV(pars['input'],
pars['regularization_parameter'],
pars['number_of_iterations'],
pars['tolerance_constant'],
- pars['methodTV'],
- pars['printingOut'], self.device)
+ pars['methodTV'], self.device)
+
+ self.info = info
+
if out is not None:
out.fill(res)
else:
out = x.copy()
out.fill(res)
- return out
+ return out
+
+
+
+class TNV(Function):
+
+ def __init__(self,regularisation_parameter,iterationsTNV,tolerance):
+
+ # set parameters
+ self.regularisation_parameter = regularisation_parameter
+ self.iterationsTNV = iterationsTNV
+ self.tolerance = tolerance
+
+
+ def __call__(self,x):
+ warnings.warn("{}: the __call__ method is not currently implemented. Returning 0.".format(self.__class__.__name__))
+ # evaluate objective function of TV gradient
+ return 0.0
+
+ def proximal(self,x,tau, out=None):
+ pars = {'algorithm' : TNV, \
+ 'input' : np.asarray(x.as_array(), dtype=np.float32),\
+ 'regularisation_parameter':self.regularisation_parameter, \
+ 'number_of_iterations' :self.iterationsTNV,\
+ 'tolerance_constant':self.tolerance}
+
+ res = regularisers.TNV(pars['input'],
+ pars['regularisation_parameter'],
+ pars['number_of_iterations'],
+ pars['tolerance_constant'])
+
+ #self.info = info
+
+ if out is not None:
+ out.fill(res)
+ else:
+ out = x.copy()
+ out.fill(res)
+ return out
+
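The new wrappers follow the same pattern; their constructors take arguments in the order shown above. A sketch mirroring the wip tests below (values illustrative, noisy_data again assumed to be a 2D ImageData and noisyVol a multi-channel array; device may be 'cpu' or 'gpu'):

from ccpi.plugins.regularisers import TGV, LLT_ROF, TNV

# TGV(regularisation_parameter, alpha1, alpha2, iter_TGV, LipshitzConstant, tolerance, device)
g_tgv = TGV(1.0, 0.2, 0.4, 2000, 12, 1e-6, 'cpu')
tgv_sol = g_tgv.proximal(noisy_data, tau=1.0)
print(g_tgv.info)   # iterations performed and reached tolerance, stored by proximal

# LLT_ROF(regularisation_parameterROF, regularisation_parameterLLT,
#         iter_LLT_ROF, time_marching_parameter, tolerance, device)
g_llt = LLT_ROF(0.01, 0.008, 500, 0.001, 1e-6, 'cpu')
llt_sol = g_llt.proximal(noisy_data, tau=1.0)

# TNV(regularisation_parameter, iterationsTNV, tolerance) -- CPU implementation,
# expects multi-channel input such as ImageData(noisyVol)
g_tnv = TNV(0.04, 200, 1e-5)

FGP_dTV works the same way but additionally takes a reference DataContainer (refdata) as its first constructor argument.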
diff --git a/Wrappers/Python/wip/test_LLT_ROF_framework_ccpi_regulariser.py b/Wrappers/Python/wip/test_LLT_ROF_framework_ccpi_regulariser.py
new file mode 100644
index 0000000..3368732
--- /dev/null
+++ b/Wrappers/Python/wip/test_LLT_ROF_framework_ccpi_regulariser.py
@@ -0,0 +1,179 @@
+from ccpi.framework import ImageData, TestData
+
+import numpy as np
+import numpy
+import matplotlib.pyplot as plt
+
+from ccpi.optimisation.algorithms import PDHG
+
+from ccpi.optimisation.operators import BlockOperator, Identity, \
+ Gradient, SymmetrizedGradient, ZeroOperator
+from ccpi.optimisation.functions import ZeroFunction, L1Norm, \
+ MixedL21Norm, BlockFunction, KullbackLeibler, L2NormSquared
+import os
+import sys
+from ccpi.plugins.regularisers import TGV, LLT_ROF
+
+# user supplied input
+if len(sys.argv) > 1:
+ which_noise = int(sys.argv[1])
+else:
+ which_noise = 0
+print ("Applying {} noise")
+
+if len(sys.argv) > 2:
+ method = sys.argv[2]
+else:
+ method = '0'
+print ("method ", method)
+
+
+loader = TestData(data_dir=os.path.join(sys.prefix, 'share','ccpi'))
+data = loader.load(TestData.SHAPES)
+ig = data.geometry
+ag = ig
+
+# Create noisy data.
+noises = ['gaussian', 'poisson', 's&p']
+noise = noises[which_noise]
+if noise == 's&p':
+ n1 = TestData.random_noise(data.as_array(), mode = noise, salt_vs_pepper = 0.9, amount=0.2, seed=10)
+elif noise == 'poisson':
+ scale = 5
+ n1 = TestData.random_noise(data.as_array()/scale, mode = noise, seed = 10)*scale
+elif noise == 'gaussian':
+ n1 = TestData.random_noise(data.as_array(), mode = noise, seed = 10)
+else:
+ raise ValueError('Unsupported Noise ', noise)
+noisy_data = ImageData(n1)
+
+# Show Ground Truth and Noisy Data
+plt.figure(figsize=(10,5))
+plt.subplot(1,2,1)
+plt.imshow(data.as_array())
+plt.title('Ground Truth')
+plt.colorbar()
+plt.subplot(1,2,2)
+plt.imshow(noisy_data.as_array())
+plt.title('Noisy Data')
+plt.colorbar()
+plt.show()
+
+# Regularisation Parameter depending on the noise distribution
+if noise == 's&p':
+ alpha = 0.8
+elif noise == 'poisson':
+ alpha = .3
+elif noise == 'gaussian':
+ alpha = .2
+
+beta = 2 * alpha
+
+# Fidelity
+if noise == 's&p':
+ f3 = L1Norm(b=noisy_data)
+elif noise == 'poisson':
+ f3 = KullbackLeibler(noisy_data)
+elif noise == 'gaussian':
+ f3 = 0.5 * L2NormSquared(b=noisy_data)
+
+if method == '0':
+
+ # Create operators
+ op11 = Gradient(ig)
+ op12 = Identity(op11.range_geometry())
+
+ op22 = SymmetrizedGradient(op11.domain_geometry())
+ op21 = ZeroOperator(ig, op22.range_geometry())
+
+ op31 = Identity(ig, ag)
+ op32 = ZeroOperator(op22.domain_geometry(), ag)
+
+ operator = BlockOperator(op11, -1*op12, op21, op22, op31, op32, shape=(3,2) )
+
+ f1 = alpha * MixedL21Norm()
+ f2 = beta * MixedL21Norm()
+
+ f = BlockFunction(f1, f2, f3)
+ g = ZeroFunction()
+
+else:
+
+ # Create operators
+ op11 = Gradient(ig)
+ op12 = Identity(op11.range_geometry())
+ op22 = SymmetrizedGradient(op11.domain_geometry())
+ op21 = ZeroOperator(ig, op22.range_geometry())
+
+ operator = BlockOperator(op11, -1*op12, op21, op22, shape=(2,2) )
+
+ f1 = alpha * MixedL21Norm()
+ f2 = beta * MixedL21Norm()
+
+ f = BlockFunction(f1, f2)
+ g = BlockFunction(f3, ZeroFunction())
+
+# Compute operator Norm
+normK = operator.norm()
+
+# Primal & dual stepsizes
+#sigma = 1/normK
+#tau = 1/normK
+
+sigma = 1
+tau = 1/(sigma*normK**2)
+
+# Setup and run the PDHG algorithm
+pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma)
+pdhg.max_iteration = 5000
+pdhg.update_objective_interval = 500
+pdhg.run(5000)
+
+# Show results
+plt.figure(figsize=(20,5))
+plt.subplot(1,4,1)
+plt.imshow(data.as_array())
+plt.title('Ground Truth')
+plt.colorbar()
+plt.subplot(1,4,2)
+plt.imshow(noisy_data.as_array())
+plt.title('Noisy Data')
+plt.colorbar()
+plt.subplot(1,4,3)
+plt.imshow(pdhg.get_output()[0].as_array())
+plt.title('TGV Reconstruction')
+plt.colorbar()
+plt.subplot(1,4,4)
+plt.plot(np.linspace(0,ig.shape[1],ig.shape[1]), data.as_array()[int(ig.shape[0]/2),:], label = 'GTruth')
+plt.plot(np.linspace(0,ig.shape[1],ig.shape[1]), pdhg.get_output()[0].as_array()[int(ig.shape[0]/2),:], label = 'TGV reconstruction')
+plt.legend()
+plt.title('Middle Line Profiles')
+plt.show()
+
+#%% Run CCPi-regulariser
+# The toolkit's TGV implementation uses the PDHG algorithm with fixed
+# sigma = tau = 1/sqrt(12)
+
+# There is an early stopping criterion:
+# https://github.com/vais-ral/CCPi-Regularisation-Toolkit/blob/master/src/Core/regularisers_CPU/TGV_core.c#L168
+
+g = TGV(1, alpha, beta, 2000, normK**2, 1e-6, 'gpu')
+#alphaROF = 0.1
+#alphaLLT = 0.05
+#g = LLT_ROF(alphaROF, alphaLLT, 500, 0.001, 1e-6, 'gpu')
+sol = g.proximal(noisy_data, 1)
+
+plt.imshow(sol.as_array())
+plt.show()
+
+plt.imshow(pdhg.get_output()[0].as_array())
+plt.show()
+
+plt.imshow(np.abs(sol.as_array() - pdhg.get_output()[0].as_array()))
+plt.colorbar()
+plt.show()
+
+#%%
+
+plt.plot(np.linspace(0,ig.shape[1],ig.shape[1]), sol.as_array()[int(ig.shape[0]/2),:], np.linspace(0,ig.shape[1],ig.shape[1]), pdhg.get_output()[0].as_array()[int(ig.shape[0]/2),:])
+
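Since the TGV and LLT_ROF wrappers store the toolkit's convergence output on the object (self.info in the diff above), the script could also report how the early stopping behaved. A sketch, assuming info unpacks into the iteration count and the reached tolerance as the toolkit comments describe:

# How did the toolkit's early stopping behave for this run?
iterations_run, reached_tol = g.info   # assumption: (iterations, reached tolerance) pair
print("CCPi-RGL stopped after {} iterations, "
      "||u^k - u^(k-1)||_2 / ||u^k||_2 = {:.2e}".format(int(iterations_run), float(reached_tol)))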
diff --git a/Wrappers/Python/wip/test_TGV_framework_ccpi_regulariser.py b/Wrappers/Python/wip/test_TGV_framework_ccpi_regulariser.py
new file mode 100644
index 0000000..a6f6094
--- /dev/null
+++ b/Wrappers/Python/wip/test_TGV_framework_ccpi_regulariser.py
@@ -0,0 +1,173 @@
+from ccpi.framework import ImageData, TestData
+
+import numpy as np
+import numpy
+import matplotlib.pyplot as plt
+
+from ccpi.optimisation.algorithms import PDHG
+
+from ccpi.optimisation.operators import BlockOperator, Identity, \
+ Gradient, SymmetrizedGradient, ZeroOperator
+from ccpi.optimisation.functions import ZeroFunction, L1Norm, \
+ MixedL21Norm, BlockFunction, KullbackLeibler, L2NormSquared
+import os
+import sys
+from ccpi.plugins.regularisers import TGV
+
+# user supplied input
+if len(sys.argv) > 1:
+ which_noise = int(sys.argv[1])
+else:
+ which_noise = 0
+print ("Applying {} noise")
+
+if len(sys.argv) > 2:
+ method = sys.argv[2]
+else:
+ method = '1'
+print ("method ", method)
+
+
+loader = TestData(data_dir=os.path.join(sys.prefix, 'share','ccpi'))
+data = loader.load(TestData.SHAPES)
+ig = data.geometry
+ag = ig
+
+# Create noisy data.
+noises = ['gaussian', 'poisson', 's&p']
+noise = noises[which_noise]
+if noise == 's&p':
+ n1 = TestData.random_noise(data.as_array(), mode = noise, salt_vs_pepper = 0.9, amount=0.2, seed=10)
+elif noise == 'poisson':
+ scale = 5
+ n1 = TestData.random_noise(data.as_array()/scale, mode = noise, seed = 10)*scale
+elif noise == 'gaussian':
+ n1 = TestData.random_noise(data.as_array(), mode = noise, seed = 10)
+else:
+ raise ValueError('Unsupported Noise ', noise)
+noisy_data = ImageData(n1)
+
+# Show Ground Truth and Noisy Data
+plt.figure(figsize=(10,5))
+plt.subplot(1,2,1)
+plt.imshow(data.as_array())
+plt.title('Ground Truth')
+plt.colorbar()
+plt.subplot(1,2,2)
+plt.imshow(noisy_data.as_array())
+plt.title('Noisy Data')
+plt.colorbar()
+plt.show()
+
+# Regularisation Parameter depending on the noise distribution
+if noise == 's&p':
+ alpha = 0.8
+elif noise == 'poisson':
+ alpha = .3
+elif noise == 'gaussian':
+ alpha = .2
+
+beta = 2 * alpha
+
+# Fidelity
+if noise == 's&p':
+ f3 = L1Norm(b=noisy_data)
+elif noise == 'poisson':
+ f3 = KullbackLeibler(noisy_data)
+elif noise == 'gaussian':
+ f3 = 0.5 * L2NormSquared(b=noisy_data)
+
+if method == '0':
+
+ # Create operators
+ op11 = Gradient(ig)
+ op12 = Identity(op11.range_geometry())
+
+ op22 = SymmetrizedGradient(op11.domain_geometry())
+ op21 = ZeroOperator(ig, op22.range_geometry())
+
+ op31 = Identity(ig, ag)
+ op32 = ZeroOperator(op22.domain_geometry(), ag)
+
+ operator = BlockOperator(op11, -1*op12, op21, op22, op31, op32, shape=(3,2) )
+
+ f1 = alpha * MixedL21Norm()
+ f2 = beta * MixedL21Norm()
+
+ f = BlockFunction(f1, f2, f3)
+ g = ZeroFunction()
+
+else:
+
+ # Create operators
+ op11 = Gradient(ig)
+ op12 = Identity(op11.range_geometry())
+ op22 = SymmetrizedGradient(op11.domain_geometry())
+ op21 = ZeroOperator(ig, op22.range_geometry())
+
+ operator = BlockOperator(op11, -1*op12, op21, op22, shape=(2,2) )
+
+ f1 = alpha * MixedL21Norm()
+ f2 = beta * MixedL21Norm()
+
+ f = BlockFunction(f1, f2)
+ g = BlockFunction(f3, ZeroFunction())
+
+# Compute operator Norm
+normK = operator.norm()
+
+# Primal & dual stepsizes
+sigma = 1/normK
+tau = 1/normK
+
+# Setup and run the PDHG algorithm
+pdhg = PDHG(f=f,g=g,operator=operator, tau=tau, sigma=sigma)
+pdhg.max_iteration = 5000
+pdhg.update_objective_interval = 500
+pdhg.run(5000)
+
+# Show results
+plt.figure(figsize=(20,5))
+plt.subplot(1,4,1)
+plt.imshow(data.as_array())
+plt.title('Ground Truth')
+plt.colorbar()
+plt.subplot(1,4,2)
+plt.imshow(noisy_data.as_array())
+plt.title('Noisy Data')
+plt.colorbar()
+plt.subplot(1,4,3)
+plt.imshow(pdhg.get_output()[0].as_array())
+plt.title('TGV Reconstruction')
+plt.colorbar()
+plt.subplot(1,4,4)
+plt.plot(np.linspace(0,ig.shape[1],ig.shape[1]), data.as_array()[int(ig.shape[0]/2),:], label = 'GTruth')
+plt.plot(np.linspace(0,ig.shape[1],ig.shape[1]), pdhg.get_output()[0].as_array()[int(ig.shape[0]/2),:], label = 'TGV reconstruction')
+plt.legend()
+plt.title('Middle Line Profiles')
+plt.show()
+
+# Run CCPi-regulariser
+# The toolkit's TGV implementation uses the PDHG algorithm with fixed
+# sigma = tau = 1/sqrt(12)
+
+# There is an early stopping criterion:
+# https://github.com/vais-ral/CCPi-Regularisation-Toolkit/blob/master/src/Core/regularisers_CPU/TGV_core.c#L168
+
+g = TGV(1, alpha, beta, 5000, 12, 1e-6, 'gpu')
+sol = g.proximal(noisy_data, 1)
+
+plt.imshow(sol.as_array())
+plt.show()
+
+plt.imshow(pdhg.get_output()[0].as_array())
+plt.show()
+
+plt.imshow(np.abs(sol.as_array() - pdhg.get_output()[0].as_array()))
+plt.colorbar()
+plt.show()
+
+#%%
+
+plt.plot(np.linspace(0,ig.shape[1],ig.shape[1]), sol.as_array()[int(ig.shape[0]/2),:], np.linspace(0,ig.shape[1],ig.shape[1]), pdhg.get_output()[0].as_array()[int(ig.shape[0]/2),:])
+
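Beyond the plots, a quick quantitative comparison between the toolkit solution sol and the framework PDHG solution would make the agreement easier to judge. A sketch using only numpy (both arrays are assumed to have the same shape):

# Quantify the difference between the two TGV reconstructions.
diff = sol.as_array() - pdhg.get_output()[0].as_array()
print("max abs difference     :", np.abs(diff).max())
print("relative L2 difference :",
      np.linalg.norm(diff) / np.linalg.norm(pdhg.get_output()[0].as_array()))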
diff --git a/Wrappers/Python/wip/test_TVN_framework_ccpi_regulariser.py b/Wrappers/Python/wip/test_TVN_framework_ccpi_regulariser.py
new file mode 100644
index 0000000..a0c893d
--- /dev/null
+++ b/Wrappers/Python/wip/test_TVN_framework_ccpi_regulariser.py
@@ -0,0 +1,147 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Feb 22 11:39:43 2018
+
+Demonstration of CPU regularisers
+
+@authors: Daniil Kazantsev, Edoardo Pasca
+"""
+
+import matplotlib.pyplot as plt
+import numpy as np
+import os
+import timeit
+from ccpi.filters.regularisers import ROF_TV, FGP_TV, SB_TV, TGV, LLT_ROF, FGP_dTV, TNV, NDF, Diff4th
+from ccpi.filters.regularisers import PatchSelect, NLTV
+from ccpi.supp.qualitymetrics import QualityTools
+
+from ccpi.plugins.regularisers import TNV as TNV_new
+###############################################################################
+def printParametersToString(pars):
+ txt = r''
+ for key, value in pars.items():
+ if key== 'algorithm' :
+ txt += "{0} = {1}".format(key, value.__name__)
+ elif key == 'input':
+ txt += "{0} = {1}".format(key, np.shape(value))
+ elif key == 'refdata':
+ txt += "{0} = {1}".format(key, np.shape(value))
+ else:
+ txt += "{0} = {1}".format(key, value)
+ txt += '\n'
+ return txt
+###############################################################################
+
+filename = 'lena_gray_512.tif'
+
+# read image
+Im = plt.imread(filename)
+Im = np.asarray(Im, dtype='float32')
+
+Im = Im/255.0
+perc = 0.05
+u0 = Im + np.random.normal(loc = 0 ,
+ scale = perc * Im ,
+ size = np.shape(Im))
+u_ref = Im + np.random.normal(loc = 0 ,
+ scale = 0.01 * Im ,
+ size = np.shape(Im))
+(N,M) = np.shape(u0)
+# map the u0 u0->u0>0
+# f = np.frompyfunc(lambda x: 0 if x < 0 else x, 1,1)
+u0 = u0.astype('float32')
+u_ref = u_ref.astype('float32')
+
+# change dims to check that modules work with non-squared images
+"""
+M = M-100
+u_ref2 = np.zeros([N,M],dtype='float32')
+u_ref2[:,0:M] = u_ref[:,0:M]
+u_ref = u_ref2
+del u_ref2
+
+u02 = np.zeros([N,M],dtype='float32')
+u02[:,0:M] = u0[:,0:M]
+u0 = u02
+del u02
+
+Im2 = np.zeros([N,M],dtype='float32')
+Im2[:,0:M] = Im[:,0:M]
+Im = Im2
+del Im2
+"""
+
+#%%
+print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
+print ("__________Total nuclear Variation__________")
+print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
+
+## plot
+fig = plt.figure()
+plt.suptitle('Performance of TNV regulariser using the CPU')
+a=fig.add_subplot(1,2,1)
+a.set_title('Noisy Image')
+imgplot = plt.imshow(u0,cmap="gray")
+
+channelsNo = 5
+noisyVol = np.zeros((channelsNo,N,M),dtype='float32')
+idealVol = np.zeros((channelsNo,N,M),dtype='float32')
+
+for i in range (channelsNo):
+ noisyVol[i,:,:] = Im + np.random.normal(loc = 0 , scale = perc * Im , size = np.shape(Im))
+ idealVol[i,:,:] = Im
+
+# set parameters
+pars = {'algorithm' : TNV, \
+ 'input' : noisyVol,\
+ 'regularisation_parameter': 0.04, \
+ 'number_of_iterations' : 200 ,\
+ 'tolerance_constant':1e-05
+ }
+
+print ("#############TNV CPU#################")
+start_time = timeit.default_timer()
+tnv_cpu = TNV(pars['input'],
+ pars['regularisation_parameter'],
+ pars['number_of_iterations'],
+ pars['tolerance_constant'])
+
+Qtools = QualityTools(idealVol, tnv_cpu)
+pars['rmse'] = Qtools.rmse()
+
+txtstr = printParametersToString(pars)
+txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time)
+print (txtstr)
+a=fig.add_subplot(1,2,2)
+
+# these are matplotlib.patch.Patch properties
+props = dict(boxstyle='round', facecolor='wheat', alpha=0.75)
+# place a text box in upper left in axes coords
+a.text(0.15, 0.25, txtstr, transform=a.transAxes, fontsize=14,
+ verticalalignment='top', bbox=props)
+imgplot = plt.imshow(tnv_cpu[3,:,:], cmap="gray")
+plt.title('{}'.format('CPU results'))
+
+#%%
+from ccpi.framework import ImageData
+g = TNV_new(0.04, 200, 1e-5)
+sol = g.proximal(ImageData(noisyVol), 1)
+
+#%%
+plt.figure(figsize=(10,10))
+plt.subplot(3,1,1)
+plt.imshow(sol.as_array()[2])
+plt.colorbar()
+
+plt.subplot(3,1,2)
+plt.imshow(tnv_cpu[2])
+plt.colorbar()
+
+plt.subplot(3,1,3)
+plt.imshow(np.abs(tnv_cpu[2] - sol.as_array()[2]))
+plt.colorbar()
+
+plt.show()
+
+#plt.imshow(sol.as_array())
\ No newline at end of file
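To close the loop with the CPU demo above, the plugin TNV result could be scored with the same metric. A sketch, reusing QualityTools, idealVol and sol as defined in this script:

# Score the plugin TNV output with the same RMSE metric used for the CPU call.
Qtools_plugin = QualityTools(idealVol, sol.as_array())
print("plugin TNV rmse:", Qtools_plugin.rmse())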