function [Y1,Ypinv] = online_step_loreta_psd(Y,Ypinv,G1,G2,y,t)
% [Y1,Ypinv] = online_step_loreta_psd(Y,Ypinv,G1,G2,y,t)
%
% One online step of the PSD version of LORETA.
%
% Performs the retraction step R_{AB}(t*y*G1*G2') for a gradient given in
% rank-1 factored form, keeping the model's low-rank factor and its
% pseudo-inverse in sync.
%
% Inputs:
%   Y     - low-rank factor of the current model, (n x k)
%   Ypinv - pseudo-inverse of Y, (k x n)
%   G1,G2 - factors of the rank-1 gradient matrix G1*G2', each (n x 1)
%   y     - sign of the step, either -1 or 1
%   t     - step size
%
% Outputs:
%   Y1    - low-rank factor of the model after the retraction step
%   Ypinv - pseudo-inverse of Y1

% Fold the signed step size into one of the gradient factors.
G1 = t*y*G1;

% Project the gradient factors onto the row space of Y.
g1 = Ypinv*G1;                 % (k x 1)
g2 = Ypinv*G2;                 % (k x 1)
norm_g1 = g1'*g1;
norm_g2 = g2'*g2;
g11 = Y*g1;                    % (n x 1)
g22 = Y*g2;                    % (n x 1)
q = g2'*g1;

% Second-order retraction coefficients (0.09375 = 3/32, 0.125 = 1/8 are
% the fixed constants of the rank-preserving second-order retraction).
L1 = ( (-0.25+q*0.09375)*g11 + (0.5-0.125*q)*G1 ...
      + norm_g1*0.09375*g22 - 0.125*norm_g1*G2 );
L2 = ( (-0.25+q*0.09375)*g22 + (0.5-0.125*q)*G2 ...
      + norm_g2*0.09375*g11 - 0.125*norm_g2*G1 );

% Apply the step as two rank-1 corrections to Y ...
outer1 = L1*g2';
outer2 = L2*g1';
Y1 = Y + outer1 + outer2;

% ... and track the pseudo-inverse with two exact rank-1 pinv updates.
Ypinv_intermediate = rank1_pinv_update(Y,Ypinv,L1,g2);
Ypinv = rank1_pinv_update(Y+outer1,Ypinv_intermediate,L2,g1);

end

function [Apinv_new] = rank1_pinv_update(A,Apinv,c,d)
% Apinv_new = rank1_pinv_update(A,Apinv,c,d)
%
% Exact rank-one update of a pseudo-inverse: returns pinv(A + c*d').
%
% A is (n x k) and assumed to have FULL COLUMN RANK, Apinv = pinv(A) is
% (k x n), c is (n x 1), d is (k x 1).
%
% ref.: Carl D. Meyer, "Generalized inversion of modified matrices",
%       SIAM J. Appl. Math. 24(3), 1973.

beta = 1 + d'*(Apinv*c);
v = Apinv*c;                   % Meyer's k, (k x 1)
n = Apinv'*d;                  % (n x 1)
w = c - A*v;                   % component of c outside range(A), (n x 1)
% Meyer's m = d - A'*n is identically zero because A has full column rank,
% so only the m = 0 cases of Meyer's theorems are reachable below.
norm_w = w'*w;
norm_v = v'*v;
norm_n = n'*n;

if abs(beta) > eps
    % Meyer, Theorem 5 (m = 0, beta ~= 0); valid for both w = 0 and w ~= 0.
    sigma = norm_w*norm_n + beta^2;
    G = (Apinv*n)*w'/beta ...
        - (beta/sigma) * ((norm_w/beta)*(Apinv*n) + v) ...
                       * ((norm_n/beta)*w + n)';
elseif norm_w > eps
    % Meyer, Theorem 4 (m = 0, beta = 0, w ~= 0).
    G = -(Apinv*n)*n'/norm_n - v*w'/norm_w;
else
    % Meyer, Theorem 6 (m = 0, beta = 0, w = 0).
    % beta = 0 forces v ~= 0 and n ~= 0, so the divisions are safe.
    G = -v*(v'*Apinv)/norm_v - (Apinv*n)*n'/norm_n ...
        + ((v'*(Apinv*n))/(norm_v*norm_n)) * (v*n');
end

Apinv_new = Apinv + G;

end