
algo.hxx

/*
** algo.hxx
** Login : <dax@happycoders.org>
** Started on  Thu Jul 17 03:00:28 2003 David Rousselie
** $Id: algo.hxx,v 1.12 2003/09/29 21:03:50 dax Exp $
** 
** Copyright (C) 2003 David Rousselie
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU Lesser General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
** 
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
** GNU Lesser General Public License for more details.
** 
** You should have received a copy of the GNU Lesser General Public License
** along with this program; if not, write to the Free Software
** Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
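
/*
** Overview: the routines in this file implement neural PCA via
** Sanger's Generalized Hebbian Algorithm (GHA).  For a centered
** example x with outputs y = W * x, gha2() applies the batch update
**
**     W <- W + epsilon * (y * x^T - LT(y * y^T) * W)
**
** where LT() keeps only the lower triangle of its argument (see lt()
** below).  gha() and ala() instead extract one eigenvector at a time,
** deflating each example by the components already extracted.
*/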

#ifndef     ALGO_HXX_
# define    ALGO_HXX_

# include <cstdlib>   /* rand(), RAND_MAX */
# include <cmath>     /* sqrt(), fabs() */
# include <iostream>  /* std::cout */

template <class TypeName>
Matrix<TypeName> PCAAlgo<TypeName>::random_init(unsigned int nb_lines, 
                                    unsigned int nb_cols)
{
  Matrix<TypeName> result(nb_lines, nb_cols);
  Matrix<float> wi(nb_lines, 1);

  for (unsigned int i = 0; i < nb_cols; i++)
    {
      // fill column i with uniform noise, then scale it to unit length
      for (unsigned int j = 0; j < nb_lines; j++)
        wi.set_data(j, 0, rand() / (float) RAND_MAX);
      result.insert(0, i, wi * (1 / (float)sqrt(wi.norme())));
    }
  return result;
}

template <class TypeName>
Matrix<TypeName> PCAAlgo<TypeName>::random_init2(unsigned int nb_lines, 
                                     unsigned int nb_cols)
{
  Matrix<TypeName> result(nb_lines, nb_cols);
  Matrix<float> wi(nb_lines, 1);
  float init = sqrt(1 / (float)(64 * nb_lines));

  for (unsigned int i = 0; i < nb_cols; i++)
    {
      // fill every entry with the same small constant instead of noise
      for (unsigned int j = 0; j < nb_lines; j++)
        //        wi.set_data(j, 0, rand() / (float) RAND_MAX);
        wi.set_data(j, 0, init);
      //      result.insert(0, i, wi * (1 /
      //      (float)sqrt(wi.norme())));
      result.insert(0, i, wi);
    }
  return result;
}

template <class TypeName>
Matrix<TypeName> PCAAlgo<TypeName>::lt(Matrix<TypeName> m)
{
  Matrix<TypeName> res(m);
  
  for (int i = 0; i < res.get_nb_lines(); i++)
    for (int j = i + 1; j < res.get_nb_cols(); j++)
      res.set_data(i, j, 0);
  return res;
}
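
/*
** For example, lt() maps a 3x3 matrix as follows, zeroing the strictly
** upper triangle:
**
**     [ a b c ]       [ a 0 0 ]
**     [ d e f ]  -->  [ d e 0 ]
**     [ g h i ]       [ g h i ]
*/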


template <class TypeName>
Matrix<float> PCAAlgo<TypeName>::gha2()
{
  Matrix<float> W = random_init(_x.get_nb_lines(), _x.get_nb_lines());
  float error = 0;
  float serror = 0;

  do 
    {
      serror = error;
      error = 0;  // restart the per-pass error accumulator
      for (int ex = 0; ex < _x.get_nb_cols(); ex++)
      {
        Matrix<float> xx = _x.get_col(ex);

        while (xx.norme() == 0)
          {
            ex = (ex + 1) % _x.get_nb_cols();
            xx = _x.get_col(ex);
          }
        Matrix<float> y = W * xx;
        W = W + (y * xx.transpose() - lt(y * y.transpose()) * W) * _epsilon;
        for (int i = 0; i < W.get_nb_lines(); i++)
          error += (W.get_line(i).transpose() * y(i, 0) - xx).norme();
      }
      error /= (float)(W.get_nb_lines() * _x.get_nb_cols());
      std::cout << "W:" << W.transpose() << std::endl << "error = " 
            << error << std::endl;
    }
  while (fabs(serror - error) > 1e-10);
  return W.transpose();
}
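
/*
** A minimal usage sketch (hypothetical: the PCAAlgo constructor and
** the meaning of its arguments are assumed here, not taken from this
** file -- check algo.hh for the real interface):
**
**     PCAAlgo<float> pca(x_centered, 0.001f);   // examples + epsilon?
**     Matrix<float> V = pca.gha2();             // returns W^T, whose
**                                               // columns approximate
**                                               // the eigenvectors
*/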

/*
** Iterative computation of the GHA.
** The W passed in must be initialized randomly; for columns j < i it
** must already hold the eigenvectors computed previously.
** The example matrix x must be centered.
*/
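
/*
** Per example x, the update performed below on column i of W is
** (assuming norme() returns the squared norm, as the normalization in
** random_init() suggests):
**
**     y_j  = w_j^T x                                        for j <= i
**     x'   = x - sum_{j<i} w_j y_j                          (deflation)
**     w_i <- w_i + epsilon * y_i * x'
**     lambda <- lambda + gamma * ((w_i^T x')^2 / ||w_i||^2 - lambda)
**
** so lambda tracks a running estimate of the i-th eigenvalue.
*/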
template <class TypeName>
float       PCAAlgo<TypeName>::gha(Matrix<float>& W, int i)
{
  unsigned int    iteration = 0;
  float           error = 0;
  float           serror = 0;
  float           gamma = 0.005;
  float           lambda = rand() / (float) RAND_MAX;

  std::cout << "Launching GHA for " << i << "e eigenvector :" << std::endl;
  do 
    {
      iteration++;
      serror = error;
      error = 0;  // restart the per-pass error accumulator
      for (int ex = 0; ex < _x.get_nb_cols(); ex++)
      {
        Matrix<float> xprime(W.get_nb_lines(), 1);
        Matrix<float> sum(W.get_nb_lines(), 1);
        Matrix<float> xx = _x.get_col(ex);
        Matrix<float> wj(W.get_nb_lines(), 1);
        float           yj;

        while (xx.norme() == 0)
          {
            ex = (ex + 1) % _x.get_nb_cols();
            xx = _x.get_col(ex);
          }
        for (int j = 0; j < i; j++)
          {
            wj = W.get_col(j);
            yj = (wj.transpose() * xx)(0, 0);
            sum += wj * yj;
          }
        xprime = xx - sum;
        wj = W.get_col(i);
        yj = (wj.transpose() * xx)(0, 0);
        lambda += gamma * ((((wj.transpose() * xprime)(0, 0) 
                         * (wj.transpose() * xprime)(0, 0)) / wj.norme()) - lambda);
        W.insert(0, i, wj + ((xx - sum) * yj * _epsilon));
        error += (wj * yj - xx).norme();
      }
      error /= (float)_x.get_nb_cols();
    }
  while (fabs(error - serror) > 1e-10);
  std::cout << "lambda estimated: " << lambda << std::endl;
  std::cout << "W fixed for " << i << "e eigenvector in " << iteration 
          << " iterations with final error = " << error << std::endl;
  return lambda;
}
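
/*
** A sketch of extracting the first k eigenvectors one at a time with
** gha() (the pca object is the hypothetical one from the gha2() sketch
** above; n is the dimension of the examples):
**
**     Matrix<float> W = pca.random_init(n, k);
**     std::vector<float> lambda(k);
**     for (int i = 0; i < k; i++)
**       lambda[i] = pca.gha(W, i);   // fixes column i of W
*/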

template <class TypeName>
float       PCAAlgo<TypeName>::ala(Matrix<float>& W, int i)
{
  // beta must be < 2(sqrt(2) - 1)
  // params that work :)
  float           beta = 0.00125;
  float           gamma = 0.005;
  unsigned int    iteration = 0;
  double    error = 0;
  double    serror = 0;
  float           lambda = rand() / (float) RAND_MAX;  

  std::cout << "Launching ALA for " << i << "e eigenvector :" << std::endl;
  do 
    {
      iteration++;
      serror = error;
      error = 0;  // restart the per-pass error accumulator
      for (int ex = 0; ex < _x.get_nb_cols(); ex++)
      {
        Matrix<float>   xprime(W.get_nb_lines(), 1);
        Matrix<float> sum(W.get_nb_lines(), 1);
        Matrix<float> xx = _x.get_col(ex);
        Matrix<float> wj(W.get_nb_lines(), 1);
        float           yj;

        while (xx.norme() == 0)
          {
            ex = (ex + 1) % _x.get_nb_cols();
            xx = _x.get_col(ex);
          }
        // sum (wj * yj) for j < i to compute xprime
        for (int j = 0; j < i; j++)
          {
            wj = W.get_col(j);
            yj = (wj.transpose() * xx)(0, 0);
            sum += wj * yj;
          }
        xprime = xx - sum;
        wj = W.get_col(i);
        yj = (wj.transpose() * xx)(0, 0);

        // add wj * yj for j=i to compute new wj
        sum += wj * yj;

        // compute the incremental lambda to determine alpha, the
        // learning rate
        lambda += gamma * ((((wj.transpose() * xprime)(0, 0) 
                         * (wj.transpose() * xprime)(0, 0)) / wj.norme()) - lambda);
        float           alpha = beta / lambda;

        // compute new wj
        wj += (xx - sum) * yj * alpha;

        // update the W matrix
        if (wj.norme() > ((1 / beta) + 0.5))
          W.insert(0, i, wj * (1 / (sqrt(2 * wj.norme()))));
        else
          W.insert(0, i, wj);
        error += (wj * yj - xx).norme();
      }
      error /= (float)_x.get_nb_cols();
    }
  while (fabs(error - serror) > 1e-10);
  //while (error > 0.0001 && iteration < 1000);
  std::cout << "lambda estimated: " << lambda << std::endl;
  std::cout << "W fixed for " << i << "e eigenvector in " << iteration 
          << " iterations with final error = " << error << std::endl;
  return lambda;
}
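
/*
** Design note on ala(): the step size alpha = beta / lambda shrinks as
** the running eigenvalue estimate grows, which is what makes the
** learning rate adaptive; the renormalization branch above rescales w
** whenever its squared norm exceeds 1/beta + 0.5, keeping the weight
** vector from blowing up between updates.
*/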


#endif          /* !ALGO_HXX_ */
