
timelaggedneuron.cc

/*
** timelaggedneuron.cc
** Login : <nico@Altarion.marmotte.ath.cx>
** Started on  Tue Jul 22 15:32:18 2003 Nicolas
** $Id: timelaggedneuron.cc,v 1.15 2003/09/16 10:20:18 nico Exp $
**
** Copyright (C) 2003 Nicolas
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU Lesser General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
** GNU Lesser General Public License for more details.
**
** You should have received a copy of the GNU Lesser General Public License
** along with this program; if not, write to the Free Software
** Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/

#include <iostream>
#include <cassert> // assert()
#include <cmath>   // expf()
#include "timelaggedneuron.hh"


namespace NeuralNet
{


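  // Connects this neuron to a time-lagged input neuron: one random weight is
  // created per element reported by in->getSize() (presumably the length of
  // the input's time window), and the per-connection training state
  // (activation history, deltas, old deltas, rprop steps, momentum) is
  // zero-initialised.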
  void TimeLaggedNeuron::addConnection(TimeLaggedInputNeuron* in)
  {
    assert(in != 0x0);

    _inputNeurons.push_back(in);

    //    _tri.push_back(0.05);
    // _treshold = 0.5;

    std::vector<float> w;
    for (int i = 0; i < in->getSize(); i++)
      w.push_back(randWeight());
    _weights.push_back(w);

    for (int i = 0; i < in->getSize(); i++)
      {
      w[i] = 0.0;
      _activ.push_back(0);
      }

    _lagDeltae.push_back(_activ);
    _oldDeltae.push_back(w);
    _tri.push_back(w);  
    _deltae.push_back(w);
    _momentum.push_back(w);
  }

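  // Same as the overload above, except that the connection weights are
  // supplied by the caller instead of being drawn at random.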
  void TimeLaggedNeuron::addConnection(TimeLaggedInputNeuron* in,
                               std::vector<float> weights)
  {

    assert(in != 0x0);
    // The supplied weight vector must cover the input's whole time window,
    // otherwise the zero-fill loop below would index w out of bounds.
    assert(weights.size() == static_cast<unsigned int>(in->getSize()));
    _inputNeurons.push_back(in);

    //    _tri.push_back(0.05);
    //    _treshold = 0.5;

    std::vector<float> w;
    for (unsigned int i = 0; i < weights.size(); i++)
      w.push_back(weights[i]);
    _weights.push_back(w);

    for (int i = 0; i < in->getSize(); i++)
      {
      w[i] = 0.0;
      _activ.push_back(0);
      }

    _lagDeltae.push_back(_activ);
    _oldDeltae.push_back(w);
    _tri.push_back(w); 
    _deltae.push_back(w);
    _momentum.push_back(w);
  }

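  // Computes the weighted sum of every connected input's lagged outputs,
  // pushes it onto the front of the activation history (_activ, dropping the
  // oldest entry) and returns the activation function applied to it.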
  float TimeLaggedNeuron::refreshOutput()
  {
    assert(_inputNeurons.size() == _weights.size());

    std::vector<TimeLaggedInputNeuron*>::const_iterator neurons = _inputNeurons.begin();
    std::vector< std::vector<float> >::const_iterator weights = _weights.begin();
    _s = 0;//_treshold;
    for (; neurons != _inputNeurons.end(); ++ neurons, ++weights)
      {
      assert(*neurons != 0x0);

      CIter w = weights->begin();
      pair<CIter> p = (*neurons)->getOutput();

      for (; w != weights->end() && p.first != p.second; ++w, ++p.first)
        _s += *(p.first) * (*w);
      assert(p.first == p.second); // Should have been through all the memory...
      }
    _activ.pop_back(); _activ.push_front(_s);
    _output = activation(_s); // activation() is the neuron's activation function
    return _output;
  }

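  // Prints the neuron type, its activation function and all weights to
  // std::cout, for debugging.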
  void TimeLaggedNeuron::display() const
  {
    std::cout << "    Time Lagged Neuron" << std::endl;
    std::cout << "    Activation function: Sigmoid" << std::endl;

    //    std::cout << "    Threshold: " << _treshold << std::endl;
    for (unsigned i = 0; i < _weights.size(); i++)
      for (unsigned j = 0; j < _weights[i].size(); j++)
        std::cout << "    Weight[" << i << "," << j << "]: " << _weights[i][j] << std::endl;

  }

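  // Sigmoid derivative expressed through the cached output:
  // f'(x) = f(x) * (1 - f(x)).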
  float TimeLaggedNeuron::getFPrime() const
  {
    return (_output * (1 - _output));
  }

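  // Sigmoid derivative evaluated at the n-th entry of the activation history.
  // _activ stores raw weighted sums, hence the explicit form below:
  // with v = 1 + exp(-x), 1/v - 1/v^2 equals s * (1 - s) for s = 1/v.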
  float TimeLaggedNeuron::_fprime(unsigned n) const
  {
    assert(n < _activ.size());
    std::list<float>::const_iterator it = _activ.begin();
    advance(it, n);
    float v = (1 + expf(-*it));
    return 1/v - 1/(v*v); ///(*it * (1 - *it));
  }


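  // Stochastic (online) backpropagation step: first applies the current
  // deltas to the weights (scaled by the inputs and lRate, plus moment times
  // the previous step), then refreshes the per-lag deltas from the
  // accumulated error _dwsum. The delta parameter is currently unused.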
  void TimeLaggedNeuron::updateBackpropStochastic(float     lRate,
                                      float     moment,
                                      float     delta)
  {
    for (unsigned n = 0; n < _inputNeurons.size(); n++)
      for (unsigned j = 0; j < _weights[n].size(); j++)
      {
        double wd = 0;
        pair<CIter> in = (_inputNeurons[n]->getOutput());
        for (; in.first != in.second; ++in.first)
          wd += _deltae[n][j] * *in.first;
        wd = lRate * wd  + moment * _momentum[n][j];
        
        _momentum[n][j] = wd;
        _weights[n][j] += wd;
      }
    for (unsigned n = 0; n < _inputNeurons.size(); n++)
      {
      _lagDeltae[n].pop_back();
      _lagDeltae[n].push_front(_dwsum);
      std::list<float>::iterator deltar = _lagDeltae[n].begin();

      for (unsigned j = 0; j < _deltae[n].size(); ++j, deltar++)
        {// j is the time lag
          for (unsigned i = 0; i < _weights[n].size(); i++)
            _deltae[n][j] += (*deltar * _weights[n][i]) / 1000;
          _deltae[n][j] *= _fprime(j); // Lagged fprime        
        }
      }
  }

  // Check Threshold ...
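  // Plain gradient-descent weight update: each weight moves by lRate times
  // the accumulated delta (summed over the connected input's lagged outputs)
  // plus moment times the previous step.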
  void TimeLaggedNeuron::updateWeights(float lRate, float moment)
  {
    for (unsigned n = 0; n < _inputNeurons.size(); n++)
      for (unsigned j = 0; j < _weights[n].size(); j++)
      {
        double wd = 0;
        pair<CIter> in = (_inputNeurons[n]->getOutput());
        for (; in.first != in.second; ++in.first)
          wd += _deltae[n][j] * *in.first;
        wd = lRate * wd  + moment * _momentum[n][j];

        _momentum[n][j] = wd;
        _weights[n][j] += wd;
      }
  }

  // rprop
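  // Resilient backpropagation (RPROP): the per-weight step _tri[n][j] is
  // multiplied by nPlus while the error gradient keeps its sign (capped at
  // deltaMax) and by nMinus when the sign flips (floored at deltaMin); on a
  // sign flip the previous step is undone when errUp signals that the
  // overall error increased.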
  void TimeLaggedNeuron::updateWeights(float nPlus, float nMinus,
                               float deltaMin, float deltaMax,
                               bool errUp)
  {
    for (unsigned n = 0; n < _inputNeurons.size(); n++)
      for (unsigned j = 0; j < _weights[n].size(); j++)
      {
        double deltae = 0;
        pair<CIter> in = (_inputNeurons[n]->getOutput());
        for (; in.first != in.second; ++in.first)
          deltae += _deltae[n][j] * *in.first;
        
        double wd = 0;
        double tri = 0;
        if ((_oldDeltae[n][j] > 0 && _deltae[n][j] > 0) ||
            (_oldDeltae[n][j] < 0 && _deltae[n][j] < 0))
          {
            tri = MIN(_tri[n][j] * nPlus, deltaMax);
            _tri[n][j] = tri;
            wd = (deltae > 0) ? _tri[n][j] : -_tri[n][j];
            _oldDeltae[n][j] = deltae;
            //std::cout << "(+)wd = " << wd << " ";
          }
        else if ((_oldDeltae[n][j] > 0 && _deltae[n][j] < 0) ||
               (_oldDeltae[n][j] < 0 && _deltae[n][j] > 0))
          {
            tri = MAX(_tri[n][j] * nMinus, deltaMin);
            _tri[n][j] = tri;
            if (errUp)
              wd = -_momentum[n][j];
            _oldDeltae[n][j] = 0;
            //std::cout << "(-)wd = " << wd << " ";
          }
        else
          {
            wd = (deltae > 0) ? _tri[n][j] : -_tri[n][j];
            _oldDeltae[n][j] = deltae;
            //std::cout << "(0)wd = " << wd << " ";
          } 
        _momentum[n][j] = wd;
        _weights[n][j] += wd;
      }
  }

  // quickprop
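  // Quickprop-style update (after Fahlman): the step combines a gradient term
  // with a quadratic estimate deltae / (oldDeltae - deltae) of the error
  // surface, and the shrink test with mu / (1 + mu) caps the growth of a step
  // at mu times the previous one.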
  void TimeLaggedNeuron::updateWeights(float lRate, float moment, float mu)
  {
    float shrink = mu / (1.0 + mu);
    for (unsigned n = 0; n < _inputNeurons.size(); n++)
      for (unsigned j = 0; j < _weights[n].size(); j++)
      {
        double deltae = 0.0;
        pair<CIter> in = (_inputNeurons[n]->getOutput());
        for (; in.first != in.second; ++in.first)
          deltae += _deltae[n][j] * *in.first;

        double wd = 0;
        if (_momentum[n][j] > EPSILON)
          {
            if (deltae > 0.0)
              wd = (lRate * deltae) / (float)_inputNeurons.size();
            if (deltae > (shrink * _oldDeltae[n][j]))
              wd += mu * _momentum[n][j];
            else
              wd += (deltae / (_oldDeltae[n][j] - deltae))
                * _momentum[n][j];
          }
        else if (_momentum[n][j] < -EPSILON)
          {
            if (deltae < 0.0)
              wd = (lRate * deltae) / (float)_inputNeurons.size();
            if (deltae < (shrink * _oldDeltae[n][j]))
              wd += mu * _momentum[n][j];
            else
              wd += (deltae / (_oldDeltae[n][j] - deltae))
                * _momentum[n][j];
          }
        else
          {
            wd = ((lRate * deltae) / (float)_inputNeurons.size())
              + (moment * _momentum[n][j]);
          }
        //      wd = lRate * wd  + moment * _momentum[n][j];         
        _oldDeltae[n][j] = deltae;
        _momentum[n][j] = wd;
        _weights[n][j] += wd;
      }   
  }


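  // Batch-mode delta update: the accumulated error _dwsum is pushed onto each
  // connection's lagged-delta history, and every _deltae[n][j] is increased
  // by the j-th lagged delta times each connection weight (divided by 1000
  // for numeric stabilisation, as the comment below notes), then scaled by
  // the lagged sigmoid derivative. The delta parameter is currently unused.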
  void TimeLaggedNeuron::updateBatch(float delta)
  {
    //    std::cout << "Updating the deltas"  << std::endl;
    for (unsigned n = 0; n < _inputNeurons.size(); n++)
      {
      _lagDeltae[n].pop_back();
      _lagDeltae[n].push_front(_dwsum);
      std::list<float>::iterator deltar = _lagDeltae[n].begin();

      for (unsigned j = 0; j < _deltae[n].size(); ++j, deltar++)
        {// j is the time lag
          for (unsigned i = 0; i < _weights[n].size(); i++)
            {// Numeric Stabilisation....
            _deltae[n][j] += (*deltar * _weights[n][i]) / 1000;
            }
          _deltae[n][j] *= _fprime(j); // Lagged fprime       
        }

      }
  }

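  // Resets the training state selected by the flags bitmask: DWSUM clears the
  // accumulated error sum, TRI re-seeds the rprop steps with tri, OLD_DELTAE
  // zeroes the lagged and old deltas, DELTAE zeroes the deltas, DECAY_DELTAE
  // seeds the deltas with decay * weight (presumably for weight decay), and
  // MOMENTUM clears the momentum terms.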
  void      TimeLaggedNeuron::clearset(int flags, float decay, float tri)
  {
    //assert(_deltae.size() == _oldDeltae.size() ==
    //_momentum.size() == _tri.size() == _inputNeurons.size());
    if (flags & DWSUM)
      _dwsum = 0;
    if (flags == DWSUM)
      return;
    for (unsigned i = 0; i < _inputNeurons.size(); i++)
      {
      if (flags & TRI)
        if (_tri.size() > i)
          for (unsigned j = 0; j < _tri[i].size(); ++j)
            _tri[i][j] = tri;
      if (flags & OLD_DELTAE)
        {
          for (std::list<float>::iterator it = _lagDeltae[i].begin();
             it != _lagDeltae[i].end(); ++it)
            *it = 0;
          if (_oldDeltae.size() > i)
            for (std::vector<float>::iterator it = _oldDeltae[i].begin();
                 it != _oldDeltae[i].end(); ++it)
              *it = 0;
        }
      if (flags & DELTAE)
        for (unsigned j = 0; j < _deltae[i].size(); ++j)
          (_deltae[i])[j] = 0;
      if (flags & DECAY_DELTAE)
        for (unsigned j = 0; j < _deltae[i].size(); ++j)
          (_deltae[i])[j] = decay * _weights[i][j];
      if (flags & MOMENTUM)
        for (unsigned j = 0; j < _momentum[i].size(); ++j)
          (_momentum[i])[j] = 0;
      }
  }

}// !namespace NeuralNet


