Simplest Neural Net with Trainer Error

Be patient when running the model. For the first 30 seconds it is training the neuronatron.

This is hardly any different from the Simplest Neural Net. It is the version we built in class, which had a few embellishments:

  1. We tried adding a bad training rate to the trainer (10% in class; the code below currently uses 2%, set by trainerErrorRate).
  2. We made the line that demarcates the two answers tilted (y = m * x + b).
  3. We drew the line showing the correct answer, so you can see how well the neuronatron is doing relative to what we tried to train it to do.
  4. Even with a small fraction of bad trainers, the neuronatron can get surprisingly far off!
  5. I also turned the learning rate down some so that the neuronatron is less influenced by the most recent bad training data.

Implementation

static final float m = 0.3; // slope of the dividing line y = m * x + b
static final float b = 30; // y-intercept of the dividing line
static final float learningRate = 0.005; // kept low so a single bad sample has limited influence
static final float trainerErrorRate = 0.02; // 2% bad training rate!!

class Neuronatron {

  float[] weights;

  Neuronatron(int numberOfInputs) {
    weights = new float[numberOfInputs];
    for (int i = 0; i < numberOfInputs; ++i) {
      weights[i] = random(-1, 1);
    }
  }

  int feedforward(float[] inputs) {
    float sum = 0.0;
    for (int i = 0; i < weights.length; ++i) sum += inputs[i] * weights[i];
    int answer = activate(sum);
    return answer;
  }

  int activate(float sum) {
    return sum > 0 ? 1 : -1;
  }

  void learn(float[] inputs, int correctAnswer) {
    int educatedGuess = feedforward(inputs);
    int error = educatedGuess - correctAnswer;
    for (int i = 0; i < weights.length; ++i) weights[i] -= error * learningRate * inputs[i];
  }
}

// One training sample: a point (in centered coordinates) plus the answer
// a trainer reports for it. With probability trainerErrorRate the trainer
// deliberately reports the opposite side of the line — noisy labels.
class Trainer {

  float[] inputs;    // { x, y, bias }
  int correctAnswer; // +1 or -1 (occasionally, deliberately wrong)

  Trainer(float x, float y) {
    inputs = new float[] { x, y, 1 };
    float lineY = m * x + b;
    boolean lying = random(1) < trainerErrorRate;
    if (lying) {
      // Wrong answer: the opposite side of the line. (Exactly on the
      // line, both branches happen to report -1.)
      correctAnswer = y > lineY ? 1 : -1;
    } else {
      correctAnswer = y < lineY ? 1 : -1;
    }
  }
}

Trainer[] trainers; // pre-generated training samples, one consumed per frame
static final int NUMBER_OF_TRAINERS = 1800; // at 60 fps this is 30 seconds of training

Neuronatron neuronatron; // the perceptron being trained

// Build the perceptron and pre-generate every training sample.
void setup() {
  size(600, 400);
  frameRate(60);

  // Three inputs: x, y, and a bias term.
  neuronatron = new Neuronatron(3);

  trainers = new Trainer[NUMBER_OF_TRAINERS];
  for (int t = 0; t < trainers.length; t++) {
    // Sample points in centered coordinates: the 600x400 screen
    // with (0, 0) at its middle.
    trainers[t] = new Trainer(random(-300, 300), random(-200, 200));
  }
}

// One training step per frame until all trainers are used, then classify
// random screen points forever, coloring each by the perceptron's answer.
void draw() {
  // Redraw the true dividing line y = m * x + b, mapped from the model's
  // centered coordinates to screen coordinates (origin at (300, 200)).
  line(0.0, 200 - 300 * m + b, 600.0, 200 + 300 * m + b);
  if (frameCount < NUMBER_OF_TRAINERS) {
    // Training phase: feed one pre-generated sample per frame.
    Trainer trainer = trainers[frameCount];
    neuronatron.learn(trainer.inputs, trainer.correctAnswer);
  } else {
    // Testing phase: classify a random screen point and plot it.
    float x = random(600);
    float y = random(400);
    float[] inputs = new float[3];
    inputs[0] = x - 300; // convert screen coords back to the
    inputs[1] = y - 200; // centered coords the trainers used
    // Bias input must match the 1 used in Trainer; it was 100 here,
    // which scaled the learned bias weight 100x at test time and
    // shifted the decision boundary away from the trained one.
    inputs[2] = 1;
    int answer = neuronatron.feedforward(inputs);
    if (answer == 1) {
      fill(0, 0, 255); // blue for +1 (below the line in screen terms)
    } else {
      fill(0, 255, 0); // green for -1
    }
    rect(x - 2, y - 2, 4, 4);
  }
}