Lesson 15: Advanced miniAuto Features

Computer vision, machine learning, and intelligent behaviors

🎯 Learning Objectives

Knowledge Goals:

  • Understand computer vision principles
  • Learn machine learning basics for robotics
  • Explore advanced sensor fusion techniques
  • Master intelligent behavior programming

Practical Skills:

  • Implement object detection and tracking
  • Create adaptive navigation algorithms
  • Build intelligent decision-making systems
  • Develop complex autonomous missions

👁️ Section 1: Computer Vision (20 minutes)

Object Detection and Recognition

The miniAuto camera module enables sophisticated visual processing for autonomous navigation:

#include <miniAuto.h>
// OpenCV types below (Mat, Point) assume the camera module's vision
// co-processor; a bare Arduino cannot run OpenCV itself.
#include <opencv2/opencv.hpp>

using namespace cv;
using namespace std;

miniAuto robot;

void setup() {
  robot.begin();
  robot.initCamera(640, 480);  // Initialize camera at 640x480 resolution
  Serial.begin(9600);
}

void loop() {
  // Capture and process image
  Mat frame = robot.captureFrame();
  
  // Detect objects using color filtering
  vector<Point> redObjects = detectColorObjects(frame, "red");
  vector<Point> blueObjects = detectColorObjects(frame, "blue");
  
  // Navigate based on detected objects
  if (!redObjects.empty()) {
    navigateToObject(redObjects[0], "red");
  } else if (!blueObjects.empty()) {
    avoidObject(blueObjects[0], "blue");
  } else {
    exploreEnvironment();
  }
  
  delay(100);  // roughly 10 decision cycles per second
}
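
The loop above calls a detectColorObjects() helper that the sketch never defines (navigateToObject(), avoidObject(), and exploreEnvironment() are likewise left to you). Here is a minimal sketch of the detector, assuming OpenCV is available on the vision co-processor; the HSV thresholds are purely illustrative:

// Hypothetical helper: returns the centroids of blobs matching a named
// color. Tune the HSV ranges for your camera and lighting.
vector<Point> detectColorObjects(Mat frame, String color) {
  Mat hsv, mask;
  cvtColor(frame, hsv, COLOR_BGR2HSV);

  if (color == "red") {
    // Red wraps around hue 0, so combine two ranges
    Mat lower, upper;
    inRange(hsv, Scalar(0, 120, 70), Scalar(10, 255, 255), lower);
    inRange(hsv, Scalar(170, 120, 70), Scalar(180, 255, 255), upper);
    bitwise_or(lower, upper, mask);
  } else {  // treat everything else as blue for this example
    inRange(hsv, Scalar(100, 150, 50), Scalar(130, 255, 255), mask);
  }

  vector<vector<Point>> contours;
  findContours(mask, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);

  vector<Point> centers;
  for (auto& c : contours) {
    Moments m = moments(c);
    if (m.m00 > 500) {  // ignore small noise blobs
      centers.push_back(Point((int)(m.m10 / m.m00), (int)(m.m01 / m.m00)));
    }
  }
  return centers;
}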

Line Following with Vision

Advanced line following using computer vision instead of simple sensors:

vector<Point> detectLine(Mat frame) {
  Mat gray, edges;
  cvtColor(frame, gray, COLOR_BGR2GRAY);
  Canny(gray, edges, 50, 150);  // edge thresholds suited to high-contrast tape
  
  vector<Vec4i> lines;
  HoughLinesP(edges, lines, 1, CV_PI/180, 50, 50, 10);  // votes, min length, max gap
  
  vector<Point> linePoints;
  for (auto line : lines) {
    Point start(line[0], line[1]);
    Point end(line[2], line[3]);
    linePoints.push_back(start);
    linePoints.push_back(end);
  }
  
  return linePoints;
}

void followLineWithVision() {
  Mat frame = robot.captureFrame();
  vector<Point> linePoints = detectLine(frame);
  
  if (!linePoints.empty()) {
    // Calculate line direction and adjust robot heading
    float lineAngle = calculateLineAngle(linePoints);
    float robotHeading = robot.getHeading();
    float correction = lineAngle - robotHeading;
    
    // Proportional steering on the heading error; constrain keeps the
    // speeds inside the assumed 0-100 motor range
    int leftSpeed = constrain(70 + (int)(correction * 2), 0, 100);
    int rightSpeed = constrain(70 - (int)(correction * 2), 0, 100);
    
    robot.setMotorSpeeds(leftSpeed, rightSpeed);
  } else {
    // No line detected - search pattern
    robot.turnRight(15);
    delay(200);
  }
}
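
calculateLineAngle() is also left undefined above. One way to implement it, sketched here assuming linePoints holds alternating start/end pairs and that detected segments point in roughly the same direction:

// Hypothetical helper: average segment angle in degrees. Naive
// averaging works because Hough segments on one line agree closely;
// angles near the +/-90 degree wrap would need extra care.
float calculateLineAngle(vector<Point>& linePoints) {
  float angleSum = 0;
  int segments = 0;
  for (size_t i = 0; i + 1 < linePoints.size(); i += 2) {
    float dx = linePoints[i + 1].x - linePoints[i].x;
    float dy = linePoints[i + 1].y - linePoints[i].y;
    angleSum += atan2(dy, dx) * 180.0 / PI;
    segments++;
  }
  return segments > 0 ? angleSum / segments : 0;
}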

💡 Key Insight

Computer vision lets a robot interpret its surroundings from images rather than relying only on proximity readings, so it can identify, classify, and track objects well before it is close enough to touch them.

🧠 Section 2: Machine Learning Integration (20 minutes)

Adaptive Behavior Learning

Implement simple machine learning to help the robot adapt to its environment:

class AdaptiveNavigation {
private:
  float successRates[4] = {0.5, 0.5, 0.5, 0.5}; // N, E, S, W
  int attempts[4] = {0, 0, 0, 0};
  int successes[4] = {0, 0, 0, 0};
  
public:
  int chooseBestDirection() {
    int bestDir = 0;
    float bestRate = successRates[0];
    
    for (int i = 1; i < 4; i++) {
      if (successRates[i] > bestRate) {
        bestRate = successRates[i];
        bestDir = i;
      }
    }
    
    return bestDir;
  }
  
  void recordOutcome(int direction, bool success) {
    attempts[direction]++;
    if (success) successes[direction]++;
    
    // Update success rate
    successRates[direction] = (float)successes[direction] / attempts[direction];
    
    Serial.print("Direction ");
    Serial.print(direction);
    Serial.print(" success rate: ");
    Serial.println(successRates[direction]);
  }
};

AdaptiveNavigation navigator;

void intelligentExploration() {
  int chosenDirection = navigator.chooseBestDirection();
  
  // Execute movement in chosen direction
  bool success = moveInDirection(chosenDirection);
  
  // Learn from the outcome
  navigator.recordOutcome(chosenDirection, success);
}
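
Note that chooseBestDirection() always exploits the current best estimate, so a direction that happens to fail on its first attempt may never be retried. A common remedy, shown here as a sketch rather than a miniAuto library feature, is epsilon-greedy selection: occasionally pick a random direction to keep exploring.

// Hypothetical wrapper: with probability epsilon, explore a random
// direction; otherwise exploit the best-known one.
int chooseDirectionEpsilonGreedy(AdaptiveNavigation& nav, float epsilon) {
  if (random(100) < (long)(epsilon * 100)) {
    return random(4);  // explore: 0..3 = N, E, S, W
  }
  return nav.chooseBestDirection();  // exploit
}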

Pattern Recognition

Train the robot to recognize and respond to environmental patterns:

struct EnvironmentPattern {
  float lightLevel;
  float temperature;
  float distance[4];  // N, E, S, W distances
  String action;      // Learned optimal action
  float confidence;   // How confident we are in this action
};

class PatternLearner {
private:
  EnvironmentPattern patterns[50];
  int patternCount = 0;
  
public:
  String predictAction(float light, float temp, float distances[4]) {
    float bestMatch = 0;
    String bestAction = "explore";
    
    for (int i = 0; i < patternCount; i++) {
      float similarity = calculateSimilarity(light, temp, distances, patterns[i]);
      
      if (similarity > bestMatch && patterns[i].confidence > 0.7) {
        bestMatch = similarity;
        bestAction = patterns[i].action;
      }
    }
    
    return bestAction;
  }
  
  void learnPattern(float light, float temp, float distances[4], String action, bool successful) {
    // Find or create pattern
    int patternIndex = findSimilarPattern(light, temp, distances);
    
    if (patternIndex == -1 && patternCount < 50) {
      // Create new pattern
      patternIndex = patternCount++;
      patterns[patternIndex].lightLevel = light;
      patterns[patternIndex].temperature = temp;
      for (int i = 0; i < 4; i++) {
        patterns[patternIndex].distance[i] = distances[i];
      }
      patterns[patternIndex].confidence = 0.5;
    }
    
    if (patternIndex != -1) {
      // Update pattern
      patterns[patternIndex].action = action;
      if (successful) {
        patterns[patternIndex].confidence = min(1.0f, patterns[patternIndex].confidence + 0.1f);
      } else {
        patterns[patternIndex].confidence = max(0.0f, patterns[patternIndex].confidence - 0.1f);
      }
    }
  }
};
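
Two helpers above, calculateSimilarity() and findSimilarPattern(), are left undefined. A minimal sketch, assuming both are declared as private members of PatternLearner so they can read patterns[], with normalization constants that are illustrative (10-bit light readings, ~50 degree temperature span, ~400 cm sensor range):

// Hypothetical member: inverse normalized distance over sensor values.
float PatternLearner::calculateSimilarity(float light, float temp,
                                          float distances[4],
                                          EnvironmentPattern& p) {
  float diff = fabs(light - p.lightLevel) / 1023.0 +
               fabs(temp - p.temperature) / 50.0;
  for (int i = 0; i < 4; i++) {
    diff += fabs(distances[i] - p.distance[i]) / 400.0;
  }
  return 1.0 / (1.0 + diff);  // 1.0 means identical readings
}

// Hypothetical member: first stored pattern similar enough to reuse.
int PatternLearner::findSimilarPattern(float light, float temp,
                                       float distances[4]) {
  for (int i = 0; i < patternCount; i++) {
    if (calculateSimilarity(light, temp, distances, patterns[i]) > 0.8) {
      return i;  // close enough to treat as the same situation
    }
  }
  return -1;  // no match: the caller creates a new pattern
}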

🎯 Pro Tip

Start with simple pattern recognition and gradually increase complexity. The robot should learn from both successful and failed attempts to improve its decision-making.

🔗 Section 3: Advanced Sensor Fusion (20 minutes)

Multi-Modal Sensor Integration

Combine multiple sensor types for robust navigation and decision-making:

struct SensorReading {
  float ultrasonicDistance;
  float imuHeading;
  float lightLevel;
  float temperature;
  Point visualTarget;
  bool visualTargetValid;
  unsigned long timestamp;
};

// Minimal decision type assumed by the fusion logic below; only the
// confidence field is used directly in this lesson, and the motor
// speed fields are illustrative.
struct NavigationDecision {
  int leftSpeed;     // assumed field
  int rightSpeed;    // assumed field
  float confidence;  // how much we trust this decision (0-1)
};

class AdvancedSensorFusion {
private:
  SensorReading readings[10];  // Circular buffer
  int currentIndex = 0;
  
public:
  SensorReading getCurrentReading() {
    SensorReading reading;
    reading.ultrasonicDistance = robot.getDistance();
    reading.imuHeading = robot.getHeading();
    reading.lightLevel = robot.getLightLevel();
    reading.temperature = robot.getTemperature();
    reading.visualTarget = robot.detectVisualTarget();
    reading.visualTargetValid = (reading.visualTarget.x != -1);  // (-1,-1) means no target found
    reading.timestamp = millis();
    
    // Store in circular buffer
    readings[currentIndex] = reading;
    currentIndex = (currentIndex + 1) % 10;
    
    return reading;
  }
  
  NavigationDecision makeIntelligentDecision() {
    SensorReading current = getCurrentReading();
    NavigationDecision decision;
    
    // Confidence-weighted decision making
    float ultrasonicConfidence = calculateUltrasonicConfidence(current);
    float visualConfidence = current.visualTargetValid ? 0.9 : 0.1;
    float imuConfidence = 0.8;
    
    // Combine multiple sensor inputs
    if (visualConfidence > 0.7 && current.visualTargetValid) {
      // Trust visual system most when target is clearly visible
      decision = navigateToVisualTarget(current.visualTarget);
      decision.confidence = visualConfidence;
    }
    else if (ultrasonicConfidence > 0.6) {
      // Use ultrasonic for obstacle avoidance
      decision = avoidObstacles(current.ultrasonicDistance);
      decision.confidence = ultrasonicConfidence;
    }
    else {
      // Fall back to IMU-based exploration
      decision = exploreWithHeading(current.imuHeading);
      decision.confidence = imuConfidence;
    }
    
    return decision;
  }
};
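
calculateUltrasonicConfidence() is not shown above. One plausible heuristic, sketched here with threshold values that are assumptions, rates a reading by how much it jumped since the previous sample:

// Hypothetical heuristic: sudden jumps between consecutive ultrasonic
// readings usually mean echoes, crosstalk, or a fast-moving obstacle.
float calculateUltrasonicConfidence(const SensorReading& current) {
  static float lastDistance = -1;  // -1 = no previous sample yet
  float confidence = 0.9;
  if (lastDistance >= 0) {
    float jump = fabs(current.ultrasonicDistance - lastDistance);
    if (jump > 20)     confidence = 0.3;  // large jump: distrust it
    else if (jump > 5) confidence = 0.6;  // moderate jump: be cautious
  }
  lastDistance = current.ultrasonicDistance;
  return confidence;
}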

Predictive Navigation

Use sensor history to predict future conditions and plan accordingly:

// Note: predictiveNavigation() and calculateDistanceTrend() read the
// fusion class's private buffer, so declare them as members of
// AdvancedSensorFusion; they are shown with qualified names here.
void AdvancedSensorFusion::predictiveNavigation() {
  // Analyze recent sensor trends
  float distanceTrend = calculateDistanceTrend();
  float headingStability = calculateHeadingStability();
  
  if (distanceTrend < -5) {
    // Distance decreasing rapidly - obstacle approaching
    Serial.println("Predictive: Obstacle approaching, preparing evasive action");
    prepareEvasiveManeuver();
  }
  else if (headingStability < 0.3) {
    // Heading unstable - possible wheel slippage or external force
    Serial.println("Predictive: Heading unstable, reducing speed");
    robot.setMaxSpeed(50);
  }
  else if (distanceTrend > 10 && headingStability > 0.8) {
    // Clear path ahead and stable heading - safe to increase speed
    Serial.println("Predictive: Clear path, increasing speed");
    robot.setMaxSpeed(100);
  }
}

float AdvancedSensorFusion::calculateDistanceTrend() {
  if (currentIndex < 6) return 0;  // Needs six readings; production code should track a fill count that survives buffer wraparound
  
  float recentAvg = 0, olderAvg = 0;
  for (int i = 0; i < 3; i++) {
    int idx = (currentIndex - 1 - i + 10) % 10;
    recentAvg += readings[idx].ultrasonicDistance;
  }
  for (int i = 3; i < 6; i++) {
    int idx = (currentIndex - 1 - i + 10) % 10;
    olderAvg += readings[idx].ultrasonicDistance;
  }
  
  return (recentAvg / 3) - (olderAvg / 3);
}
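
calculateHeadingStability() is the remaining undefined helper. A sketch that maps the variance of the last five buffered headings onto a 0-1 score (the scaling constant is an assumption to tune):

// Hypothetical member: low variance in recent headings = high stability.
float AdvancedSensorFusion::calculateHeadingStability() {
  float mean = 0;
  for (int i = 0; i < 5; i++) {
    int idx = (currentIndex - 1 - i + 10) % 10;
    mean += readings[idx].imuHeading;
  }
  mean /= 5;

  float variance = 0;
  for (int i = 0; i < 5; i++) {
    int idx = (currentIndex - 1 - i + 10) % 10;
    float d = readings[idx].imuHeading - mean;
    variance += d * d;
  }
  variance /= 5;

  return 1.0 / (1.0 + variance / 100.0);  // ~1.0 steady, near 0 erratic
}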

🎯 Section 4: Intelligent Mission Planning (20 minutes)

Complex Mission Framework

Create sophisticated missions that combine multiple objectives and adapt to changing conditions:

enum MissionState {
  EXPLORING,
  TARGET_ACQUIRED,
  APPROACHING_TARGET,
  COLLECTING_DATA,
  RETURNING_HOME,
  MISSION_COMPLETE
};

class IntelligentMission {
private:
  MissionState currentState = EXPLORING;
  Point homePosition;
  vector<Point> targetsFound;
  vector<Point> targetsVisited;
  unsigned long missionStartTime;
  
public:
  void executeMission() {
    switch (currentState) {
      case EXPLORING:
        if (exploreForTargets()) {
          currentState = TARGET_ACQUIRED;
        }
        break;
        
      case TARGET_ACQUIRED: {
        // Braces open a scope so a variable can be declared inside a case
        Point nearestTarget = findNearestUnvisitedTarget();
        if (navigateToTarget(nearestTarget)) {
          currentState = APPROACHING_TARGET;
        }
        break;
      }
        
      case APPROACHING_TARGET:
        if (isAtTarget()) {
          currentState = COLLECTING_DATA;
        }
        break;
        
      case COLLECTING_DATA:
        if (collectDataAtTarget()) {
          markTargetVisited();
          if (allTargetsVisited() || missionTimeExpired()) {
            currentState = RETURNING_HOME;
          } else {
            currentState = EXPLORING;
          }
        }
        break;
        
      case RETURNING_HOME:
        if (navigateToHome()) {
          currentState = MISSION_COMPLETE;
        }
        break;
        
      case MISSION_COMPLETE:
        robot.stop();
        reportMissionResults();
        break;
    }
  }
  
  bool exploreForTargets() {
    // Use computer vision to scan for targets
    Mat frame = robot.captureFrame();
    vector<Point> newTargets = detectTargets(frame);
    
    for (Point target : newTargets) {
      if (!isTargetKnown(target)) {
        targetsFound.push_back(target);
        Serial.println("New target discovered!");
        return true;
      }
    }
    
    // Continue exploration pattern
    intelligentExploration();
    return false;
  }
};
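
Of the predicate helpers the state machine calls, missionTimeExpired() is the simplest to fill in. A one-line sketch using the stored missionStartTime, with a 5-minute budget matching the activity below:

// Hypothetical member: compare elapsed time against the mission budget.
bool IntelligentMission::missionTimeExpired() {
  return millis() - missionStartTime > 5UL * 60UL * 1000UL;  // 5 minutes
}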

🛠️ Hands-On Activity: Intelligent Search and Rescue Mission (25 minutes)

Project: Smart Search and Rescue Robot

Program your miniAuto to conduct an intelligent search and rescue mission that combines computer vision, machine learning, and advanced sensor fusion. A starter skeleton follows the requirements list.

Mission Requirements:

  • Autonomously search a defined area for colored targets (representing survivors)
  • Use computer vision to identify and classify targets
  • Adapt search strategy based on environmental conditions
  • Return to starting position and report findings
  • Complete mission within 5 minutes
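
A starter skeleton for wiring the pieces together, assuming the classes from Sections 2-4 are compiled into the same sketch; filling in target detection, navigation, and reporting is the actual exercise:

// Hypothetical mission scaffold: sample the sensors, then let the
// Section 4 state machine drive the robot one step per loop pass.
miniAuto robot;
AdvancedSensorFusion fusion;
IntelligentMission mission;

void setup() {
  robot.begin();
  robot.initCamera(640, 480);
  Serial.begin(9600);
}

void loop() {
  fusion.getCurrentReading();  // keep the circular buffer fresh
  mission.executeMission();    // advance the mission state machine
  delay(100);
}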

🏆 Advanced Challenges

  • Implement priority-based target selection (red = high priority, blue = low priority)
  • Add obstacle avoidance while maintaining search efficiency
  • Create a map of discovered targets and obstacles
  • Implement cooperative behavior with multiple robots

📝 Assessment & Homework

📊 Quick Assessment

  • Demonstrate computer vision object detection
  • Show adaptive learning in action
  • Explain sensor fusion decision-making
  • Present intelligent mission results

🏠 Homework Assignment

  • Research advanced AI techniques in robotics
  • Design a complex multi-objective mission
  • Study real-world autonomous systems
  • Prepare for final competition project