Browse Source

refactor: organize debug tools and clean up excessive logging

- Move debug scripts to tools/debug_scripts/ directory with README
- Create requirements.txt for Python debug environment setup
- Remove excessive debug logging from ScreenCaptureService
- Disable verbose detection logging in YOLOOnnxDetector
- Update .gitignore to exclude debug artifacts
- Preserve core BGR/RGB fix while cleaning up temporary code

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
feature/debug-shiny-pokeball-detection
Quildra 5 months ago
parent
commit
4bc86fede3
  1. 7
      .gitignore
  2. 117
      app/src/main/java/com/quillstudios/pokegoalshelper/ScreenCaptureService.kt
  3. 10
      app/src/main/java/com/quillstudios/pokegoalshelper/YOLOOnnxDetector.kt
  4. 51
      tools/debug_scripts/README.md
  5. 0
      tools/debug_scripts/debug_model_comparison.py
  6. 78
      tools/debug_scripts/export_model_variants.py
  7. 61
      tools/debug_scripts/inspect_onnx_model.py
  8. 6
      tools/debug_scripts/requirements.txt
  9. 0
      tools/debug_scripts/test_static_onnx.py

7
.gitignore

@@ -112,3 +112,10 @@ fastlane/readme.md
# Android Profiling # Android Profiling
*.hprof *.hprof
model_export_env/
# Debug tools and temporary files
tools/debug_scripts/debug_env/
raw_models/exports/
package-lock.json

117
app/src/main/java/com/quillstudios/pokegoalshelper/ScreenCaptureService.kt

@@ -1114,9 +1114,6 @@ class ScreenCaptureService : Service() {
val rowStride = planes[0].rowStride val rowStride = planes[0].rowStride
val rowPadding = rowStride - pixelStride * screenWidth val rowPadding = rowStride - pixelStride * screenWidth
Log.d(TAG, "🖼️ MANUAL CAPTURE DEBUG: pixelStride=$pixelStride, rowStride=$rowStride, rowPadding=$rowPadding")
Log.d(TAG, "🖼️ MANUAL CAPTURE DEBUG: screenSize=${screenWidth}x${screenHeight}")
// Create bitmap from image // Create bitmap from image
val bitmap = Bitmap.createBitmap( val bitmap = Bitmap.createBitmap(
screenWidth + rowPadding / pixelStride, screenWidth + rowPadding / pixelStride,
@@ -1125,66 +1122,23 @@ class ScreenCaptureService : Service() {
) )
bitmap.copyPixelsFromBuffer(buffer) bitmap.copyPixelsFromBuffer(buffer)
Log.d(TAG, "🖼️ MANUAL CAPTURE DEBUG: created bitmap=${bitmap.width}x${bitmap.height}")
// Crop bitmap to remove padding if needed // Crop bitmap to remove padding if needed
val croppedBitmap = if (rowPadding == 0) { val croppedBitmap = if (rowPadding == 0) {
Log.d(TAG, "🖼️ MANUAL CAPTURE DEBUG: No padding, using original bitmap")
bitmap bitmap
} else { } else {
Log.d(TAG, "🖼️ MANUAL CAPTURE DEBUG: Cropping bitmap from ${bitmap.width}x${bitmap.height} to ${screenWidth}x${screenHeight}")
val cropped = Bitmap.createBitmap(bitmap, 0, 0, screenWidth, screenHeight) val cropped = Bitmap.createBitmap(bitmap, 0, 0, screenWidth, screenHeight)
bitmap.recycle() // Clean up original bitmap.recycle() // Clean up original
cropped cropped
} }
Log.d(TAG, "🖼️ MANUAL CAPTURE DEBUG: final bitmap=${croppedBitmap.width}x${croppedBitmap.height}")
// Convert bitmap to Mat // Convert bitmap to Mat
val mat = Mat() val mat = Mat()
Utils.bitmapToMat(croppedBitmap, mat) Utils.bitmapToMat(croppedBitmap, mat)
Log.d(TAG, "🎨 MANUAL COLOR DEBUG: Mat type=${mat.type()}, channels=${mat.channels()}, size=${mat.cols()}x${mat.rows()}") // Convert from RGBA to BGR (OpenCV format for proper color channel handling)
// Sample specific pixels to check color values
if (mat.rows() > 0 && mat.cols() > 0) {
// Sample center pixel
val centerY = mat.rows() / 2
val centerX = mat.cols() / 2
val centerPixel = mat.get(centerY, centerX)
if (centerPixel != null && centerPixel.size >= 3) {
val b = centerPixel[0].toInt()
val g = centerPixel[1].toInt()
val r = centerPixel[2].toInt()
val a = if (centerPixel.size >= 4) centerPixel[3].toInt() else 255
Log.d(TAG, "🎨 MANUAL COLOR DEBUG: Center pixel (${centerX},${centerY}) BGRA=($b,$g,$r,$a) -> RGBA=($r,$g,$b,$a)")
Log.d(TAG, "🎨 MANUAL COLOR DEBUG: Center pixel hex = #${String.format("%02x%02x%02x", r, g, b)}")
}
// Sample shiny icon pixel at x=155, y=1087
val shinyX = 155
val shinyY = 1087
if (shinyX < mat.cols() && shinyY < mat.rows()) {
val shinyPixel = mat.get(shinyY, shinyX)
if (shinyPixel != null && shinyPixel.size >= 3) {
val b = shinyPixel[0].toInt()
val g = shinyPixel[1].toInt()
val r = shinyPixel[2].toInt()
val a = if (shinyPixel.size >= 4) shinyPixel[3].toInt() else 255
Log.d(TAG, "✨ SHINY PIXEL DEBUG: Shiny icon pixel (${shinyX},${shinyY}) BGRA=($b,$g,$r,$a) -> RGBA=($r,$g,$b,$a)")
Log.d(TAG, "✨ SHINY PIXEL DEBUG: Shiny icon pixel hex = #${String.format("%02x%02x%02x", r, g, b)}")
}
} else {
Log.w(TAG, "⚠️ SHINY PIXEL DEBUG: Coordinates (${shinyX},${shinyY}) out of bounds for ${mat.cols()}x${mat.rows()} image")
}
}
// Convert from RGBA to BGR (OpenCV format, then YOLO preprocessing will handle RGB conversion)
val bgrMat = Mat() val bgrMat = Mat()
Imgproc.cvtColor(mat, bgrMat, Imgproc.COLOR_RGBA2BGR) Imgproc.cvtColor(mat, bgrMat, Imgproc.COLOR_RGBA2BGR)
Log.d(TAG, "🎨 COLOR FIX: Converted RGBA to BGR format for OpenCV compatibility")
// Clean up // Clean up
mat.release() mat.release()
croppedBitmap.recycle() croppedBitmap.recycle()
@@ -1205,11 +1159,6 @@ class ScreenCaptureService : Service() {
val mat = convertImageToMat(image) val mat = convertImageToMat(image)
if (mat != null) { if (mat != null) {
// DEBUG: Save captured image for comparison with working test image
saveDebugImage(mat, "captured_screen_${System.currentTimeMillis()}")
// Also test this captured image through ONNX pipeline directly
testCapturedImageThroughONNX(mat)
// Use controller to process detection (this will notify UI via callbacks) // Use controller to process detection (this will notify UI via callbacks)
val detections = detectionController.processDetection(mat) val detections = detectionController.processDetection(mat)
@@ -1238,70 +1187,6 @@ class ScreenCaptureService : Service() {
} }
} }
/**
* Save debug image to external storage for comparison
*/
private fun saveDebugImage(mat: Mat, filename: String) {
try {
val debugDir = File(getExternalFilesDir(null), "debug_images")
if (!debugDir.exists()) {
debugDir.mkdirs()
}
val imageFile = File(debugDir, "$filename.jpg")
val success = Imgcodecs.imwrite(imageFile.absolutePath, mat)
if (success) {
Log.d(TAG, "🖼️ DEBUG: Saved captured image to ${imageFile.absolutePath}")
Log.d(TAG, "🖼️ DEBUG: Image properties - Size: ${mat.cols()}x${mat.rows()}, Type: ${mat.type()}, Channels: ${mat.channels()}")
} else {
Log.e(TAG, "❌ DEBUG: Failed to save image")
}
} catch (e: Exception) {
Log.e(TAG, "❌ DEBUG: Error saving image", e)
}
}
/**
* Test captured image directly through ONNX pipeline to isolate issues
*/
private fun testCapturedImageThroughONNX(mat: Mat) {
try {
Log.d(TAG, "🧪 DEBUG: Testing captured image through ONNX pipeline")
Log.d(TAG, "🧪 DEBUG: Input image - Size: ${mat.cols()}x${mat.rows()}, Type: ${mat.type()}")
// Test with existing initialized ONNX detector
val currentDetector = yoloDetector
val detections = if (currentDetector is YOLOOnnxDetector) {
Log.d(TAG, "🧪 DEBUG: Using existing ONNX detector")
currentDetector.detect(mat)
} else {
Log.d(TAG, "🧪 DEBUG: Creating new ONNX detector for test")
val testDetector = YOLOOnnxDetector(this)
testDetector.detect(mat)
}
Log.d(TAG, "🧪 DEBUG: Direct ONNX test found ${detections.size} detections")
// Check specifically for shiny icons (class 50)
val shinyDetections = detections.filter { detection -> detection.classId == 50 }
if (shinyDetections.isNotEmpty()) {
Log.d(TAG, "✨ DEBUG: FOUND ${shinyDetections.size} SHINY ICONS in captured image!")
shinyDetections.forEach { detection ->
Log.d(TAG, "✨ DEBUG: Shiny detection - conf: ${detection.confidence}, box: [${detection.boundingBox.x}, ${detection.boundingBox.y}, ${detection.boundingBox.width}, ${detection.boundingBox.height}]")
}
} else {
Log.d(TAG, "❌ DEBUG: NO SHINY ICONS found in captured image")
}
// Log all detections for comparison
val classGroups = detections.groupBy { detection -> detection.classId }
Log.d(TAG, "🧪 DEBUG: Detection classes found: ${classGroups.keys.sorted()}")
} catch (e: Exception) {
Log.e(TAG, "❌ DEBUG: Error testing captured image", e)
}
}
override fun onDestroy() { override fun onDestroy() {
super.onDestroy() super.onDestroy()

10
app/src/main/java/com/quillstudios/pokegoalshelper/YOLOOnnxDetector.kt

@@ -39,8 +39,8 @@ class YOLOOnnxDetector(private val context: Context) {
var DEBUG_CLASS_FILTER: String? = null // Set to class name to show only that class var DEBUG_CLASS_FILTER: String? = null // Set to class name to show only that class
var SHOW_ALL_CONFIDENCES = false // Show all detections with their confidences var SHOW_ALL_CONFIDENCES = false // Show all detections with their confidences
// Special debug mode for shiny icon investigation // Debug flag for troubleshooting detection issues (disable in production)
var DEBUG_SHINY_DETECTION = true // Enable detailed shiny icon debugging private const val DEBUG_DETECTION = false
fun setCoordinateMode(mode: String) { fun setCoordinateMode(mode: String) {
COORD_TRANSFORM_MODE = mode COORD_TRANSFORM_MODE = mode
@@ -406,7 +406,7 @@ class YOLOOnnxDetector(private val context: Context) {
val flatOutput = outputTensor[0].flatMap { it.asIterable() }.toFloatArray() val flatOutput = outputTensor[0].flatMap { it.asIterable() }.toFloatArray()
// Debug: Log raw output statistics when looking for shiny icons // Debug: Log raw output statistics when looking for shiny icons
if (DEBUG_SHINY_DETECTION) { if (DEBUG_DETECTION) {
val maxVal = flatOutput.maxOrNull() ?: 0f val maxVal = flatOutput.maxOrNull() ?: 0f
val nonZeroCount = flatOutput.count { it > 0.01f } val nonZeroCount = flatOutput.count { it > 0.01f }
Log.w(TAG, "🔬 [RAW OUTPUT] Method: $method, FlatOutput size: ${flatOutput.size}, Max value: %.4f, Non-zero (>0.01): $nonZeroCount".format(maxVal)) Log.w(TAG, "🔬 [RAW OUTPUT] Method: $method, FlatOutput size: ${flatOutput.size}, Max value: %.4f, Non-zero (>0.01): $nonZeroCount".format(maxVal))
@@ -894,7 +894,7 @@ class YOLOOnnxDetector(private val context: Context) {
} }
// Special debug logging for shiny icon (class 50) // Special debug logging for shiny icon (class 50)
if (DEBUG_SHINY_DETECTION && (classId == 50 || className == "shiny_icon") && mappedConfidence > 0.05f) { if (DEBUG_DETECTION && (classId == 50 || className == "shiny_icon") && mappedConfidence > 0.05f) {
Log.w(TAG, "✨ [SHINY DEBUG] Found shiny_icon candidate! ClassID: $classId, Confidence: %.4f (mapped: %.4f), Coords: [%.1f,%.1f,%.1f,%.1f]".format(confidence, mappedConfidence, x1, y1, x2, y2)) Log.w(TAG, "✨ [SHINY DEBUG] Found shiny_icon candidate! ClassID: $classId, Confidence: %.4f (mapped: %.4f), Coords: [%.1f,%.1f,%.1f,%.1f]".format(confidence, mappedConfidence, x1, y1, x2, y2))
} }
@@ -1231,7 +1231,7 @@ class YOLOOnnxDetector(private val context: Context) {
} }
// Special debug logging for shiny icon (class 50) // Special debug logging for shiny icon (class 50)
if (DEBUG_SHINY_DETECTION && (classId == 50 || className == "shiny_icon") && mappedConfidence > 0.05f) { if (DEBUG_DETECTION && (classId == 50 || className == "shiny_icon") && mappedConfidence > 0.05f) {
Log.w(TAG, "✨ [SHINY DEBUG] Found shiny_icon candidate! ClassID: $classId, Confidence: %.4f (mapped: %.4f), Coords: [%.1f,%.1f,%.1f,%.1f]".format(confidence, mappedConfidence, x1, y1, x2, y2)) Log.w(TAG, "✨ [SHINY DEBUG] Found shiny_icon candidate! ClassID: $classId, Confidence: %.4f (mapped: %.4f), Coords: [%.1f,%.1f,%.1f,%.1f]".format(confidence, mappedConfidence, x1, y1, x2, y2))
} }

51
tools/debug_scripts/README.md

@@ -0,0 +1,51 @@
# Debug Scripts for YOLO ONNX Detection
This directory contains debugging tools for troubleshooting YOLO object detection issues.
## Setup
1. Create a Python virtual environment:
```bash
python -m venv debug_env
source debug_env/bin/activate # On Windows: debug_env\Scripts\activate
```
2. Install dependencies:
```bash
pip install -r requirements.txt
```
## Scripts
### `debug_model_comparison.py`
Compares .pt model predictions with ONNX model outputs on the same static test image.
- Tests both PyTorch and ONNX models side-by-side
- Provides detailed debug output including preprocessing steps
- Useful for identifying model export issues
### `test_static_onnx.py`
Tests ONNX model against static images to isolate Android capture issues.
- Bypasses Android screen capture pipeline
- Tests multiple ONNX model variants
- Good for validating model functionality
### `export_model_variants.py`
Exports YOLO model variants with different NMS settings.
- Creates models with different confidence/IoU thresholds
- Useful for debugging detection sensitivity issues
### `inspect_onnx_model.py`
Inspects ONNX model structure and metadata.
- Verifies class mappings and model architecture
- Helpful for debugging model export problems
## Usage
Place test images in `../../test_images/` and ensure model files are in `../../raw_models/`.
Example:
```bash
cd tools/debug_scripts
source debug_env/bin/activate
python debug_model_comparison.py
```

0
debug_model_comparison.py → tools/debug_scripts/debug_model_comparison.py

78
tools/debug_scripts/export_model_variants.py

@ -0,0 +1,78 @@
#!/usr/bin/env python3
"""
Export YOLO model variants with different NMS settings for shiny icon debugging.

Each variant is written to ./raw_models/exports/ as best_<name>.onnx so a
candidate can be copied into app/src/main/assets/best.onnx for on-device tests.
"""
from ultralytics import YOLO
import os


def export_model_variants():
    """Export ./raw_models/best.pt to several ONNX variants with different NMS settings.

    Side effects: creates ./raw_models/exports/ if needed and writes one
    best_<name>.onnx file per configuration; progress is reported via print().
    Per-variant export errors are caught and reported so one failure does not
    abort the remaining exports.
    """
    model_path = "./raw_models/best.pt"
    output_dir = "./raw_models/exports"

    # Create output directory
    os.makedirs(output_dir, exist_ok=True)

    print(f"Loading model from: {model_path}")
    model = YOLO(model_path)

    # Export configurations to test. 'name' and 'description' are bookkeeping
    # keys; everything else is passed straight to model.export().
    configs = [
        {
            "name": "no_nms",
            "nms": False,
            "simplify": True,
            "description": "Raw model output without NMS - for debugging shiny detection"
        },
        {
            "name": "nms_relaxed",
            "nms": True,
            "max_det": 500,   # Increase from default 300
            "conf": 0.1,      # Lower confidence threshold
            "simplify": True,
            "description": "NMS with more detections and lower confidence"
        },
        {
            "name": "nms_very_relaxed",
            "nms": True,
            "max_det": 1000,  # Even more detections
            "conf": 0.05,     # Very low confidence
            "simplify": True,
            "description": "NMS with maximum detections for rare classes"
        }
    ]

    for config in configs:
        try:
            print(f"\n🚀 Exporting {config['name']}: {config['description']}")

            # Extract export parameters (drop the bookkeeping keys)
            export_params = {k: v for k, v in config.items()
                             if k not in ['name', 'description']}

            # Export model
            exported_path = model.export(
                format='onnx',
                **export_params
            )

            # Move to organized location. os.replace overwrites a stale file
            # left by a previous run; os.rename would raise on Windows there.
            output_file = os.path.join(output_dir, f"best_{config['name']}.onnx")
            if os.path.exists(exported_path):
                os.replace(exported_path, output_file)
                print(f"✅ Exported: {output_file}")
            else:
                print(f"❌ Export failed for {config['name']}")

        except Exception as e:
            print(f"❌ Error exporting {config['name']}: {e}")

    print(f"\n📁 All exports saved to: {output_dir}")
    print("\n📋 Summary:")
    print("- best_no_nms.onnx: Raw 8400x99 output for debugging")
    print("- best_nms_relaxed.onnx: NMS with 500 max detections")
    print("- best_nms_very_relaxed.onnx: NMS with 1000 max detections")
    print("\nNext: Copy desired model to app/src/main/assets/ as best.onnx")


if __name__ == "__main__":
    export_model_variants()

61
tools/debug_scripts/inspect_onnx_model.py

@@ -0,0 +1,61 @@
#!/usr/bin/env python3
"""
Inspect ONNX model structure to verify class mappings.

Prints each model's inputs/outputs, interprets YOLO output tensor shapes
(NMS-applied vs raw), and dumps embedded metadata so class-index mismatches
between the export and the Android detector can be spotted quickly.
"""
import onnx
import numpy as np


def inspect_onnx_model(model_path):
    """Print structure, I/O shapes, and metadata of the ONNX model at model_path.

    Raises:
        FileNotFoundError: when the file does not exist, so callers can handle
            "missing model" separately. All other inspection errors are caught
            and reported here rather than propagated.
    """
    print(f"Inspecting ONNX model: {model_path}")

    try:
        # Load the model
        model = onnx.load(model_path)

        print(f"\n📋 Model Info:")
        print(f"IR Version: {model.ir_version}")
        print(f"Producer: {model.producer_name} {model.producer_version}")

        # Check inputs
        print(f"\n📥 Inputs:")
        for input_info in model.graph.input:
            print(f"  {input_info.name}: {[d.dim_value for d in input_info.type.tensor_type.shape.dim]}")

        # Check outputs
        print(f"\n📤 Outputs:")
        for output_info in model.graph.output:
            shape = [d.dim_value for d in output_info.type.tensor_type.shape.dim]
            print(f"  {output_info.name}: {shape}")

            # For NMS models, try to interpret the output format
            if len(shape) == 3 and shape[2] == 6:
                print(f"    → NMS format: [batch, {shape[1]} detections, 6 values (x,y,w,h,conf,class)]")
            elif len(shape) == 3 and shape[1] > 90:
                print(f"    → Raw format: [batch, {shape[1]} channels, {shape[2]} anchors]")
                print(f"    → Channels: 4 coords + {shape[1]-4} classes")

        # Check for any metadata about classes
        print(f"\n🏷️ Metadata:")
        for prop in model.metadata_props:
            print(f"  {prop.key}: {prop.value}")

        print(f"\n🔍 Model Summary: {len(model.graph.node)} nodes, {len(model.graph.initializer)} initializers")

    except FileNotFoundError:
        # Re-raise so the caller's "model not found" branch actually fires;
        # the broad handler below would otherwise swallow it and that branch
        # would be dead code.
        raise
    except Exception as e:
        print(f"❌ Error inspecting model: {e}")


if __name__ == "__main__":
    models_to_check = [
        "app/src/main/assets/best.onnx",
        "raw_models/exports/best_no_nms.onnx",
        "raw_models/exports/best_nms_relaxed.onnx",
        "raw_models/exports/best_nms_very_relaxed.onnx"
    ]

    for model_path in models_to_check:
        try:
            inspect_onnx_model(model_path)
            print("\n" + "="*60 + "\n")
        except FileNotFoundError:
            print(f"⚠️ Model not found: {model_path}\n")

6
tools/debug_scripts/requirements.txt

@@ -0,0 +1,6 @@
ultralytics>=8.0.0
opencv-python>=4.5.0
onnxruntime>=1.15.0
onnx>=1.14.0
numpy>=1.21.0
Pillow>=8.0.0

0
test_static_onnx.py → tools/debug_scripts/test_static_onnx.py

Loading…
Cancel
Save