保存时间:2026/4/2 18:18:18
方案概述:可以使用 sgfmill 等 Python 库来创建和处理 SGF 格式的棋谱文件,将识别到的棋局信息转换为 SGF 格式并保存;用 Tkinter 或 PyQt 等库创建简单的图形界面,用于显示当前棋局、操作按钮等。棋盘检测方面,可用 findChessboardCorners 函数检测棋盘的角点,从而确定棋盘的位置和大小;如果棋盘是有颜色的,可以先进行灰度化、高斯模糊、边缘检测等预处理操作,提高检测精度;再用 HoughCircles 函数检测圆形棋子的位置和半径,并根据颜色判断棋子的颜色。示例代码如下:
import cv2
import cv2
import numpy as np

# Read the board image (replace the path with your actual Go board photo).
image = cv2.imread('your_go_board_image.jpg')
if image is None:
    # imread returns None instead of raising; fail early with a clear message.
    raise SystemExit("Failed to read image: check the path")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Look for inner chessboard corners, assuming a 19x19 Go board.
ret, corners = cv2.findChessboardCorners(gray, (19, 19), None)
if ret:
    # Draw the detected corners for visual inspection.
    cv2.drawChessboardCorners(image, (19, 19), corners, ret)
    # np.int0 was removed in NumPy >= 1.26; np.intp is the supported alias.
    corners = corners.astype(np.intp)
    # Crude cell partition: check whether each cell looks dark enough to
    # contain a stone (a rough heuristic, not an accurate detector).
    cell_size = (image.shape[0] // 19, image.shape[1] // 19)
    for i in range(19):
        for j in range(19):
            cell = gray[i * cell_size[0]:(i + 1) * cell_size[0],
                        j * cell_size[1]:(j + 1) * cell_size[1]]
            mean_gray = np.mean(cell)
            if mean_gray < 100:  # dark region: probably a stone
                cv2.circle(image,
                           (j * cell_size[1] + cell_size[1] // 2,
                            i * cell_size[0] + cell_size[0] // 2),
                           5, (0, 0, 255), -1)
    cv2.imshow('Chessboard with Corners and Possible Pieces', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
else:
    print("棋盘角点未检测到")
'your_go_board_image.jpg' 需要替换为你实际的围棋棋盘图片路径。cv2 是 OpenCV 库在Python中的模块名称,要使用上述代码中的 cv2 相关功能,你需要安装 opencv-python 这个Python包。pip 安装(最常用):打开命令行(在Windows上是命令提示符或PowerShell,在Linux和macOS上是终端),然后运行以下命令:pip install opencv-python
pip3 来安装,命令如下:pip3 install opencv-python
conda 安装(如果你使用Anaconda或Miniconda环境):打开Anaconda Prompt(Windows)或终端(Linux、macOS),然后运行:conda install -c conda-forge opencv
import cv2 导入 OpenCV 库进行图像处理和计算机视觉相关的操作了。 sudo apt update
sudo apt upgrade
sudo apt-get install build-essential cmake git libgtk2.0-dev pkg-config libavcodec-dev libavformat-dev libswscale-dev
sudo apt-get install python3-dev python3-numpy libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev libjasper-dev libdc1394-22-dev
libjasper-dev 可能没有,你可以尝试添加源或跳过这一项,如果后续 OpenCV 安装和使用时出现问题再处理)git clone https://github.com/opencv/opencv.git
cd opencv
git checkout 4.5.5 # 可以指定你想要的版本号,这里以 4.5.5 为例
mkdir build
cd build
cmake 进行配置,这里指定安装 Python 3 版本的 OpenCV(根据你的实际情况调整):cmake -D CMAKE_BUILD_TYPE=RELEASE \
-D CMAKE_INSTALL_PREFIX=/usr/local \
-D PYTHON3_EXECUTABLE=/usr/bin/python3 \
-D PYTHON3_INCLUDE_DIR=$(python3 -c "import sysconfig; print(sysconfig.get_path('include'))") \
-D PYTHON3_PACKAGES_PATH=$(python3 -c "import sysconfig; print(sysconfig.get_path('purelib'))") \
..
make -j4 # 根据树莓派的核心数调整 -j 后面的数字,加快编译速度
sudo make install
sgfmill 库将识别结果转换为 SGF 格式棋谱。需要注意的是,这只是一个基础示例,实际的围棋棋谱识别会面临很多复杂情况(如棋子粘连、光线干扰等),该示例仅作参考,你可能需要根据实际情况进一步优化和完善。opencv-python 和 sgfmill 库。可以使用以下命令安装:pip install opencv-python sgfmill
import cv2
import numpy as np
from sgfmill import sgf
# sgfmill has no `board` module; the board class lives in sgfmill.boards.
from sgfmill import boards

# Read the board photo (replace with your actual Go record image path).
image = cv2.imread('your_go_board_image.jpg')
if image is None:
    raise SystemExit("Failed to read image: check the path")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Find the grid corners, assuming a 19x19 Go board.
board_size = (19, 19)
ret, corners = cv2.findChessboardCorners(gray, board_size, None)
if ret:
    # Refine corner positions to sub-pixel accuracy.
    corners = cv2.cornerSubPix(
        gray, corners, (11, 11), (-1, -1),
        (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1))
    corners = corners.reshape(-1, 2)
    # Approximate cell size from adjacent corner spacing.
    # Cast to int: floats are not valid slice bounds.
    cell_size_x = int(corners[1, 0] - corners[0, 0])
    cell_size_y = int(corners[board_size[0], 1] - corners[0, 1])
    go_board = boards.Board(board_size[0])
    # Walk every grid point and classify it by mean cell brightness.
    for y in range(board_size[1]):
        for x in range(board_size[0]):
            cell_x = int(corners[x + y * board_size[0], 0])
            cell_y = int(corners[x + y * board_size[0], 1])
            cell = gray[cell_y:cell_y + cell_size_y,
                        cell_x:cell_x + cell_size_x]
            mean_gray = np.mean(cell)
            # Board.play takes (row, col, colour) — there is no `set` method
            # taking a coordinate tuple.
            if mean_gray < 100:    # dark region: probably a black stone
                go_board.play(y, x, 'b')
            elif mean_gray > 150:  # bright region: probably a white stone
                go_board.play(y, x, 'w')
    # A photo yields a position, not a move sequence, so record the stones
    # as SGF setup properties (AB/AW) on the root node.
    game = sgf.Sgf_game(size=board_size[0])
    root = game.get_root()
    black_points = set()
    white_points = set()
    for colour, point in go_board.list_occupied_points():
        if colour == 'b':
            black_points.add(point)
        else:
            white_points.add(point)
    root.set_setup_stones(black_points, white_points)
    # serialise() (note the British spelling) returns bytes, so the file
    # must be opened in binary mode.
    with open('output.sgf', 'wb') as f:
        f.write(game.serialise())
    print("已成功将棋谱转换为SGF格式并保存为 output.sgf")
else:
    print("棋盘角点未检测到")
'your_go_board_image.jpg' 替换为你实际的围棋棋谱图片路径。该程序通过检测棋盘角点来确定棋盘布局,然后简单地根据每个格子的灰度值判断是否有棋子以及棋子的颜色,最后将这些信息转换为 SGF 格式并保存为文件。 sgfmill 库中找不到 board 模块,可能是因为 sgfmill 库的版本发生了变化,不同版本的模块结构有所不同。sgfmill 版本中,棋盘相关的功能可能在不同的模块里。你可以尝试以下解决办法:sgfmill 文档和模块结构sgfmill 库的正确模块结构和使用方法,可以参考其官方文档。不过,我们也可以通过代码简单探索一下 sgfmill 库中的模块:import sgfmill
print(dir(sgfmill))
sgfmill 库中所有可用的属性和模块,这样你就能知道正确的模块名了。board 模块名称有变化,你得相应地修改代码。在较新的 sgfmill 版本中,棋盘操作可能在 sgfmill.boards 模块里。以下是修改后的代码:import cv2
import cv2
import numpy as np
from sgfmill import sgf
from sgfmill import boards

# Read the board photo (replace with your actual Go record image path).
image = cv2.imread('your_go_board_image.jpg')
if image is None:
    raise SystemExit("Failed to read image: check the path")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Find the grid corners, assuming a 19x19 Go board.
board_size = (19, 19)
ret, corners = cv2.findChessboardCorners(gray, board_size, None)
if ret:
    # Refine corner positions to sub-pixel accuracy.
    corners = cv2.cornerSubPix(
        gray, corners, (11, 11), (-1, -1),
        (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1))
    corners = corners.reshape(-1, 2)
    # Approximate cell size from adjacent corner spacing.
    # Cast to int: floats are not valid slice bounds.
    cell_size_x = int(corners[1, 0] - corners[0, 0])
    cell_size_y = int(corners[board_size[0], 1] - corners[0, 1])
    go_board = boards.Board(board_size[0])
    for y in range(board_size[1]):
        for x in range(board_size[0]):
            cell_x = int(corners[x + y * board_size[0], 0])
            cell_y = int(corners[x + y * board_size[0], 1])
            cell = gray[cell_y:cell_y + cell_size_y,
                        cell_x:cell_x + cell_size_x]
            mean_gray = np.mean(cell)
            # Board.play takes (row, col, colour), not a coordinate tuple.
            if mean_gray < 100:    # dark region: probably a black stone
                go_board.play(y, x, 'b')
            elif mean_gray > 150:  # bright region: probably a white stone
                go_board.play(y, x, 'w')
    # A photo yields a position, not a move sequence, so record the stones
    # as SGF setup properties (AB/AW). Board has no list_moves(); the
    # supported accessor is list_occupied_points().
    game = sgf.Sgf_game(size=board_size[0])
    root = game.get_root()
    black_points = set()
    white_points = set()
    for colour, point in go_board.list_occupied_points():
        if colour == 'b':
            black_points.add(point)
        else:
            white_points.add(point)
    root.set_setup_stones(black_points, white_points)
    # serialise() returns bytes, so open the file in binary mode.
    with open('output.sgf', 'wb') as f:
        f.write(game.serialise())
    print("已成功将棋谱转换为SGF格式并保存为 output.sgf")
else:
    print("未检测到棋盘角点")
sgfmill 版本sgfmill 到一个已知稳定的版本:pip install --upgrade sgfmill
pip install sgfmill==1.1
ImportError 问题。 print(dir(sgfmill)) 的输出结果来看,导入的 sgfmill 模块似乎没有包含我们预期的子模块,这可能意味着 sgfmill 库没有正确安装或者安装的版本不完整。sgfmillsgfmill 库,然后重新安装:pip uninstall sgfmill
pip install sgfmill
pip list
sgfmill 确实已经安装在你当前使用的 Python 环境中。sgfmill 安装路径sgfmill 的安装路径:import sgfmill
print(sgfmill.__file__)
sgfmill 安装目录,并且该目录下包含完整的库文件。sgfmill 有问题,可以尝试安装其他版本。例如,安装 1.1 版本:pip install sgfmill==1.1
sgfmill 并查看其属性和子模块:import sgfmill
print(dir(sgfmill))
sgfmill 的官方文档和社区论坛,以获取更多帮助。 pip list | grep sgf 的结果可知,sgfmill 1.1.1 版本已成功安装。不过在导入时,未显示出预期的子模块,这或许是由于库的初始化文件存在问题,或者 Python 解释器未能正确加载这些子模块。sgfmill 中的子模块,查看是否能正常工作,示例如下:from sgfmill import sgf
from sgfmill import boards
dir(sgfmill) 时未正确显示。sgfmill 1.1.1 版本兼容:import cv2
import cv2
import numpy as np
from sgfmill import sgf
from sgfmill import boards

# Read the board photo (replace with your actual Go record image path).
image = cv2.imread('your_go_board_image.jpg')
if image is None:
    raise SystemExit("Failed to read image: check the path")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Find the grid corners, assuming a 19x19 Go board.
board_size = (19, 19)
ret, corners = cv2.findChessboardCorners(gray, board_size, None)
if ret:
    # Refine corner positions to sub-pixel accuracy.
    corners = cv2.cornerSubPix(
        gray, corners, (11, 11), (-1, -1),
        (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1))
    corners = corners.reshape(-1, 2)
    # Approximate cell size from adjacent corner spacing.
    # Cast to int: floats are not valid slice bounds.
    cell_size_x = int(corners[1, 0] - corners[0, 0])
    cell_size_y = int(corners[board_size[0], 1] - corners[0, 1])
    go_board = boards.Board(board_size[0])
    for y in range(board_size[1]):
        for x in range(board_size[0]):
            cell_x = int(corners[x + y * board_size[0], 0])
            cell_y = int(corners[x + y * board_size[0], 1])
            cell = gray[cell_y:cell_y + cell_size_y,
                        cell_x:cell_x + cell_size_x]
            mean_gray = np.mean(cell)
            # Board.play takes (row, col, colour); passing a tuple as the
            # first argument raises TypeError in sgfmill 1.1.x.
            if mean_gray < 100:    # dark region: probably a black stone
                go_board.play(y, x, 'b')
            elif mean_gray > 150:  # bright region: probably a white stone
                go_board.play(y, x, 'w')
    # A photo yields a position, not a move sequence, so record the stones
    # as SGF setup properties (AB/AW). Board has no list_moves(); use
    # list_occupied_points(), which yields (colour, (row, col)) pairs.
    game = sgf.Sgf_game(size=board_size[0])
    root = game.get_root()
    black_points = set()
    white_points = set()
    for colour, point in go_board.list_occupied_points():
        if colour == 'b':
            black_points.add(point)
        else:
            white_points.add(point)
    root.set_setup_stones(black_points, white_points)
    # serialise() returns bytes, so open the output in binary mode.
    with open('output.sgf', 'wb') as f:
        f.write(game.serialise())
    print("已成功将棋谱转换为SGF格式并保存为 output.sgf")
else:
    print("未检测到棋盘角点")
gogo.py),并把 'your_go_board_image.jpg' 替换成实际的围棋棋谱图片路径,然后运行这个脚本,看看能否正常将棋谱转换为 SGF 格式。 也可以先转换到 LAB 或 HSV 色彩空间,再提取亮度通道作为灰度图。注意:OpenCV 并没有 COLOR_LAB2GRAY、COLOR_HSV2GRAY 这类转换码,应直接取对应的通道。示例代码如下:# 转换到LAB色彩空间后取 L(亮度)通道作为灰度图
image_lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
gray = image_lab[:, :, 0]
# 转换到HSV色彩空间后取 V(明度)通道作为灰度图
image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
gray = image_hsv[:, :, 2]
gray = cv2.GaussianBlur(gray, (5, 5), 0) # (5, 5)是高斯核大小,0是标准差
19x19,如果实际棋盘尺寸不是这个,就检测不到角点。解决办法:确认实际棋盘尺寸,修改 board_size 参数。比如如果是 13x13 棋盘,修改为 board_size = (13, 13) 。cv2.findChessboardCorners 函数默认参数可能不适合当前图像。解决办法:尝试调整函数的参数,比如增加 flags 参数的值。示例:ret, corners = cv2.findChessboardCorners(gray, board_size, None, cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_FAST_CHECK + cv2.CALIB_CB_NORMALIZE_IMAGE)
cv2.CALIB_CB_ADAPTIVE_THRESH 会使用自适应阈值,cv2.CALIB_CB_FAST_CHECK 会快速检查棋盘是否存在,cv2.CALIB_CB_NORMALIZE_IMAGE 会对图像进行归一化处理。cv2.resize 函数,示例:image = cv2.resize(image, None, fx=2, fy=2, interpolation=cv2.INTER_LINEAR) # 放大2倍
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.equalizeHist(gray)
#include <algorithm>
#include <cmath>
#include <fstream>
#include <iostream>
#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;
using namespace std;
// A detected grid-line segment, stored as its two endpoints.
struct Line {
  Point p1;         // segment start point
  Point p2;         // segment end point
  Vec4f lineParams; // For fitLine results (not populated by the code shown)
};
// Convert 0-based board indices to an SGF point string ("aa".."ss").
// BUG FIX: SGF point coordinates use consecutive letters with NO skipped
// letter — "ii" is a valid SGF point. Skipping 'i' is the convention for
// human-readable board labels (A..T), not for SGF, and the original code
// even applied it only to the column, mirroring stones on the right half
// of the board. Returns "" for out-of-range indices.
std::string toSGFCoord(int x_index, int y_index) {
  if (x_index < 0 || x_index >= 19 || y_index < 0 || y_index >= 19) {
    return "";
  }
  char col = static_cast<char>('a' + x_index);
  char row = static_cast<char>('a' + y_index);
  return std::string(1, col) + std::string(1, row);
}
// Locate the four outer board corners.
// Hough segments are split into near-horizontal / near-vertical groups;
// the outermost line of each group is intersected to obtain the corners
// (TL, TR, BR, BL). Returns an empty vector on failure. `processedImage`
// receives a copy of the input with the corners drawn as filled circles.
vector<Point> detectBoardCorners(const Mat &image, Mat &processedImage) {
  Mat gray = image.clone();
  cvtColor(gray, gray, COLOR_BGR2GRAY);
  GaussianBlur(gray, gray, Size(5, 5), 0);
  // BUG FIX: HoughLinesP expects an 8-bit binary (edge) image. Feeding it
  // the blurred grayscale directly treats nearly every pixel as edge
  // evidence and yields unusable lines — the observed "Could not find
  // reliable board corners" failure. Run Canny first.
  Mat edges;
  Canny(gray, edges, 50, 150);
  vector<Vec4i> lines;
  HoughLinesP(edges, lines, 1, CV_PI / 180, 50, 50, 10);
  vector<Vec4i> horizontalLines, verticalLines;
  for (const auto &line : lines) {
    float angle = atan2(line[3] - line[1], line[2] - line[0]);
    if (abs(angle) < CV_PI / 10) { // within 18 degrees of horizontal
      horizontalLines.push_back(line);
    } else if (abs(abs(angle) - CV_PI / 2) < CV_PI / 10) { // near vertical
      verticalLines.push_back(line);
    }
  }
  // Sort by segment midpoints so front()/back() give the outermost lines.
  sort(horizontalLines.begin(), horizontalLines.end(),
       [](const Vec4i &a, const Vec4i &b) {
         return (a[1] + a[3]) / 2 < (b[1] + b[3]) / 2;
       });
  sort(verticalLines.begin(), verticalLines.end(),
       [](const Vec4i &a, const Vec4i &b) {
         return (a[0] + a[2]) / 2 < (b[0] + b[2]) / 2;
       });
  Point2f intersections[4];
  bool foundAll = false;
  if (horizontalLines.size() >= 2 && verticalLines.size() >= 2) {
    Vec4i topH = horizontalLines.front();
    Vec4i bottomH = horizontalLines.back();
    Vec4i leftV = verticalLines.front();
    Vec4i rightV = verticalLines.back();
    // Intersection of the infinite extensions of two segments.
    // (The original computed a segment-containment parameter `u` but then
    // returned the same extended-line point on both branches; the dead
    // distinction has been removed.)
    auto intersect = [](const Vec4i &l1, const Vec4i &l2) -> Point2f {
      float x1 = l1[0], y1 = l1[1], x2 = l1[2], y2 = l1[3];
      float x3 = l2[0], y3 = l2[1], x4 = l2[2], y4 = l2[3];
      float det = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4);
      if (abs(det) < 1e-6)
        return Point2f(-1, -1); // parallel: no usable intersection
      float t = ((x1 - x3) * (y3 - y4) - (y1 - y3) * (x3 - x4)) / det;
      return Point2f(x1 + t * (x2 - x1), y1 + t * (y2 - y1));
    };
    intersections[0] = intersect(topH, leftV);     // Top-left
    intersections[1] = intersect(topH, rightV);    // Top-right
    intersections[2] = intersect(bottomH, rightV); // Bottom-right
    intersections[3] = intersect(bottomH, leftV);  // Bottom-left
    // Accept the corner set only if all four fall inside the image.
    bool allValid = true;
    for (const auto &p : intersections) {
      if (p.x < 0 || p.y < 0 || p.x >= image.cols || p.y >= image.rows) {
        allValid = false;
        break;
      }
    }
    foundAll = allValid;
  }
  vector<Point> selected_corners;
  if (foundAll) {
    selected_corners = {Point(intersections[0]), Point(intersections[1]),
                        Point(intersections[2]), Point(intersections[3])};
  } else {
    cerr << "Error: Could not find reliable board corners using Hough lines."
         << endl;
  }
  processedImage = image.clone();
  for (const auto &p : selected_corners) {
    circle(processedImage, p, 10, Scalar(255, 0, 0), -1);
  }
  return selected_corners;
}
// Warp the quadrilateral given by `corners` (TL, TR, BR, BL) onto a square
// 500x500 fronto-parallel view. Falls back to a plain copy when the corner
// count is wrong.
Mat correctPerspective(const Mat &image, const vector<Point> &corners) {
  if (corners.size() != 4) {
    cerr << "Error: Need 4 corners for perspective correction." << endl;
    return image.clone();
  }
  const float side = 500.0f;
  // Source quad: the detected corners, promoted to floating point.
  vector<Point2f> srcQuad;
  srcQuad.reserve(4);
  for (const Point &corner : corners) {
    srcQuad.push_back(Point2f(corner));
  }
  // Destination quad: the four corners of the output square, same order.
  vector<Point2f> dstQuad = {Point2f(0, 0), Point2f(side - 1, 0),
                             Point2f(side - 1, side - 1),
                             Point2f(0, side - 1)};
  // Method 0 = plain least-squares homography over the 4 correspondences.
  Mat homography = findHomography(srcQuad, dstQuad, 0);
  Mat rectified;
  warpPerspective(image, rectified, homography, Size(side, side));
  return rectified;
}
void detectGridLines(const Mat &image, vector<Line> &horizontalLines,
vector<Line> &verticalLines) {
Mat gray, blurred, edges;
cvtColor(image, gray, COLOR_BGR2GRAY);
GaussianBlur(gray, blurred, Size(5, 5), 0);
Canny(blurred, edges, 30, 100); // Adjusted Canny thresholds
vector<Vec4i> lines;
HoughLinesP(edges, lines, 1, CV_PI / 180, 50, 30, 10); // Adjusted parameters
for (const auto &line : lines) {
float dx = line[2] - line[0];
float dy = line[3] - line[1];
float angle = atan2(dy, dx) * 180 / CV_PI;
if (abs(angle) < 15) { // Increased angle tolerance
horizontalLines.push_back(
{Point(line[0], line[1]), Point(line[2], line[3])});
} else if (abs(abs(angle) - 90) < 15) {
verticalLines.push_back(
{Point(line[0], line[1]), Point(line[2], line[3])});
}
}
}
// Pick (up to) 19 evenly spaced horizontal and vertical lines out of the raw
// Hough candidates, then stretch each kept line across the full image.
// NOTE(review): fewer than 19 lines may be returned when the candidates are
// sparse; detectStonesAtIntersections checks for that.
void identifyGrid(vector<Line> &horizontalLines, vector<Line> &verticalLines,
                  vector<Line> &selectedHorizontal,
                  vector<Line> &selectedVertical, const Mat &image) {
  // Sort lines by midpoint position (top-to-bottom / left-to-right).
  sort(horizontalLines.begin(), horizontalLines.end(),
       [](const Line &a, const Line &b) {
         return (a.p1.y + a.p2.y) / 2 < (b.p1.y + b.p2.y) / 2;
       });
  sort(verticalLines.begin(), verticalLines.end(),
       [](const Line &a, const Line &b) {
         return (a.p1.x + a.p2.x) / 2 < (b.p1.x + b.p2.x) / 2;
       });
  // Select 19 lines with approximate spacing: greedily keep a candidate only
  // when it lies at least 0.7x the expected grid spacing past the last kept
  // line, which merges near-duplicate Hough detections of the same grid line.
  auto selectLines = [](vector<Line> &lines, bool horizontal, float imageSize) {
    vector<Line> selected;
    if (lines.empty())
      return selected;
    float avgSpacing = imageSize / 18.0f; // Average spacing for 19 lines
    selected.push_back(lines.front());
    for (size_t i = 1; i < lines.size(); ++i) {
      float pos = horizontal ? (lines[i].p1.y + lines[i].p2.y) / 2.0f
                             : (lines[i].p1.x + lines[i].p2.x) / 2.0f;
      float lastPos =
          horizontal ? (selected.back().p1.y + selected.back().p2.y) / 2.0f
                     : (selected.back().p1.x + selected.back().p2.x) / 2.0f;
      if (abs(pos - lastPos) >= avgSpacing * 0.7 && selected.size() < 19) {
        selected.push_back(lines[i]);
      }
      if (selected.size() >= 19)
        break; // Ensure we don't go over 19
    }
    // Extend lines to image boundaries so later intersection math can use
    // segment midpoints regardless of the original segment extent.
    if (horizontal) {
      for (auto &line : selected) {
        line.p1.x = 0;
        line.p2.x = imageSize - 1;
      }
    } else {
      for (auto &line : selected) {
        line.p1.y = 0;
        line.p2.y = imageSize - 1;
      }
    }
    return selected;
  };
  selectedHorizontal =
      selectLines(horizontalLines, true, static_cast<float>(image.rows));
  selectedVertical =
      selectLines(verticalLines, false, static_cast<float>(image.cols));
}
// Classify each of the 19x19 grid intersections as black ('B'), white ('W'),
// or empty ('.') by sampling HSV value/saturation at the intersection pixel.
// Returns an all-empty board when fewer than 19 lines are available.
vector<vector<char>>
detectStonesAtIntersections(const Mat &image,
                            const vector<Line> &horizontalLines,
                            const vector<Line> &verticalLines) {
  const int boardSize = 19;
  vector<vector<char>> boardState(boardSize, vector<char>(boardSize, '.'));
  if (static_cast<int>(horizontalLines.size()) < boardSize ||
      static_cast<int>(verticalLines.size()) < boardSize) {
    cerr << "Not enough grid lines detected." << endl;
    return boardState;
  }
  // PERFORMANCE FIX: the original converted the entire image to HSV inside
  // the double loop — 361 full-image conversions. Convert once up front.
  Mat hsv;
  cvtColor(image, hsv, COLOR_BGR2HSV);
  for (int i = 0; i < boardSize; ++i) {
    for (int j = 0; j < boardSize; ++j) {
      Point2f intersection(
          (verticalLines[j].p1.x + verticalLines[j].p2.x) / 2.0f,
          (horizontalLines[i].p1.y + horizontalLines[i].p2.y) / 2.0f);
      if (intersection.x < 0 || intersection.x >= image.cols ||
          intersection.y < 0 || intersection.y >= image.rows)
        continue;
      // at<> takes integer (row, col); cast explicitly instead of relying on
      // implicit float->int conversion.
      Vec3b centerColor = hsv.at<Vec3b>(static_cast<int>(intersection.y),
                                        static_cast<int>(intersection.x));
      int v = centerColor[2];
      int s = centerColor[1];
      if (v < 80 && s > 30) { // dark pixel -> black stone
        boardState[i][j] = 'B';
      } else if (v > 180 && s < 80) { // bright, low saturation -> white stone
        boardState[i][j] = 'W';
      }
    }
  }
  return boardState;
}
string boardStateToSGF(const vector<vector<char>> &boardState) {
string sgfContent = "(;FF[4]GM[1]SZ[19]";
string addBlack, addWhite;
for (int y = 0; y < 19; ++y) {
for (int x = 0; x < 19; ++x) {
string coord = toSGFCoord(x, 18 - y); // Invert y-coordinate here
if (!coord.empty()) {
if (boardState[y][x] == 'B') {
addBlack += "[" + coord + "]";
} else if (boardState[y][x] == 'W') {
addWhite += "[" + coord + "]";
}
}
}
}
if (!addBlack.empty())
sgfContent += "AB" + addBlack;
if (!addWhite.empty())
sgfContent += "AW" + addWhite;
sgfContent += ")\n";
return sgfContent;
}
// Pipeline entry point: load image -> detect board corners -> perspective
// correction -> grid-line detection -> stone classification -> SGF to stdout.
int main() {
  string imageFile = "go_board.jpg";
  Mat originalImage = imread(imageFile, IMREAD_COLOR);
  if (originalImage.empty()) {
    cerr << "Error: Could not open image." << endl;
    return -1;
  }
  Mat processedImage;
  vector<Point> corners = detectBoardCorners(originalImage, processedImage);
  Mat warpedImage;
  if (corners.size() == 4) {
    // Corners found: rectify to a fronto-parallel 500x500 view.
    warpedImage = correctPerspective(originalImage, corners);
  } else {
    // Corner detection failed: continue on the raw image, which may still
    // work for a near head-on photo.
    warpedImage = originalImage.clone();
  }
  vector<Line> horizontalLines, verticalLines;
  detectGridLines(warpedImage, horizontalLines, verticalLines);
  vector<Line> selectedHorizontal, selectedVertical;
  identifyGrid(horizontalLines, verticalLines, selectedHorizontal,
               selectedVertical, warpedImage);
  vector<vector<char>> boardState =
      detectStonesAtIntersections(warpedImage, selectedHorizontal,
                                  selectedVertical);
  string sgfContent = boardStateToSGF(boardState);
  cout << sgfContent << endl;
  return 0;
}
从 Error: Could not find reliable board corners using Hough lines. 可知,程序在检测棋盘角点时碰到了问题,未能找到可靠的角点。这会影响后续的透视变换、网格线检测以及棋子识别,进而造成识别出的棋子数量比实际少。可能的原因与解决办法如下:其一,detectBoardCorners 函数里 HoughLinesP 的参数可能设置得过于严格,导致检测不到足够的直线来确定角点,可以试着降低 threshold 参数;更重要的是,HoughLinesP 的输入应当是二值化的边缘图(例如 Canny 的输出),直接在灰度图上做霍夫变换几乎必然失败。
// 修改前
HoughLinesP(gray, lines, 1, CV_PI / 180, 100, 50, 10);
// 修改后(先做边缘检测,再降低阈值)
HoughLinesP(edges, lines, 1, CV_PI / 180, 50, 30, 10);
Mat gray = image.clone();
cvtColor(gray, gray, COLOR_BGR2GRAY);
// 使用自适应阈值处理
adaptiveThreshold(gray, gray, 255, ADAPTIVE_THRESH_GAUSSIAN_C, THRESH_BINARY_INV, 11, 2);
findChessboardCorners 或者 goodFeaturesToTrack。detectStonesAtIntersections 函数中,黑白棋子的识别阈值可能设置得不够准确,导致部分棋子无法被正确识别。你可以尝试调整阈值。// 修改前
if (v < 50 && s > 50) { // Black stone
boardState[i][j] = 'B';
} else if (v > 200 && s < 50) { // White stone
boardState[i][j] = 'W';
}
// 修改后
if (v < 80 && s > 30) { // Black stone
boardState[i][j] = 'B';
} else if (v > 180 && s < 80) { // White stone
boardState[i][j] = 'W';
}
#include <algorithm>
#include <cmath>
#include <fstream>
#include <iostream>
#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;
using namespace std;
// A detected grid-line segment, stored as its two endpoints.
struct Line {
  Point p1;         // segment start point
  Point p2;         // segment end point
  Vec4f lineParams; // For fitLine results (not populated by the code shown)
};
// Convert 0-based board indices to an SGF point string ("aa".."ss").
// BUG FIX: SGF point coordinates use consecutive letters with NO skipped
// letter — "ii" is a valid SGF point. Skipping 'i' belongs to human-readable
// board labels (A..T), and the original applied it only to the column,
// mirroring stones on the right half of the board. Returns "" when out of
// range.
std::string toSGFCoord(int x_index, int y_index) {
  if (x_index < 0 || x_index >= 19 || y_index < 0 || y_index >= 19) {
    return "";
  }
  char col = static_cast<char>('a' + x_index);
  char row = static_cast<char>('a' + y_index);
  return std::string(1, col) + std::string(1, row);
}
// Locate the four outer board corners: threshold the image, collect Hough
// segments, split them into near-horizontal / near-vertical groups, and
// intersect the outermost line of each group (TL, TR, BR, BL order).
// Returns an empty vector on failure; `processedImage` receives a copy of
// the input with the detected corners drawn as filled circles.
vector<Point> detectBoardCorners(const Mat &image, Mat &processedImage) {
  Mat gray = image.clone();
  cvtColor(gray, gray, COLOR_BGR2GRAY);
  // Adaptive threshold: produces an inverted binary image, a suitable
  // input for HoughLinesP (unlike a raw grayscale image).
  adaptiveThreshold(gray, gray, 255, ADAPTIVE_THRESH_GAUSSIAN_C, THRESH_BINARY_INV, 11, 2);
  vector<Vec4i> lines;
  // Lowered Hough threshold so weaker board edges are still detected.
  HoughLinesP(gray, lines, 1, CV_PI / 180, 50, 30, 10);
  vector<Vec4i> horizontalLines, verticalLines;
  for (const auto &line : lines) {
    // Classify by segment angle: within ~18 degrees of horizontal/vertical.
    float angle = atan2(line[3] - line[1], line[2] - line[0]);
    if (abs(angle) < CV_PI / 10) {
      horizontalLines.push_back(line);
    } else if (abs(abs(angle) - CV_PI / 2) < CV_PI / 10) {
      verticalLines.push_back(line);
    }
  }
  // Sort lines by their y or x coordinates so front()/back() give the
  // outermost lines of each group.
  sort(horizontalLines.begin(), horizontalLines.end(),
       [](const Vec4i &a, const Vec4i &b) {
         return (a[1] + a[3]) / 2 < (b[1] + b[3]) / 2;
       });
  sort(verticalLines.begin(), verticalLines.end(),
       [](const Vec4i &a, const Vec4i &b) {
         return (a[0] + a[2]) / 2 < (b[0] + b[2]) / 2;
       });
  Point2f intersections[4];
  bool foundAll = false;
  if (horizontalLines.size() >= 2 && verticalLines.size() >= 2) {
    // Take the topmost and bottommost horizontal lines, and leftmost and
    // rightmost vertical
    Vec4i topH = horizontalLines.front();
    Vec4i bottomH = horizontalLines.back();
    Vec4i leftV = verticalLines.front();
    Vec4i rightV = verticalLines.back();
    // Line-line intersection; both branches below return the intersection of
    // the extended lines (the t/u segment-containment test has no effect).
    auto intersect = [](const Vec4i &l1, const Vec4i &l2) -> Point2f {
      float x1 = l1[0], y1 = l1[1], x2 = l1[2], y2 = l1[3];
      float x3 = l2[0], y3 = l2[1], x4 = l2[2], y4 = l2[3];
      float det = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4);
      if (abs(det) < 1e-6)
        return Point2f(-1, -1); // Parallel
      float t = ((x1 - x3) * (y3 - y4) - (y1 - y3) * (x3 - x4)) / det;
      float u = -((x1 - x2) * (y1 - y3) - (y1 - y2) * (x1 - x3)) / det;
      if (t >= 0 && t <= 1 && u >= 0 &&
          u <= 1) { // Intersection within line segments
        return Point2f(x1 + t * (x2 - x1), y1 + t * (y2 - y1));
      } else {
        return Point2f(x1 + t * (x2 - x1),
                       y1 + t * (y2 - y1)); // Lines extended
      }
    };
    intersections[0] = intersect(topH, leftV);     // Top-left
    intersections[1] = intersect(topH, rightV);    // Top-right
    intersections[2] = intersect(bottomH, rightV); // Bottom-right
    intersections[3] = intersect(bottomH, leftV);  // Bottom-left
    // Accept the corner set only if all four points lie inside the image.
    bool allValid = true;
    for (const auto &p : intersections) {
      if (p.x < 0 || p.y < 0 || p.x >= image.cols || p.y >= image.rows) {
        allValid = false;
        break;
      }
    }
    if (allValid) {
      foundAll = true;
    }
  }
  vector<Point> selected_corners;
  if (foundAll) {
    selected_corners = {Point(intersections[0]), Point(intersections[1]),
                        Point(intersections[2]), Point(intersections[3])};
  } else {
    cerr << "Error: Could not find reliable board corners using Hough lines." << endl;
  }
  processedImage = image.clone();
  for (const auto &p : selected_corners) {
    circle(processedImage, p, 10, Scalar(255, 0, 0), -1);
  }
  return selected_corners;
}
// Warp the quadrilateral given by `corners` (TL, TR, BR, BL) onto a square
// 500x500 fronto-parallel view. Falls back to a plain copy when the corner
// count is wrong.
Mat correctPerspective(const Mat &image, const vector<Point> &corners) {
  if (corners.size() != 4) {
    cerr << "Error: Need 4 corners for perspective correction." << endl;
    return image.clone();
  }
  const float side = 500.0f;
  // Source quad: the detected corners, promoted to floating point.
  vector<Point2f> srcQuad;
  srcQuad.reserve(4);
  for (const Point &corner : corners) {
    srcQuad.push_back(Point2f(corner));
  }
  // Destination quad: the four corners of the output square, same order.
  vector<Point2f> dstQuad = {Point2f(0, 0), Point2f(side - 1, 0),
                             Point2f(side - 1, side - 1),
                             Point2f(0, side - 1)};
  // Method 0 = plain least-squares homography over the 4 correspondences.
  Mat homography = findHomography(srcQuad, dstQuad, 0);
  Mat rectified;
  warpPerspective(image, rectified, homography, Size(side, side));
  return rectified;
}
void detectGridLines(const Mat &image, vector<Line> &horizontalLines,
vector<Line> &verticalLines) {
Mat gray, blurred, edges;
cvtColor(image, gray, COLOR_BGR2GRAY);
GaussianBlur(gray, blurred, Size(5, 5), 0);
Canny(blurred, edges, 50, 150);
vector<Vec4i> lines;
HoughLinesP(edges, lines, 1, CV_PI / 180, 100, 30, 10); // Adjusted parameters
for (const auto &line : lines) {
float dx = line[2] - line[0];
float dy = line[3] - line[1];
float angle = atan2(dy, dx) * 180 / CV_PI;
if (abs(angle) < 15) { // Increased angle tolerance
horizontalLines.push_back(
{Point(line[0], line[1]), Point(line[2], line[3])});
} else if (abs(abs(angle) - 90) < 15) {
verticalLines.push_back(
{Point(line[0], line[1]), Point(line[2], line[3])});
}
}
}
// Pick (up to) 19 evenly spaced horizontal and vertical lines out of the raw
// Hough candidates, then stretch each kept line across the full image.
// NOTE(review): fewer than 19 lines may be returned when the candidates are
// sparse; detectStonesAtIntersections checks for that.
void identifyGrid(vector<Line> &horizontalLines, vector<Line> &verticalLines,
                  vector<Line> &selectedHorizontal,
                  vector<Line> &selectedVertical, const Mat &image) {
  // Sort lines by midpoint position (top-to-bottom / left-to-right).
  sort(horizontalLines.begin(), horizontalLines.end(),
       [](const Line &a, const Line &b) {
         return (a.p1.y + a.p2.y) / 2 < (b.p1.y + b.p2.y) / 2;
       });
  sort(verticalLines.begin(), verticalLines.end(),
       [](const Line &a, const Line &b) {
         return (a.p1.x + a.p2.x) / 2 < (b.p1.x + b.p2.x) / 2;
       });
  // Select 19 lines with approximate spacing: greedily keep a candidate only
  // when it lies at least 0.7x the expected grid spacing past the last kept
  // line, which merges near-duplicate Hough detections of the same grid line.
  auto selectLines = [](vector<Line> &lines, bool horizontal, float imageSize) {
    vector<Line> selected;
    if (lines.empty())
      return selected;
    float avgSpacing = imageSize / 18.0f; // Average spacing for 19 lines
    selected.push_back(lines.front());
    for (size_t i = 1; i < lines.size(); ++i) {
      float pos = horizontal ? (lines[i].p1.y + lines[i].p2.y) / 2.0f
                             : (lines[i].p1.x + lines[i].p2.x) / 2.0f;
      float lastPos =
          horizontal ? (selected.back().p1.y + selected.back().p2.y) / 2.0f
                     : (selected.back().p1.x + selected.back().p2.x) / 2.0f;
      if (abs(pos - lastPos) >= avgSpacing * 0.7 && selected.size() < 19) {
        selected.push_back(lines[i]);
      }
      if (selected.size() >= 19)
        break; // Ensure we don't go over 19
    }
    // Extend lines to image boundaries so later intersection math can use
    // segment midpoints regardless of the original segment extent.
    if (horizontal) {
      for (auto &line : selected) {
        line.p1.x = 0;
        line.p2.x = imageSize - 1;
      }
    } else {
      for (auto &line : selected) {
        line.p1.y = 0;
        line.p2.y = imageSize - 1;
      }
    }
    return selected;
  };
  selectedHorizontal =
      selectLines(horizontalLines, true, static_cast<float>(image.rows));
  selectedVertical =
      selectLines(verticalLines, false, static_cast<float>(image.cols));
}
// Classify each of the 19x19 grid intersections as black ('B'), white ('W'),
// or empty ('.') by sampling HSV value/saturation at the intersection pixel.
// Returns an all-empty board when fewer than 19 lines are available.
vector<vector<char>>
detectStonesAtIntersections(const Mat &image,
                            const vector<Line> &horizontalLines,
                            const vector<Line> &verticalLines) {
  const int boardSize = 19;
  vector<vector<char>> boardState(boardSize, vector<char>(boardSize, '.'));
  if (static_cast<int>(horizontalLines.size()) < boardSize ||
      static_cast<int>(verticalLines.size()) < boardSize) {
    cerr << "Not enough grid lines detected." << endl;
    return boardState;
  }
  // PERFORMANCE FIX: the original converted the entire image to HSV inside
  // the double loop — 361 full-image conversions. Convert once up front.
  Mat hsv;
  cvtColor(image, hsv, COLOR_BGR2HSV);
  for (int i = 0; i < boardSize; ++i) {
    for (int j = 0; j < boardSize; ++j) {
      Point2f intersection(
          (verticalLines[j].p1.x + verticalLines[j].p2.x) / 2.0f,
          (horizontalLines[i].p1.y + horizontalLines[i].p2.y) / 2.0f);
      if (intersection.x < 0 || intersection.x >= image.cols ||
          intersection.y < 0 || intersection.y >= image.rows)
        continue;
      // at<> takes integer (row, col); cast explicitly instead of relying on
      // implicit float->int conversion.
      Vec3b centerColor = hsv.at<Vec3b>(static_cast<int>(intersection.y),
                                        static_cast<int>(intersection.x));
      int v = centerColor[2];
      int s = centerColor[1];
      // Relaxed thresholds (see the transcript discussion above the code).
      if (v < 80 && s > 30) { // dark pixel -> black stone
        boardState[i][j] = 'B';
      } else if (v > 180 && s < 80) { // bright, low saturation -> white stone
        boardState[i][j] = 'W';
      }
    }
  }
  return boardState;
}
string boardStateToSGF(const vector<vector<char>> &boardState) {
string sgfContent = "(;FF[4]GM[1]SZ[19]";
string addBlack, addWhite;
for (int y = 0; y < 19; ++y) {
for (int x = 0; x < 19; ++x) {
string coord = toSGFCoord(x, 18 - y); // Invert y-coordinate here
if (!coord.empty()) {
if (boardState[y][x] == 'B') {
addBlack += "[" + coord + "]";
} else if (boardState[y][x] == 'W') {
addWhite += "[" + coord + "]";
}
}
}
}
if (!addBlack.empty())
sgfContent += "AB" + addBlack;
if (!addWhite.empty())
sgfContent += "AW" + addWhite;
sgfContent += ")\n";
return sgfContent;
}
int main() {
string imageFile = "go_board.jpg";
Mat originalImage = imread(imageFile, IMREAD_COLOR);
if (originalImage.empty()) {
cerr << "Error: Could not open image." << endl;
return -1;
}
Mat processedImage;
vector<Point> corners = detectBoardCorners(originalImage, processedImage);
Mat warpedImage;
if (corners.size() == 4) {
warpedImage = correctPerspective(originalImage, corners);
imshow("Warped", warpedImage);
} else {
warpedImage = originalImage.clone();
cerr << "Perspective correction skipped." << endl;
}
vector<
main 函数后续的逻辑。以下是补全后的完整代码,包含了检测网格线、识别网格、检测棋子、生成 SGF 文件以及可视化等功能:#include <algorithm>
#include <cmath>
#include <fstream>
#include <iostream>
#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;
using namespace std;
// A detected grid-line segment, stored as its two endpoints.
struct Line {
  Point p1;         // segment start point
  Point p2;         // segment end point
  Vec4f lineParams; // For fitLine results (not populated by the code shown)
};
// Convert 0-based board indices to an SGF point string ("aa".."ss").
// BUG FIX: SGF point coordinates use consecutive letters with NO skipped
// letter — "ii" is a valid SGF point. Skipping 'i' belongs to human-readable
// board labels (A..T), and the original applied it only to the column,
// mirroring stones on the right half of the board. Returns "" when out of
// range.
std::string toSGFCoord(int x_index, int y_index) {
  if (x_index < 0 || x_index >= 19 || y_index < 0 || y_index >= 19) {
    return "";
  }
  char col = static_cast<char>('a' + x_index);
  char row = static_cast<char>('a' + y_index);
  return std::string(1, col) + std::string(1, row);
}
// Locate the four outer board corners.
// Hough segments are split into near-horizontal / near-vertical groups;
// the outermost line of each group is intersected to obtain the corners
// (TL, TR, BR, BL). Returns an empty vector on failure. `processedImage`
// receives a copy of the input with the corners drawn as filled circles.
vector<Point> detectBoardCorners(const Mat &image, Mat &processedImage) {
  Mat gray = image.clone();
  cvtColor(gray, gray, COLOR_BGR2GRAY);
  GaussianBlur(gray, gray, Size(5, 5), 0);
  // BUG FIX: HoughLinesP expects an 8-bit binary (edge) image. Feeding it
  // the blurred grayscale directly treats nearly every pixel as edge
  // evidence and produces unreliable lines — the observed "Could not find
  // reliable board corners" failure. Run Canny first.
  Mat edges;
  Canny(gray, edges, 50, 150);
  vector<Vec4i> lines;
  HoughLinesP(edges, lines, 1, CV_PI / 180, 100, 50, 10);
  vector<Vec4i> horizontalLines, verticalLines;
  for (const auto &line : lines) {
    float angle = atan2(line[3] - line[1], line[2] - line[0]);
    if (abs(angle) < CV_PI / 10) { // within 18 degrees of horizontal
      horizontalLines.push_back(line);
    } else if (abs(abs(angle) - CV_PI / 2) < CV_PI / 10) { // near vertical
      verticalLines.push_back(line);
    }
  }
  // Sort by segment midpoints so front()/back() give the outermost lines.
  sort(horizontalLines.begin(), horizontalLines.end(),
       [](const Vec4i &a, const Vec4i &b) {
         return (a[1] + a[3]) / 2 < (b[1] + b[3]) / 2;
       });
  sort(verticalLines.begin(), verticalLines.end(),
       [](const Vec4i &a, const Vec4i &b) {
         return (a[0] + a[2]) / 2 < (b[0] + b[2]) / 2;
       });
  Point2f intersections[4];
  bool foundAll = false;
  if (horizontalLines.size() >= 2 && verticalLines.size() >= 2) {
    Vec4i topH = horizontalLines.front();
    Vec4i bottomH = horizontalLines.back();
    Vec4i leftV = verticalLines.front();
    Vec4i rightV = verticalLines.back();
    // Intersection of the infinite extensions of two segments.
    // (The original computed a segment-containment parameter `u` but then
    // returned the same extended-line point on both branches; the dead
    // distinction has been removed.)
    auto intersect = [](const Vec4i &l1, const Vec4i &l2) -> Point2f {
      float x1 = l1[0], y1 = l1[1], x2 = l1[2], y2 = l1[3];
      float x3 = l2[0], y3 = l2[1], x4 = l2[2], y4 = l2[3];
      float det = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4);
      if (abs(det) < 1e-6)
        return Point2f(-1, -1); // parallel: no usable intersection
      float t = ((x1 - x3) * (y3 - y4) - (y1 - y3) * (x3 - x4)) / det;
      return Point2f(x1 + t * (x2 - x1), y1 + t * (y2 - y1));
    };
    intersections[0] = intersect(topH, leftV);     // Top-left
    intersections[1] = intersect(topH, rightV);    // Top-right
    intersections[2] = intersect(bottomH, rightV); // Bottom-right
    intersections[3] = intersect(bottomH, leftV);  // Bottom-left
    // Accept the corner set only if all four fall inside the image.
    bool allValid = true;
    for (const auto &p : intersections) {
      if (p.x < 0 || p.y < 0 || p.x >= image.cols || p.y >= image.rows) {
        allValid = false;
        break;
      }
    }
    foundAll = allValid;
  }
  vector<Point> selected_corners;
  if (foundAll) {
    selected_corners = {Point(intersections[0]), Point(intersections[1]),
                        Point(intersections[2]), Point(intersections[3])};
  } else {
    cerr << "Error: Could not find reliable board corners using Hough lines."
         << endl;
  }
  processedImage = image.clone();
  for (const auto &p : selected_corners) {
    circle(processedImage, p, 10, Scalar(255, 0, 0), -1);
  }
  return selected_corners;
}
// Warp the quadrilateral given by `corners` (TL, TR, BR, BL) onto a square
// 500x500 fronto-parallel view. Falls back to a plain copy when the corner
// count is wrong.
Mat correctPerspective(const Mat &image, const vector<Point> &corners) {
  if (corners.size() != 4) {
    cerr << "Error: Need 4 corners for perspective correction." << endl;
    return image.clone();
  }
  const float side = 500.0f;
  // Source quad: the detected corners, promoted to floating point.
  vector<Point2f> srcQuad;
  srcQuad.reserve(4);
  for (const Point &corner : corners) {
    srcQuad.push_back(Point2f(corner));
  }
  // Destination quad: the four corners of the output square, same order.
  vector<Point2f> dstQuad = {Point2f(0, 0), Point2f(side - 1, 0),
                             Point2f(side - 1, side - 1),
                             Point2f(0, side - 1)};
  // Method 0 = plain least-squares homography over the 4 correspondences.
  Mat homography = findHomography(srcQuad, dstQuad, 0);
  Mat rectified;
  warpPerspective(image, rectified, homography, Size(side, side));
  return rectified;
}
void detectGridLines(const Mat &image, vector<Line> &horizontalLines,
vector<Line> &verticalLines) {
Mat gray, blurred, edges;
cvtColor(image, gray, COLOR_BGR2GRAY);
GaussianBlur(gray, blurred, Size(5, 5), 0);
Canny(blurred, edges, 50, 150);
vector<Vec4i> lines;
HoughLinesP(edges, lines, 1, CV_PI / 180, 100, 30, 10); // Adjusted parameters
for (const auto &line : lines) {
float dx = line[2] - line[0];
float dy = line[3] - line[1];
float angle = atan2(dy, dx) * 180 / CV_PI;
if (abs(angle) < 15) { // Increased angle tolerance
horizontalLines.push_back(
{Point(line[0], line[1]), Point(line[2], line[3])});
} else if (abs(abs(angle) - 90) < 15) {
verticalLines.push_back(
{Point(line[0], line[1]), Point(line[2], line[3])});
}
}
}
// Reduce the raw Hough line sets to (at most) 19 horizontal and 19
// vertical grid lines with roughly even spacing, then stretch each
// selected line to the image borders.
// NOTE(review): when fewer than 19 sufficiently-spaced lines exist the
// outputs contain fewer than 19 entries -- callers must check the size.
void identifyGrid(vector<Line> &horizontalLines, vector<Line> &verticalLines,
                  vector<Line> &selectedHorizontal,
                  vector<Line> &selectedVertical, const Mat &image) {
  // Sort lines
  // (by midpoint y for horizontal lines, midpoint x for vertical ones;
  // integer division is acceptable for ordering purposes)
  sort(horizontalLines.begin(), horizontalLines.end(),
       [](const Line &a, const Line &b) {
         return (a.p1.y + a.p2.y) / 2 < (b.p1.y + b.p2.y) / 2;
       });
  sort(verticalLines.begin(), verticalLines.end(),
       [](const Line &a, const Line &b) {
         return (a.p1.x + a.p2.x) / 2 < (b.p1.x + b.p2.x) / 2;
       });
  // Select 19 lines with approximate spacing
  auto selectLines = [](vector<Line> &lines, bool horizontal, float imageSize) {
    vector<Line> selected;
    if (lines.empty())
      return selected;
    float avgSpacing = imageSize / 18.0f; // Average spacing for 19 lines
    selected.push_back(lines.front());
    for (size_t i = 1; i < lines.size(); ++i) {
      float pos = horizontal ? (lines[i].p1.y + lines[i].p2.y) / 2.0f
                             : (lines[i].p1.x + lines[i].p2.x) / 2.0f;
      float lastPos =
          horizontal ? (selected.back().p1.y + selected.back().p2.y) / 2.0f
                     : (selected.back().p1.x + selected.back().p2.x) / 2.0f;
      // Accept the next line only if it is at least 70% of the expected
      // grid pitch away from the last accepted one; this collapses the
      // multiple Hough responses along a single physical board line.
      if (abs(pos - lastPos) >= avgSpacing * 0.7 && selected.size() < 19) {
        selected.push_back(lines[i]);
      }
      if (selected.size() >= 19)
        break; // Ensure we don't go over 19
    }
    // Extend lines to image boundaries
    if (horizontal) {
      for (auto &line : selected) {
        line.p1.x = 0;
        line.p2.x = imageSize - 1;
      }
    } else {
      for (auto &line : selected) {
        line.p1.y = 0;
        line.p2.y = imageSize - 1;
      }
    }
    return selected;
  };
  selectedHorizontal =
      selectLines(horizontalLines, true, static_cast<float>(image.rows));
  selectedVertical =
      selectLines(verticalLines, false, static_cast<float>(image.cols));
}
// Classify each of the 19x19 grid intersections as black ('B'),
// white ('W') or empty ('.') by sampling HSV value/saturation at the
// crossing of the i-th horizontal and j-th vertical grid lines.
// Returns an all-empty board when fewer than 19 lines were supplied.
vector<vector<char>>
detectStonesAtIntersections(const Mat &image,
                            const vector<Line> &horizontalLines,
                            const vector<Line> &verticalLines) {
  const int boardSize = 19;
  vector<vector<char>> boardState(boardSize, vector<char>(boardSize, '.'));
  if (horizontalLines.size() < static_cast<size_t>(boardSize) ||
      verticalLines.size() < static_cast<size_t>(boardSize)) {
    cerr << "Not enough grid lines detected." << endl;
    return boardState;
  }
  // Convert to HSV once, outside the loops: the original converted the
  // entire image at every one of the 361 intersections.
  Mat hsv;
  cvtColor(image, hsv, COLOR_BGR2HSV);
  for (int i = 0; i < boardSize; ++i) {
    for (int j = 0; j < boardSize; ++j) {
      Point2f intersection(
          (verticalLines[j].p1.x + verticalLines[j].p2.x) / 2.0f,
          (horizontalLines[i].p1.y + horizontalLines[i].p2.y) / 2.0f);
      // Round to the nearest pixel (instead of truncating) and bounds-check
      // the rounded indices before sampling.
      const int px = cvRound(intersection.x);
      const int py = cvRound(intersection.y);
      if (px < 0 || px >= image.cols || py < 0 || py >= image.rows)
        continue;
      Vec3b centerColor = hsv.at<Vec3b>(py, px);
      int v = centerColor[2]; // brightness (value)
      int s = centerColor[1]; // saturation
      // NOTE(review): thresholds are heuristic; black stones are assumed
      // dark (v < 50) with s > 50 and white stones bright/unsaturated --
      // confirm against real board photos.
      if (v < 50 && s > 50) { // Black stone
        boardState[i][j] = 'B';
      } else if (v > 200 && s < 50) { // White stone
        boardState[i][j] = 'W';
      }
    }
  }
  return boardState;
}
// Serialize a 19x19 board state to a minimal SGF (FF[4]) document.
// Stones are emitted as setup properties (AB/AW), not as a move sequence.
// NOTE(review): the row index is flipped (18 - y) before conversion; SGF
// row 'a' is the TOP of the board, so this flip is only correct if
// boardState row 0 corresponds to the BOTTOM of the image -- confirm
// against the detection orientation.
string boardStateToSGF(const vector<vector<char>> &boardState) {
  string sgfContent = "(;FF[4]GM[1]SZ[19]";
  string addBlack, addWhite;
  for (int y = 0; y < 19; ++y) {
    for (int x = 0; x < 19; ++x) {
      string coord = toSGFCoord(x, 18 - y); // Invert y-coordinate here
      if (!coord.empty()) {
        if (boardState[y][x] == 'B') {
          addBlack += "[" + coord + "]";
        } else if (boardState[y][x] == 'W') {
          addWhite += "[" + coord + "]";
        }
      }
    }
  }
  // Only emit the setup properties that actually have stones.
  if (!addBlack.empty())
    sgfContent += "AB" + addBlack;
  if (!addWhite.empty())
    sgfContent += "AW" + addWhite;
  sgfContent += ")\n";
  return sgfContent;
}
// Pipeline driver: load the board photo, find the board corners, warp to
// a square view, recover the 19x19 grid, classify stones, write SGF,
// and show debug windows. Returns -1 when the image cannot be read.
int main() {
  string imageFile = "go_board.jpg";
  Mat originalImage = imread(imageFile, IMREAD_COLOR);
  if (originalImage.empty()) {
    cerr << "Error: Could not open image." << endl;
    return -1;
  }
  Mat processedImage;
  vector<Point> corners = detectBoardCorners(originalImage, processedImage);
  Mat warpedImage;
  if (corners.size() == 4) {
    warpedImage = correctPerspective(originalImage, corners);
    imshow("Warped", warpedImage);
  } else {
    // Fall back to the unwarped image so the rest of the pipeline still runs.
    warpedImage = originalImage.clone();
    cerr << "Perspective correction skipped." << endl;
  }
  vector<Line> horizontalLines, verticalLines;
  detectGridLines(warpedImage, horizontalLines, verticalLines);
  vector<Line> selectedHorz, selectedVert;
  identifyGrid(horizontalLines, verticalLines, selectedHorz, selectedVert, warpedImage);
  vector<vector<char>> board = detectStonesAtIntersections(warpedImage, selectedHorz, selectedVert);
  string sgf = boardStateToSGF(board);
  // ofstream closes on destruction (RAII); no explicit close needed.
  ofstream sgfFile("current_state.sgf");
  if (sgfFile.is_open()) {
    sgfFile << sgf;
    cout << "SGF saved." << endl;
  } else {
    cerr << "Failed to save SGF." << endl;
  }
  // Visualization
  // (cv::line is qualified to avoid clashing with the local `Line` struct)
  Mat display = warpedImage.clone();
  for (const auto &line : selectedHorz)
    cv::line(display, line.p1, line.p2, Scalar(0, 255, 0), 1);
  for (const auto &line : selectedVert)
    cv::line(display, line.p1, line.p2, Scalar(0, 0, 255), 1);
  imshow("Grid", display);
  waitKey(0);
  return 0;
}
go_board.jpg 中读取图像。detectBoardCorners 函数检测棋盘的四个角点。correctPerspective 函数进行透视变换。detectGridLines 函数检测图像中的水平和垂直网格线。identifyGrid 函数从检测到的网格线中选择 19 条水平和 19 条垂直网格线。detectStonesAtIntersections 函数检测网格线交点处的棋子。boardStateToSGF 函数将检测到的棋子状态转换为 SGF 格式,并保存到 current_state.sgf 文件中。go_board.jpg 文件存在于当前工作目录中。Error: Could not find reliable board corners using Hough lines. 可以知道,代码在利用霍夫变换检测棋盘角点时未能找到可靠的角点,进而跳过了透视校正环节。下面为你分析可能的原因以及对应的解决办法:HoughLinesP 函数的参数设置可能不够恰当,无法准确检测到棋盘的直线。Mat enhanceImage(const Mat &image) {
Mat enhanced;
// 直方图均衡化增强对比度
if (image.channels() == 1) {
equalizeHist(image, enhanced);
} else {
Mat ycrcb;
cvtColor(image, ycrcb, COLOR_BGR2YCrCb);
vector<Mat> channels;
split(ycrcb, channels);
equalizeHist(channels[0], channels[0]);
merge(channels, ycrcb);
cvtColor(ycrcb, enhanced, COLOR_YCrCb2BGR);
}
return enhanced;
}
detectBoardCorners 函数中调用该函数:vector<Point> detectBoardCorners(const Mat &image, Mat &processedImage) {
Mat enhanced = enhanceImage(image);
Mat gray;
cvtColor(enhanced, gray, COLOR_BGR2GRAY);
GaussianBlur(gray, gray, Size(5, 5), 0);
// 后续代码保持不变
// ...
}
HoughLinesP 函数的参数,以提高直线检测的准确性。HoughLinesP(gray, lines, 1, CV_PI / 180, 50, 30, 10); // 调整阈值
cv::goodFeaturesToTrack 或者 cv::findChessboardCorners,来辅助检测棋盘角点。vector<Point> detectBoardCorners(const Mat &image, Mat &processedImage) {
// 原有的霍夫变换角点检测代码
// ...
// 尝试使用其他角点检测算法
vector<Point2f> corners;
bool found = findChessboardCorners(gray, Size(19, 19), corners);
if (found) {
// 提取四个角点
vector<Point> selected_corners = {
Point(corners[0]),
Point(corners[18]),
Point(corners[342]),
Point(corners[360])
};
return selected_corners;
}
// 如果其他方法也失败,返回原有的角点检测结果
return selected_corners;
}
#include <algorithm>
#include <cmath>
#include <fstream>
#include <iostream>
#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;
using namespace std;
// A detected line segment on the board image.
struct Line {
  Point p1;         // first endpoint (pixel coordinates)
  Point p2;         // second endpoint (pixel coordinates)
  Vec4f lineParams; // For fitLine results
};
// Convert 0-based board indices to a two-letter SGF point.
// SGF point coordinates for a 19x19 board run contiguously from 'a' to
// 's' -- unlike GTP/display coordinates, SGF does NOT skip the letter
// 'i' (per the SGF FF[4] specification for Go points). The previous
// version skipped 'i', producing invalid points up to column 't'.
// Returns "" for out-of-range indices.
string toSGFCoord(int x_index, int y_index) {
  if (x_index < 0 || x_index >= 19 || y_index < 0 || y_index >= 19) {
    return "";
  }
  char col = static_cast<char>('a' + x_index);
  char row = static_cast<char>('a' + y_index);
  return string(1, col) + string(1, row);
}
// Boost image contrast with histogram equalization. Single-channel
// images are equalized directly; color images are equalized on the luma
// (Y) channel in YCrCb space so the chroma channels stay untouched.
Mat enhanceImage(const Mat &image) {
  if (image.channels() == 1) {
    Mat result;
    equalizeHist(image, result);
    return result;
  }
  Mat ycrcb;
  cvtColor(image, ycrcb, COLOR_BGR2YCrCb);
  vector<Mat> planes;
  split(ycrcb, planes);
  equalizeHist(planes[0], planes[0]); // equalize luma only
  merge(planes, ycrcb);
  Mat result;
  cvtColor(ycrcb, result, COLOR_YCrCb2BGR);
  return result;
}
// Locate the four outer corners of the Go board.
// Strategy: enhance contrast, build an edge map, run probabilistic
// Hough, split segments into near-horizontal / near-vertical bins, and
// intersect the outermost line of each bin. Falls back to
// findChessboardCorners when that fails. Returns an empty vector when
// both approaches fail; `processedImage` receives a copy of the input
// with the detected corners drawn for debugging.
vector<Point> detectBoardCorners(const Mat &image, Mat &processedImage) {
  Mat enhanced = enhanceImage(image);
  Mat gray;
  cvtColor(enhanced, gray, COLOR_BGR2GRAY);
  GaussianBlur(gray, gray, Size(5, 5), 0);
  // HoughLinesP requires an 8-bit single-channel BINARY image; feeding
  // it the raw grayscale (as before) makes every nonzero pixel vote in
  // the accumulator. Run Canny first, matching detectGridLines().
  Mat edges;
  Canny(gray, edges, 50, 150);
  vector<Vec4i> lines;
  HoughLinesP(edges, lines, 1, CV_PI / 180, 50, 30, 10); // Adjusted threshold
  vector<Vec4i> horizontalLines, verticalLines;
  for (const auto &line : lines) {
    // Bucket by segment angle: within 18 deg of 0 -> horizontal,
    // within 18 deg of +/-90 -> vertical.
    float angle = atan2(line[3] - line[1], line[2] - line[0]);
    if (abs(angle) < CV_PI / 10) {
      horizontalLines.push_back(line);
    } else if (abs(abs(angle) - CV_PI / 2) < CV_PI / 10) {
      verticalLines.push_back(line);
    }
  }
  // Sort lines by their y or x coordinates
  sort(horizontalLines.begin(), horizontalLines.end(),
       [](const Vec4i &a, const Vec4i &b) {
         return (a[1] + a[3]) / 2 < (b[1] + b[3]) / 2;
       });
  sort(verticalLines.begin(), verticalLines.end(),
       [](const Vec4i &a, const Vec4i &b) {
         return (a[0] + a[2]) / 2 < (b[0] + b[2]) / 2;
       });
  Point2f intersections[4];
  bool foundAll = false;
  if (horizontalLines.size() >= 2 && verticalLines.size() >= 2) {
    // Take the topmost and bottommost horizontal lines, and leftmost and
    // rightmost vertical
    Vec4i topH = horizontalLines.front();
    Vec4i bottomH = horizontalLines.back();
    Vec4i leftV = verticalLines.front();
    Vec4i rightV = verticalLines.back();
    // Intersection of the infinite lines through two segments.
    auto intersect = [](const Vec4i &l1, const Vec4i &l2) -> Point2f {
      float x1 = l1[0], y1 = l1[1], x2 = l1[2], y2 = l1[3];
      float x3 = l2[0], y3 = l2[1], x4 = l2[2], y4 = l2[3];
      float det = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4);
      if (abs(det) < 1e-6)
        return Point2f(-1, -1); // Parallel
      float t = ((x1 - x3) * (y3 - y4) - (y1 - y3) * (x3 - x4)) / det;
      // Extended-line intersection is acceptable here, so the segment
      // parameter range is not checked.
      return Point2f(x1 + t * (x2 - x1), y1 + t * (y2 - y1));
    };
    intersections[0] = intersect(topH, leftV);     // Top-left
    intersections[1] = intersect(topH, rightV);    // Top-right
    intersections[2] = intersect(bottomH, rightV); // Bottom-right
    intersections[3] = intersect(bottomH, leftV);  // Bottom-left
    // Accept only if all four intersections fall inside the image.
    bool allValid = true;
    for (const auto &p : intersections) {
      if (p.x < 0 || p.y < 0 || p.x >= image.cols || p.y >= image.rows) {
        allValid = false;
        break;
      }
    }
    if (allValid) {
      foundAll = true;
    }
  }
  vector<Point> selected_corners;
  if (foundAll) {
    selected_corners = {Point(intersections[0]), Point(intersections[1]),
                        Point(intersections[2]), Point(intersections[3])};
  } else {
    // Fallback corner detection.
    // NOTE(review): findChessboardCorners expects a checkerboard pattern
    // of inner corners; a Go board grid is unlikely to satisfy it --
    // treat this branch as best-effort.
    vector<Point2f> corners;
    bool found = findChessboardCorners(gray, Size(19, 19), corners);
    if (found) {
      // Indices 0, 18, 342, 360 are the four corners of a row-major
      // 19x19 point grid.
      selected_corners = {
          Point(corners[0]),
          Point(corners[18]),
          Point(corners[342]),
          Point(corners[360])
      };
    } else {
      cerr << "Error: Could not find reliable board corners using Hough lines." << endl;
    }
  }
  processedImage = image.clone();
  for (const auto &p : selected_corners) {
    circle(processedImage, p, 10, Scalar(255, 0, 0), -1);
  }
  return selected_corners;
}
// In correctPerspective():
// Warp `image` so the quadrilateral `corners` (ordered TL, TR, BR, BL)
// becomes an axis-aligned side x side square. Returns an unmodified
// clone of the input when the corner count is wrong or the homography
// cannot be estimated.
Mat correctPerspective(const Mat &image, const vector<Point> &corners) {
  if (corners.size() != 4) {
    cerr << "Error: Need 4 corners for perspective correction." << endl;
    return image.clone();
  }
  const float side = 500.0f;
  Point2f target_corners[4] = {Point2f(0, 0), Point2f(side - 1, 0),
                               Point2f(side - 1, side - 1),
                               Point2f(0, side - 1)};
  Point2f src_corners_array[4];
  for (int i = 0; i < 4; ++i) {
    src_corners_array[i] = Point2f(corners[i]);
  }
  // Wrap the raw point arrays in Mat headers (no copy) for findHomography.
  Mat src_points(1, 4, CV_32FC2, src_corners_array);
  Mat dst_points(1, 4, CV_32FC2, target_corners);
  Mat homography = findHomography(src_points, dst_points, 0);
  if (homography.empty()) {
    // Degenerate configuration (e.g. collinear corners): warpPerspective
    // would otherwise be called with an empty transform and throw.
    cerr << "Error: Homography estimation failed." << endl;
    return image.clone();
  }
  Mat warped_image;
  // Explicit cast avoids implicit float->int narrowing in Size.
  warpPerspective(image, warped_image, homography,
                  Size(static_cast<int>(side), static_cast<int>(side)));
  return warped_image;
}
// Detect candidate grid lines in the (warped) board image.
// Pipeline: gray -> Gaussian blur -> Canny -> probabilistic Hough, then
// bucket segments by angle: within 15 deg of 0 -> horizontal, within
// 15 deg of +/-90 -> vertical; anything else is discarded.
void detectGridLines(const Mat &image, vector<Line> &horizontalLines,
                     vector<Line> &verticalLines) {
  Mat gray, blurred, edges;
  cvtColor(image, gray, COLOR_BGR2GRAY);
  GaussianBlur(gray, blurred, Size(5, 5), 0);
  Canny(blurred, edges, 50, 150);
  vector<Vec4i> lines;
  HoughLinesP(edges, lines, 1, CV_PI / 180, 100, 30, 10); // Adjusted parameters
  for (const auto &line : lines) {
    float dx = line[2] - line[0];
    float dy = line[3] - line[1];
    float angle = atan2(dy, dx) * 180 / CV_PI; // segment angle in degrees
    if (abs(angle) < 15) { // Increased angle tolerance
      horizontalLines.push_back(
          {Point(line[0], line[1]), Point(line[2], line[3])});
    } else if (abs(abs(angle) - 90) < 15) {
      verticalLines.push_back(
          {Point(line[0], line[1]), Point(line[2], line[3])});
    }
  }
}
// Reduce the raw Hough line sets to (at most) 19 horizontal and 19
// vertical grid lines with roughly even spacing, then stretch each
// selected line to the image borders.
// NOTE(review): when fewer than 19 sufficiently-spaced lines exist the
// outputs contain fewer than 19 entries -- callers must check the size.
void identifyGrid(vector<Line> &horizontalLines, vector<Line> &verticalLines,
                  vector<Line> &selectedHorizontal,
                  vector<Line> &selectedVertical, const Mat &image) {
  // Sort lines
  // (by midpoint y for horizontal lines, midpoint x for vertical ones)
  sort(horizontalLines.begin(), horizontalLines.end(),
       [](const Line &a, const Line &b) {
         return (a.p1.y + a.p2.y) / 2 < (b.p1.y + b.p2.y) / 2;
       });
  sort(verticalLines.begin(), verticalLines.end(),
       [](const Line &a, const Line &b) {
         return (a.p1.x + a.p2.x) / 2 < (b.p1.x + b.p2.x) / 2;
       });
  // Select 19 lines with approximate spacing
  auto selectLines = [](vector<Line> &lines, bool horizontal, float imageSize) {
    vector<Line> selected;
    if (lines.empty())
      return selected;
    float avgSpacing = imageSize / 18.0f; // Average spacing for 19 lines
    selected.push_back(lines.front());
    for (size_t i = 1; i < lines.size(); ++i) {
      float pos = horizontal ? (lines[i].p1.y + lines[i].p2.y) / 2.0f
                             : (lines[i].p1.x + lines[i].p2.x) / 2.0f;
      float lastPos =
          horizontal ? (selected.back().p1.y + selected.back().p2.y) / 2.0f
                     : (selected.back().p1.x + selected.back().p2.x) / 2.0f;
      // Accept the next line only if it is at least 70% of the expected
      // grid pitch away from the last accepted one; this collapses the
      // multiple Hough responses along a single physical board line.
      if (abs(pos - lastPos) >= avgSpacing * 0.7 && selected.size() < 19) {
        selected.push_back(lines[i]);
      }
      if (selected.size() >= 19)
        break; // Ensure we don't go over 19
    }
    // Extend lines to image boundaries
    if (horizontal) {
      for (auto &line : selected) {
        line.p1.x = 0;
        line.p2.x = imageSize - 1;
      }
    } else {
      for (auto &line : selected) {
        line.p1.y = 0;
        line.p2.y = imageSize - 1;
      }
    }
    return selected;
  };
  selectedHorizontal =
      selectLines(horizontalLines, true, static_cast<float>(image.rows));
  selectedVertical =
      selectLines(verticalLines, false, static_cast<float>(image.cols));
}
// Classify each of the 19x19 grid intersections as black ('B'),
// white ('W') or empty ('.') by sampling HSV value/saturation at the
// crossing of the i-th horizontal and j-th vertical grid lines.
// Returns an all-empty board when fewer than 19 lines were supplied.
vector<vector<char>>
detectStonesAtIntersections(const Mat &image,
                            const vector<Line> &horizontalLines,
                            const vector<Line> &verticalLines) {
  const int boardSize = 19;
  vector<vector<char>> boardState(boardSize, vector<char>(boardSize, '.'));
  if (horizontalLines.size() < static_cast<size_t>(boardSize) ||
      verticalLines.size() < static_cast<size_t>(boardSize)) {
    cerr << "Not enough grid lines detected." << endl;
    return boardState;
  }
  // Convert to HSV once, outside the loops: the original converted the
  // entire image at every one of the 361 intersections.
  Mat hsv;
  cvtColor(image, hsv, COLOR_BGR2HSV);
  for (int i = 0; i < boardSize; ++i) {
    for (int j = 0; j < boardSize; ++j) {
      Point2f intersection(
          (verticalLines[j].p1.x + verticalLines[j].p2.x) / 2.0f,
          (horizontalLines[i].p1.y + horizontalLines[i].p2.y) / 2.0f);
      // Round to the nearest pixel (instead of truncating) and bounds-check
      // the rounded indices before sampling.
      const int px = cvRound(intersection.x);
      const int py = cvRound(intersection.y);
      if (px < 0 || px >= image.cols || py < 0 || py >= image.rows)
        continue;
      Vec3b centerColor = hsv.at<Vec3b>(py, px);
      int v = centerColor[2]; // brightness (value)
      int s = centerColor[1]; // saturation
      // NOTE(review): thresholds are heuristic; black stones are assumed
      // dark (v < 50) with s > 50 and white stones bright/unsaturated --
      // confirm against real board photos.
      if (v < 50 && s > 50) { // Black stone
        boardState[i][j] = 'B';
      } else if (v > 200 && s < 50) { // White stone
        boardState[i][j] = 'W';
      }
    }
  }
  return boardState;
}
string boardStateToSGF(const vector<vector<char>> &boardState) {
string sgfContent = "(;FF[4]GM[1]SZ[19]";
string addBlack, addWhite;
detectBoardCorners 函数中,在灰度化之后添加直方图均衡化步骤。vector<Point> detectBoardCorners(const Mat &image, Mat &processedImage) {
Mat gray;
cvtColor(image, gray, COLOR_BGR2GRAY);
// 添加直方图均衡化
equalizeHist(gray, gray);
GaussianBlur(gray, gray, Size(5, 5), 0);
// 后续代码保持不变
// ...
}
HoughLinesP 函数的参数,降低阈值以检测更多的直线。vector<Vec4i> lines;
// 降低阈值
HoughLinesP(gray, lines, 1, CV_PI / 180, 50, 30, 10);
Laplacian 或 Sobel 算子,以增强棋盘线条的特征。vector<Point> detectBoardCorners(const Mat &image, Mat &processedImage) {
Mat gray;
cvtColor(image, gray, COLOR_BGR2GRAY);
equalizeHist(gray, gray);
GaussianBlur(gray, gray, Size(5, 5), 0);
// 使用 Sobel 算子进行边缘检测
Mat edges;
Sobel(gray, edges, CV_8U, 1, 1);
threshold(edges, edges, 100, 255, THRESH_BINARY);
vector<Vec4i> lines;
HoughLinesP(edges, lines, 1, CV_PI / 180, 50, 30, 10);
// 后续代码保持不变
// ...
}
cv::goodFeaturesToTrack 或 cv::findChessboardCorners 作为备用方法。vector<Point> detectBoardCorners(const Mat &image, Mat &processedImage) {
Mat gray;
cvtColor(image, gray, COLOR_BGR2GRAY);
equalizeHist(gray, gray);
GaussianBlur(gray, gray, Size(5, 5), 0);
vector<Vec4i> lines;
HoughLinesP(gray, lines, 1, CV_PI / 180, 50, 30, 10);
// 原有的霍夫变换角点检测代码
// ...
// 如果霍夫变换未能找到可靠角点,尝试使用其他方法
vector<Point2f> corners;
bool found = findChessboardCorners(gray, Size(19, 19), corners);
if (found) {
vector<Point> selected_corners = {
Point(corners[0]),
Point(corners[18]),
Point(corners[342]),
Point(corners[360])
};
return selected_corners;
}
// 如果其他方法也失败,返回空向量
return {};
}
detectBoardCorners function with some potential improvements based on the analysis in the previous response. This code includes histogram equalization, adjusted Hough transform parameters, and an alternative corner detection method using findChessboardCorners as a fallback.#include <algorithm>
#include <cmath>
#include <iostream>
#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;
using namespace std;
// Locate the four outer corners of the Go board via Hough lines, with a
// findChessboardCorners fallback. Returns an empty vector when both
// approaches fail; `processedImage` receives a copy of the input with
// the detected corners drawn for debugging.
// NOTE(review): HoughLinesP is given the blurred grayscale image, not a
// binary edge map -- per OpenCV docs it expects a binary input, so every
// nonzero gray pixel votes in the accumulator; consider running Canny
// first as detectGridLines does.
vector<Point> detectBoardCorners(const Mat &image, Mat &processedImage) {
  Mat gray;
  cvtColor(image, gray, COLOR_BGR2GRAY);
  // Add histogram equalization to enhance contrast
  equalizeHist(gray, gray);
  GaussianBlur(gray, gray, Size(5, 5), 0);
  vector<Vec4i> lines;
  // Adjusted HoughLinesP parameters to detect more lines
  HoughLinesP(gray, lines, 1, CV_PI / 180, 50, 30, 10);
  vector<Vec4i> horizontalLines, verticalLines;
  for (const auto &line : lines) {
    // Bucket by angle: within 18 deg of 0 -> horizontal, within 18 deg
    // of +/-90 -> vertical.
    float angle = atan2(line[3] - line[1], line[2] - line[0]);
    if (abs(angle) < CV_PI / 10) {
      horizontalLines.push_back(line);
    } else if (abs(abs(angle) - CV_PI / 2) < CV_PI / 10) {
      verticalLines.push_back(line);
    }
  }
  // Sort lines by their y or x coordinates
  sort(horizontalLines.begin(), horizontalLines.end(),
       [](const Vec4i &a, const Vec4i &b) {
         return (a[1] + a[3]) / 2 < (b[1] + b[3]) / 2;
       });
  sort(verticalLines.begin(), verticalLines.end(),
       [](const Vec4i &a, const Vec4i &b) {
         return (a[0] + a[2]) / 2 < (b[0] + b[2]) / 2;
       });
  Point2f intersections[4];
  bool foundAll = false;
  if (horizontalLines.size() >= 2 && verticalLines.size() >= 2) {
    // Take the topmost and bottommost horizontal lines, and leftmost and
    // rightmost vertical
    Vec4i topH = horizontalLines.front();
    Vec4i bottomH = horizontalLines.back();
    Vec4i leftV = verticalLines.front();
    Vec4i rightV = verticalLines.back();
    // Intersection of the infinite lines through two segments.
    // NOTE(review): both branches below return the same point, so the
    // segment-range test on t/u has no effect.
    auto intersect = [](const Vec4i &l1, const Vec4i &l2) -> Point2f {
      float x1 = l1[0], y1 = l1[1], x2 = l1[2], y2 = l1[3];
      float x3 = l2[0], y3 = l2[1], x4 = l2[2], y4 = l2[3];
      float det = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4);
      if (abs(det) < 1e-6)
        return Point2f(-1, -1); // Parallel
      float t = ((x1 - x3) * (y3 - y4) - (y1 - y3) * (x3 - x4)) / det;
      float u = -((x1 - x2) * (y1 - y3) - (y1 - y2) * (x1 - x3)) / det;
      if (t >= 0 && t <= 1 && u >= 0 &&
          u <= 1) { // Intersection within line segments
        return Point2f(x1 + t * (x2 - x1), y1 + t * (y2 - y1));
      } else {
        return Point2f(x1 + t * (x2 - x1),
                       y1 + t * (y2 - y1)); // Lines extended
      }
    };
    intersections[0] = intersect(topH, leftV); // Top-left
    intersections[1] = intersect(topH, rightV); // Top-right
    intersections[2] = intersect(bottomH, rightV); // Bottom-right
    intersections[3] = intersect(bottomH, leftV); // Bottom-left
    // Accept only if all four intersections fall inside the image.
    bool allValid = true;
    for (const auto &p : intersections) {
      if (p.x < 0 || p.y < 0 || p.x >= image.cols || p.y >= image.rows) {
        allValid = false;
        break;
      }
    }
    if (allValid) {
      foundAll = true;
    }
  }
  vector<Point> selected_corners;
  if (foundAll) {
    selected_corners = {Point(intersections[0]), Point(intersections[1]),
                        Point(intersections[2]), Point(intersections[3])};
  } else {
    // Try alternative corner detection method
    // NOTE(review): findChessboardCorners expects a checkerboard pattern
    // of inner corners; unlikely to succeed on a plain Go board grid.
    vector<Point2f> corners;
    bool found = findChessboardCorners(gray, Size(19, 19), corners);
    if (found) {
      // Indices 0, 18, 342, 360: corners of a row-major 19x19 grid.
      selected_corners = {
          Point(corners[0]),
          Point(corners[18]),
          Point(corners[342]),
          Point(corners[360])
      };
    } else {
      cerr << "Error: Could not find reliable board corners using Hough lines." << endl;
    }
  }
  processedImage = image.clone();
  for (const auto &p : selected_corners) {
    circle(processedImage, p, 10, Scalar(255, 0, 0), -1);
  }
  return selected_corners;
}
// Minimal driver: load the board photo and run corner detection only.
// Returns -1 when the image cannot be read.
int main() {
  string imageFile = "go_board.jpg";
  Mat originalImage = imread(imageFile, IMREAD_COLOR);
  if (originalImage.empty()) {
    cerr << "Error: Could not open image." << endl;
    return -1;
  }
  Mat processedImage;
  vector<Point> corners = detectBoardCorners(originalImage, processedImage);
  // Further processing...
  return 0;
}
findChessboardCorners as an alternative method. If both methods fail, it prints an error message.HoughLinesP function for corner detection might be too strict. This can lead to missing some important lines, making it difficult to find the reliable intersections (corners).HoughLinesP function to detect more lines. For example:vector<Vec4i> lines;
// Reduce the threshold from 100 to 50
HoughLinesP(gray, lines, 1, CV_PI / 180, 50, 30, 10);
Mat gray = image.clone();
cvtColor(gray, gray, COLOR_BGR2GRAY);
// Add histogram equalization
equalizeHist(gray, gray);
GaussianBlur(gray, gray, Size(5, 5), 0);
detectBoardCorners function incorporating the above improvements:vector<Point> detectBoardCorners(const Mat &image, Mat &processedImage) {
Mat gray = image.clone();
cvtColor(gray, gray, COLOR_BGR2GRAY);
// Add histogram equalization
equalizeHist(gray, gray);
GaussianBlur(gray, gray, Size(5, 5), 0);
vector<Vec4i> lines;
// Reduce the threshold
HoughLinesP(gray, lines, 1, CV_PI / 180, 50, 30, 10);
vector<Vec4i> horizontalLines, verticalLines;
for (const auto &line : lines) {
float angle = atan2(line[3] - line[1], line[2] - line[0]);
if (abs(angle) < CV_PI / 10) {
horizontalLines.push_back(line);
} else if (abs(abs(angle) - CV_PI / 2) < CV_PI / 10) {
verticalLines.push_back(line);
}
}
// Sort lines by their y or x coordinates
sort(horizontalLines.begin(), horizontalLines.end(),
[](const Vec4i &a, const Vec4i &b) {
return (a[1] + a[3]) / 2 < (b[1] + b[3]) / 2;
});
sort(verticalLines.begin(), verticalLines.end(),
[](const Vec4i &a, const Vec4i &b) {
return (a[0] + a[2]) / 2 < (b[0] + b[2]) / 2;
});
Point2f intersections[4];
bool foundAll = false;
if (horizontalLines.size() >= 2 && verticalLines.size() >= 2) {
// Take the topmost and bottommost horizontal lines, and leftmost and
// rightmost vertical
Vec4i topH = horizontalLines.front();
Vec4i bottomH = horizontalLines.back();
Vec4i leftV = verticalLines.front();
Vec4i rightV = verticalLines.back();
auto intersect = [](const Vec4i &l1, const Vec4i &l2) -> Point2f {
float x1 = l1[0], y1 = l1[1], x2 = l1[2], y2 = l1[3];
float x3 = l2[0], y3 = l2[1], x4 = l2[2], y4 = l2[3];
float det = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4);
if (abs(det) < 1e-6)
return Point2f(-1, -1); // Parallel
float t = ((x1 - x3) * (y3 - y4) - (y1 - y3) * (x3 - x4)) / det;
float u = -((x1 - x2) * (y1 - y3) - (y1 - y2) * (x1 - x3)) / det;
if (t >= 0 && t <= 1 && u >= 0 &&
u <= 1) { // Intersection within line segments
return Point2f(x1 + t * (x2 - x1), y1 + t * (y2 - y1));
} else {
return Point2f(x1 + t * (x2 - x1),
y1 + t * (y2 - y1)); // Lines extended
}
};
intersections[0] = intersect(topH, leftV); // Top-left
intersections[1] = intersect(topH, rightV); // Top-right
intersections[2] = intersect(bottomH, rightV); // Bottom-right
intersections[3] = intersect(bottomH, leftV); // Bottom-left
bool allValid = true;
for (const auto &p : intersections) {
if (p.x < 0 || p.y < 0 || p.x >= image.cols || p.y >= image.rows) {
allValid = false;
break;
}
}
if (allValid) {
foundAll = true;
}
}
vector<Point> selected_corners;
if (foundAll) {
selected_corners = {Point(intersections[0]), Point(intersections[1]),
Point(intersections[2]), Point(intersections[3])};
} else {
cerr << "Error: Could not find reliable board corners using Hough lines."
<< endl;
}
processedImage = image.clone();
for (const auto &p : selected_corners) {
circle(processedImage, p, 10, Scalar(255, 0, 0), -1);
}
return selected_corners;
}
detectBoardCorners function in your go.cpp file with this updated version to see if it improves the corner detection.Mat preprocessImage(const Mat &image) {
Mat gray;
cvtColor(image, gray, COLOR_BGR2GRAY);
// 直方图均衡化增强对比度
equalizeHist(gray, gray);
// 高斯模糊减少噪声
GaussianBlur(gray, gray, Size(5, 5), 0);
// 使用双边滤波进一步平滑图像
bilateralFilter(gray, gray, 9, 75, 75);
return gray;
}
// Run the probabilistic Hough transform with a deliberately low
// accumulator threshold (30) so faint board lines are still picked up.
// Returns raw segments as Vec4i (x1, y1, x2, y2).
vector<Vec4i> detectLines(const Mat &image) {
  vector<Vec4i> found;
  HoughLinesP(image, found, 1, CV_PI / 180, 30, 30, 10);
  return found;
}
// Locate the four outer corners of the Go board using the
// preprocessImage/detectLines helpers, with a findChessboardCorners
// fallback. Returns an empty vector when both fail; `processedImage`
// receives a copy of the input with detected corners drawn.
vector<Point> detectBoardCorners(const Mat &image, Mat &processedImage) {
  Mat preprocessed = preprocessImage(image);
  vector<Vec4i> lines = detectLines(preprocessed);
  vector<Vec4i> horizontalLines, verticalLines;
  for (const auto &line : lines) {
    // Bucket by angle: within 18 deg of 0 -> horizontal, within 18 deg
    // of +/-90 -> vertical.
    float angle = atan2(line[3] - line[1], line[2] - line[0]);
    if (abs(angle) < CV_PI / 10) {
      horizontalLines.push_back(line);
    } else if (abs(abs(angle) - CV_PI / 2) < CV_PI / 10) {
      verticalLines.push_back(line);
    }
  }
  // Sort the detected lines (by midpoint y / x respectively)
  sort(horizontalLines.begin(), horizontalLines.end(),
       [](const Vec4i &a, const Vec4i &b) {
         return (a[1] + a[3]) / 2 < (b[1] + b[3]) / 2;
       });
  sort(verticalLines.begin(), verticalLines.end(),
       [](const Vec4i &a, const Vec4i &b) {
         return (a[0] + a[2]) / 2 < (b[0] + b[2]) / 2;
       });
  Point2f intersections[4];
  bool foundAll = false;
  if (horizontalLines.size() >= 2 && verticalLines.size() >= 2) {
    // Pick the topmost/bottommost horizontal and leftmost/rightmost
    // vertical lines
    Vec4i topH = horizontalLines.front();
    Vec4i bottomH = horizontalLines.back();
    Vec4i leftV = verticalLines.front();
    Vec4i rightV = verticalLines.back();
    // Intersection of the infinite lines through two segments.
    // NOTE(review): both branches below return the same point, so the
    // segment-range test on t/u has no effect.
    auto intersect = [](const Vec4i &l1, const Vec4i &l2) -> Point2f {
      float x1 = l1[0], y1 = l1[1], x2 = l1[2], y2 = l1[3];
      float x3 = l2[0], y3 = l2[1], x4 = l2[2], y4 = l2[3];
      float det = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4);
      if (abs(det) < 1e-6)
        return Point2f(-1, -1); // Parallel lines
      float t = ((x1 - x3) * (y3 - y4) - (y1 - y3) * (x3 - x4)) / det;
      float u = -((x1 - x2) * (y1 - y3) - (y1 - y2) * (x1 - x3)) / det;
      if (t >= 0 && t <= 1 && u >= 0 && u <= 1) { // Segments intersect
        return Point2f(x1 + t * (x2 - x1), y1 + t * (y2 - y1));
      } else {
        return Point2f(x1 + t * (x2 - x1), y1 + t * (y2 - y1)); // Extended lines intersect
      }
    };
    intersections[0] = intersect(topH, leftV); // Top-left
    intersections[1] = intersect(topH, rightV); // Top-right
    intersections[2] = intersect(bottomH, rightV); // Bottom-right
    intersections[3] = intersect(bottomH, leftV); // Bottom-left
    // Accept only if all four intersections fall inside the image.
    bool allValid = true;
    for (const auto &p : intersections) {
      if (p.x < 0 || p.y < 0 || p.x >= image.cols || p.y >= image.rows) {
        allValid = false;
        break;
      }
    }
    if (allValid) {
      foundAll = true;
    }
  }
  vector<Point> selected_corners;
  if (foundAll) {
    selected_corners = {Point(intersections[0]), Point(intersections[1]),
                        Point(intersections[2]), Point(intersections[3])};
  } else {
    // Try an alternative corner detection algorithm
    // NOTE(review): findChessboardCorners expects a checkerboard pattern
    // of inner corners; unlikely to succeed on a plain Go board grid.
    vector<Point2f> corners;
    bool found = findChessboardCorners(preprocessed, Size(19, 19), corners);
    if (found) {
      // Indices 0, 18, 342, 360: corners of a row-major 19x19 grid.
      selected_corners = {
          Point(corners[0]),
          Point(corners[18]),
          Point(corners[342]),
          Point(corners[360])
      };
    } else {
      cerr << "Error: Could not find reliable board corners using Hough lines." << endl;
    }
  }
  processedImage = image.clone();
  for (const auto &p : selected_corners) {
    circle(processedImage, p, 10, Scalar(255, 0, 0), -1);
  }
  return selected_corners;
}
#include <algorithm>
#include <cmath>
#include <fstream>
#include <iostream>
#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;
using namespace std;
// A detected line segment on the board image.
struct Line {
  Point p1;         // first endpoint (pixel coordinates)
  Point p2;         // second endpoint (pixel coordinates)
  Vec4f lineParams; // For fitLine results
};
// Convert 0-based board indices to a two-letter SGF point.
// SGF point coordinates for a 19x19 board run contiguously from 'a' to
// 's' -- unlike GTP/display coordinates, SGF does NOT skip the letter
// 'i' (per the SGF FF[4] specification for Go points). The previous
// version skipped 'i', producing invalid points up to column 't'.
// Returns "" for out-of-range indices.
string toSGFCoord(int x_index, int y_index) {
  if (x_index < 0 || x_index >= 19 || y_index < 0 || y_index >= 19) {
    return "";
  }
  char col = static_cast<char>('a' + x_index);
  char row = static_cast<char>('a' + y_index);
  return string(1, col) + string(1, row);
}
// Grayscale + contrast + noise-reduction pipeline run before line
// detection: equalize histogram, Gaussian blur, then an edge-preserving
// bilateral filter.
Mat preprocessImage(const Mat &image) {
  Mat gray;
  cvtColor(image, gray, COLOR_BGR2GRAY);
  equalizeHist(gray, gray);
  GaussianBlur(gray, gray, Size(5, 5), 0);
  // bilateralFilter does not support in-place operation (src must not
  // alias dst per the OpenCV documentation), so filter into a separate
  // buffer instead of the original `bilateralFilter(gray, gray, ...)`.
  Mat smoothed;
  bilateralFilter(gray, smoothed, 9, 75, 75);
  return smoothed;
}
// Probabilistic Hough transform with a low accumulator threshold (30)
// so faint board lines are still detected. Expects a preprocessed
// single-channel image; returns raw segments as Vec4i (x1, y1, x2, y2).
vector<Vec4i> detectLines(const Mat &image) {
  vector<Vec4i> lines;
  HoughLinesP(image, lines, 1, CV_PI / 180, 30, 30, 10);
  return lines;
}
// Locate the four outer corners of the Go board using the
// preprocessImage/detectLines helpers, with a findChessboardCorners
// fallback. Returns an empty vector when both fail; `processedImage`
// receives a copy of the input with detected corners drawn.
vector<Point> detectBoardCorners(const Mat &image, Mat &processedImage) {
  Mat preprocessed = preprocessImage(image);
  vector<Vec4i> lines = detectLines(preprocessed);
  vector<Vec4i> horizontalLines, verticalLines;
  for (const auto &line : lines) {
    // Bucket by angle: within 18 deg of 0 -> horizontal, within 18 deg
    // of +/-90 -> vertical.
    float angle = atan2(line[3] - line[1], line[2] - line[0]);
    if (abs(angle) < CV_PI / 10) {
      horizontalLines.push_back(line);
    } else if (abs(abs(angle) - CV_PI / 2) < CV_PI / 10) {
      verticalLines.push_back(line);
    }
  }
  // Sort by midpoint y (horizontal) / midpoint x (vertical).
  sort(horizontalLines.begin(), horizontalLines.end(),
       [](const Vec4i &a, const Vec4i &b) {
         return (a[1] + a[3]) / 2 < (b[1] + b[3]) / 2;
       });
  sort(verticalLines.begin(), verticalLines.end(),
       [](const Vec4i &a, const Vec4i &b) {
         return (a[0] + a[2]) / 2 < (b[0] + b[2]) / 2;
       });
  Point2f intersections[4];
  bool foundAll = false;
  if (horizontalLines.size() >= 2 && verticalLines.size() >= 2) {
    // Outermost line on each side of the board.
    Vec4i topH = horizontalLines.front();
    Vec4i bottomH = horizontalLines.back();
    Vec4i leftV = verticalLines.front();
    Vec4i rightV = verticalLines.back();
    // Intersection of the infinite lines through two segments.
    // NOTE(review): both branches below return the same point, so the
    // segment-range test on t/u has no effect.
    auto intersect = [](const Vec4i &l1, const Vec4i &l2) -> Point2f {
      float x1 = l1[0], y1 = l1[1], x2 = l1[2], y2 = l1[3];
      float x3 = l2[0], y3 = l2[1], x4 = l2[2], y4 = l2[3];
      float det = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4);
      if (abs(det) < 1e-6)
        return Point2f(-1, -1);
      float t = ((x1 - x3) * (y3 - y4) - (y1 - y3) * (x3 - x4)) / det;
      float u = -((x1 - x2) * (y1 - y3) - (y1 - y2) * (x1 - x3)) / det;
      if (t >= 0 && t <= 1 && u >= 0 && u <= 1) {
        return Point2f(x1 + t * (x2 - x1), y1 + t * (y2 - y1));
      } else {
        return Point2f(x1 + t * (x2 - x1), y1 + t * (y2 - y1));
      }
    };
    intersections[0] = intersect(topH, leftV);     // top-left
    intersections[1] = intersect(topH, rightV);    // top-right
    intersections[2] = intersect(bottomH, rightV); // bottom-right
    intersections[3] = intersect(bottomH, leftV);  // bottom-left
    // Accept only if all four intersections fall inside the image.
    bool allValid = true;
    for (const auto &p : intersections) {
      if (p.x < 0 || p.y < 0 || p.x >= image.cols || p.y >= image.rows) {
        allValid = false;
        break;
      }
    }
    if (allValid) {
      foundAll = true;
    }
  }
  vector<Point> selected_corners;
  if (foundAll) {
    selected_corners = {Point(intersections[0]), Point(intersections[1]),
                        Point(intersections[2]), Point(intersections[3])};
  } else {
    // Fallback corner detection.
    // NOTE(review): findChessboardCorners expects a checkerboard pattern
    // of inner corners; unlikely to succeed on a plain Go board grid.
    vector<Point2f> corners;
    bool found = findChessboardCorners(preprocessed, Size(19, 19), corners);
    if (found) {
      // Indices 0, 18, 342, 360: corners of a row-major 19x19 grid.
      selected_corners = {
          Point(corners[0]),
          Point(corners[18]),
          Point(corners[342]),
          Point(corners[360])
      };
    } else {
      cerr << "Error: Could not find reliable board corners using Hough lines." << endl;
    }
  }
  processedImage = image.clone();
  for (const auto &p : selected_corners) {
    circle(processedImage, p, 10, Scalar(255, 0, 0), -1);
  }
  return selected_corners;
}
// In correctPerspective():
// Warp `image` so the quadrilateral `corners` (ordered TL, TR, BR, BL)
// becomes an axis-aligned side x side square. Returns an unmodified
// clone of the input when the corner count is wrong or the homography
// cannot be estimated.
Mat correctPerspective(const Mat &image, const vector<Point> &corners) {
  if (corners.size() != 4) {
    cerr << "Error: Need 4 corners for perspective correction." << endl;
    return image.clone();
  }
  const float side = 500.0f;
  Point2f target_corners[4] = {Point2f(0, 0), Point2f(side - 1, 0),
                               Point2f(side - 1, side - 1),
                               Point2f(0, side - 1)};
  Point2f src_corners_array[4];
  for (int i = 0; i < 4; ++i) {
    src_corners_array[i] = Point2f(corners[i]);
  }
  // Wrap the raw point arrays in Mat headers (no copy) for findHomography.
  Mat src_points(1, 4, CV_32FC2, src_corners_array);
  Mat dst_points(1, 4, CV_32FC2, target_corners);
  Mat homography = findHomography(src_points, dst_points, 0);
  if (homography.empty()) {
    // Degenerate configuration (e.g. collinear corners): warpPerspective
    // would otherwise be called with an empty transform and throw.
    cerr << "Error: Homography estimation failed." << endl;
    return image.clone();
  }
  Mat warped_image;
  // Explicit cast avoids implicit float->int narrowing in Size.
  warpPerspective(image, warped_image, homography,
                  Size(static_cast<int>(side), static_cast<int>(side)));
  return warped_image;
}
// Detect candidate grid lines in the (warped) board image.
// Pipeline: gray -> Gaussian blur -> Canny -> probabilistic Hough, then
// bucket segments by angle: within 15 deg of 0 -> horizontal, within
// 15 deg of +/-90 -> vertical; anything else is discarded.
void detectGridLines(const Mat &image, vector<Line> &horizontalLines,
                     vector<Line> &verticalLines) {
  Mat gray, blurred, edges;
  cvtColor(image, gray, COLOR_BGR2GRAY);
  GaussianBlur(gray, blurred, Size(5, 5), 0);
  Canny(blurred, edges, 50, 150);
  vector<Vec4i> lines;
  HoughLinesP(edges, lines, 1, CV_PI / 180, 100, 30, 10); // Adjusted parameters
  for (const auto &line : lines) {
    float dx = line[2] - line[0];
    float dy = line[3] - line[1];
    float angle = atan2(dy, dx) * 180 / CV_PI; // segment angle in degrees
    if (abs(angle) < 15) { // Increased angle tolerance
      horizontalLines.push_back(
          {Point(line[0], line[1]), Point(line[2], line[3])});
    } else if (abs(abs(angle) - 90) < 15) {
      verticalLines.push_back(
          {Point(line[0], line[1]), Point(line[2], line[3])});
    }
  }
}
// Reduce the raw Hough line sets to (at most) 19 horizontal and 19
// vertical grid lines with roughly even spacing, then stretch each
// selected line to the image borders.
// NOTE(review): when fewer than 19 sufficiently-spaced lines exist the
// outputs contain fewer than 19 entries -- callers must check the size.
void identifyGrid(vector<Line> &horizontalLines, vector<Line> &verticalLines,
                  vector<Line> &selectedHorizontal,
                  vector<Line> &selectedVertical, const Mat &image) {
  // Sort lines
  // (by midpoint y for horizontal lines, midpoint x for vertical ones)
  sort(horizontalLines.begin(), horizontalLines.end(),
       [](const Line &a, const Line &b) {
         return (a.p1.y + a.p2.y) / 2 < (b.p1.y + b.p2.y) / 2;
       });
  sort(verticalLines.begin(), verticalLines.end(),
       [](const Line &a, const Line &b) {
         return (a.p1.x + a.p2.x) / 2 < (b.p1.x + b.p2.x) / 2;
       });
  // Select 19 lines with approximate spacing
  auto selectLines = [](vector<Line> &lines, bool horizontal, float imageSize) {
    vector<Line> selected;
    if (lines.empty())
      return selected;
    float avgSpacing = imageSize / 18.0f; // Average spacing for 19 lines
    selected.push_back(lines.front());
    for (size_t i = 1; i < lines.size(); ++i) {
      float pos = horizontal ? (lines[i].p1.y + lines[i].p2.y) / 2.0f
                             : (lines[i].p1.x + lines[i].p2.x) / 2.0f;
      float lastPos =
          horizontal ? (selected.back().p1.y + selected.back().p2.y) / 2.0f
                     : (selected.back().p1.x + selected.back().p2.x) / 2.0f;
      // Accept the next line only if it is at least 70% of the expected
      // grid pitch away from the last accepted one; this collapses the
      // multiple Hough responses along a single physical board line.
      if (abs(pos - lastPos) >= avgSpacing * 0.7 && selected.size() < 19) {
        selected.push_back(lines[i]);
      }
      if (selected.size() >= 19)
        break; // Ensure we don't go over 19
    }
    // Extend lines to image boundaries
    if (horizontal) {
      for (auto &line : selected) {
        line.p1.x = 0;
        line.p2.x = imageSize - 1;
      }
    } else {
      for (auto &line : selected) {
        line.p1.y = 0;
        line.p2.y = imageSize - 1;
      }
    }
    return selected;
  };
  selectedHorizontal =
      selectLines(horizontalLines, true, static_cast<float>(image.rows));
  selectedVertical =
      selectLines(verticalLines, false, static_cast<float>(image.cols));
}
vector<vector<char>>
detectStonesAtIntersections(const Mat &image,
                            const vector<Line> &horizontalLines,
                            const vector<Line> &verticalLines) {
  // Classify each of the 19x19 grid intersections as black ('B'), white
  // ('W') or empty ('.') by sampling HSV value/saturation at the crossing
  // point of the i-th horizontal and j-th vertical line.
  const int boardSize = 19;
  vector<vector<char>> boardState(boardSize, vector<char>(boardSize, '.'));
  // static_cast avoids the signed/unsigned comparison warning of the
  // original `size() < boardSize`.
  if (horizontalLines.size() < static_cast<size_t>(boardSize) ||
      verticalLines.size() < static_cast<size_t>(boardSize)) {
    cerr << "Not enough grid lines detected." << endl;
    return boardState;
  }
  // FIX: convert to HSV once up front. The original called cvtColor on the
  // whole image inside the inner loop — 361 redundant full-image conversions.
  Mat hsv;
  cvtColor(image, hsv, COLOR_BGR2HSV);
  for (int i = 0; i < boardSize; ++i) {
    for (int j = 0; j < boardSize; ++j) {
      // Lines are assumed axis-aligned after rectification, so the segment
      // midpoints give the crossing coordinate directly.
      Point2f intersection(
          (verticalLines[j].p1.x + verticalLines[j].p2.x) / 2.0f,
          (horizontalLines[i].p1.y + horizontalLines[i].p2.y) / 2.0f);
      if (intersection.x < 0 || intersection.x >= image.cols ||
          intersection.y < 0 || intersection.y >= image.rows)
        continue;
      // Sample the nearest pixel (explicit rounding instead of the
      // original's implicit float->int truncation).
      Vec3b centerColor = hsv.at<Vec3b>(cvRound(intersection.y),
                                        cvRound(intersection.x));
      int v = centerColor[2];
      int s = centerColor[1];
      if (v < 50 && s > 50) { // Black stone: dark pixel
        boardState[i][j] = 'B';
      } else if (v > 200 && s < 50) { // White stone: bright, unsaturated
        boardState[i][j] = 'W';
      }
    }
  }
  return boardState;
}
string boardStateToSGF(const vector<vector<char>> &boardState) {
  // Serialize a 19x19 board snapshot as a single-node SGF game record using
  // setup properties (AB/AW) rather than a move sequence.
  string blackProps;
  string whiteProps;
  for (int row = 0; row < 19; ++row) {
    for (int col = 0; col < 19; ++col) {
      const string coord = toSGFCoord(col, 18 - row); // Invert y-coordinate here
      if (coord.empty())
        continue;
      switch (boardState[row][col]) {
      case 'B':
        blackProps += "[" + coord + "]";
        break;
      case 'W':
        whiteProps += "[" + coord + "]";
        break;
      default:
        break; // empty intersection
      }
    }
  }
  string sgfContent = "(;FF[4]GM[1]SZ[19]";
  if (!blackProps.empty())
    sgfContent += "AB" + blackProps;
  if (!whiteProps.empty())
    sgfContent += "AW" + whiteProps;
  sgfContent += ")\n";
  return sgfContent;
}
int main() {
// Pipeline: load image -> detect board corners -> rectify perspective ->
// detect/select grid lines -> classify stones -> write SGF -> visualize.
string imageFile = "go_board.jpg";
Mat originalImage = imread(imageFile, IMREAD_COLOR);
if (originalImage.empty()) {
cerr << "Error: Could not open image." << endl;
return -1;
}
Mat processedImage;
vector<Point> corners = detectBoardCorners(originalImage, processedImage);
Mat warpedImage;
if (corners.size() == 4) {
warpedImage = correctPerspective(originalImage, corners);
imshow("Warped", warpedImage);
} else {
// Without 4 corners, continue on the raw image instead of aborting.
warpedImage = originalImage.clone();
cerr << "Perspective correction skipped." << endl;
}
vector<Line> horizontalLines, verticalLines;
detectGridLines(warpedImage, horizontalLines, verticalLines);
vector<Line> selectedHorz, selectedVert;
identifyGrid(horizontalLines, verticalLines, selectedHorz, selectedVert, warpedImage);
vector<vector<char>> board = detectStonesAtIntersections(warpedImage, selectedHorz, selectedVert);
string sgf = boardStateToSGF(board);
ofstream sgfFile("current_state.sgf");
if (sgfFile.is_open()) {
sgfFile << sgf;
cout << "SGF saved." << endl;
} else {
cerr << "Failed to save SGF." << endl;
}
// Visualization
Mat display = warpedImage.clone();
for (const auto &line : selectedHorz)
cv::line(display, line.p1, line.p2, Scalar(0, 255, 0), 1);
for (const auto &line : selectedVert)
cv::line(display, line.p1, line.p2, Scalar(0, 0, 255), 1);
imshow("Grid", display);
waitKey(0); // block until a key press so the debug windows stay visible
return 0;
}
Insufficient lines detected 以及调试图像的情况来看,问题或许出在图像预处理、霍夫变换参数设置或者边缘检测环节。下面为你详细分析可能的原因并给出相应的解决办法:GaussianBlur 可能将图像中的重要边缘信息模糊掉了,使得后续的边缘检测和直线检测难以进行。Canny 边缘检测和 adaptiveThreshold 的参数可能设置得不合理,导致检测到的边缘要么过多要么过少。HoughLinesP 的参数(如累加器阈值、最小线长、最大线间隙)可能设置得过高,使得只有很少的直线被检测到。GaussianBlur 的标准差,或者使用不同的模糊方法。Canny 边缘检测和 adaptiveThreshold 的参数,以获得更合适的边缘信息。// 减少模糊程度
GaussianBlur(gray, blurred, Size(3, 3), 1);
// 调整 Canny 边缘检测参数
Canny(blurred, edges, 20, 80);
// 调整自适应阈值参数
adaptiveThreshold(blurred, thresh, 255, ADAPTIVE_THRESH_GAUSSIAN_C,
THRESH_BINARY, 11, 2);
HoughLinesP 的累加器阈值,以检测更多的直线。// 降低累加器阈值
HoughLinesP(thresh, lines, 1, CV_PI/180, 30, 20, 20);
// 直方图均衡化
equalizeHist(gray, gray);
// 形态学开运算去除噪声
Mat kernel = getStructuringElement(MORPH_RECT, Size(3, 3));
morphologyEx(edges, edges, MORPH_OPEN, kernel);
vector<Point> detectBoardCorners(const Mat& image, Mat& processedImage) {
// Locate the four outer board corners: Hough-line intersections first,
// contour approximation as a fallback. `processedImage` receives a debug
// copy of the input with the chosen corners drawn on it.
Mat gray;
cvtColor(image, gray, COLOR_BGR2GRAY);
// Histogram equalization to boost contrast
equalizeHist(gray, gray);
// Mild blur to suppress noise without erasing grid edges
Mat blurred, edges;
GaussianBlur(gray, blurred, Size(3, 3), 1);
// Canny with lowered thresholds for more edges
Canny(blurred, edges, 20, 80);
// Morphological opening to remove small noise specks
Mat kernel = getStructuringElement(MORPH_RECT, Size(3, 3));
morphologyEx(edges, edges, MORPH_OPEN, kernel);
// Adaptive threshold as the input for the Hough transform
Mat thresh;
adaptiveThreshold(blurred, thresh, 255, ADAPTIVE_THRESH_GAUSSIAN_C,
THRESH_BINARY, 11, 2);
vector<Vec4i> lines;
// Lowered accumulator threshold so more lines are detected
HoughLinesP(thresh, lines, 1, CV_PI/180, 30, 20, 20);
// Debug: visualize every detected segment (blocks on waitKey)
Mat lineVisual = Mat::zeros(thresh.size(), CV_8UC3);
for (const auto& line : lines) {
cv::line(lineVisual, Point(line[0], line[1]),
Point(line[2], line[3]), Scalar(0,255,0), 2);
}
imshow("Detected Lines", lineVisual);
waitKey(0);
vector<Vec4i> horizontalLines, verticalLines;
const double angle_tolerance = CV_PI/6; // ~30 degrees tolerance
for (const auto& line : lines) {
float dx = line[2] - line[0];
float dy = line[3] - line[1];
float angle = atan2(dy, dx);
// Classify segments by orientation; diagonals are dropped
if (abs(angle) < angle_tolerance) {
horizontalLines.push_back(line);
} else if (abs(abs(angle) - CV_PI/2) < angle_tolerance) {
verticalLines.push_back(line);
}
}
// Bail out early when there are too few lines to work with
if (horizontalLines.size() < 5 || verticalLines.size() < 5) {
cerr << "Insufficient lines detected" << endl;
return {};
}
// Sort by midpoint and trim outliers on both ends
auto sortAndFilter = [](vector<Vec4i>& lines, bool horizontal) {
sort(lines.begin(), lines.end(), [horizontal](const Vec4i& a, const Vec4i& b) {
return horizontal ? ((a[1]+a[3])/2 < (b[1]+b[3])/2)
: ((a[0]+a[2])/2 < (b[0]+b[2])/2);
});
// Drop the outer sixths only, keeping most lines
if (lines.size() > 4) {
size_t q1 = lines.size() / 6; // keep more lines
size_t q3 = 5 * lines.size() / 6;
lines = vector<Vec4i>(lines.begin() + q1, lines.begin() + q3);
}
};
sortAndFilter(horizontalLines, true);
sortAndFilter(verticalLines, false);
// Candidate boundary lines, taken from the thirds of the sorted lists
Vec4i topH = horizontalLines[horizontalLines.size()/3];
Vec4i bottomH = horizontalLines[2*horizontalLines.size()/3];
Vec4i leftV = verticalLines[verticalLines.size()/3];
Vec4i rightV = verticalLines[2*verticalLines.size()/3];
// Intersection of two (extended) segments; (-1,-1) signals failure
auto safeIntersect = [&image](const Vec4i& l1, const Vec4i& l2) -> Point2f {
float x1 = l1[0], y1 = l1[1], x2 = l1[2], y2 = l1[3];
float x3 = l2[0], y3 = l2[1], x4 = l2[2], y4 = l2[3];
float det = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4);
if (abs(det) < 1e-6) return Point2f(-1, -1); // parallel lines
float t = ((x1 - x3) * (y3 - y4) - (y1 - y3) * (x3 - x4)) / det;
float u = -((x1 - x2) * (y1 - y3) - (y1 - y2) * (x1 - x3)) / det; // NOTE(review): u is computed but unused
Point2f result(x1 + t * (x2 - x1), y1 + t * (y2 - y1));
// Reject intersections that fall outside the image
if (result.x < 0 || result.y < 0 ||
result.x >= image.cols || result.y >= image.rows) {
return Point2f(-1, -1);
}
return result;
};
Point2f intersections[4] = {
safeIntersect(topH, leftV),
safeIntersect(topH, rightV),
safeIntersect(bottomH, rightV),
safeIntersect(bottomH, leftV)
};
// Keep the quadrilateral only if all four intersections are valid
vector<Point> corners;
for (auto& p : intersections) {
if (p.x < 0 || p.y < 0) {
corners.clear();
break;
}
corners.emplace_back(p);
}
// Fallback: if the Hough approach failed, use contour detection
if (corners.empty()) {
vector<vector<Point>> contours;
findContours(edges, contours, RETR_LIST,
CHAIN_APPROX_SIMPLE); // use all contours
// Find the largest 4-vertex polygon
double maxArea = 0;
for (auto &contour : contours) {
vector<Point> approx;
approxPolyDP(contour, approx, arcLength(contour, true) * 0.03,
true); // generous tolerance
if (approx.size() == 4) {
double area = contourArea(approx);
if (area > maxArea) {
maxArea = area;
corners = approx;
}
}
}
}
processedImage = image.clone();
for (const auto& p : corners) {
circle(processedImage, p, 10, Scalar(255, 0, 0), -1);
}
if (corners.size() != 4) {
cerr << "Board detection failed. Try: \n"
<< "1. Better lighting\n"
<< "2. Clearer board edges\n"
<< "3. Adjust Hough parameters in code\n";
}
return corners;
}
detectBoardCorners 函数,重新编译并运行程序,查看是否能够解决问题。要是问题依旧存在,你可以提供更多的图像信息或者错误日志,以便进一步分析。 detectBoardCorners函数:vector<Point> detectBoardCorners(const Mat& image, Mat& processedImage) {
Mat gray;
cvtColor(image, gray, COLOR_BGR2GRAY);
// Enhanced preprocessing
Mat blurred, edges;
GaussianBlur(gray, blurred, Size(5, 5), 2);
// Modify Canny and adaptive threshold parameters
Canny(blurred, edges, 30, 100); // Lower thresholds for more edges
Mat thresh;
adaptiveThreshold(blurred, thresh, 255, ADAPTIVE_THRESH_GAUSSIAN_C,
THRESH_BINARY, 15, 5); // Larger block size
vector<Vec4i> lines;
// Improved HoughLinesP parameters
HoughLinesP(thresh, lines, 1, CV_PI/180, 40, 30, 30);
// After HoughLinesP call:
Mat lineVisual = Mat::zeros(thresh.size(), CV_8UC3);
for (const auto& line : lines) {
cv::line(lineVisual, Point(line[0], line[1]),
Point(line[2], line[3]), Scalar(0,255,0), 2);
}
imshow("Detected Lines", lineVisual);
waitKey(0);
vector<Vec4i> horizontalLines, verticalLines;
const double angle_tolerance = CV_PI/6; // ~30 degrees tolerance
//const double angle_tolerance = CV_PI/8; // Increased tolerance
for (const auto& line : lines) {
float dx = line[2] - line[0];
float dy = line[3] - line[1];
float angle = atan2(dy, dx);
// Classify with wider tolerance
if (abs(angle) < angle_tolerance) {
horizontalLines.push_back(line);
} else if (abs(abs(angle) - CV_PI/2) < angle_tolerance) {
verticalLines.push_back(line);
}
}
// Require minimum lines for reliable detection
if (horizontalLines.size() < 5 || verticalLines.size() < 5) {
// 当检测到的直线不足时,将图像边缘当作棋盘边缘处理
vector<Point> corners;
corners.push_back(Point(0, 0));
corners.push_back(Point(image.cols - 1, 0));
corners.push_back(Point(image.cols - 1, image.rows - 1));
corners.push_back(Point(0, image.rows - 1));
processedImage = image.clone();
for (const auto& p : corners) {
circle(processedImage, p, 10, Scalar(255, 0, 0), -1);
}
return corners;
}
// Improved line sorting with outlier removal
auto sortAndFilter = [](vector<Vec4i>& lines, bool horizontal) {
sort(lines.begin(), lines.end(), [horizontal](const Vec4i& a, const Vec4i& b) {
return horizontal? ((a[1]+a[3])/2 < (b[1]+b[3])/2)
: ((a[0]+a[2])/2 < (b[0]+b[2])/2);
});
// Change outlier removal to keep more lines
if (lines.size() > 4) {
size_t q1 = lines.size() / 6; // Keep more lines
size_t q3 = 5 * lines.size() / 6;
lines = vector<Vec4i>(lines.begin() + q1, lines.begin() + q3);
}
};
sortAndFilter(horizontalLines, true);
sortAndFilter(verticalLines, false);
// Select candidate lines from middle of sorted lists
Vec4i topH = horizontalLines[horizontalLines.size()/3];
Vec4i bottomH = horizontalLines[2*horizontalLines.size()/3];
Vec4i leftV = verticalLines[verticalLines.size()/3];
Vec4i rightV = verticalLines[2*verticalLines.size()/3];
auto safeIntersect = [&image](const Vec4i& l1, const Vec4i& l2) -> Point2f {
float x1 = l1[0], y1 = l1[1], x2 = l1[2], y2 = l1[3];
float x3 = l2[0], y3 = l2[1], x4 = l2[2], y4 = l2[3];
float det = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4);
if (abs(det) < 1e-6) return Point2f(-1, -1); // Parallel
float t = ((x1 - x3) * (y3 - y4) - (y1 - y3) * (x3 - x4)) / det;
float u = -((x1 - x2) * (y1 - y3) - (y1 - y2) * (x1 - x3)) / det;
Point2f result(x1 + t * (x2 - x1), y1 + t * (y2 - y1));
// Add additional check for intersection quality
if (result.x < 0 || result.y < 0 ||
result.x >= image.cols || result.y >= image.rows) {
return Point2f(-1, -1);
}
return result;
};
Point2f intersections[4] = {
safeIntersect(topH, leftV),
safeIntersect(topH, rightV),
safeIntersect(bottomH, rightV),
safeIntersect(bottomH, leftV)
};
// Verify quadrilateral shape
vector<Point> corners;
for (auto& p : intersections) {
if (p.x < 0 || p.y < 0) {
corners.clear();
break;
}
corners.emplace_back(p);
}
// Fallback to contour detection if Hough fails
if (corners.empty()) {
vector<vector<Point>> contours;
// In contour fallback section:
findContours(edges, contours, RETR_LIST,
CHAIN_APPROX_SIMPLE); // Use all contours
// Find largest quadrilateral
double maxArea = 0;
for (auto &contour : contours) {
vector<Point> approx;
approxPolyDP(contour, approx, arcLength(contour, true) * 0.03,
true); // More tolerance
if (approx.size() == 4) {
double area = contourArea(approx);
if (area > maxArea) {
maxArea = area;
corners = approx;
}
}
}
}
processedImage = image.clone();
for (const auto& p : corners) {
circle(processedImage, p, 10, Scalar(255, 0, 0), -1);
}
if (corners.size() != 4) {
cerr << "Board detection failed. Try: \n"
<< "1. Better lighting\n"
<< "2. Clearer board edges\n"
<< "3. Adjust Hough parameters in code\n";
}
return corners;
}
horizontalLines.size() < 5 || verticalLines.size() < 5),直接将图像的四个角点作为棋盘的角点返回,不再执行后续复杂的直线处理和交点计算逻辑。这样,在假定图像边缘就是棋盘边缘的情况下,程序能够更稳定地获取棋盘角点。 #include <opencv2/opencv.hpp>
#include <vector>
#include <iostream>
using namespace cv;
using namespace std;
vector<Point> detectBoardCorners(const Mat& image, Mat& processedImage) {
// Simplified variant: assume the image edges ARE the board edges and return
// the four image corners directly, skipping all detection.
vector<Point> corners;
corners.push_back(Point(0, 0));
corners.push_back(Point(image.cols - 1, 0));
corners.push_back(Point(image.cols - 1, image.rows - 1));
corners.push_back(Point(0, image.rows - 1));
// Derive the 19x19 intersection grid from the image dimensions
// (19 lines -> 18 gaps per axis).
const int boardSize = 19;
float cellWidth = static_cast<float>(image.cols) / (boardSize - 1);
float cellHeight = static_cast<float>(image.rows) / (boardSize - 1);
vector<vector<Point>> intersectionPoints(boardSize, vector<Point>(boardSize));
for (int i = 0; i < boardSize; ++i) {
for (int j = 0; j < boardSize; ++j) {
intersectionPoints[i][j] = Point(static_cast<int>(j * cellWidth), static_cast<int>(i * cellHeight));
}
}
// Debug display of the computed intersections (blocks on waitKey);
// the grid itself is not returned — only the four corners are.
processedImage = image.clone();
for (const auto& row : intersectionPoints) {
for (const Point& p : row) {
circle(processedImage, p, 2, Scalar(255, 0, 0), -1);
}
}
imshow("Intersection Points", processedImage);
waitKey(0);
return corners;
}
int main() {
// Minimal driver: load an image, run corner detection, report the outcome.
Mat image = imread("your_image.jpg");
if (image.empty()) {
cerr << "Could not open or find the image" << endl;
return -1;
}
Mat processedImage;
vector<Point> corners = detectBoardCorners(image, processedImage);
if (corners.empty()) {
cerr << "Failed to detect board corners" << endl;
} else {
cout << "Board corners detected successfully" << endl;
}
return 0;
}
corners向量中。19x19的信息,计算每个交叉点的坐标。通过将图像的宽度和高度分别除以18(因为19条线有18个间隔),得到每个单元格的宽度和高度。然后根据单元格的宽度和高度计算每个交叉点的坐标,并存储在intersectionPoints二维向量中。imshow和waitKey来避免显示图像。detectBoardCorners函数,根据返回结果输出相应的信息。your_image.jpg替换为实际的图像文件名。如果在运行过程中遇到问题,可以检查图像路径是否正确,以及OpenCV库是否正确安装和配置。 #include <algorithm>
#include <cmath>
#include <fstream>
#include <iostream>
#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;
using namespace std;
// Convert 0-based board indices into an SGF (FF[4]) point coordinate.
// Returns "" for indices outside the 19x19 board.
string toSGFCoord(int x_index, int y_index) {
    // FIX: SGF point coordinates run contiguously 'a'..'s' on a 19x19 board
    // and do NOT skip the letter 'i' — that convention belongs to GTP /
    // on-screen coordinates. The previous version skipped 'i' for the
    // column, shifting every column with x_index >= 8 by one and producing
    // the out-of-range letter 't' for x_index == 18.
    if (x_index < 0 || x_index >= 19 || y_index < 0 || y_index >= 19) {
        return "";
    }
    char col = 'a' + x_index;
    char row = 'a' + y_index;
    return string(1, col) + string(1, row);
}
// Detect the four outer board corners via Hough line intersections, with a
// contour-based fallback. `processedImage` gets a debug copy with the
// chosen corners drawn.
vector<Point> detectBoardCorners(const Mat& image, Mat& processedImage) {
    Mat gray;
    cvtColor(image, gray, COLOR_BGR2GRAY);
    GaussianBlur(gray, gray, Size(5, 5), 0);
    // FIX: HoughLinesP expects a binary edge map; the previous version fed
    // it the blurred grayscale image directly, which produces meaningless
    // segments. Compute a Canny edge map once and reuse it for the
    // contour fallback below.
    Mat edges;
    Canny(gray, edges, 50, 150);
    vector<Vec4i> lines;
    HoughLinesP(edges, lines, 1, CV_PI / 180, 50, 30, 10);
    // Bucket segments into near-horizontal / near-vertical (18° tolerance);
    // diagonals are discarded.
    vector<Vec4i> horizontalLines, verticalLines;
    for (const auto& line : lines) {
        float angle = atan2(line[3] - line[1], line[2] - line[0]);
        if (abs(angle) < CV_PI / 10) {
            horizontalLines.push_back(line);
        }
        else if (abs(abs(angle) - CV_PI / 2) < CV_PI / 10) {
            verticalLines.push_back(line);
        }
    }
    // Order by segment midpoint so front()/back() are the extreme lines.
    sort(horizontalLines.begin(), horizontalLines.end(),
        [](const Vec4i& a, const Vec4i& b) {
            return (a[1] + a[3]) / 2 < (b[1] + b[3]) / 2;
        });
    sort(verticalLines.begin(), verticalLines.end(),
        [](const Vec4i& a, const Vec4i& b) {
            return (a[0] + a[2]) / 2 < (b[0] + b[2]) / 2;
        });
    Point2f intersections[4];
    bool foundAll = false;
    if (horizontalLines.size() >= 2 && verticalLines.size() >= 2) {
        Vec4i topH = horizontalLines.front();
        Vec4i bottomH = horizontalLines.back();
        Vec4i leftV = verticalLines.front();
        Vec4i rightV = verticalLines.back();
        // Infinite-line intersection of two segments; (-1,-1) marks parallel.
        auto intersect = [](const Vec4i& l1, const Vec4i& l2) -> Point2f {
            float x1 = l1[0], y1 = l1[1], x2 = l1[2], y2 = l1[3];
            float x3 = l2[0], y3 = l2[1], x4 = l2[2], y4 = l2[3];
            float det = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4);
            if (abs(det) < 1e-6)
                return Point2f(-1, -1); // parallel lines
            float t = ((x1 - x3) * (y3 - y4) - (y1 - y3) * (x3 - x4)) / det;
            // The extended-line intersection is used whether or not it falls
            // inside either segment (original behaviour).
            return Point2f(x1 + t * (x2 - x1), y1 + t * (y2 - y1));
        };
        intersections[0] = intersect(topH, leftV); // top-left
        intersections[1] = intersect(topH, rightV); // top-right
        intersections[2] = intersect(bottomH, rightV); // bottom-right
        intersections[3] = intersect(bottomH, leftV); // bottom-left
        // Accept only if every corner lies inside the image.
        bool allValid = true;
        for (const auto& p : intersections) {
            if (p.x < 0 || p.y < 0 || p.x >= image.cols || p.y >= image.rows) {
                allValid = false;
                break;
            }
        }
        foundAll = allValid;
    }
    vector<Point> selected_corners;
    if (foundAll) {
        selected_corners = { Point(intersections[0]), Point(intersections[1]),
                             Point(intersections[2]), Point(intersections[3]) };
    }
    else {
        // Fallback: look for a large 4-vertex contour in the edge map.
        // NOTE(review): raw contours rarely have exactly 4 points; running
        // approxPolyDP first (as other variants in this file do) would make
        // this fallback far more likely to fire.
        vector<vector<Point>> contours;
        findContours(edges, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
        for (const auto& contour : contours) {
            if (contour.size() == 4) {
                double area = contourArea(contour);
                if (area > 1000) { // area threshold; tune for the input scale
                    selected_corners = contour;
                    break;
                }
            }
        }
        if (selected_corners.empty()) {
            cerr << "Error: Could not find reliable board corners." << endl;
        }
    }
    // Debug overlay: mark the chosen corners on a copy of the input.
    processedImage = image.clone();
    for (const auto& p : selected_corners) {
        circle(processedImage, p, 10, Scalar(255, 0, 0), -1);
    }
    return selected_corners;
}
// Rectify the board: map the four detected corners onto a 500x500 square.
Mat correctPerspective(const Mat& image, const vector<Point>& corners) {
    // Without exactly four corners there is nothing to rectify.
    if (corners.size() != 4) {
        cerr << "Error: Need 4 corners for perspective correction." << endl;
        return image.clone();
    }
    const float side = 500.0;
    Point2f target_corners[4];
    target_corners[0] = Point2f(0, 0);
    target_corners[1] = Point2f(side - 1, 0);
    target_corners[2] = Point2f(side - 1, side - 1);
    target_corners[3] = Point2f(0, side - 1);
    Point2f src_corners_array[4];
    int idx = 0;
    for (const Point& c : corners) {
        src_corners_array[idx++] = Point2f(c);
    }
    // Wrap the point arrays for findHomography (exact 4-point solve).
    Mat src_points(1, 4, CV_32FC2, src_corners_array);
    Mat dst_points(1, 4, CV_32FC2, target_corners);
    Mat homography = findHomography(src_points, dst_points, 0);
    Mat warped_image;
    warpPerspective(image, warped_image, homography, Size(side, side));
    return warped_image;
}
// 检测网格线
void detectGridLines(const Mat& image, vector<Vec4i>& horizontalLines, vector<Vec4i>& verticalLines) {
Mat gray, blurred, edges;
cvtColor(image, gray, COLOR_BGR2GRAY);
GaussianBlur(gray, blurred, Size(5, 5), 0);
Canny(blurred, edges, 50, 150);
HoughLinesP(edges, horizontalLines, 1, CV_PI / 180, 50, 30, 10);
verticalLines.clear();
for (const auto& line : horizontalLines) {
float angle = atan2(line[3] - line[1], line[2] - line[0]);
if (abs(abs(angle) - CV_PI / 2) < CV_PI / 10) {
verticalLines.push_back(line);
}
}
horizontalLines.erase(remove_if(horizontalLines.begin(), horizontalLines.end(),
[](const Vec4i& line) {
float angle = atan2(line[3] - line[1], line[2] - line[0]);
return abs(abs(angle) - CV_PI / 2) < CV_PI / 10;
}),
horizontalLines.end());
}
// Select up to 19 grid lines per direction, enforcing a minimum spacing
// derived from the span of the detected lines.
void identifyGrid(vector<Vec4i>& horizontalLines, vector<Vec4i>& verticalLines,
    vector<Vec4i>& selectedHorizontal, vector<Vec4i>& selectedVertical) {
    selectedHorizontal.clear();
    selectedVertical.clear();
    // FIX: front()/back() below are undefined behaviour on an empty vector;
    // the original crashed when either direction had no detected lines.
    if (horizontalLines.empty() || verticalLines.empty()) {
        cerr << "identifyGrid: no candidate lines to select from." << endl;
        return;
    }
    // Sort by segment midpoint (y for horizontal, x for vertical).
    sort(horizontalLines.begin(), horizontalLines.end(),
        [](const Vec4i& a, const Vec4i& b) {
            return (a[1] + a[3]) / 2 < (b[1] + b[3]) / 2;
        });
    sort(verticalLines.begin(), verticalLines.end(),
        [](const Vec4i& a, const Vec4i& b) {
            return (a[0] + a[2]) / 2 < (b[0] + b[2]) / 2;
        });
    // Expected spacing if exactly 19 lines (18 gaps) span the detected range.
    float avgHorizontalSpacing = (horizontalLines.back()[1] + horizontalLines.back()[3]) / 2 -
        (horizontalLines.front()[1] + horizontalLines.front()[3]) / 2;
    avgHorizontalSpacing /= 18;
    float avgVerticalSpacing = (verticalLines.back()[0] + verticalLines.back()[2]) / 2 -
        (verticalLines.front()[0] + verticalLines.front()[2]) / 2;
    avgVerticalSpacing /= 18;
    // Greedy selection: keep a line when it is at least 70% of the expected
    // spacing away from the previously kept one, capped at 19 lines.
    selectedHorizontal.push_back(horizontalLines.front());
    selectedVertical.push_back(verticalLines.front());
    for (size_t i = 1; i < horizontalLines.size(); ++i) {
        float currentY = (horizontalLines[i][1] + horizontalLines[i][3]) / 2;
        float lastY = (selectedHorizontal.back()[1] + selectedHorizontal.back()[3]) / 2;
        if (abs(currentY - lastY) >= avgHorizontalSpacing * 0.7 && selectedHorizontal.size() < 19) {
            selectedHorizontal.push_back(horizontalLines[i]);
        }
    }
    for (size_t i = 1; i < verticalLines.size(); ++i) {
        float currentX = (verticalLines[i][0] + verticalLines[i][2]) / 2;
        float lastX = (selectedVertical.back()[0] + selectedVertical.back()[2]) / 2;
        if (abs(currentX - lastX) >= avgVerticalSpacing * 0.7 && selectedVertical.size() < 19) {
            selectedVertical.push_back(verticalLines[i]);
        }
    }
}
// Classify each 19x19 grid intersection as black, white or empty by
// sampling HSV value/saturation at the crossing point.
vector<vector<char>> detectStonesAtIntersections(const Mat& image,
const vector<Vec4i>& horizontalLines,
const vector<Vec4i>& verticalLines) {
int boardSize = 19;
vector<vector<char>> boardState(boardSize, vector<char>(boardSize, '.'));
// Need a full 19x19 grid before any stones can be located.
if (horizontalLines.size() < boardSize || verticalLines.size() < boardSize) {
cerr << "Not enough grid lines detected." << endl;
return boardState;
}
// Single HSV conversion, reused for every sample below.
Mat hsv;
cvtColor(image, hsv, COLOR_BGR2HSV);
for (int i = 0; i < boardSize; ++i) {
for (int j = 0; j < boardSize; ++j) {
// Lines are assumed axis-aligned, so midpoints give the crossing point.
Point2f intersection(
(verticalLines[j][0] + verticalLines[j][2]) / 2.0f,
(horizontalLines[i][1] + horizontalLines[i][3]) / 2.0f);
if (intersection.x < 0 || intersection.x >= image.cols ||
intersection.y < 0 || intersection.y >= image.rows)
continue;
// Implicit float->int truncation picks the containing pixel.
Vec3b centerColor = hsv.at<Vec3b>(intersection.y, intersection.x);
int v = centerColor[2];
int s = centerColor[1];
if (v < 80 && s > 30) { // black stone: dark pixel
boardState[i][j] = 'B';
}
else if (v > 180 && s < 80) { // white stone: bright, unsaturated pixel
boardState[i][j] = 'W';
}
}
}
return boardState;
}
// Serialize the detected position as a minimal SGF (FF[4]) record using
// AB/AW setup properties.
string boardStateToSGF(const vector<vector<char>>& boardState) {
    string blackProps;
    string whiteProps;
    for (int row = 0; row < 19; ++row) {
        for (int col = 0; col < 19; ++col) {
            const string coord = toSGFCoord(col, 18 - row); // flip y for SGF
            if (coord.empty())
                continue;
            const char stone = boardState[row][col];
            if (stone == 'B')
                blackProps += "[" + coord + "]";
            else if (stone == 'W')
                whiteProps += "[" + coord + "]";
        }
    }
    string sgfContent = "(;FF[4]GM[1]SZ[19]";
    if (!blackProps.empty())
        sgfContent += "AB" + blackProps;
    if (!whiteProps.empty())
        sgfContent += "AW" + whiteProps;
    sgfContent += ")";
    return sgfContent;
}
int main() {
// Pipeline: load image -> detect corners -> rectify -> detect/select grid
// lines -> classify stones -> write the position to an SGF file.
string imageFile = "go_board.jpg";
Mat originalImage = imread(imageFile, IMREAD_COLOR);
if (originalImage.empty()) {
cerr << "Error: Could not open image." << endl;
return -1;
}
Mat processedImage;
vector<Point> corners = detectBoardCorners(originalImage, processedImage);
Mat warpedImage;
if (corners.size() == 4) {
warpedImage = correctPerspective(originalImage, corners);
}
else {
// Without 4 corners, continue on the raw image instead of aborting.
warpedImage = originalImage.clone();
cerr << "Perspective correction skipped." << endl;
}
vector<Vec4i> horizontalLines, verticalLines;
detectGridLines(warpedImage, horizontalLines, verticalLines);
vector<Vec4i> selectedHorz, selectedVert;
identifyGrid(horizontalLines, verticalLines, selectedHorz, selectedVert);
vector<vector<char>> board = detectStonesAtIntersections(warpedImage, selectedHorz, selectedVert);
string sgf = boardStateToSGF(board);
ofstream sgfFile("current_state.sgf");
if (sgfFile.is_open()) {
sgfFile << sgf;
cout << "SGF saved." << endl;
}
else {
cerr << "Failed to save SGF." << endl;
}
return 0;
}
detectBoardCorners函数先用霍夫变换检测棋盘角点,若失败则尝试通过轮廓检测寻找四边形轮廓作为角点。correctPerspective函数利用找到的角点进行透视变换,将棋盘图像校正为正视图。detectGridLines函数检测图像中的水平和垂直网格线,identifyGrid函数从检测到的线中筛选出19条水平和19条垂直的网格线。detectStonesAtIntersections函数通过HSV颜色空间判断网格交叉点处是否有棋子,并区分黑白子。boardStateToSGF函数将检测到的棋子状态转换为SGF格式字符串,并保存到文件中。go_board.jpg替换为实际的围棋棋盘图像路径即可。 examine_position() to gather information about the board, identify groups of stones, and analyze life - death situations. But these functions operate on the internal data representation of the board state, not on image data.#include <algorithm>
#include <cmath>
#include <fstream>
#include <iostream>
#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;
using namespace std;
// Convert 0-based board indices into an SGF (FF[4]) point coordinate.
// Returns "" for indices outside the 19x19 board.
string toSGFCoord(int x_index, int y_index) {
    // FIX: SGF point coordinates are the contiguous letters 'a'..'s' on a
    // 19x19 board; 'i' is NOT skipped (that is a GTP/display convention).
    // The previous version skipped 'i' for the column, shifting columns
    // x_index >= 8 and emitting the invalid letter 't' for x_index == 18.
    if (x_index < 0 || x_index >= 19 || y_index < 0 || y_index >= 19) {
        return "";
    }
    char col = 'a' + x_index;
    char row = 'a' + y_index;
    return string(1, col) + string(1, row);
}
// Detect the four outer board corners: contrast-enhanced edges -> Hough
// lines -> extreme-line intersections, with a contour-based fallback.
vector<Point> detectBoardCorners(const Mat& image, Mat& processedImage) {
    Mat gray;
    cvtColor(image, gray, COLOR_BGR2GRAY);
    GaussianBlur(gray, gray, Size(5, 5), 0);
    // Histogram equalization boosts contrast before edge extraction.
    equalizeHist(gray, gray);
    // FIX: HoughLinesP requires a binary edge map; the previous version ran
    // it on the equalized grayscale image directly. Compute the edge map
    // once and reuse it for the contour fallback below.
    Mat edges;
    Canny(gray, edges, 50, 150);
    vector<Vec4i> lines;
    HoughLinesP(edges, lines, 1, CV_PI / 180, 50, 30, 10);
    // Bucket segments into near-horizontal / near-vertical (18° tolerance).
    vector<Vec4i> horizontalLines, verticalLines;
    for (const auto& line : lines) {
        float angle = atan2(line[3] - line[1], line[2] - line[0]);
        if (abs(angle) < CV_PI / 10) {
            horizontalLines.push_back(line);
        }
        else if (abs(abs(angle) - CV_PI / 2) < CV_PI / 10) {
            verticalLines.push_back(line);
        }
    }
    // Order by segment midpoint so front()/back() are the extreme lines.
    sort(horizontalLines.begin(), horizontalLines.end(),
        [](const Vec4i& a, const Vec4i& b) {
            return (a[1] + a[3]) / 2 < (b[1] + b[3]) / 2;
        });
    sort(verticalLines.begin(), verticalLines.end(),
        [](const Vec4i& a, const Vec4i& b) {
            return (a[0] + a[2]) / 2 < (b[0] + b[2]) / 2;
        });
    Point2f intersections[4];
    bool foundAll = false;
    if (horizontalLines.size() >= 2 && verticalLines.size() >= 2) {
        Vec4i topH = horizontalLines.front();
        Vec4i bottomH = horizontalLines.back();
        Vec4i leftV = verticalLines.front();
        Vec4i rightV = verticalLines.back();
        // Infinite-line intersection of two segments; (-1,-1) marks parallel.
        auto intersect = [](const Vec4i& l1, const Vec4i& l2) -> Point2f {
            float x1 = l1[0], y1 = l1[1], x2 = l1[2], y2 = l1[3];
            float x3 = l2[0], y3 = l2[1], x4 = l2[2], y4 = l2[3];
            float det = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4);
            if (abs(det) < 1e-6)
                return Point2f(-1, -1); // parallel lines
            float t = ((x1 - x3) * (y3 - y4) - (y1 - y3) * (x3 - x4)) / det;
            // Extended-line intersection, used whether or not it falls
            // inside either segment (original behaviour).
            return Point2f(x1 + t * (x2 - x1), y1 + t * (y2 - y1));
        };
        intersections[0] = intersect(topH, leftV); // top-left
        intersections[1] = intersect(topH, rightV); // top-right
        intersections[2] = intersect(bottomH, rightV); // bottom-right
        intersections[3] = intersect(bottomH, leftV); // bottom-left
        // Accept only if every corner lies inside the image.
        bool allValid = true;
        for (const auto& p : intersections) {
            if (p.x < 0 || p.y < 0 || p.x >= image.cols || p.y >= image.rows) {
                allValid = false;
                break;
            }
        }
        foundAll = allValid;
    }
    vector<Point> selected_corners;
    if (foundAll) {
        selected_corners = { Point(intersections[0]), Point(intersections[1]),
                             Point(intersections[2]), Point(intersections[3]) };
    }
    else {
        // Fallback: close small gaps in the edge map, then look for a large
        // 4-vertex contour.
        Mat closed;
        Mat kernel = getStructuringElement(MORPH_RECT, Size(3, 3));
        morphologyEx(edges, closed, MORPH_CLOSE, kernel);
        vector<vector<Point>> contours;
        findContours(closed, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
        for (const auto& contour : contours) {
            if (contour.size() == 4) {
                double area = contourArea(contour);
                if (area > 1000) { // area threshold; tune for the input scale
                    selected_corners = contour;
                    break;
                }
            }
        }
        if (selected_corners.empty()) {
            cerr << "Error: Could not find reliable board corners." << endl;
        }
    }
    // Debug overlay: mark the chosen corners on a copy of the input.
    processedImage = image.clone();
    for (const auto& p : selected_corners) {
        circle(processedImage, p, 10, Scalar(255, 0, 0), -1);
    }
    return selected_corners;
}
// Rectify the board image: map the four detected corners onto an
// axis-aligned 500x500 square via a homography.
Mat correctPerspective(const Mat& image, const vector<Point>& corners) {
    if (corners.size() != 4) {
        // Nothing sensible to do without exactly four corners.
        cerr << "Error: Need 4 corners for perspective correction." << endl;
        return image.clone();
    }
    const float side = 500.0;
    // Destination square in the same TL, TR, BR, BL order as the input.
    Point2f target_corners[4] = { Point2f(0, 0),
                                  Point2f(side - 1, 0),
                                  Point2f(side - 1, side - 1),
                                  Point2f(0, side - 1) };
    Point2f src_corners_array[4];
    int k = 0;
    for (const Point& c : corners)
        src_corners_array[k++] = Point2f(c);
    Mat src_points(1, 4, CV_32FC2, src_corners_array);
    Mat dst_points(1, 4, CV_32FC2, target_corners);
    Mat homography = findHomography(src_points, dst_points, 0);
    Mat warped_image;
    warpPerspective(image, warped_image, homography, Size(side, side));
    return warped_image;
}
// 检测网格线
void detectGridLines(const Mat& image, vector<Vec4i>& horizontalLines, vector<Vec4i>& verticalLines) {
Mat gray, blurred, edges;
cvtColor(image, gray, COLOR_BGR2GRAY);
GaussianBlur(gray, blurred, Size(5, 5), 0);
Canny(blurred, edges, 50, 150);
HoughLinesP(edges, horizontalLines, 1, CV_PI / 180, 50, 30, 10);
verticalLines.clear();
for (const auto& line : horizontalLines) {
float angle = atan2(line[3] - line[1], line[2] - line[0]);
if (abs(abs(angle) - CV_PI / 2) < CV_PI / 10) {
verticalLines.push_back(line);
}
}
horizontalLines.erase(remove_if(horizontalLines.begin(), horizontalLines.end(),
[](const Vec4i& line) {
float angle = atan2(line[3] - line[1], line[2] - line[0]);
return abs(abs(angle) - CV_PI / 2) < CV_PI / 10;
}),
horizontalLines.end());
}
// Select up to 19 grid lines per direction, enforcing a minimum spacing
// derived from the span of the detected lines.
void identifyGrid(vector<Vec4i>& horizontalLines, vector<Vec4i>& verticalLines,
    vector<Vec4i>& selectedHorizontal, vector<Vec4i>& selectedVertical) {
    selectedHorizontal.clear();
    selectedVertical.clear();
    // FIX: front()/back() below are undefined behaviour on an empty vector;
    // the original crashed when either direction had no detected lines.
    if (horizontalLines.empty() || verticalLines.empty()) {
        cerr << "identifyGrid: no candidate lines to select from." << endl;
        return;
    }
    // Sort by segment midpoint (y for horizontal, x for vertical).
    sort(horizontalLines.begin(), horizontalLines.end(),
        [](const Vec4i& a, const Vec4i& b) {
            return (a[1] + a[3]) / 2 < (b[1] + b[3]) / 2;
        });
    sort(verticalLines.begin(), verticalLines.end(),
        [](const Vec4i& a, const Vec4i& b) {
            return (a[0] + a[2]) / 2 < (b[0] + b[2]) / 2;
        });
    // Expected spacing if exactly 19 lines (18 gaps) span the detected range.
    float avgHorizontalSpacing = (horizontalLines.back()[1] + horizontalLines.back()[3]) / 2 -
        (horizontalLines.front()[1] + horizontalLines.front()[3]) / 2;
    avgHorizontalSpacing /= 18;
    float avgVerticalSpacing = (verticalLines.back()[0] + verticalLines.back()[2]) / 2 -
        (verticalLines.front()[0] + verticalLines.front()[2]) / 2;
    avgVerticalSpacing /= 18;
    // Greedy selection: keep a line when it is at least 70% of the expected
    // spacing away from the previously kept one, capped at 19 lines.
    selectedHorizontal.push_back(horizontalLines.front());
    selectedVertical.push_back(verticalLines.front());
    for (size_t i = 1; i < horizontalLines.size(); ++i) {
        float currentY = (horizontalLines[i][1] + horizontalLines[i][3]) / 2;
        float lastY = (selectedHorizontal.back()[1] + selectedHorizontal.back()[3]) / 2;
        if (abs(currentY - lastY) >= avgHorizontalSpacing * 0.7 && selectedHorizontal.size() < 19) {
            selectedHorizontal.push_back(horizontalLines[i]);
        }
    }
    for (size_t i = 1; i < verticalLines.size(); ++i) {
        float currentX = (verticalLines[i][0] + verticalLines[i][2]) / 2;
        float lastX = (selectedVertical.back()[0] + selectedVertical.back()[2]) / 2;
        if (abs(currentX - lastX) >= avgVerticalSpacing * 0.7 && selectedVertical.size() < 19) {
            selectedVertical.push_back(verticalLines[i]);
        }
    }
}
// Classify each 19x19 grid intersection as black, white or empty by
// sampling HSV value/saturation at the crossing point.
vector<vector<char>> detectStonesAtIntersections(const Mat& image,
const vector<Vec4i>& horizontalLines,
const vector<Vec4i>& verticalLines) {
int boardSize = 19;
vector<vector<char>> boardState(boardSize, vector<char>(boardSize, '.'));
// A full 19x19 grid is required before any stones can be located.
if (horizontalLines.size() < boardSize || verticalLines.size() < boardSize) {
cerr << "Not enough grid lines detected." << endl;
return boardState;
}
// Single HSV conversion, reused for every sample below.
Mat hsv;
cvtColor(image, hsv, COLOR_BGR2HSV);
for (int i = 0; i < boardSize; ++i) {
for (int j = 0; j < boardSize; ++j) {
// Lines are assumed axis-aligned, so midpoints give the crossing point.
Point2f intersection(
(verticalLines[j][0] + verticalLines[j][2]) / 2.0f,
(horizontalLines[i][1] + horizontalLines[i][3]) / 2.0f);
if (intersection.x < 0 || intersection.x >= image.cols ||
intersection.y < 0 || intersection.y >= image.rows)
continue;
// Implicit float->int truncation picks the containing pixel.
Vec3b centerColor = hsv.at<Vec3b>(intersection.y, intersection.x);
int v = centerColor[2];
int s = centerColor[1];
if (v < 80 && s > 30) { // black stone: dark pixel
boardState[i][j] = 'B';
}
else if (v > 180 && s < 80) { // white stone: bright, unsaturated pixel
boardState[i][j] = 'W';
}
}
}
return boardState;
}
// Serialize a detected board position as a minimal SGF (FF[4]) string.
//
// Stones are emitted as setup properties: AB (add black) and AW (add
// white), each property listing one "[xy]" coordinate per stone.
// NOTE(review): the row flip (18 - y) assumes toSGFCoord's vertical axis
// runs opposite to the image's row index — confirm against toSGFCoord.
//
// Parameters:
//   boardState - 19x19 matrix holding 'B', 'W', or '.' per point.
// Returns: the complete SGF document as a single string.
string boardStateToSGF(const vector<vector<char>>& boardState) {
    string blackProps;
    string whiteProps;
    for (int row = 0; row < 19; ++row) {
        for (int col = 0; col < 19; ++col) {
            string coord = toSGFCoord(col, 18 - row);
            if (coord.empty())
                continue;
            char stone = boardState[row][col];
            if (stone == 'B')
                blackProps += "[" + coord + "]";
            else if (stone == 'W')
                whiteProps += "[" + coord + "]";
        }
    }
    // Assemble header plus whichever setup properties are non-empty.
    string sgfContent = "(;FF[4]GM[1]SZ[19]";
    if (!blackProps.empty())
        sgfContent += "AB" + blackProps;
    if (!whiteProps.empty())
        sgfContent += "AW" + whiteProps;
    sgfContent += ")";
    return sgfContent;
}
int main() {
string imageFile = "go_board.jpg";
Mat originalImage = imread(imageFile, IMREAD_COLOR);
if (originalImage.empty()) {
cerr << "Error: Could not open image." << endl;
return -1;
}
Mat processedImage;
vector<Point> corners = detectBoardCorners(originalImage, processedImage);
Mat warpedImage;
if (corners.size() == 4) {
warpedImage = correctPerspective(originalImage, corners);
}
else {
warpedImage = originalImage.clone();
cerr << "Perspective correction skipped." << endl;
}
vector<Vec4i> horizontalLines, verticalLines;
detectGridLines(warpedImage, horizontalLines, verticalLines);
vector<Vec4i> selectedHorz, selectedVert;
identifyGrid(horizontalLines, verticalLines, selectedHorz, selectedVert);
vector<vector<char>> board = detectStonesAtIntersections(warpedImage, selectedHorz, selectedVert);
string sgf = boardStateToSGF(board);
ofstream sgfFile("current_state.s