// WaterMarkDCT: blind DCT-domain image watermarking utilities (OpenCvSharp port of a Java/OpenCV implementation).
using System.Collections.Generic;
using OpenCvSharp;

namespace Com.Lmc.ShuiYin.Two.Utils
{
public class WaterMarkDCT
|
|
{
|
|
private static readonly double P = 65;
|
|
|
|
public static void Embed(string imagePath, string watermarkPath, string outputPath)
|
|
{
|
|
Mat originaleImage = Cv2.ImRead(imagePath);
|
|
List<Mat> allPlanes = new List<Mat>();
|
|
Cv2.Split(originaleImage, out Mat[] splitPlanes);
|
|
allPlanes.AddRange(splitPlanes);
|
|
Mat YMat = allPlanes[0];
|
|
|
|
int[][] watermark = ImageToMatrix(watermarkPath);
|
|
int length = 8;
|
|
|
|
for (int i = 0; i < watermark.Length; i++)
|
|
{
|
|
for (int j = 0; j < watermark[0].Length; j++)
|
|
{
|
|
Mat block = GetImageValue(YMat, i, j, length);
|
|
|
|
int x1 = 1, y1 = 2;
|
|
int x2 = 2, y2 = 1;
|
|
|
|
// In Java: double[] a = block.get(x1, y1);
|
|
// OpenCvSharp Get returns struct/value.
|
|
// block is CV_32F (float) based on GetImageValue.
|
|
|
|
Cv2.Dct(block, block);
|
|
|
|
if (watermark[i][j] == 1)
|
|
{
|
|
block.Set(x1, y1, (float)P);
|
|
block.Set(x2, y2, (float)-P);
|
|
}
|
|
if (watermark[i][j] == 0)
|
|
{
|
|
block.Set(x1, y1, (float)-P);
|
|
block.Set(x2, y2, (float)P);
|
|
}
|
|
|
|
Cv2.Idct(block, block);
|
|
|
|
for (int m = 0; m < length; m++)
|
|
{
|
|
for (int t = 0; t < length; t++)
|
|
{
|
|
// In Java: double[] e = block.get(m, t);
|
|
// YMat.put(i * length + m, j * length + t, e);
|
|
float e = block.At<float>(m, t);
|
|
// YMat is likely CV_8U from imread, but GetImageValue reads it.
|
|
// If YMat is CV_8U, we can't put float P (65) directly if it overflows or logic expects float processing.
|
|
// But Java code: `Mat YMat = allPlanes.get(0);` (CV_8U usually).
|
|
// `Mat block = getImageValue(YMat...);` creates new Mat CV_32F.
|
|
// Then `YMat.put` writes back. If YMat is 8U, putting float might be truncated or requires YMat to be 32F.
|
|
// However, Java OpenCV `put` handles conversion?
|
|
// Let's assume YMat should be converted to float if we are doing this, OR we cast to byte.
|
|
// But P=65 is additive? No, it replaces values.
|
|
// Wait, `block.get(x1, y1)` returns the DCT coefficient.
|
|
// The DCT is done on 32F block.
|
|
// Then IDCT.
|
|
// Then we put back to YMat.
|
|
// If YMat is 8U, we are putting float values.
|
|
// In OpenCvSharp: `YMat.Set(row, col, value)`.
|
|
// If YMat is 8U, Set will cast.
|
|
YMat.Set(i * length + m, j * length + t, (byte)e); // Cast to byte? Or keep as double/float if YMat was converted?
|
|
// Java code didn't convert YMat to 32F explicitly in Embed method, unlike DFT.
|
|
// But `getImageValue` creates a 32F mat from YMat values.
|
|
// `YMat.put(...)` in Java takes double array.
|
|
// If YMat is 8U, it sets the byte value.
|
|
// So casting to byte is correct.
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
Mat imageOut = new Mat();
|
|
Cv2.Merge(allPlanes.ToArray(), imageOut);
|
|
Cv2.ImWrite(outputPath, imageOut);
|
|
}
|
|
|
|
public static void Extract(string targetImage, string outputWatermark)
|
|
{
|
|
Mat image = Cv2.ImRead(targetImage);
|
|
List<Mat> allPlanes = new List<Mat>();
|
|
Cv2.Split(image, out Mat[] splitPlanes);
|
|
allPlanes.AddRange(splitPlanes);
|
|
Mat YMat = allPlanes[0];
|
|
|
|
int rows = 100;
|
|
int cols = 100;
|
|
int length = 8;
|
|
int[][] watermark = new int[rows][];
|
|
for (int k = 0; k < rows; k++) watermark[k] = new int[cols];
|
|
|
|
for (int i = 0; i < rows; i++)
|
|
{
|
|
for (int j = 0; j < cols; j++)
|
|
{
|
|
Mat block = GetImageValue(YMat, i, j, length);
|
|
Cv2.Dct(block, block);
|
|
|
|
int x1 = 1, y1 = 2;
|
|
int x2 = 2, y2 = 1;
|
|
|
|
float a = block.At<float>(x1, y1);
|
|
float c = block.At<float>(x2, y2);
|
|
|
|
if (a >= c)
|
|
{
|
|
watermark[i][j] = 1;
|
|
}
|
|
}
|
|
}
|
|
MatrixToImage(watermark, outputWatermark);
|
|
}
|
|
|
|
private static Mat GetImageValue(Mat YMat, int x, int y, int length)
|
|
{
|
|
Mat mat = new Mat(length, length, MatType.CV_32F);
|
|
for (int i = 0; i < length; i++)
|
|
{
|
|
for (int j = 0; j < length; j++)
|
|
{
|
|
// Java: double[] temp = YMat.get(x * length + i, y * length + j);
|
|
// mat.put(i, j, temp);
|
|
// If YMat is 8U, At<byte>
|
|
byte val = YMat.At<byte>(x * length + i, y * length + j);
|
|
mat.Set(i, j, (float)val);
|
|
}
|
|
}
|
|
return mat;
|
|
}
|
|
|
|
private static void MatrixToImage(int[][] watermark, string dstPath)
|
|
{
|
|
int rows = watermark.Length;
|
|
int columns = watermark[0].Length;
|
|
Mat image = new Mat(rows, columns, MatType.CV_8U); // THRESH_BINARY usually on 8U
|
|
for (int i = 0; i < rows; i++)
|
|
{
|
|
for (int j = 0; j < columns; j++)
|
|
{
|
|
if (watermark[i][j] == 1)
|
|
{
|
|
image.Set(i, j, (byte)255);
|
|
}
|
|
else
|
|
{
|
|
image.Set(i, j, (byte)0);
|
|
}
|
|
}
|
|
}
|
|
Cv2.ImWrite(dstPath, image);
|
|
}
|
|
|
|
private static int[][] ImageToMatrix(string srcPath)
|
|
{
|
|
Mat mat = Cv2.ImRead(srcPath, ImreadModes.Grayscale); // Java: THRESH_BINARY is not a load flag, it's a thresh type. But here passed to imread?
|
|
// Java: imread(srcPath, Imgproc.THRESH_BINARY).
|
|
// THRESH_BINARY is 0. CV_LOAD_IMAGE_GRAYSCALE is 0.
|
|
// So they are loading as grayscale.
|
|
|
|
int rows = mat.Rows;
|
|
int columns = mat.Cols;
|
|
int[][] waterMark = new int[rows][];
|
|
for(int k=0; k<rows; k++) waterMark[k] = new int[columns];
|
|
|
|
for (int i = 0; i < rows; i++)
|
|
{
|
|
for (int j = 0; j < columns; j++)
|
|
{
|
|
// double[] doubles = mat.get(i, j);
|
|
byte val = mat.At<byte>(i, j);
|
|
if (val == 255)
|
|
{
|
|
waterMark[i][j] = 1;
|
|
}
|
|
else
|
|
{
|
|
waterMark[i][j] = 0;
|
|
}
|
|
}
|
|
}
|
|
return waterMark;
|
|
}
|
|
}
}