Java class org.opencv.core.Size: example source code

Project: Android-Code-Demos    File: CameraBridgeViewBase.java
/**
 * This helper method can be called by subclasses to select the camera preview size.
 * It iterates over the list of supported preview sizes and selects the largest one that
 * fits both the limits set via setMaxFrameSize() and the surface frame allocated for this view.
 * @param supportedSizes list of preview sizes supported by the camera
 * @param accessor       accessor used to read the width and height of a list entry
 * @param surfaceWidth   width of the surface the preview is drawn on
 * @param surfaceHeight  height of the surface the preview is drawn on
 * @return optimal frame size
 */
protected Size calculateCameraFrameSize(List<?> supportedSizes, ListItemAccessor accessor, int surfaceWidth, int surfaceHeight) {
    int calcWidth = 0;
    int calcHeight = 0;

    int maxAllowedWidth = (mMaxWidth != MAX_UNSPECIFIED && mMaxWidth < surfaceWidth)? mMaxWidth : surfaceWidth;
    int maxAllowedHeight = (mMaxHeight != MAX_UNSPECIFIED && mMaxHeight < surfaceHeight)? mMaxHeight : surfaceHeight;

    for (Object size : supportedSizes) {
        int width = accessor.getWidth(size);
        int height = accessor.getHeight(size);

        if (width <= maxAllowedWidth && height <= maxAllowedHeight) {
            if (width >= calcWidth && height >= calcHeight) {
                calcWidth = width;
                calcHeight = height;
            }
        }
    }

    return new Size(calcWidth, calcHeight);
}
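
For reference, a minimal usage sketch of the method above. The names CameraSizeAccessor and selectPreviewSize are hypothetical, and the code assumes it sits inside a CameraBridgeViewBase subclass where ListItemAccessor and calculateCameraFrameSize are visible (the JavaCameraView accessor shown later in this listing plays the same role):

private static class CameraSizeAccessor implements ListItemAccessor {
    @Override
    public int getWidth(Object obj) {
        return ((android.hardware.Camera.Size) obj).width;
    }

    @Override
    public int getHeight(Object obj) {
        return ((android.hardware.Camera.Size) obj).height;
    }
}

protected Size selectPreviewSize(android.hardware.Camera camera, int surfaceWidth, int surfaceHeight) {
    List<android.hardware.Camera.Size> sizes = camera.getParameters().getSupportedPreviewSizes();
    // Largest supported size that fits both the surface and any setMaxFrameSize() limit.
    return calculateCameraFrameSize(sizes, new CameraSizeAccessor(), surfaceWidth, surfaceHeight);
}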
Project: RobotIGS    File: Transform.java
/**
 * Scales an image to an approximate size. The scale factor is always the same
 * on the x and y axes, regardless of the approxSize aspect ratio.
 *
 * @param img          The image
 * @param approxSize   The target size
 * @param maximize     If true and the approxSize aspect ratio does not match the
 *                     image's, the largest possible result is used. If false (the
 *                     default), the smallest possible result is used.
 * @param integerScale If true (the default), only integer scale factors (or their
 *                     reciprocals) are used. Otherwise, any scale factor can be used.
 * @return the scale factor to apply to both axes
 */
private static double makeScale(Mat img, Size approxSize, boolean maximize, boolean integerScale) {
    Size imageSize = img.size();
    double ratioWidth = approxSize.width / imageSize.width;
    double ratioHeight = approxSize.height / imageSize.height;
    double ratio = maximize ? Math.max(ratioWidth, ratioHeight) : Math.min(ratioWidth, ratioHeight);
    if (MathUtil.equal(ratio, 1))
        return 1;
    if (integerScale) {
        //The scale factor is always greater than 1
        double scale = (ratio < 1) ? 1 / ratio : ratio;
        //If actually enlarging the image, round up with Math.ceil();
        //otherwise round down with Math.floor()
        scale = maximize ^ (ratio < 1) ? Math.ceil(scale) : Math.floor(scale);
        //Get the actual ratio again
        return (ratio < 1) ? 1 / scale : scale;
    } else {
        return ratio;
    }
}
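
A minimal sketch (the helper scaleToApproxSize is hypothetical, not part of the original Transform class) of applying the computed factor with Imgproc.resize. For example, a 640x480 image with an approxSize of 1000x1000, maximize=false and integerScale=true yields a factor of 1.0, because the limiting ratio 1000/640 = 1.5625 is floored to 1:

private static Mat scaleToApproxSize(Mat img, Size approxSize, boolean maximize, boolean integerScale) {
    double scale = makeScale(img, approxSize, maximize, integerScale);
    Mat scaled = new Mat();
    // The same factor is applied to both axes, so the aspect ratio is preserved.
    Imgproc.resize(img, scaled, new Size(img.cols() * scale, img.rows() * scale));
    return scaled;
}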
Project: fingerblox    File: Calib3d.java
public static double stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat T, int flags, TermCriteria criteria)
{
    Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
    Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
    Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
    double retVal = stereoCalibrate_3(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, K1.nativeObj, D1.nativeObj, K2.nativeObj, D2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon);

    return retVal;
}
Project: react-native-scan-doc    File: Video.java
public static int buildOpticalFlowPyramid(Mat img, List<Mat> pyramid, Size winSize, int maxLevel)
{
    Mat pyramid_mat = new Mat();
    int retVal = buildOpticalFlowPyramid_1(img.nativeObj, pyramid_mat.nativeObj, winSize.width, winSize.height, maxLevel);
    Converters.Mat_to_vector_Mat(pyramid_mat, pyramid);
    pyramid_mat.release();
    return retVal;
}
Project: MOAAP    File: Imgproc.java
public static void cornerSubPix(Mat image, MatOfPoint2f corners, Size winSize, Size zeroZone, TermCriteria criteria)
{
    Mat corners_mat = corners;
    cornerSubPix_0(image.nativeObj, corners_mat.nativeObj, winSize.width, winSize.height, zeroZone.width, zeroZone.height, criteria.type, criteria.maxCount, criteria.epsilon);

    return;
}
Project: FaceDetectDemo    File: Calib3d.java
public static double calibrate(List<Mat> objectPoints, List<Mat> imagePoints, Size image_size, Mat K, Mat D, List<Mat> rvecs, List<Mat> tvecs, int flags)
{
    Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
    Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
    Mat rvecs_mat = new Mat();
    Mat tvecs_mat = new Mat();
    double retVal = calibrate_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, image_size.width, image_size.height, K.nativeObj, D.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, flags);
    Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
    rvecs_mat.release();
    Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
    tvecs_mat.release();
    return retVal;
}
Project: Android-Code-Demos    File: Calib3d.java
public static double stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat K1, Mat D1, Mat K2, Mat D2, Size imageSize, Mat R, Mat T, int flags, TermCriteria criteria)
{
    Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
    Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
    Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
    double retVal = stereoCalibrate_3(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, K1.nativeObj, D1.nativeObj, K2.nativeObj, D2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon);

    return retVal;
}
Project: MOAAP    File: Calib3d.java
public static double stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F, int flags, TermCriteria criteria)
{
    Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
    Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
    Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
    double retVal = stereoCalibrate_0(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, E.nativeObj, F.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon);

    return retVal;
}
Project: OpenCV    File: Imgproc.java
public static void ellipse2Poly(Point center, Size axes, int angle, int arcStart, int arcEnd, int delta, MatOfPoint pts)
{
    Mat pts_mat = pts;
    ellipse2Poly_0(center.x, center.y, axes.width, axes.height, angle, arcStart, arcEnd, delta, pts_mat.nativeObj);

    return;
}
Project: Ftc2018RelicRecovery    File: Calib3d.java
public static double stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F, int flags, TermCriteria criteria)
{
    Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
    Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
    Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
    double retVal = stereoCalibrate_0(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, E.nativeObj, F.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon);

    return retVal;
}
Project: Sikulix2opencv    File: CascadeClassifier.java
public  void detectMultiScale2(Mat image, MatOfRect objects, MatOfInt numDetections, double scaleFactor, int minNeighbors, int flags, Size minSize, Size maxSize)
{
    Mat objects_mat = objects;
    Mat numDetections_mat = numDetections;
    detectMultiScale2_0(nativeObj, image.nativeObj, objects_mat.nativeObj, numDetections_mat.nativeObj, scaleFactor, minNeighbors, flags, minSize.width, minSize.height, maxSize.width, maxSize.height);

    return;
}
Project: real_time_circle_detection_android    File: Calib3d.java
public static Mat getOptimalNewCameraMatrix(Mat cameraMatrix, Mat distCoeffs, Size imageSize, double alpha, Size newImgSize, Rect validPixROI, boolean centerPrincipalPoint)
{
    double[] validPixROI_out = new double[4];
    Mat retVal = new Mat(getOptimalNewCameraMatrix_0(cameraMatrix.nativeObj, distCoeffs.nativeObj, imageSize.width, imageSize.height, alpha, newImgSize.width, newImgSize.height, validPixROI_out, centerPrincipalPoint));
    if(validPixROI!=null){ validPixROI.x = (int)validPixROI_out[0]; validPixROI.y = (int)validPixROI_out[1]; validPixROI.width = (int)validPixROI_out[2]; validPixROI.height = (int)validPixROI_out[3]; } 
    return retVal;
}
Project: Team9261-2017-2018    File: Calib3d.java
public static double calibrate(List<Mat> objectPoints, List<Mat> imagePoints, Size image_size, Mat K, Mat D, List<Mat> rvecs, List<Mat> tvecs)
{
    Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
    Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
    Mat rvecs_mat = new Mat();
    Mat tvecs_mat = new Mat();
    double retVal = calibrate_2(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, image_size.width, image_size.height, K.nativeObj, D.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj);
    Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
    rvecs_mat.release();
    Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
    tvecs_mat.release();
    return retVal;
}
Project: renderscript_examples    File: Calib3d.java
public static void drawChessboardCorners(Mat image, Size patternSize, MatOfPoint2f corners, boolean patternWasFound)
{
    Mat corners_mat = corners;
    drawChessboardCorners_0(image.nativeObj, patternSize.width, patternSize.height, corners_mat.nativeObj, patternWasFound);

    return;
}
Project: Android-Code-Demos    File: Calib3d.java
public static double calibrateCameraExtended(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat stdDeviationsIntrinsics, Mat stdDeviationsExtrinsics, Mat perViewErrors)
{
    Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
    Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
    Mat rvecs_mat = new Mat();
    Mat tvecs_mat = new Mat();
    double retVal = calibrateCameraExtended_2(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, stdDeviationsIntrinsics.nativeObj, stdDeviationsExtrinsics.nativeObj, perViewErrors.nativeObj);
    Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
    rvecs_mat.release();
    Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
    tvecs_mat.release();
    return retVal;
}
Project: Microsphere    File: Calib3d.java
public static void stereoRectify(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat R1, Mat R2, Mat P1, Mat P2, Mat Q, int flags, double alpha, Size newImageSize, Rect validPixROI1, Rect validPixROI2)
{
    double[] validPixROI1_out = new double[4];
    double[] validPixROI2_out = new double[4];
    stereoRectify_0(cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, R1.nativeObj, R2.nativeObj, P1.nativeObj, P2.nativeObj, Q.nativeObj, flags, alpha, newImageSize.width, newImageSize.height, validPixROI1_out, validPixROI2_out);
    if(validPixROI1!=null){ validPixROI1.x = (int)validPixROI1_out[0]; validPixROI1.y = (int)validPixROI1_out[1]; validPixROI1.width = (int)validPixROI1_out[2]; validPixROI1.height = (int)validPixROI1_out[3]; } 
    if(validPixROI2!=null){ validPixROI2.x = (int)validPixROI2_out[0]; validPixROI2.y = (int)validPixROI2_out[1]; validPixROI2.width = (int)validPixROI2_out[2]; validPixROI2.height = (int)validPixROI2_out[3]; } 
    return;
}
Project: DogeCV    File: Calib3d.java
public static double stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F, int flags)
{
    Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
    Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
    Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
    double retVal = stereoCalibrate_1(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, E.nativeObj, F.nativeObj, flags);

    return retVal;
}
Project: MOAAP    File: Calib3d.java
public static double calibrateCamera(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, int flags)
{
    Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
    Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
    Mat rvecs_mat = new Mat();
    Mat tvecs_mat = new Mat();
    double retVal = calibrateCamera_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, flags);
    Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
    rvecs_mat.release();
    Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
    tvecs_mat.release();
    return retVal;
}
Project: FTC2016    File: Calib3d.java
public static double stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Size imageSize, Mat R, Mat T, Mat E, Mat F, int flags)
{
    Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
    Mat imagePoints1_mat = Converters.vector_Mat_to_Mat(imagePoints1);
    Mat imagePoints2_mat = Converters.vector_Mat_to_Mat(imagePoints2);
    double retVal = stereoCalibrate_1(objectPoints_mat.nativeObj, imagePoints1_mat.nativeObj, imagePoints2_mat.nativeObj, cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, imageSize.width, imageSize.height, R.nativeObj, T.nativeObj, E.nativeObj, F.nativeObj, flags);

    return retVal;
}
Project: DogeCV    File: Calib3d.java
public static double calibrateCameraExtended(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat stdDeviationsIntrinsics, Mat stdDeviationsExtrinsics, Mat perViewErrors, int flags)
{
    Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
    Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
    Mat rvecs_mat = new Mat();
    Mat tvecs_mat = new Mat();
    double retVal = calibrateCameraExtended_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, stdDeviationsIntrinsics.nativeObj, stdDeviationsExtrinsics.nativeObj, perViewErrors.nativeObj, flags);
    Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
    rvecs_mat.release();
    Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
    tvecs_mat.release();
    return retVal;
}
Project: real_time_circle_detection_android    File: Video.java
public static int buildOpticalFlowPyramid(Mat img, List<Mat> pyramid, Size winSize, int maxLevel, boolean withDerivatives, int pyrBorder, int derivBorder, boolean tryReuseInputImage)
{
    Mat pyramid_mat = new Mat();
    int retVal = buildOpticalFlowPyramid_0(img.nativeObj, pyramid_mat.nativeObj, winSize.width, winSize.height, maxLevel, withDerivatives, pyrBorder, derivBorder, tryReuseInputImage);
    Converters.Mat_to_vector_Mat(pyramid_mat, pyramid);
    pyramid_mat.release();
    return retVal;
}
Project: MOAAP    File: Calib3d.java
public static void drawChessboardCorners(Mat image, Size patternSize, MatOfPoint2f corners, boolean patternWasFound)
{
    Mat corners_mat = corners;
    drawChessboardCorners_0(image.nativeObj, patternSize.width, patternSize.height, corners_mat.nativeObj, patternWasFound);

    return;
}
Project: android-imaging-utils    File: ImagingUtils.java
/**
 * Resize, crop and rotate the camera preview frame.
 *
 * @param bytes  preview data (NV21)
 * @param width  original width
 * @param height original height
 * @param params image processing parameters
 * @return the processed frame as a Bitmap
 */
public static Bitmap rotateCropAndResizePreview(byte[] bytes, int width, int height, PreviewResizeParams params)
{
    Size finalSize = new Size(params.newWidth, params.newHeight);
    Rect cropRect = new Rect(params.cropX, params.cropY, params.cropWidth, params.cropHeight);

    Mat rawMat = new Mat(height * 3 / 2, width, CvType.CV_8UC1); // YUV data
    rawMat.put(0, 0, bytes);
    Mat rgbMat = new Mat(height, width, CvType.CV_8UC4); // RGBA image
    Imgproc.cvtColor(rawMat, rgbMat, Imgproc.COLOR_YUV2RGBA_NV21);

    //rotate clockwise
    Mat rotatedMat = rotateFrame(rgbMat, params.rotation);

    //crop rect from image
    Mat croppedMat = new Mat(rotatedMat, cropRect);

    //resize
    if (finalSize.area() > 0)
        Imgproc.resize(croppedMat, croppedMat, finalSize);


    Bitmap bmp = Bitmap.createBitmap(croppedMat.cols(), croppedMat.rows(), Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(croppedMat, bmp);

    return bmp;
}
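
A minimal usage sketch (the wrapper below is hypothetical; how PreviewResizeParams is constructed is not shown in this listing, so `params` is assumed to already carry the rotation, crop rectangle and target size read by the method above):

public Bitmap processPreviewFrame(byte[] nv21Data, android.hardware.Camera camera, PreviewResizeParams params) {
    android.hardware.Camera.Size previewSize = camera.getParameters().getPreviewSize();
    return ImagingUtils.rotateCropAndResizePreview(nv21Data, previewSize.width, previewSize.height, params);
}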
Project: MOAAP    File: Calib3d.java
public static boolean findChessboardCorners(Mat image, Size patternSize, MatOfPoint2f corners)
{
    Mat corners_mat = corners;
    boolean retVal = findChessboardCorners_1(image.nativeObj, patternSize.width, patternSize.height, corners_mat.nativeObj);

    return retVal;
}
Project: MOAAP    File: Video.java
public static void calcOpticalFlowPyrLK(Mat prevImg, Mat nextImg, MatOfPoint2f prevPts, MatOfPoint2f nextPts, MatOfByte status, MatOfFloat err, Size winSize, int maxLevel, TermCriteria criteria, int flags, double minEigThreshold)
{
    Mat prevPts_mat = prevPts;
    Mat nextPts_mat = nextPts;
    Mat status_mat = status;
    Mat err_mat = err;
    calcOpticalFlowPyrLK_0(prevImg.nativeObj, nextImg.nativeObj, prevPts_mat.nativeObj, nextPts_mat.nativeObj, status_mat.nativeObj, err_mat.nativeObj, winSize.width, winSize.height, maxLevel, criteria.type, criteria.maxCount, criteria.epsilon, flags, minEigThreshold);

    return;
}
Project: DNNLibrary    File: Calib3d.java
public static double calibrateCameraExtended(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat stdDeviationsIntrinsics, Mat stdDeviationsExtrinsics, Mat perViewErrors, int flags)
{
    Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
    Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
    Mat rvecs_mat = new Mat();
    Mat tvecs_mat = new Mat();
    double retVal = calibrateCameraExtended_1(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, stdDeviationsIntrinsics.nativeObj, stdDeviationsExtrinsics.nativeObj, perViewErrors.nativeObj, flags);
    Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
    rvecs_mat.release();
    Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
    tvecs_mat.release();
    return retVal;
}
Project: MOAAP    File: Aruco.java
public static double calibrateCameraCharuco(List<Mat> charucoCorners, List<Mat> charucoIds, CharucoBoard board, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, int flags)
{
    Mat charucoCorners_mat = Converters.vector_Mat_to_Mat(charucoCorners);
    Mat charucoIds_mat = Converters.vector_Mat_to_Mat(charucoIds);
    Mat rvecs_mat = new Mat();
    Mat tvecs_mat = new Mat();
    double retVal = calibrateCameraCharuco_1(charucoCorners_mat.nativeObj, charucoIds_mat.nativeObj, board.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, flags);
    Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
    rvecs_mat.release();
    Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
    tvecs_mat.release();
    return retVal;
}
Project: MOAAP    File: Calib3d.java
public static double calibrate(List<Mat> objectPoints, List<Mat> imagePoints, Size image_size, Mat K, Mat D, List<Mat> rvecs, List<Mat> tvecs)
{
    Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
    Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
    Mat rvecs_mat = new Mat();
    Mat tvecs_mat = new Mat();
    double retVal = calibrate_2(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, image_size.width, image_size.height, K.nativeObj, D.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj);
    Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
    rvecs_mat.release();
    Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
    tvecs_mat.release();
    return retVal;
}
Project: MOAAP    File: Calib3d.java
public static Mat initCameraMatrix2D(List<MatOfPoint3f> objectPoints, List<MatOfPoint2f> imagePoints, Size imageSize, double aspectRatio)
{
    List<Mat> objectPoints_tmplm = new ArrayList<Mat>((objectPoints != null) ? objectPoints.size() : 0);
    Mat objectPoints_mat = Converters.vector_vector_Point3f_to_Mat(objectPoints, objectPoints_tmplm);
    List<Mat> imagePoints_tmplm = new ArrayList<Mat>((imagePoints != null) ? imagePoints.size() : 0);
    Mat imagePoints_mat = Converters.vector_vector_Point2f_to_Mat(imagePoints, imagePoints_tmplm);
    Mat retVal = new Mat(initCameraMatrix2D_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, aspectRatio));

    return retVal;
}
Project: Ftc2018RelicRecovery    File: Calib3d.java
public static double calibrateCamera(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs)
{
    Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
    Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
    Mat rvecs_mat = new Mat();
    Mat tvecs_mat = new Mat();
    double retVal = calibrateCamera_2(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj);
    Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
    rvecs_mat.release();
    Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
    tvecs_mat.release();
    return retVal;
}
Project: MOAAP    File: Calib3d.java
public static double calibrateCameraExtended(List<Mat> objectPoints, List<Mat> imagePoints, Size imageSize, Mat cameraMatrix, Mat distCoeffs, List<Mat> rvecs, List<Mat> tvecs, Mat stdDeviationsIntrinsics, Mat stdDeviationsExtrinsics, Mat perViewErrors)
{
    Mat objectPoints_mat = Converters.vector_Mat_to_Mat(objectPoints);
    Mat imagePoints_mat = Converters.vector_Mat_to_Mat(imagePoints);
    Mat rvecs_mat = new Mat();
    Mat tvecs_mat = new Mat();
    double retVal = calibrateCameraExtended_2(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, cameraMatrix.nativeObj, distCoeffs.nativeObj, rvecs_mat.nativeObj, tvecs_mat.nativeObj, stdDeviationsIntrinsics.nativeObj, stdDeviationsExtrinsics.nativeObj, perViewErrors.nativeObj);
    Converters.Mat_to_vector_Mat(rvecs_mat, rvecs);
    rvecs_mat.release();
    Converters.Mat_to_vector_Mat(tvecs_mat, tvecs);
    tvecs_mat.release();
    return retVal;
}
Project: MOAAP    File: Calib3d.java
public static float rectify3Collinear(Mat cameraMatrix1, Mat distCoeffs1, Mat cameraMatrix2, Mat distCoeffs2, Mat cameraMatrix3, Mat distCoeffs3, List<Mat> imgpt1, List<Mat> imgpt3, Size imageSize, Mat R12, Mat T12, Mat R13, Mat T13, Mat R1, Mat R2, Mat R3, Mat P1, Mat P2, Mat P3, Mat Q, double alpha, Size newImgSize, Rect roi1, Rect roi2, int flags)
{
    Mat imgpt1_mat = Converters.vector_Mat_to_Mat(imgpt1);
    Mat imgpt3_mat = Converters.vector_Mat_to_Mat(imgpt3);
    double[] roi1_out = new double[4];
    double[] roi2_out = new double[4];
    float retVal = rectify3Collinear_0(cameraMatrix1.nativeObj, distCoeffs1.nativeObj, cameraMatrix2.nativeObj, distCoeffs2.nativeObj, cameraMatrix3.nativeObj, distCoeffs3.nativeObj, imgpt1_mat.nativeObj, imgpt3_mat.nativeObj, imageSize.width, imageSize.height, R12.nativeObj, T12.nativeObj, R13.nativeObj, T13.nativeObj, R1.nativeObj, R2.nativeObj, R3.nativeObj, P1.nativeObj, P2.nativeObj, P3.nativeObj, Q.nativeObj, alpha, newImgSize.width, newImgSize.height, roi1_out, roi2_out, flags);
    if(roi1!=null){ roi1.x = (int)roi1_out[0]; roi1.y = (int)roi1_out[1]; roi1.width = (int)roi1_out[2]; roi1.height = (int)roi1_out[3]; } 
    if(roi2!=null){ roi2.x = (int)roi2_out[0]; roi2.y = (int)roi2_out[1]; roi2.width = (int)roi2_out[2]; roi2.height = (int)roi2_out[3]; } 
    return retVal;
}
Project: Checkerboard-IMU-Comparator    File: HOGDescriptor.java
public  void detectMultiScale(Mat img, MatOfRect foundLocations, MatOfDouble foundWeights, double hitThreshold, Size winStride, Size padding, double scale, double finalThreshold, boolean useMeanshiftGrouping)
{
    Mat foundLocations_mat = foundLocations;
    Mat foundWeights_mat = foundWeights;
    detectMultiScale_0(nativeObj, img.nativeObj, foundLocations_mat.nativeObj, foundWeights_mat.nativeObj, hitThreshold, winStride.width, winStride.height, padding.width, padding.height, scale, finalThreshold, useMeanshiftGrouping);

    return;
}
Project: mao-android    File: CascadeClassifier.java
public  void detectMultiScale(Mat image, MatOfRect objects, double scaleFactor, int minNeighbors, int flags, Size minSize, Size maxSize)
{
    Mat objects_mat = objects;
    detectMultiScale_0(nativeObj, image.nativeObj, objects_mat.nativeObj, scaleFactor, minNeighbors, flags, minSize.width, minSize.height, maxSize.width, maxSize.height);

    return;
}
Project: RobotIGS    File: Calib3d.java
public static Mat initCameraMatrix2D(List<MatOfPoint3f> objectPoints, List<MatOfPoint2f> imagePoints, Size imageSize, double aspectRatio)
{
    List<Mat> objectPoints_tmplm = new ArrayList<Mat>((objectPoints != null) ? objectPoints.size() : 0);
    Mat objectPoints_mat = Converters.vector_vector_Point3f_to_Mat(objectPoints, objectPoints_tmplm);
    List<Mat> imagePoints_tmplm = new ArrayList<Mat>((imagePoints != null) ? imagePoints.size() : 0);
    Mat imagePoints_mat = Converters.vector_vector_Point2f_to_Mat(imagePoints, imagePoints_tmplm);
    Mat retVal = new Mat(initCameraMatrix2D_0(objectPoints_mat.nativeObj, imagePoints_mat.nativeObj, imageSize.width, imageSize.height, aspectRatio));

    return retVal;
}
Project: android-age-estimator    File: JavaCameraView.java
@Override
public int getWidth(Object obj) {
    Camera.Size size = (Camera.Size) obj;
    return size.width;
}
Project: ImageEnhanceViaFusion    File: Filters.java
private static Mat filterSingleChannel(Mat p, double s, ArrayList<Mat> Isubchannels, ArrayList<Mat> Ichannels, 
        Mat mean_I_r, Mat mean_I_g, Mat mean_I_b, Mat invrr, Mat invrg, Mat invrb, Mat invgg, Mat invgb, 
        Mat invbb, double r_sub) {
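    // Run the per-channel filtering step at reduced resolution: p is downsampled by factor s,
    // matching the already-downsampled guidance channels in Isubchannels (fast guided filter style).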
    Mat p_sub = new Mat();
    Imgproc.resize(p, p_sub, new Size(p.cols() / s, p.rows() / s), 0.0, 0.0, Imgproc.INTER_NEAREST);

    Mat mean_p = boxfilter(p_sub, (int) r_sub);

    Mat mean_Ip_r = boxfilter(Isubchannels.get(0).mul(p_sub), (int) r_sub);
    Mat mean_Ip_g = boxfilter(Isubchannels.get(1).mul(p_sub), (int) r_sub);
    Mat mean_Ip_b = boxfilter(Isubchannels.get(2).mul(p_sub), (int) r_sub);

    // covariance of (I, p) in each local patch
    Mat cov_Ip_r = new Mat();
    Mat cov_Ip_g = new Mat();
    Mat cov_Ip_b = new Mat();
    Core.subtract(mean_Ip_r, mean_I_r.mul(mean_p), cov_Ip_r);
    Core.subtract(mean_Ip_g, mean_I_g.mul(mean_p), cov_Ip_g);
    Core.subtract(mean_Ip_b, mean_I_b.mul(mean_p), cov_Ip_b);

    Mat temp1 = new Mat();
    Mat a_r = new Mat();
    Mat a_g = new Mat();
    Mat a_b = new Mat();
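    // a = Sigma^-1 * cov(I, p): combine the covariances with the precomputed
    // inverse covariance terms (invrr ... invbb).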
    Core.add(invrr.mul(cov_Ip_r), invrg.mul(cov_Ip_g), temp1);
    Core.add(temp1, invrb.mul(cov_Ip_b), a_r);
    Core.add(invrg.mul(cov_Ip_r), invgg.mul(cov_Ip_g), temp1);
    Core.add(temp1, invgb.mul(cov_Ip_b), a_g);
    Core.add(invrb.mul(cov_Ip_r), invgb.mul(cov_Ip_g), temp1);
    Core.add(temp1, invbb.mul(cov_Ip_b), a_b);

    Mat b = new Mat();
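    // b = mean(p) - a_r*mean(I_r) - a_g*mean(I_g) - a_b*mean(I_b)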
    Core.subtract(mean_p, a_r.mul(mean_I_r), b);
    Core.subtract(b, a_g.mul(mean_I_g), b);
    Core.subtract(b, a_b.mul(mean_I_b), b);

    Mat mean_a_r = boxfilter(a_r, (int) r_sub);
    Mat mean_a_g = boxfilter(a_g, (int) r_sub);
    Mat mean_a_b = boxfilter(a_b, (int) r_sub);
    Mat mean_b = boxfilter(b, (int) r_sub);

    Imgproc.resize(mean_a_r, mean_a_r, 
            new Size(Ichannels.get(0).cols(), Ichannels.get(0).rows()), 0.0, 0.0, Imgproc.INTER_LINEAR);
    Imgproc.resize(mean_a_g, mean_a_g, 
            new Size(Ichannels.get(0).cols(), Ichannels.get(0).rows()), 0.0, 0.0, Imgproc.INTER_LINEAR);
    Imgproc.resize(mean_a_b, mean_a_b, 
            new Size(Ichannels.get(0).cols(), Ichannels.get(0).rows()), 0.0, 0.0, Imgproc.INTER_LINEAR);
    Imgproc.resize(mean_b, mean_b, 
            new Size(Ichannels.get(0).cols(), Ichannels.get(0).rows()), 0.0, 0.0, Imgproc.INTER_LINEAR);

    Mat result = new Mat();
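    // result = mean_a_r*I_r + mean_a_g*I_g + mean_a_b*I_b + mean_b, evaluated on the
    // full-resolution guidance channels.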
    Core.add(mean_a_r.mul(Ichannels.get(0)), mean_a_g.mul(Ichannels.get(1)), temp1);
    Core.add(temp1, mean_a_b.mul(Ichannels.get(2)), temp1);
    Core.add(temp1, mean_b, result);
    return result;
}
Project: react-native-scan-doc    File: HOGDescriptor.java
public  Size get_winSize()
{

    Size retVal = new Size(get_winSize_0(nativeObj));

    return retVal;
}
Project: react-native-scan-doc    File: VideoWriter.java
public   VideoWriter(String filename, int fourcc, double fps, Size frameSize)
{

    nativeObj = VideoWriter_1(filename, fourcc, fps, frameSize.width, frameSize.height);

    return;
}
Project: MOAAP    File: Imgproc.java
public static float initWideAngleProjMap(Mat cameraMatrix, Mat distCoeffs, Size imageSize, int destImageWidth, int m1type, Mat map1, Mat map2, int projType, double alpha)
{

    float retVal = initWideAngleProjMap_0(cameraMatrix.nativeObj, distCoeffs.nativeObj, imageSize.width, imageSize.height, destImageWidth, m1type, map1.nativeObj, map2.nativeObj, projType, alpha);

    return retVal;
}