Added beta samples for video object tracking/text detection #1237

Merged 5 commits on Oct 24, 2018
4 changes: 2 additions & 2 deletions video/beta/pom.xml
@@ -26,7 +26,7 @@
<parent>
<groupId>com.google.cloud.samples</groupId>
<artifactId>shared-configuration</artifactId>
- <version>1.0.9</version>
+ <version>1.0.10</version>
</parent>

<properties>
@@ -39,7 +39,7 @@
<dependency>
<groupId>com.google.cloud</groupId>
<artifactId>google-cloud-video-intelligence</artifactId>
- <version>0.56.0-beta</version>
+ <version>0.67.0-beta</version>
</dependency>

<!-- Test dependencies -->
Binary file added video/beta/resources/cat.mp4
Binary file not shown.
Binary file added video/beta/resources/googlework_short.mp4
Binary file not shown.
2 changes: 1 addition & 1 deletion video/beta/src/main/java/com/example/video/Detect.java
@@ -105,7 +105,7 @@ public static void speechTranscription(String gcsUri) throws Exception {

System.out.println("Waiting for operation to complete...");
// Display the results
- for (VideoAnnotationResults results : response.get(180, TimeUnit.SECONDS)
+ for (VideoAnnotationResults results : response.get(300, TimeUnit.SECONDS)
.getAnnotationResultsList()) {
for (SpeechTranscription speechTranscription : results.getSpeechTranscriptionsList()) {
try {
165 changes: 165 additions & 0 deletions video/beta/src/main/java/com/example/video/TextDetection.java
@@ -0,0 +1,165 @@
/*
* Copyright 2018 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package com.example.video;

import com.google.api.gax.longrunning.OperationFuture;
import com.google.cloud.videointelligence.v1p2beta1.AnnotateVideoProgress;
import com.google.cloud.videointelligence.v1p2beta1.AnnotateVideoRequest;
import com.google.cloud.videointelligence.v1p2beta1.AnnotateVideoResponse;
import com.google.cloud.videointelligence.v1p2beta1.Feature;
import com.google.cloud.videointelligence.v1p2beta1.NormalizedVertex;
import com.google.cloud.videointelligence.v1p2beta1.TextAnnotation;
import com.google.cloud.videointelligence.v1p2beta1.TextFrame;
import com.google.cloud.videointelligence.v1p2beta1.TextSegment;
import com.google.cloud.videointelligence.v1p2beta1.VideoAnnotationResults;
import com.google.cloud.videointelligence.v1p2beta1.VideoIntelligenceServiceClient;
import com.google.cloud.videointelligence.v1p2beta1.VideoSegment;
import com.google.protobuf.ByteString;
import com.google.protobuf.Duration;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
import java.util.concurrent.TimeUnit;

public class TextDetection {

// [START video_detect_text_beta]
/**
* Detect text in a video.
*
* @param filePath the path to the video file to analyze.
*/
public static VideoAnnotationResults detectText(String filePath) throws Exception {
try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
// Read file
Path path = Paths.get(filePath);
byte[] data = Files.readAllBytes(path);

// Create the request
AnnotateVideoRequest request = AnnotateVideoRequest.newBuilder()
.setInputContent(ByteString.copyFrom(data))
.addFeatures(Feature.TEXT_DETECTION)
.build();

// Asynchronously perform text detection on the video
OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
client.annotateVideoAsync(request);

System.out.println("Waiting for operation to complete...");
// The first result is retrieved because a single video was processed.
AnnotateVideoResponse response = future.get(300, TimeUnit.SECONDS);
VideoAnnotationResults results = response.getAnnotationResults(0);

// Get only the first annotation for demo purposes.
TextAnnotation annotation = results.getTextAnnotations(0);
System.out.println("Text: " + annotation.getText());

// Get the first text segment.
TextSegment textSegment = annotation.getSegments(0);
System.out.println("Confidence: " + textSegment.getConfidence());
// Display the time offset of the text segment
VideoSegment videoSegment = textSegment.getSegment();
Duration startTimeOffset = videoSegment.getStartTimeOffset();
Duration endTimeOffset = videoSegment.getEndTimeOffset();
// Display the offset times in seconds; 1e9 converts nanos to seconds
System.out.println(String.format("Start time: %.2f",
startTimeOffset.getSeconds() + startTimeOffset.getNanos() / 1e9));
System.out.println(String.format("End time: %.2f",
endTimeOffset.getSeconds() + endTimeOffset.getNanos() / 1e9));

// Show the first result for the first frame in the segment.
TextFrame textFrame = textSegment.getFrames(0);
Duration timeOffset = textFrame.getTimeOffset();
System.out.println(String.format("Time offset for the first frame: %.2f",
timeOffset.getSeconds() + timeOffset.getNanos() / 1e9));

// Display the rotated bounding box for where the text is on the frame.
System.out.println("Rotated Bounding Box Vertices:");
List<NormalizedVertex> vertices = textFrame.getRotatedBoundingBox().getVerticesList();
for (NormalizedVertex normalizedVertex : vertices) {
System.out.println(String.format(
"\tVertex.x: %.2f, Vertex.y: %.2f",
normalizedVertex.getX(),
normalizedVertex.getY()));
}
return results;
}
}
// [END video_detect_text_beta]

// [START video_detect_text_gcs_beta]
/**
* Detect text in a video.
*
* @param gcsUri the Google Cloud Storage URI of the video to analyze.
*/
public static VideoAnnotationResults detectTextGcs(String gcsUri) throws Exception {
try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
// Create the request
AnnotateVideoRequest request = AnnotateVideoRequest.newBuilder()
.setInputUri(gcsUri)
.addFeatures(Feature.TEXT_DETECTION)
.build();

// Asynchronously perform text detection on the video
OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
client.annotateVideoAsync(request);

System.out.println("Waiting for operation to complete...");
// The first result is retrieved because a single video was processed.
AnnotateVideoResponse response = future.get(300, TimeUnit.SECONDS);
VideoAnnotationResults results = response.getAnnotationResults(0);

// Get only the first annotation for demo purposes.
TextAnnotation annotation = results.getTextAnnotations(0);
System.out.println("Text: " + annotation.getText());

// Get the first text segment.
TextSegment textSegment = annotation.getSegments(0);
System.out.println("Confidence: " + textSegment.getConfidence());
// Display the time offset of the text segment
VideoSegment videoSegment = textSegment.getSegment();
Duration startTimeOffset = videoSegment.getStartTimeOffset();
Duration endTimeOffset = videoSegment.getEndTimeOffset();
// Display the offset times in seconds; 1e9 converts nanos to seconds
System.out.println(String.format("Start time: %.2f",
startTimeOffset.getSeconds() + startTimeOffset.getNanos() / 1e9));
System.out.println(String.format("End time: %.2f",
endTimeOffset.getSeconds() + endTimeOffset.getNanos() / 1e9));

// Show the first result for the first frame in the segment.
TextFrame textFrame = textSegment.getFrames(0);
Duration timeOffset = textFrame.getTimeOffset();
System.out.println(String.format("Time offset for the first frame: %.2f",
timeOffset.getSeconds() + timeOffset.getNanos() / 1e9));

// Display the rotated bounding box for where the text is on the frame.
System.out.println("Rotated Bounding Box Vertices:");
List<NormalizedVertex> vertices = textFrame.getRotatedBoundingBox().getVerticesList();
for (NormalizedVertex normalizedVertex : vertices) {
System.out.println(String.format(
"\tVertex.x: %.2f, Vertex.y: %.2f",
normalizedVertex.getX(),
normalizedVertex.getY()));
}
return results;
}
}
// [END video_detect_text_gcs_beta]
}
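Note (not part of the diff): for readers who want to try the new text-detection sample locally, a minimal driver sketch is shown below. The demo class name and relative resource path are assumptions, not code from this PR; it assumes you run from the video/beta module with application-default credentials configured and that googlework_short.mp4 (added by this PR) is the input.

// Hypothetical driver for the text-detection sample; not included in this PR.
// Assumes the video/beta module is on the classpath and that
// GOOGLE_APPLICATION_CREDENTIALS points at a valid service account key.
package com.example.video;

import com.google.cloud.videointelligence.v1p2beta1.VideoAnnotationResults;

public class TextDetectionDemo {
  public static void main(String[] args) throws Exception {
    // googlework_short.mp4 is one of the resources added by this PR;
    // the relative path assumes the working directory is video/beta.
    VideoAnnotationResults results =
        TextDetection.detectText("resources/googlework_short.mp4");
    System.out.println("Text annotations returned: " + results.getTextAnnotationsCount());
  }
}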
175 changes: 175 additions & 0 deletions video/beta/src/main/java/com/example/video/TrackObjects.java
@@ -0,0 +1,175 @@
/*
* Copyright 2018 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package com.example.video;

import com.google.api.gax.longrunning.OperationFuture;
import com.google.cloud.videointelligence.v1p2beta1.AnnotateVideoProgress;
import com.google.cloud.videointelligence.v1p2beta1.AnnotateVideoRequest;
import com.google.cloud.videointelligence.v1p2beta1.AnnotateVideoResponse;
import com.google.cloud.videointelligence.v1p2beta1.Entity;
import com.google.cloud.videointelligence.v1p2beta1.Feature;
import com.google.cloud.videointelligence.v1p2beta1.NormalizedBoundingBox;
import com.google.cloud.videointelligence.v1p2beta1.ObjectTrackingAnnotation;
import com.google.cloud.videointelligence.v1p2beta1.ObjectTrackingFrame;
import com.google.cloud.videointelligence.v1p2beta1.VideoAnnotationResults;
import com.google.cloud.videointelligence.v1p2beta1.VideoIntelligenceServiceClient;
import com.google.cloud.videointelligence.v1p2beta1.VideoSegment;
import com.google.protobuf.ByteString;
import com.google.protobuf.Duration;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.concurrent.TimeUnit;

public class TrackObjects {

// [START video_object_tracking_beta]
/**
* Track objects in a video.
*
* @param filePath the path to the video file to analyze.
*/
public static VideoAnnotationResults trackObjects(String filePath) throws Exception {
try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
// Read file
Path path = Paths.get(filePath);
byte[] data = Files.readAllBytes(path);

// Create the request
AnnotateVideoRequest request = AnnotateVideoRequest.newBuilder()
.setInputContent(ByteString.copyFrom(data))
.addFeatures(Feature.OBJECT_TRACKING)
.setLocationId("us-east1")
.build();

// Asynchronously perform object tracking on the video
OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
client.annotateVideoAsync(request);

System.out.println("Waiting for operation to complete...");
// The first result is retrieved because a single video was processed.
AnnotateVideoResponse response = future.get(300, TimeUnit.SECONDS);
VideoAnnotationResults results = response.getAnnotationResults(0);

// Get only the first annotation for demo purposes.
ObjectTrackingAnnotation annotation = results.getObjectAnnotations(0);
System.out.println("Confidence: " + annotation.getConfidence());

if (annotation.hasEntity()) {
Entity entity = annotation.getEntity();
System.out.println("Entity description: " + entity.getDescription());
System.out.println("Entity id:: " + entity.getEntityId());
}

if (annotation.hasSegment()) {
VideoSegment videoSegment = annotation.getSegment();
Duration startTimeOffset = videoSegment.getStartTimeOffset();
Duration endTimeOffset = videoSegment.getEndTimeOffset();
// Display the segment time in seconds, 1e9 converts nanos to seconds
System.out.println(String.format(
"Segment: %.2fs to %.2fs",
startTimeOffset.getSeconds() + startTimeOffset.getNanos() / 1e9,
endTimeOffset.getSeconds() + endTimeOffset.getNanos() / 1e9));
}

// Here we print only the bounding box of the first frame in this segment.
ObjectTrackingFrame frame = annotation.getFrames(0);
// Display the offset time in seconds, 1e9 converts nanos to seconds
Duration timeOffset = frame.getTimeOffset();
System.out.println(String.format(
"Time offset of the first frame: %.2fs",
timeOffset.getSeconds() + timeOffset.getNanos() / 1e9));

// Display the bounding box of the detected object
NormalizedBoundingBox normalizedBoundingBox = frame.getNormalizedBoundingBox();
System.out.println("Bounding box position:");
System.out.println("\tleft: " + normalizedBoundingBox.getLeft());
System.out.println("\ttop: " + normalizedBoundingBox.getTop());
System.out.println("\tright: " + normalizedBoundingBox.getRight());
System.out.println("\tbottom: " + normalizedBoundingBox.getBottom());
return results;
}
}
// [END video_object_tracking_beta]

// [START video_object_tracking_gcs_beta]
/**
* Track objects in a video.
*
* @param gcsUri the Google Cloud Storage URI of the video to analyze.
*/
public static VideoAnnotationResults trackObjectsGcs(String gcsUri) throws Exception {
try (VideoIntelligenceServiceClient client = VideoIntelligenceServiceClient.create()) {
// Create the request
AnnotateVideoRequest request = AnnotateVideoRequest.newBuilder()
.setInputUri(gcsUri)
.addFeatures(Feature.OBJECT_TRACKING)
.setLocationId("us-east1")
.build();

// Asynchronously perform object tracking on the video
OperationFuture<AnnotateVideoResponse, AnnotateVideoProgress> future =
client.annotateVideoAsync(request);

System.out.println("Waiting for operation to complete...");
// The first result is retrieved because a single video was processed.
AnnotateVideoResponse response = future.get(300, TimeUnit.SECONDS);
VideoAnnotationResults results = response.getAnnotationResults(0);

// Get only the first annotation for demo purposes.
ObjectTrackingAnnotation annotation = results.getObjectAnnotations(0);
System.out.println("Confidence: " + annotation.getConfidence());

if (annotation.hasEntity()) {
Entity entity = annotation.getEntity();
System.out.println("Entity description: " + entity.getDescription());
System.out.println("Entity id:: " + entity.getEntityId());
}

if (annotation.hasSegment()) {
VideoSegment videoSegment = annotation.getSegment();
Duration startTimeOffset = videoSegment.getStartTimeOffset();
Duration endTimeOffset = videoSegment.getEndTimeOffset();
// Display the segment time in seconds, 1e9 converts nanos to seconds
System.out.println(String.format(
"Segment: %.2fs to %.2fs",
startTimeOffset.getSeconds() + startTimeOffset.getNanos() / 1e9,
endTimeOffset.getSeconds() + endTimeOffset.getNanos() / 1e9));
}

// Here we print only the bounding box of the first frame in this segment.
ObjectTrackingFrame frame = annotation.getFrames(0);
// Display the offset time in seconds, 1e9 converts nanos to seconds
Duration timeOffset = frame.getTimeOffset();
System.out.println(String.format(
"Time offset of the first frame: %.2fs",
timeOffset.getSeconds() + timeOffset.getNanos() / 1e9));

// Display the bounding box of the detected object
NormalizedBoundingBox normalizedBoundingBox = frame.getNormalizedBoundingBox();
System.out.println("Bounding box position:");
System.out.println("\tleft: " + normalizedBoundingBox.getLeft());
System.out.println("\ttop: " + normalizedBoundingBox.getTop());
System.out.println("\tright: " + normalizedBoundingBox.getRight());
System.out.println("\tbottom: " + normalizedBoundingBox.getBottom());
return results;
}
}
// [END video_object_tracking_gcs_beta]
}
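Note (not part of the diff): a similar driver sketch for the object-tracking sample. The demo class name and the gs:// URI are placeholders and assumptions; substitute a video in your own bucket, or call trackObjects(...) with the local resources/cat.mp4 file added by this PR.

// Hypothetical driver for the object-tracking sample; not included in this PR.
// The bucket name below is a placeholder - replace it before running.
package com.example.video;

import com.google.cloud.videointelligence.v1p2beta1.VideoAnnotationResults;

public class TrackObjectsDemo {
  public static void main(String[] args) throws Exception {
    VideoAnnotationResults results =
        TrackObjects.trackObjectsGcs("gs://YOUR_BUCKET/cat.mp4");
    System.out.println("Tracked objects returned: " + results.getObjectAnnotationsCount());
  }
}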
