diff --git a/.gitignore b/.gitignore
index 4cba5cd7c9e8..8b2e39a8fbc8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,3 +6,4 @@ coverage-gae.json
 coverage.xml
 nosetests.xml
 python-docs-samples.json
+__pycache__
diff --git a/README.md b/README.md
index 2ca6d9ee07c0..81b96eaf2289 100644
--- a/README.md
+++ b/README.md
@@ -13,6 +13,9 @@ For more detailed introduction to a product, check the README in the correspondi

 ## Testing

+The tests in this repository run against live services; therefore, it takes a bit
+of configuration to run all of the tests locally.
+
 ### Local setup

 Before you can run tests locally you must have:
@@ -26,7 +29,9 @@ Before you can run tests locally you must have:

         $ curl https://sdk.cloud.google.com | bash

 * Most tests require you to have an active, billing-enabled project on the
   [Google Developers Console](https://console.developers.google.com).
+
 * You will need a set of [Service Account Credentials](https://console.developers.google.com/project/_/apiui/credential) for your project in ``json`` form.
+
 * Set the environment variables appropriately for your project.

         $ export GOOGLE_APPLICATION_CREDENTIALS=your-service-account-json-file
@@ -43,6 +48,17 @@ If you want to run the Google App Engine tests, you will need:

         $ export GAE_PYTHONPATH=~/google-cloud-sdk/platform/google_appengine

+To run the BigQuery tests, you'll need to create a BigQuery dataset:
+
+* Create a dataset in your project named `test_dataset`.
+* Create a table named `test_table2`, upload ``tests/resources/data.csv`` and give it the following schema:
+
+        Name STRING
+        Age INTEGER
+        Weight FLOAT
+        IsMagic BOOLEAN
+
+
 ### Test environments

 We use [tox](https://tox.readthedocs.org/en/latest/) to configure multiple python environments:
diff --git a/bigquery/samples/async_query.py b/bigquery/samples/async_query.py
index eba8d28c6099..a0b2f775f0c7 100644
--- a/bigquery/samples/async_query.py
+++ b/bigquery/samples/async_query.py
@@ -11,14 +11,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from __future__ import print_function  # For python 2/3 interoperability
-
 import json
 import uuid

 from bigquery.samples.utils import get_service
 from bigquery.samples.utils import paging
 from bigquery.samples.utils import poll_job
+from six.moves import input


 # [START async_query]
@@ -70,13 +69,13 @@ def run(project_id, query_string, batch, num_retries, interval):

 # [START main]
 def main():
-    project_id = raw_input("Enter the project ID: ")
-    query_string = raw_input("Enter the Bigquery SQL Query: ")
-    batch = raw_input("Run query as batch (y/n)?: ") in (
+    project_id = input("Enter the project ID: ")
+    query_string = input("Enter the Bigquery SQL Query: ")
+    batch = input("Run query as batch (y/n)?: ") in (
         'True', 'true', 'y', 'Y', 'yes', 'Yes')
-    num_retries = int(raw_input(
+    num_retries = int(input(
         "Enter number of times to retry in case of 500 error: "))
-    interval = raw_input(
+    interval = input(
         "Enter how often to poll the query for completion (seconds): ")

     for result in run(project_id, query_string, batch, num_retries,
                       interval):
diff --git a/bigquery/samples/export_data_to_cloud_storage.py b/bigquery/samples/export_data_to_cloud_storage.py
index 4e2456370952..334a12d4298f 100644
--- a/bigquery/samples/export_data_to_cloud_storage.py
+++ b/bigquery/samples/export_data_to_cloud_storage.py
@@ -15,6 +15,7 @@

 from bigquery.samples.utils import get_service
 from bigquery.samples.utils import poll_job
+from six.moves import input


 # [START export_table]
@@ -82,19 +83,19 @@ def run(cloud_storage_path,

 # [START main]
 def main():
-    projectId = raw_input("Enter the project ID: ")
-    datasetId = raw_input("Enter a dataset ID: ")
-    tableId = raw_input("Enter a table name to copy: ")
-    cloud_storage_path = raw_input(
+    projectId = input("Enter the project ID: ")
+    datasetId = input("Enter a dataset ID: ")
+    tableId = input("Enter a table name to copy: ")
+    cloud_storage_path = input(
         "Enter a Google Cloud Storage URI: ")
-    interval = raw_input(
+    interval = input(
         "Enter how often to poll the job (in seconds): ")
-    num_retries = raw_input(
+    num_retries = input(
         "Enter the number of retries in case of 500 error: ")

     run(cloud_storage_path,
         projectId, datasetId, tableId,
         num_retries, interval)

-    print 'Done exporting!'
+    print('Done exporting!')
 # [END main]
diff --git a/bigquery/samples/list_datasets_projects.py b/bigquery/samples/list_datasets_projects.py
index eac7e2cf38f8..026bb5530cef 100644
--- a/bigquery/samples/list_datasets_projects.py
+++ b/bigquery/samples/list_datasets_projects.py
@@ -31,7 +31,8 @@ where <project-id> is the id of the developers console [3] project
 you'd like to list the bigquery datasets and projects for.

-[1] https://developers.google.com/identity/protocols/application-default-credentials#howtheywork
+[1] https://developers.google.com/identity/protocols/\
+    application-default-credentials#howtheywork
 [2] https://cloud.google.com/sdk/
 [3] https://console.developers.google.com
 """  # NOQA
@@ -39,11 +40,9 @@
 import argparse
 from pprint import pprint

-from urllib2 import HTTPError
-
 from apiclient import discovery
-
 from oauth2client.client import GoogleCredentials
+from six.moves.urllib.error import HTTPError


 # [START list_datasets]
diff --git a/bigquery/samples/load_data_by_post.py b/bigquery/samples/load_data_by_post.py
index 8ba4b8838270..26b1e2236ffd 100644
--- a/bigquery/samples/load_data_by_post.py
+++ b/bigquery/samples/load_data_by_post.py
@@ -14,10 +14,9 @@
 import json

 from bigquery.samples.utils import get_service, poll_job
-
 import httplib2
-
 from oauth2client.client import GoogleCredentials
+from six.moves import input


 # [START make_post]
@@ -75,16 +74,16 @@ def make_post(http, schema, data, projectId, datasetId, tableId):
 def main():
     credentials = GoogleCredentials.get_application_default()
     http = credentials.authorize(httplib2.Http())
-    projectId = raw_input('Enter the project ID: ')
-    datasetId = raw_input('Enter a dataset ID: ')
-    tableId = raw_input('Enter a table name to load the data to: ')
-    schema_path = raw_input(
+    projectId = input('Enter the project ID: ')
+    datasetId = input('Enter a dataset ID: ')
+    tableId = input('Enter a table name to load the data to: ')
+    schema_path = input(
         'Enter the path to the schema file for the table: ')
     with open(schema_path, 'r') as schema_file:
         schema = schema_file.read()

-    data_path = raw_input('Enter the path to the data file: ')
+    data_path = input('Enter the path to the data file: ')

     with open(data_path, 'r') as data_file:
         data = data_file.read()
diff --git a/bigquery/samples/load_data_from_csv.py b/bigquery/samples/load_data_from_csv.py
index a58fc32d9885..b8000785cbba 100644
--- a/bigquery/samples/load_data_from_csv.py
+++ b/bigquery/samples/load_data_from_csv.py
@@ -15,6 +15,7 @@
 import uuid

 from bigquery.samples.utils import get_service, poll_job
+from six.moves import input


 # [START load_table]
@@ -81,20 +82,20 @@ def run(source_schema, source_csv,

 # [START main]
 def main():
-    projectId = raw_input("Enter the project ID: ")
-    datasetId = raw_input("Enter a dataset ID: ")
-    tableId = raw_input("Enter a destination table name: ")
+    projectId = input("Enter the project ID: ")
+    datasetId = input("Enter a dataset ID: ")
+    tableId = input("Enter a destination table name: ")

-    schema_file_path = raw_input(
+    schema_file_path = input(
         "Enter the path to the table schema: ")
     with open(schema_file_path, 'r') as schema_file:
         schema = json.load(schema_file)

-    data_file_path = raw_input(
+    data_file_path = input(
         "Enter the Cloud Storage path for the CSV file: ")

-    num_retries = raw_input(
+    num_retries = input(
         "Enter number of times to retry in case of 500 error: ")
-    interval = raw_input(
+    interval = input(
         "Enter how often to poll the query for completion (seconds): ")

     run(schema, data_file_path,
diff --git a/bigquery/samples/streaming.py b/bigquery/samples/streaming.py
index dd8de3783fe5..958efcf5f991 100644
--- a/bigquery/samples/streaming.py
+++ b/bigquery/samples/streaming.py
@@ -11,13 +11,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from __future__ import print_function
-
 import ast
 import json
 import uuid

 from bigquery.samples.utils import get_service
+from six.moves import input


 # [START stream_row_to_bigquery]
@@ -57,18 +56,18 @@ def run(project_id, dataset_id, table_id, rows, num_retries):

 # [START main]
 def get_rows():
-    line = raw_input("Enter a row (python dict) into the table: ")
+    line = input("Enter a row (python dict) into the table: ")
     while line:
         yield ast.literal_eval(line)
-        line = raw_input("Enter another row into the table \n" +
-                         "[hit enter to stop]: ")
+        line = input("Enter another row into the table \n" +
+                     "[hit enter to stop]: ")


 def main():
-    project_id = raw_input("Enter the project ID: ")
-    dataset_id = raw_input("Enter a dataset ID: ")
-    table_id = raw_input("Enter a table ID : ")
-    num_retries = int(raw_input(
+    project_id = input("Enter the project ID: ")
+    dataset_id = input("Enter a dataset ID: ")
+    table_id = input("Enter a table ID : ")
+    num_retries = int(input(
         "Enter number of times to retry in case of 500 error: "))

     for result in run(project_id, dataset_id, table_id,
diff --git a/bigquery/samples/sync_query.py b/bigquery/samples/sync_query.py
index aab5a3123657..56be93ef2622 100644
--- a/bigquery/samples/sync_query.py
+++ b/bigquery/samples/sync_query.py
@@ -11,11 +11,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from __future__ import print_function  # For python 2/3 interoperability
-
 import json

 from bigquery.samples.utils import get_service, paging
+from six.moves import input


 # [START sync_query]
@@ -49,12 +48,12 @@ def run(project_id, query, timeout, num_retries):

 # [START main]
 def main():
-    project_id = raw_input("Enter the project ID: ")
-    query_string = raw_input("Enter the Bigquery SQL Query: ")
-    timeout = raw_input(
+    project_id = input("Enter the project ID: ")
+    query_string = input("Enter the Bigquery SQL Query: ")
+    timeout = input(
         "Enter how long to wait for the query to complete in milliseconds"
         "\n (if longer than 10 seconds, use an asynchronous query): ")
-    num_retries = int(raw_input(
+    num_retries = int(input(
         "Enter how many times to retry in case of server error"))

     for result in run(project_id, query_string, timeout, num_retries):
diff --git a/bigquery/tests/test_async_query.py b/bigquery/tests/test_async_query.py
index d2dd58472316..0792cb57072f 100644
--- a/bigquery/tests/test_async_query.py
+++ b/bigquery/tests/test_async_query.py
@@ -37,7 +37,9 @@ def test_async_query_runner(self):
         test_project_id = os.environ.get(tests.PROJECT_ID_ENV)
         answers = [test_project_id, self.constants['query'], 'n',
                    '1', '1']
-        with tests.mock_raw_input(answers):
+
+        with tests.mock_input_answers(
+                answers, target='bigquery.samples.async_query.input'):
             main()
diff --git a/compute/autoscaler/demo/frontend.py b/compute/autoscaler/demo/frontend.py
index f985ad404601..e464e9af7856 100644
--- a/compute/autoscaler/demo/frontend.py
+++ b/compute/autoscaler/demo/frontend.py
@@ -22,10 +22,15 @@
 autoscaler/demo/tests/test_frontend.py
 """

-import BaseHTTPServer
+try:
+    import BaseHTTPServer
+    import SocketServer
+except ImportError:
+    import http.server as BaseHTTPServer
+    import socketserver as SocketServer
+
 from multiprocessing import Process
 import os
-import SocketServer
 import sys
 import time
diff --git a/monitoring/samples/auth.py b/monitoring/samples/auth.py
index 7e4aa6201f64..2207694af086 100644
--- a/monitoring/samples/auth.py
+++ b/monitoring/samples/auth.py
@@ -35,7 +35,8 @@
     $ export \
         GOOGLE_APPLICATION_CREDENTIALS=/path/to/json-key.json

-[1] https://developers.google.com/identity/protocols/application-default-credentials
+[1] https://developers.google.com/identity/protocols/\
+    application-default-credentials
 [2] https://console.developers.google.com/project/_/apiui/credential
 """  # NOQA
@@ -62,19 +63,19 @@ def ListTimeseries(project_name, service):

     timeseries = service.timeseries()

-    print 'Timeseries.list raw response:'
+    print('Timeseries.list raw response:')
     try:
         response = timeseries.list(
             project=project_name, metric=METRIC, youngest=YOUNGEST).execute()

-        print json.dumps(response,
+        print(json.dumps(response,
                          sort_keys=True,
                          indent=4,
-                         separators=(',', ': '))
+                         separators=(',', ': ')))
     except:
-        print 'Error:'
+        print('Error:')
         for error in sys.exc_info():
-            print error
+            print(error)


 def main(project_name):
@@ -87,7 +88,7 @@ def main(project_name):

 if __name__ == '__main__':
     if len(sys.argv) != 2:
-        print "Usage: %s <project_name>" % sys.argv[0]
+        print("Usage: {} <project_name>".format(sys.argv[0]))
         sys.exit(1)
     main(sys.argv[1])
 # [END all]
diff --git a/storage/compose_objects.py b/storage/compose_objects.py
index 9ede81caf1a1..b0619f4738a9 100644
--- a/storage/compose_objects.py
+++ b/storage/compose_objects.py
@@ -73,8 +73,8 @@ def main(argv):
             name=filename,
             bucket=args.bucket)
         resp = req.execute()
-        print '> Uploaded source file %s' % filename
-        print json.dumps(resp, indent=2)
+        print('> Uploaded source file {}'.format(filename))
+        print(json.dumps(resp, indent=2))

     # Construct a request to compose the source files into the destination.
     compose_req_body = {
@@ -88,8 +88,8 @@ def main(argv):
         destinationObject=args.destination,
         body=compose_req_body)
     resp = req.execute()
-    print '> Composed files into %s' % args.destination
-    print json.dumps(resp, indent=2)
+    print('> Composed files into {}'.format(args.destination))
+    print(json.dumps(resp, indent=2))

     # Download and print the composed object.
     req = service.objects().get_media(
@@ -97,8 +97,8 @@ def main(argv):
         object=args.destination)
     res = req.execute()

-    print '> Composed file contents:'
-    print res
+    print('> Composed file contents:')
+    print(res)


 if __name__ == '__main__':
diff --git a/storage/list_objects.py b/storage/list_objects.py
index 231899ee43b5..f5ec4aa5cb1f 100644
--- a/storage/list_objects.py
+++ b/storage/list_objects.py
@@ -64,7 +64,7 @@ def main(argv):
     # specified bucket.
     req = service.buckets().get(bucket=args.bucket)
     resp = req.execute()
-    print json.dumps(resp, indent=2)
+    print(json.dumps(resp, indent=2))
     # [END list_bucket]

     # Create a request to objects.list to retrieve a list of objects.
@@ -76,7 +76,7 @@ def main(argv):
     # automatically handle paging with the pageToken.
     while req is not None:
         resp = req.execute()
-        print json.dumps(resp, indent=2)
+        print(json.dumps(resp, indent=2))
         req = service.objects().list_next(req, resp)

 if __name__ == '__main__':
diff --git a/tests/__init__.py b/tests/__init__.py
index 0b10a41ca0d3..2305025918a2 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -11,122 +11,23 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-"""
-Common testing utilities between samples
-"""
-
-import __builtin__
-import contextlib
-import json
-import os
-import StringIO
-import sys
-import tempfile
-import unittest
-
-from nose.plugins.skip import SkipTest
-
-try:
-    APPENGINE_AVAILABLE = True
-    from google.appengine.datastore import datastore_stub_util
-    from google.appengine.ext import testbed
-except ImportError:
-    APPENGINE_AVAILABLE = False
-
-BUCKET_NAME_ENV = 'TEST_BUCKET_NAME'
-PROJECT_ID_ENV = 'TEST_PROJECT_ID'
-RESOURCE_PATH = os.path.join(
-    os.path.abspath(os.path.dirname(__file__)), 'resources')
-
-
-class mock_raw_input(object):
-
-    def __init__(self, list_):
-        self.i = 0
-        self.list_ = list_
-
-    def get_next_value(self, question):
-        ret = self.list_[self.i]
-        self.i += 1
-        return ret
-
-    def __enter__(self):
-        self.raw_input_cache = __builtin__.raw_input
-        __builtin__.raw_input = self.get_next_value
-
-    def __exit__(self, exc_type, exc_value, traceback):
-        __builtin__.raw_input = self.raw_input_cache
-
-
-class CloudBaseTest(unittest.TestCase):
-
-    def setUp(self):
-        self.resource_path = RESOURCE_PATH
-
-        # A hack to prevent get_application_default from going GAE route.
-        self._server_software_org = os.environ.get('SERVER_SOFTWARE')
-        os.environ['SERVER_SOFTWARE'] = ''
-
-        # Constants from environment
-        test_bucket_name = os.environ.get(BUCKET_NAME_ENV, '')
-        test_project_id = os.environ.get(PROJECT_ID_ENV, '')
-        if not test_project_id or not test_bucket_name:
-            raise Exception('You need to define an env var "%s" and "%s" to '
-                            'run the test.'
-                            % (PROJECT_ID_ENV, BUCKET_NAME_ENV))
-
-        # Constants from resources/constants.json
-        with open(
-                os.path.join(RESOURCE_PATH, 'constants.json'),
-                'r') as constants_file:
-
-            self.constants = json.load(constants_file)
-        self.constants['projectId'] = test_project_id
-        self.constants['bucketName'] = test_bucket_name
-        self.constants['cloudStorageInputURI'] = (
-            self.constants['cloudStorageInputURI'] % test_bucket_name)
-        self.constants['cloudStorageOutputURI'] = (
-            self.constants['cloudStorageOutputURI'] % test_bucket_name)
-
-    def tearDown(self):
-        if self._server_software_org:
-            os.environ['SERVER_SOFTWARE'] = self._server_software_org
-
-
-class DatastoreTestbedCase(unittest.TestCase):
-    """A base test case for common setup/teardown tasks for test."""
-    def setUp(self):
-        if not APPENGINE_AVAILABLE:
-            raise SkipTest()
-
-        """Setup the datastore and memcache stub."""
-        # First, create an instance of the Testbed class.
-        self.testbed = testbed.Testbed()
-        # Then activate the testbed, which prepares the service stubs for
-        # use.
-        self.testbed.activate()
-        # Create a consistency policy that will simulate the High
-        # Replication consistency model.
-        self.policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(
-            probability=0)
-        # Initialize the datastore stub with this policy.
-        self.testbed.init_datastore_v3_stub(
-            datastore_file=tempfile.mkstemp()[1],
-            consistency_policy=self.policy)
-        self.testbed.init_memcache_stub()
-
-    def tearDown(self):
-        self.testbed.deactivate()
-
-
-@contextlib.contextmanager
-def capture_stdout():
-    """Capture stdout."""
-    fake_stdout = StringIO.StringIO()
-    old_stdout = sys.stdout
-
-    try:
-        sys.stdout = fake_stdout
-        yield fake_stdout
-    finally:
-        sys.stdout = old_stdout
+from .utils import (
+    BUCKET_NAME_ENV,
+    capture_stdout,
+    CloudBaseTest,
+    DatastoreTestbedCase,
+    mock_input_answers,
+    PROJECT_ID_ENV,
+    RESOURCE_PATH)
+
+
+__all__ = [
+    'BUCKET_NAME_ENV',
+    'capture_stdout',
+    'CloudBaseTest',
+    'DatastoreTestbedCase',
+    'mock_input_answers',
+    'PROJECT_ID_ENV',
+    'RESOURCE_PATH'
+]
diff --git a/tests/utils.py b/tests/utils.py
new file mode 100644
index 000000000000..4006262afe4f
--- /dev/null
+++ b/tests/utils.py
@@ -0,0 +1,135 @@
+# Copyright 2015, Google, Inc.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+Common testing utilities between samples
+"""
+
+import contextlib
+import json
+import os
+import sys
+import tempfile
+import unittest
+
+from mock import patch
+from nose.plugins.skip import SkipTest
+from six.moves import cStringIO
+
+try:
+    APPENGINE_AVAILABLE = True
+    from google.appengine.datastore import datastore_stub_util
+    from google.appengine.ext import testbed
+except ImportError:
+    APPENGINE_AVAILABLE = False
+
+BUCKET_NAME_ENV = 'TEST_BUCKET_NAME'
+PROJECT_ID_ENV = 'TEST_PROJECT_ID'
+RESOURCE_PATH = os.path.join(
+    os.path.abspath(os.path.dirname(__file__)), 'resources')
+
+
+# TODO: This can be written as a much simpler context manager.
+class mock_input_answers(object):
+
+    def __init__(self, list_, target):
+        self.i = 0
+        self.list_ = list_
+        self.target = target
+
+    def get_next_value(self, question):
+        ret = self.list_[self.i]
+        self.i += 1
+        print('Responding to {} with {}'.format(question, ret))
+        return u"{}".format(ret)
+
+    def __enter__(self):
+        self.patch = patch(self.target, self.get_next_value)
+        self.patch.__enter__()
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        self.patch.__exit__(exc_type, exc_value, traceback)
+
+
+class CloudBaseTest(unittest.TestCase):
+
+    def setUp(self):
+        self.resource_path = RESOURCE_PATH
+
+        # A hack to prevent get_application_default from going GAE route.
+        self._server_software_org = os.environ.get('SERVER_SOFTWARE')
+        os.environ['SERVER_SOFTWARE'] = ''
+
+        # Constants from environment
+        test_bucket_name = os.environ.get(BUCKET_NAME_ENV, '')
+        test_project_id = os.environ.get(PROJECT_ID_ENV, '')
+        if not test_project_id or not test_bucket_name:
+            raise Exception('You need to define an env var "%s" and "%s" to '
+                            'run the test.'
+                            % (PROJECT_ID_ENV, BUCKET_NAME_ENV))
+
+        # Constants from resources/constants.json
+        with open(
+                os.path.join(RESOURCE_PATH, 'constants.json'),
+                'r') as constants_file:
+
+            self.constants = json.load(constants_file)
+        self.constants['projectId'] = test_project_id
+        self.constants['bucketName'] = test_bucket_name
+        self.constants['cloudStorageInputURI'] = (
+            self.constants['cloudStorageInputURI'] % test_bucket_name)
+        self.constants['cloudStorageOutputURI'] = (
+            self.constants['cloudStorageOutputURI'] % test_bucket_name)
+
+    def tearDown(self):
+        if self._server_software_org:
+            os.environ['SERVER_SOFTWARE'] = self._server_software_org
+
+
+class DatastoreTestbedCase(unittest.TestCase):
+    """A base test case for common setup/teardown tasks for test."""
+    def setUp(self):
+        if not APPENGINE_AVAILABLE:
+            raise SkipTest()
+
+        """Setup the datastore and memcache stub."""
+        # First, create an instance of the Testbed class.
+        self.testbed = testbed.Testbed()
+        # Then activate the testbed, which prepares the service stubs for
+        # use.
+        self.testbed.activate()
+        # Create a consistency policy that will simulate the High
+        # Replication consistency model.
+        self.policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(
+            probability=0)
+        # Initialize the datastore stub with this policy.
+        self.testbed.init_datastore_v3_stub(
+            datastore_file=tempfile.mkstemp()[1],
+            consistency_policy=self.policy)
+        self.testbed.init_memcache_stub()
+
+    def tearDown(self):
+        self.testbed.deactivate()
+
+
+@contextlib.contextmanager
+def capture_stdout():
+    """Capture stdout."""
+    fake_stdout = cStringIO()
+    old_stdout = sys.stdout
+
+    try:
+        sys.stdout = fake_stdout
+        yield fake_stdout
+    finally:
+        sys.stdout = old_stdout
diff --git a/tox.ini b/tox.ini
index cf07060946e8..f04e19d81685 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,6 +1,6 @@
 [tox]
 skipsdist = True
-envlist = gae, py27, pep8
+envlist = gae, py27, py34, pep8

 [testenv]
 passenv = PYTHONPATH GOOGLE_* GCLOUD_* TEST_* TRAVIS*
@@ -49,6 +49,21 @@ commands =
     {[testenv]coverargs} \
    {posargs}

+[testenv:py34]
+basepython = python3.4
+deps =
+    {[testenv]deps}
+    gcloud
+commands =
+    nosetests \
+        --exclude-dir=bigquery/tests/appengine \
+        --exclude-dir=bigquery/samples/appengine_auth \
+        --exclude-dir=appengine \
+        --exclude-dir=datastore/ndb \
+        --exclude-dir=localtesting \
+        {[testenv]coverargs} \
+        {posargs}
+
 [testenv:pep8]
 deps =
     flake8
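Note for reviewers: the test helper above relies on `mock.patch` replacing a name where it is *looked up*, not where it is defined. That is why each sample now does `from six.moves import input` (binding the name into its own module namespace) and why the test passes `target='bigquery.samples.async_query.input'` rather than patching `six.moves` itself. A minimal sketch of the pattern, assuming a hypothetical sample module named `my_sample` (the module name and prompt strings are illustrative, not part of this change):

    # my_sample.py -- a hypothetical sample following the pattern in this patch
    from six.moves import input  # resolves to raw_input on py2, builtins.input on py3


    def main():
        # input() always returns a string, so numeric answers need an explicit
        # cast -- the same reason async_query.py wraps its input() in int().
        name = input("Enter a name: ")
        retries = int(input("Enter number of retries: "))
        return name, retries

    # test_my_sample.py -- drives main() without a real console
    import my_sample

    from tests import mock_input_answers


    def test_main():
        # The target is the lookup location, my_sample.input; patching
        # six.moves.input would not affect the name already imported above.
        with mock_input_answers(['Alice', '3'], target='my_sample.input'):
            assert my_sample.main() == ('Alice', 3)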