diff --git a/perfkitbenchmarker/linux_benchmarks/cloud_bigtable_ycsb_benchmark.py b/perfkitbenchmarker/linux_benchmarks/cloud_bigtable_ycsb_benchmark.py
index 26acdd0897..a4f3f88e51 100644
--- a/perfkitbenchmarker/linux_benchmarks/cloud_bigtable_ycsb_benchmark.py
+++ b/perfkitbenchmarker/linux_benchmarks/cloud_bigtable_ycsb_benchmark.py
@@ -298,11 +298,10 @@ def _GetCpuUtilizationSample(
   # pylint: disable=g-import-not-at-top
   from google.cloud import monitoring_v3
   from google.cloud.monitoring_v3 import query
-  from google.cloud.monitoring_v3.gapic.transports import metric_service_grpc_transport

   client = monitoring_v3.MetricServiceClient(
-      transport=metric_service_grpc_transport.MetricServiceGrpcTransport(
-          address=_MONITORING_ADDRESS.value
+      transport=monitoring_v3.services.metric_service.transports.grpc.MetricServiceGrpcTransport(
+          host=_MONITORING_ADDRESS.value
       )
   )

diff --git a/perfkitbenchmarker/providers/gcp/gcp_cloud_redis.py b/perfkitbenchmarker/providers/gcp/gcp_cloud_redis.py
index 341fecbff4..6106861119 100644
--- a/perfkitbenchmarker/providers/gcp/gcp_cloud_redis.py
+++ b/perfkitbenchmarker/providers/gcp/gcp_cloud_redis.py
@@ -23,7 +23,6 @@

 from absl import flags
 from google.cloud import monitoring_v3
-from google.cloud.monitoring_v3.types import TimeInterval
 from perfkitbenchmarker import errors
 from perfkitbenchmarker import managed_memory_store
 from perfkitbenchmarker import provider_info
@@ -240,7 +239,10 @@ def MeasureCpuUtilization(self, interval_length):  # pytype: disable=signature-m
     """Measure the average CPU utilization on GCP instance in percentage."""
     now = time.time()
     seconds = int(now)
-    interval = TimeInterval()
-    interval.end_time.seconds = seconds
-    interval.start_time.seconds = seconds - interval_length
+    # proto-plus returns nested timestamps by value, so build the interval in
+    # the constructor instead of mutating end_time/start_time in place.
+    interval = monitoring_v3.TimeInterval({
+        'end_time': {'seconds': seconds},
+        'start_time': {'seconds': seconds - interval_length},
+    })
     client = monitoring_v3.MetricServiceClient()
@@ -260,7 +262,7 @@ def MeasureCpuUtilization(self, interval_length):  # pytype: disable=signature-m
         name='projects/' + self.project,
-        filter_=api_filter,
+        filter=api_filter,
         interval=interval,
-        view=monitoring_v3.enums.ListTimeSeriesRequest.TimeSeriesView.FULL,
+        view=monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.FULL,
     )
     return self._ParseMonitoringTimeSeries(time_series)

diff --git a/perfkitbenchmarker/providers/gcp/gcp_dpb_dataflow.py b/perfkitbenchmarker/providers/gcp/gcp_dpb_dataflow.py
index d6cf0cb56c..047736bc25 100644
--- a/perfkitbenchmarker/providers/gcp/gcp_dpb_dataflow.py
+++ b/perfkitbenchmarker/providers/gcp/gcp_dpb_dataflow.py
@@ -431,10 +431,8 @@ def GetAvgCpuUtilization(
     # Shift TZ of datetime arguments since FromDatetime() assumes UTC
     # See
     # https://googleapis.dev/python/protobuf/latest/google/protobuf/timestamp_pb2.html#google.protobuf.timestamp_pb2.Timestamp.FromDatetime
-    interval.start_time.FromDatetime(
-        start_time.astimezone(datetime.timezone.utc)
-    )
-    interval.end_time.FromDatetime(end_time.astimezone(datetime.timezone.utc))
+    interval.start_time = start_time.astimezone(datetime.timezone.utc)
+    interval.end_time = end_time.astimezone(datetime.timezone.utc)

     api_filter = (
         'metric.type = "compute.googleapis.com/instance/cpu/utilization" '
@@ -449,14 +447,16 @@
         group_by_fields=['resource.instance_id'],
     )

-    results = client.list_time_series(
+    req = types.ListTimeSeriesRequest(
         name=project_name,
-        filter_=api_filter,
+        filter=api_filter,
         interval=interval,
-        view=monitoring_v3.enums.ListTimeSeriesRequest.TimeSeriesView.FULL,
+        view=monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.FULL,
         aggregation=aggregation,
     )

+    results = client.list_time_series(req)
+
     if not results:
       logging.warning(
           'No monitoring data found. Unable to calculate avg CPU utilization.'
@@ -502,10 +502,8 @@ def GetMaxOutputThroughput(
     # Shift TZ of datetime arguments since FromDatetime() assumes UTC
     # See
     # https://googleapis.dev/python/protobuf/latest/google/protobuf/timestamp_pb2.html#google.protobuf.timestamp_pb2.Timestamp.FromDatetime
-    interval.start_time.FromDatetime(
-        start_time.astimezone(datetime.timezone.utc)
-    )
-    interval.end_time.FromDatetime(end_time.astimezone(datetime.timezone.utc))
+    interval.start_time = start_time.astimezone(datetime.timezone.utc)
+    interval.end_time = end_time.astimezone(datetime.timezone.utc)

     api_filter = (
         'metric.type = "dataflow.googleapis.com/job/elements_produced_count" '
@@ -519,14 +517,16 @@
         per_series_aligner=types.Aggregation.Aligner.ALIGN_RATE,
     )

-    results = client.list_time_series(
+    req = types.ListTimeSeriesRequest(
         name=project_name,
-        filter_=api_filter,
+        filter=api_filter,
         interval=interval,
-        view=monitoring_v3.enums.ListTimeSeriesRequest.TimeSeriesView.FULL,
+        view=monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.FULL,
         aggregation=aggregation,
     )

+    results = client.list_time_series(req)
+
     if not results:
       logging.warning(
           'No monitoring data found. Unable to calculate max throughput.'
@@ -545,8 +545,8 @@ def GetSubscriptionBacklogSize(self, subscription_name, interval_length=4):
     delta = datetime.timedelta(minutes=interval_length)

     interval = types.TimeInterval()
-    interval.start_time.FromDatetime(now - delta)
-    interval.end_time.FromDatetime(now)
+    interval.start_time = now - delta
+    interval.end_time = now

     api_filter = (
         'metric.type = "pubsub.googleapis.com/subscription/'
@@ -556,9 +556,9 @@

     results = client.list_time_series(
         name=project_name,
-        filter_=api_filter,
+        filter=api_filter,
         interval=interval,
-        view=monitoring_v3.enums.ListTimeSeriesRequest.TimeSeriesView.FULL,
+        view=monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.FULL,
     )

     return round(self._GetLastValueFromTimeSeries(results), 2)
diff --git a/requirements.txt b/requirements.txt
index c9570c5b96..fccc9a72dc 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -27,7 +27,7 @@ six>=1.13.0
 pywinrm
 timeout-decorator
 google-cloud-datastore
-google-cloud-monitoring>=0.31.1
+google-cloud-monitoring>=2.0.0
 beautifulsoup4
 requests
 seaborn
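
Reviewer note, not part of the patch: the hunks above all converge on the same google-cloud-monitoring>=2.0.0 call pattern. Below is a minimal, self-contained sketch of that pattern; the project ID and metric filter are hypothetical placeholders, not values from this change.

    import datetime

    from google.cloud import monitoring_v3

    client = monitoring_v3.MetricServiceClient()

    # proto-plus timestamp fields accept datetimes assigned directly (naive
    # datetimes are treated as UTC), replacing the old FromDatetime() calls.
    end = datetime.datetime.now(datetime.timezone.utc)
    interval = monitoring_v3.TimeInterval()
    interval.start_time = end - datetime.timedelta(minutes=5)
    interval.end_time = end

    # Only name/filter/interval/view are flattened keyword arguments on
    # list_time_series() in v2 (and "filter_" lost its trailing underscore);
    # anything else, e.g. aggregation, must go through an explicit request.
    request = monitoring_v3.ListTimeSeriesRequest(
        name='projects/my-project',  # hypothetical project ID
        filter='metric.type = "compute.googleapis.com/instance/cpu/utilization"',
        interval=interval,
        view=monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.FULL,
    )
    for series in client.list_time_series(request):
        print(series.metric.type)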