diff --git a/PKG-INFO b/PKG-INFO index c094888..081352a 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,10 +1,10 @@ Metadata-Version: 2.1 Name: tuneinsight -Version: 0.6.0 -Summary: Diapason is the official Python SDK for the Tune Insight API. Version 0.6.0 targets the API v0.7.8. +Version: 0.6.2 +Summary: Diapason is the official Python SDK for the Tune Insight API. Version 0.6.2 targets the API v0.8.0. License: Apache-2.0 Author: Tune Insight SA -Requires-Python: >=3.7.1 +Requires-Python: >=3.8,<3.12 Classifier: License :: OSI Approved :: Apache Software License Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.8 @@ -13,20 +13,16 @@ Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Requires-Dist: PyYAML (>=6.0,<7.0) Requires-Dist: attrs (>=21.3.0) +Requires-Dist: black (>=24.1.0,<25.0.0) Requires-Dist: certifi (>=2023.7.22,<2024.0.0) -Requires-Dist: docker (>=6.0.1,<7.0.0) Requires-Dist: httpx (>=0.15.4,<0.24.0) Requires-Dist: matplotlib (>=3.5.0,<4.0.0) Requires-Dist: notebook (>=6.4.11,<7.0.0) Requires-Dist: pandas (>=1.3.5,<2.0.0) -Requires-Dist: pylint (>=2.13.2,<3.0.0) Requires-Dist: python-dateutil (>=2.8.0,<3.0.0) Requires-Dist: python-dotenv (>=0.21.0,<0.22.0) Requires-Dist: python-keycloak (>=0.27.0,<0.28.0) -Requires-Dist: pyvcf3 (>=1.0.3,<2.0.0) -Requires-Dist: selenium (>=4.9.1,<5.0.0) Requires-Dist: typing-extensions (>=4.6.3,<5.0.0) -Requires-Dist: wheel (>=0.38.1,<0.39.0) Description-Content-Type: text/markdown # Tune Insight Python SDK diff --git a/pyproject.toml b/pyproject.toml index aa30999..dc587b4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,7 @@ [tool.poetry] name = "tuneinsight" -version = "0.6.0" -description = "Diapason is the official Python SDK for the Tune Insight API. Version 0.6.0 targets the API v0.7.8." +version = "0.6.2" +description = "Diapason is the official Python SDK for the Tune Insight API. Version 0.6.2 targets the API v0.8.0." 
authors = ["Tune Insight SA"] license = "Apache-2.0" include = [ @@ -12,28 +12,37 @@ include = [ readme = "src/tuneinsight/README.md" [tool.poetry.dependencies] -python = ">= 3.7.1" +python = ">= 3.8,<3.12" python-keycloak = "^0.27.0" pandas = "^1.3.5" PyYAML = "^6.0" -#cloudpickle = "^2.0.0" // TODO: to add in the future to run python on backend -wheel = "^0.38.1" -pylint = "^2.13.2" -docker = "^6.0.1" notebook = "^6.4.11" python-dotenv = "^0.21.0" -pyvcf3 = "^1.0.3" python-dateutil = "^2.8.0" matplotlib = "^3.5.0" -selenium = "^4.9.1" typing-extensions = "^4.6.3" # Required by ge_co_rest_api httpx = ">=0.15.4,<0.24.0" attrs = ">=21.3.0" certifi = "^2023.7.22" +black = "^24.1.0" +[tool.poetry.group.dev.dependencies] +selenium = "^4.9.1" +wheel = "^0.38.1" +docker = "^6.0.1" +pylint = "^2.13.2" +pyvcf3 = "^1.0.3" # For GWAS .vcf file parsing [build-system] requires = ["poetry-core>=1.0.0"] build-backend = "poetry.core.masonry.api" + +[tool.black] +include = '\.pyi?$' +extend-exclude=''' +( + src/tuneinsight/api +) +''' diff --git a/src/tuneinsight/api/sdk/api/api_computations/compute.py b/src/tuneinsight/api/sdk/api/api_computations/compute.py index 6cea4a3..0992888 100644 --- a/src/tuneinsight/api/sdk/api/api_computations/compute.py +++ b/src/tuneinsight/api/sdk/api/api_computations/compute.py @@ -14,6 +14,7 @@ from ...models.distributed_join import DistributedJoin from ...models.dummy import Dummy from ...models.encrypted_aggregation import EncryptedAggregation +from ...models.encrypted_mean import EncryptedMean from ...models.encrypted_prediction import EncryptedPrediction from ...models.encrypted_regression import EncryptedRegression from ...models.error import Error @@ -45,6 +46,7 @@ def _get_kwargs( "DistributedJoin", "Dummy", "EncryptedAggregation", + "EncryptedMean", "EncryptedPrediction", "EncryptedRegression", "GWAS", @@ -135,6 +137,9 @@ def _get_kwargs( elif isinstance(json_body, PrivateSearch): json_json_body = json_body.to_dict() + elif isinstance(json_body, PrivateSearchSetup): + json_json_body = json_body.to_dict() + else: json_json_body = json_body.to_dict() @@ -196,6 +201,7 @@ def sync_detailed( "DistributedJoin", "Dummy", "EncryptedAggregation", + "EncryptedMean", "EncryptedPrediction", "EncryptedRegression", "GWAS", @@ -218,9 +224,9 @@ def sync_detailed( Args: json_body (Union['AggregatedDatasetLength', 'Bootstrap', 'CollectiveKeyGen', 'CollectiveKeySwitch', 'DatasetStatistics', 'DistributedJoin', 'Dummy', - 'EncryptedAggregation', 'EncryptedPrediction', 'EncryptedRegression', 'GWAS', 'HybridFL', - 'KeySwitchedComputation', 'PrivateSearch', 'PrivateSearchSetup', 'RelinKeyGen', - 'RotKeyGen', 'SampleExtraction', 'SetIntersection', 'SetupSession', + 'EncryptedAggregation', 'EncryptedMean', 'EncryptedPrediction', 'EncryptedRegression', + 'GWAS', 'HybridFL', 'KeySwitchedComputation', 'PrivateSearch', 'PrivateSearchSetup', + 'RelinKeyGen', 'RotKeyGen', 'SampleExtraction', 'SetIntersection', 'SetupSession', 'StatisticalAggregation', 'SurvivalAggregation', 'VBinnedAggregation']): Raises: @@ -256,6 +262,7 @@ def sync( "DistributedJoin", "Dummy", "EncryptedAggregation", + "EncryptedMean", "EncryptedPrediction", "EncryptedRegression", "GWAS", @@ -278,9 +285,9 @@ def sync( Args: json_body (Union['AggregatedDatasetLength', 'Bootstrap', 'CollectiveKeyGen', 'CollectiveKeySwitch', 'DatasetStatistics', 'DistributedJoin', 'Dummy', - 'EncryptedAggregation', 'EncryptedPrediction', 'EncryptedRegression', 'GWAS', 'HybridFL', - 'KeySwitchedComputation', 'PrivateSearch', 'PrivateSearchSetup', 
'RelinKeyGen', - 'RotKeyGen', 'SampleExtraction', 'SetIntersection', 'SetupSession', + 'EncryptedAggregation', 'EncryptedMean', 'EncryptedPrediction', 'EncryptedRegression', + 'GWAS', 'HybridFL', 'KeySwitchedComputation', 'PrivateSearch', 'PrivateSearchSetup', + 'RelinKeyGen', 'RotKeyGen', 'SampleExtraction', 'SetIntersection', 'SetupSession', 'StatisticalAggregation', 'SurvivalAggregation', 'VBinnedAggregation']): Raises: @@ -309,6 +316,7 @@ async def asyncio_detailed( "DistributedJoin", "Dummy", "EncryptedAggregation", + "EncryptedMean", "EncryptedPrediction", "EncryptedRegression", "GWAS", @@ -331,9 +339,9 @@ async def asyncio_detailed( Args: json_body (Union['AggregatedDatasetLength', 'Bootstrap', 'CollectiveKeyGen', 'CollectiveKeySwitch', 'DatasetStatistics', 'DistributedJoin', 'Dummy', - 'EncryptedAggregation', 'EncryptedPrediction', 'EncryptedRegression', 'GWAS', 'HybridFL', - 'KeySwitchedComputation', 'PrivateSearch', 'PrivateSearchSetup', 'RelinKeyGen', - 'RotKeyGen', 'SampleExtraction', 'SetIntersection', 'SetupSession', + 'EncryptedAggregation', 'EncryptedMean', 'EncryptedPrediction', 'EncryptedRegression', + 'GWAS', 'HybridFL', 'KeySwitchedComputation', 'PrivateSearch', 'PrivateSearchSetup', + 'RelinKeyGen', 'RotKeyGen', 'SampleExtraction', 'SetIntersection', 'SetupSession', 'StatisticalAggregation', 'SurvivalAggregation', 'VBinnedAggregation']): Raises: @@ -367,6 +375,7 @@ async def asyncio( "DistributedJoin", "Dummy", "EncryptedAggregation", + "EncryptedMean", "EncryptedPrediction", "EncryptedRegression", "GWAS", @@ -389,9 +398,9 @@ async def asyncio( Args: json_body (Union['AggregatedDatasetLength', 'Bootstrap', 'CollectiveKeyGen', 'CollectiveKeySwitch', 'DatasetStatistics', 'DistributedJoin', 'Dummy', - 'EncryptedAggregation', 'EncryptedPrediction', 'EncryptedRegression', 'GWAS', 'HybridFL', - 'KeySwitchedComputation', 'PrivateSearch', 'PrivateSearchSetup', 'RelinKeyGen', - 'RotKeyGen', 'SampleExtraction', 'SetIntersection', 'SetupSession', + 'EncryptedAggregation', 'EncryptedMean', 'EncryptedPrediction', 'EncryptedRegression', + 'GWAS', 'HybridFL', 'KeySwitchedComputation', 'PrivateSearch', 'PrivateSearchSetup', + 'RelinKeyGen', 'RotKeyGen', 'SampleExtraction', 'SetIntersection', 'SetupSession', 'StatisticalAggregation', 'SurvivalAggregation', 'VBinnedAggregation']): Raises: diff --git a/src/tuneinsight/api/sdk/api/api_computations/get_computation_list.py b/src/tuneinsight/api/sdk/api/api_computations/get_computation_list.py index bb6a33b..9db8d9d 100644 --- a/src/tuneinsight/api/sdk/api/api_computations/get_computation_list.py +++ b/src/tuneinsight/api/sdk/api/api_computations/get_computation_list.py @@ -1,11 +1,11 @@ from http import HTTPStatus -from typing import Any, Dict, List, Optional, Union +from typing import Any, Dict, Optional, Union import httpx from ... 
import errors from ...client import Client -from ...models.computation import Computation +from ...models.computation_list_response import ComputationListResponse from ...models.error import Error from ...models.get_computation_list_order import GetComputationListOrder from ...models.get_computation_list_sort_by import GetComputationListSortBy @@ -15,10 +15,14 @@ def _get_kwargs( *, client: Client, - limit: Union[Unset, None, int] = 50, - sort_by: Union[Unset, None, GetComputationListSortBy] = UNSET, + page: Union[Unset, None, int] = 1, + per_page: Union[Unset, None, int] = 30, + with_total: Union[Unset, None, bool] = True, order: Union[Unset, None, GetComputationListOrder] = UNSET, + sort_by: Union[Unset, None, GetComputationListSortBy] = UNSET, + limit: Union[Unset, None, int] = 50, show_non_visible: Union[Unset, None, bool] = False, + project_id: Union[Unset, None, str] = UNSET, ) -> Dict[str, Any]: url = "{}/computations".format(client.base_url) @@ -26,13 +30,11 @@ def _get_kwargs( cookies: Dict[str, Any] = client.get_cookies() params: Dict[str, Any] = {} - params["limit"] = limit + params["page"] = page - json_sort_by: Union[Unset, None, str] = UNSET - if not isinstance(sort_by, Unset): - json_sort_by = sort_by.value if sort_by else None + params["perPage"] = per_page - params["sortBy"] = json_sort_by + params["withTotal"] = with_total json_order: Union[Unset, None, str] = UNSET if not isinstance(order, Unset): @@ -40,8 +42,18 @@ def _get_kwargs( params["order"] = json_order + json_sort_by: Union[Unset, None, str] = UNSET + if not isinstance(sort_by, Unset): + json_sort_by = sort_by.value if sort_by else None + + params["sortBy"] = json_sort_by + + params["limit"] = limit + params["showNonVisible"] = show_non_visible + params["projectId"] = project_id + params = {k: v for k, v in params.items() if v is not UNSET and v is not None} return { @@ -54,14 +66,9 @@ def _get_kwargs( } -def _parse_response(*, client: Client, response: httpx.Response) -> Optional[Union[Error, List["Computation"]]]: +def _parse_response(*, client: Client, response: httpx.Response) -> Optional[Union[ComputationListResponse, Error]]: if response.status_code == HTTPStatus.OK: - response_200 = [] - _response_200 = response.json() - for response_200_item_data in _response_200: - response_200_item = Computation.from_dict(response_200_item_data) - - response_200.append(response_200_item) + response_200 = ComputationListResponse.from_dict(response.json()) return response_200 if response.status_code == HTTPStatus.FORBIDDEN: @@ -78,7 +85,7 @@ def _parse_response(*, client: Client, response: httpx.Response) -> Optional[Uni return None -def _build_response(*, client: Client, response: httpx.Response) -> Response[Union[Error, List["Computation"]]]: +def _build_response(*, client: Client, response: httpx.Response) -> Response[Union[ComputationListResponse, Error]]: return Response( status_code=HTTPStatus(response.status_code), content=response.content, @@ -90,33 +97,45 @@ def _build_response(*, client: Client, response: httpx.Response) -> Response[Uni def sync_detailed( *, client: Client, - limit: Union[Unset, None, int] = 50, - sort_by: Union[Unset, None, GetComputationListSortBy] = UNSET, + page: Union[Unset, None, int] = 1, + per_page: Union[Unset, None, int] = 30, + with_total: Union[Unset, None, bool] = True, order: Union[Unset, None, GetComputationListOrder] = UNSET, + sort_by: Union[Unset, None, GetComputationListSortBy] = UNSET, + limit: Union[Unset, None, int] = 50, show_non_visible: Union[Unset, None, bool] = False, 
-) -> Response[Union[Error, List["Computation"]]]: + project_id: Union[Unset, None, str] = UNSET, +) -> Response[Union[ComputationListResponse, Error]]: """Get list of computations currently or previously running. Args: - limit (Union[Unset, None, int]): Default: 50. - sort_by (Union[Unset, None, GetComputationListSortBy]): + page (Union[Unset, None, int]): Default: 1. + per_page (Union[Unset, None, int]): Default: 30. + with_total (Union[Unset, None, bool]): Default: True. order (Union[Unset, None, GetComputationListOrder]): + sort_by (Union[Unset, None, GetComputationListSortBy]): + limit (Union[Unset, None, int]): Default: 50. show_non_visible (Union[Unset, None, bool]): + project_id (Union[Unset, None, str]): Raises: errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. httpx.TimeoutException: If the request takes longer than Client.timeout. Returns: - Response[Union[Error, List['Computation']]] + Response[Union[ComputationListResponse, Error]] """ kwargs = _get_kwargs( client=client, - limit=limit, - sort_by=sort_by, + page=page, + per_page=per_page, + with_total=with_total, order=order, + sort_by=sort_by, + limit=limit, show_non_visible=show_non_visible, + project_id=project_id, ) response = httpx.request( @@ -130,66 +149,90 @@ def sync_detailed( def sync( *, client: Client, - limit: Union[Unset, None, int] = 50, - sort_by: Union[Unset, None, GetComputationListSortBy] = UNSET, + page: Union[Unset, None, int] = 1, + per_page: Union[Unset, None, int] = 30, + with_total: Union[Unset, None, bool] = True, order: Union[Unset, None, GetComputationListOrder] = UNSET, + sort_by: Union[Unset, None, GetComputationListSortBy] = UNSET, + limit: Union[Unset, None, int] = 50, show_non_visible: Union[Unset, None, bool] = False, -) -> Optional[Union[Error, List["Computation"]]]: + project_id: Union[Unset, None, str] = UNSET, +) -> Optional[Union[ComputationListResponse, Error]]: """Get list of computations currently or previously running. Args: - limit (Union[Unset, None, int]): Default: 50. - sort_by (Union[Unset, None, GetComputationListSortBy]): + page (Union[Unset, None, int]): Default: 1. + per_page (Union[Unset, None, int]): Default: 30. + with_total (Union[Unset, None, bool]): Default: True. order (Union[Unset, None, GetComputationListOrder]): + sort_by (Union[Unset, None, GetComputationListSortBy]): + limit (Union[Unset, None, int]): Default: 50. show_non_visible (Union[Unset, None, bool]): + project_id (Union[Unset, None, str]): Raises: errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. httpx.TimeoutException: If the request takes longer than Client.timeout. 
Returns: - Response[Union[Error, List['Computation']]] + Response[Union[ComputationListResponse, Error]] """ return sync_detailed( client=client, - limit=limit, - sort_by=sort_by, + page=page, + per_page=per_page, + with_total=with_total, order=order, + sort_by=sort_by, + limit=limit, show_non_visible=show_non_visible, + project_id=project_id, ).parsed async def asyncio_detailed( *, client: Client, - limit: Union[Unset, None, int] = 50, - sort_by: Union[Unset, None, GetComputationListSortBy] = UNSET, + page: Union[Unset, None, int] = 1, + per_page: Union[Unset, None, int] = 30, + with_total: Union[Unset, None, bool] = True, order: Union[Unset, None, GetComputationListOrder] = UNSET, + sort_by: Union[Unset, None, GetComputationListSortBy] = UNSET, + limit: Union[Unset, None, int] = 50, show_non_visible: Union[Unset, None, bool] = False, -) -> Response[Union[Error, List["Computation"]]]: + project_id: Union[Unset, None, str] = UNSET, +) -> Response[Union[ComputationListResponse, Error]]: """Get list of computations currently or previously running. Args: - limit (Union[Unset, None, int]): Default: 50. - sort_by (Union[Unset, None, GetComputationListSortBy]): + page (Union[Unset, None, int]): Default: 1. + per_page (Union[Unset, None, int]): Default: 30. + with_total (Union[Unset, None, bool]): Default: True. order (Union[Unset, None, GetComputationListOrder]): + sort_by (Union[Unset, None, GetComputationListSortBy]): + limit (Union[Unset, None, int]): Default: 50. show_non_visible (Union[Unset, None, bool]): + project_id (Union[Unset, None, str]): Raises: errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. httpx.TimeoutException: If the request takes longer than Client.timeout. Returns: - Response[Union[Error, List['Computation']]] + Response[Union[ComputationListResponse, Error]] """ kwargs = _get_kwargs( client=client, - limit=limit, - sort_by=sort_by, + page=page, + per_page=per_page, + with_total=with_total, order=order, + sort_by=sort_by, + limit=limit, show_non_visible=show_non_visible, + project_id=project_id, ) async with httpx.AsyncClient(verify=client.verify_ssl) as _client: @@ -201,33 +244,45 @@ async def asyncio_detailed( async def asyncio( *, client: Client, - limit: Union[Unset, None, int] = 50, - sort_by: Union[Unset, None, GetComputationListSortBy] = UNSET, + page: Union[Unset, None, int] = 1, + per_page: Union[Unset, None, int] = 30, + with_total: Union[Unset, None, bool] = True, order: Union[Unset, None, GetComputationListOrder] = UNSET, + sort_by: Union[Unset, None, GetComputationListSortBy] = UNSET, + limit: Union[Unset, None, int] = 50, show_non_visible: Union[Unset, None, bool] = False, -) -> Optional[Union[Error, List["Computation"]]]: + project_id: Union[Unset, None, str] = UNSET, +) -> Optional[Union[ComputationListResponse, Error]]: """Get list of computations currently or previously running. Args: - limit (Union[Unset, None, int]): Default: 50. - sort_by (Union[Unset, None, GetComputationListSortBy]): + page (Union[Unset, None, int]): Default: 1. + per_page (Union[Unset, None, int]): Default: 30. + with_total (Union[Unset, None, bool]): Default: True. order (Union[Unset, None, GetComputationListOrder]): + sort_by (Union[Unset, None, GetComputationListSortBy]): + limit (Union[Unset, None, int]): Default: 50. 
show_non_visible (Union[Unset, None, bool]): + project_id (Union[Unset, None, str]): Raises: errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. httpx.TimeoutException: If the request takes longer than Client.timeout. Returns: - Response[Union[Error, List['Computation']]] + Response[Union[ComputationListResponse, Error]] """ return ( await asyncio_detailed( client=client, - limit=limit, - sort_by=sort_by, + page=page, + per_page=per_page, + with_total=with_total, order=order, + sort_by=sort_by, + limit=limit, show_non_visible=show_non_visible, + project_id=project_id, ) ).parsed diff --git a/src/tuneinsight/api/sdk/api/api_computations/get_result_list.py b/src/tuneinsight/api/sdk/api/api_computations/get_result_list.py index ca28b19..8cf5f9e 100644 --- a/src/tuneinsight/api/sdk/api/api_computations/get_result_list.py +++ b/src/tuneinsight/api/sdk/api/api_computations/get_result_list.py @@ -16,6 +16,7 @@ def _get_kwargs( *, client: Client, project_id: Union[Unset, None, str] = UNSET, + session_id: Union[Unset, None, str] = UNSET, owned: Union[Unset, None, bool] = UNSET, tagged: Union[Unset, None, bool] = UNSET, tags: Union[Unset, None, List[str]] = UNSET, @@ -33,6 +34,8 @@ def _get_kwargs( params: Dict[str, Any] = {} params["projectId"] = project_id + params["sessionId"] = session_id + params["owned"] = owned params["tagged"] = tagged @@ -121,6 +124,7 @@ def sync_detailed( *, client: Client, project_id: Union[Unset, None, str] = UNSET, + session_id: Union[Unset, None, str] = UNSET, owned: Union[Unset, None, bool] = UNSET, tagged: Union[Unset, None, bool] = UNSET, tags: Union[Unset, None, List[str]] = UNSET, @@ -134,6 +138,7 @@ def sync_detailed( Args: project_id (Union[Unset, None, str]): + session_id (Union[Unset, None, str]): owned (Union[Unset, None, bool]): tagged (Union[Unset, None, bool]): tags (Union[Unset, None, List[str]]): @@ -154,6 +159,7 @@ def sync_detailed( kwargs = _get_kwargs( client=client, project_id=project_id, + session_id=session_id, owned=owned, tagged=tagged, tags=tags, @@ -176,6 +182,7 @@ def sync( *, client: Client, project_id: Union[Unset, None, str] = UNSET, + session_id: Union[Unset, None, str] = UNSET, owned: Union[Unset, None, bool] = UNSET, tagged: Union[Unset, None, bool] = UNSET, tags: Union[Unset, None, List[str]] = UNSET, @@ -189,6 +196,7 @@ def sync( Args: project_id (Union[Unset, None, str]): + session_id (Union[Unset, None, str]): owned (Union[Unset, None, bool]): tagged (Union[Unset, None, bool]): tags (Union[Unset, None, List[str]]): @@ -209,6 +217,7 @@ def sync( return sync_detailed( client=client, project_id=project_id, + session_id=session_id, owned=owned, tagged=tagged, tags=tags, @@ -224,6 +233,7 @@ async def asyncio_detailed( *, client: Client, project_id: Union[Unset, None, str] = UNSET, + session_id: Union[Unset, None, str] = UNSET, owned: Union[Unset, None, bool] = UNSET, tagged: Union[Unset, None, bool] = UNSET, tags: Union[Unset, None, List[str]] = UNSET, @@ -237,6 +247,7 @@ async def asyncio_detailed( Args: project_id (Union[Unset, None, str]): + session_id (Union[Unset, None, str]): owned (Union[Unset, None, bool]): tagged (Union[Unset, None, bool]): tags (Union[Unset, None, List[str]]): @@ -257,6 +268,7 @@ async def asyncio_detailed( kwargs = _get_kwargs( client=client, project_id=project_id, + session_id=session_id, owned=owned, tagged=tagged, tags=tags, @@ -277,6 +289,7 @@ async def asyncio( *, client: Client, project_id: Union[Unset, None, str] = UNSET, + 
session_id: Union[Unset, None, str] = UNSET, owned: Union[Unset, None, bool] = UNSET, tagged: Union[Unset, None, bool] = UNSET, tags: Union[Unset, None, List[str]] = UNSET, @@ -290,6 +303,7 @@ async def asyncio( Args: project_id (Union[Unset, None, str]): + session_id (Union[Unset, None, str]): owned (Union[Unset, None, bool]): tagged (Union[Unset, None, bool]): tags (Union[Unset, None, List[str]]): @@ -311,6 +325,7 @@ async def asyncio( await asyncio_detailed( client=client, project_id=project_id, + session_id=session_id, owned=owned, tagged=tagged, tags=tags, diff --git a/src/tuneinsight/api/sdk/api/api_datagen/__init__.py b/src/tuneinsight/api/sdk/api/api_datagen/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/tuneinsight/api/sdk/api/api_datagen/post_mock_dataset.py b/src/tuneinsight/api/sdk/api/api_datagen/post_mock_dataset.py new file mode 100644 index 0000000..412895b --- /dev/null +++ b/src/tuneinsight/api/sdk/api/api_datagen/post_mock_dataset.py @@ -0,0 +1,241 @@ +from http import HTTPStatus +from typing import Any, Dict, Optional, Union + +import httpx + +from ... import errors +from ...client import Client +from ...models.data_source import DataSource +from ...models.error import Error +from ...models.post_mock_dataset_method import PostMockDatasetMethod +from ...types import UNSET, Response, Unset + + +def _get_kwargs( + *, + client: Client, + json_body: str, + method: PostMockDatasetMethod, + name: Union[Unset, None, str] = UNSET, + numrows: int, + seed: Union[Unset, None, str] = UNSET, +) -> Dict[str, Any]: + url = "{}/mock/dataset".format(client.base_url) + + headers: Dict[str, str] = client.get_headers() + cookies: Dict[str, Any] = client.get_cookies() + + params: Dict[str, Any] = {} + json_method = method.value + + params["method"] = json_method + + params["name"] = name + + params["numrows"] = numrows + + params["seed"] = seed + + params = {k: v for k, v in params.items() if v is not UNSET and v is not None} + + json_json_body = json_body + + return { + "method": "post", + "url": url, + "headers": headers, + "cookies": cookies, + "timeout": client.get_timeout(), + "json": json_json_body, + "params": params, + } + + +def _parse_response(*, client: Client, response: httpx.Response) -> Optional[Union[DataSource, Error]]: + if response.status_code == HTTPStatus.CREATED: + response_201 = DataSource.from_dict(response.json()) + + return response_201 + if response.status_code == HTTPStatus.BAD_REQUEST: + response_400 = Error.from_dict(response.json()) + + return response_400 + if response.status_code == HTTPStatus.FORBIDDEN: + response_403 = Error.from_dict(response.json()) + + return response_403 + if response.status_code == HTTPStatus.INTERNAL_SERVER_ERROR: + response_500 = Error.from_dict(response.json()) + + return response_500 + if client.raise_on_unexpected_status: + raise errors.UnexpectedStatus(f"Unexpected status code: {response.status_code}") + else: + return None + + +def _build_response(*, client: Client, response: httpx.Response) -> Response[Union[DataSource, Error]]: + return Response( + status_code=HTTPStatus(response.status_code), + content=response.content, + headers=response.headers, + parsed=_parse_response(client=client, response=response), + ) + + +def sync_detailed( + *, + client: Client, + json_body: str, + method: PostMockDatasetMethod, + name: Union[Unset, None, str] = UNSET, + numrows: int, + seed: Union[Unset, None, str] = UNSET, +) -> Response[Union[DataSource, Error]]: + """Request the creation of a mock dataset. 
+ + Args: + method (PostMockDatasetMethod): + name (Union[Unset, None, str]): + numrows (int): + seed (Union[Unset, None, str]): + json_body (str): + + Raises: + errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. + httpx.TimeoutException: If the request takes longer than Client.timeout. + + Returns: + Response[Union[DataSource, Error]] + """ + + kwargs = _get_kwargs( + client=client, + json_body=json_body, + method=method, + name=name, + numrows=numrows, + seed=seed, + ) + + response = httpx.request( + verify=client.verify_ssl, + **kwargs, + ) + + return _build_response(client=client, response=response) + + +def sync( + *, + client: Client, + json_body: str, + method: PostMockDatasetMethod, + name: Union[Unset, None, str] = UNSET, + numrows: int, + seed: Union[Unset, None, str] = UNSET, +) -> Optional[Union[DataSource, Error]]: + """Request the creation of a mock dataset. + + Args: + method (PostMockDatasetMethod): + name (Union[Unset, None, str]): + numrows (int): + seed (Union[Unset, None, str]): + json_body (str): + + Raises: + errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. + httpx.TimeoutException: If the request takes longer than Client.timeout. + + Returns: + Response[Union[DataSource, Error]] + """ + + return sync_detailed( + client=client, + json_body=json_body, + method=method, + name=name, + numrows=numrows, + seed=seed, + ).parsed + + +async def asyncio_detailed( + *, + client: Client, + json_body: str, + method: PostMockDatasetMethod, + name: Union[Unset, None, str] = UNSET, + numrows: int, + seed: Union[Unset, None, str] = UNSET, +) -> Response[Union[DataSource, Error]]: + """Request the creation of a mock dataset. + + Args: + method (PostMockDatasetMethod): + name (Union[Unset, None, str]): + numrows (int): + seed (Union[Unset, None, str]): + json_body (str): + + Raises: + errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. + httpx.TimeoutException: If the request takes longer than Client.timeout. + + Returns: + Response[Union[DataSource, Error]] + """ + + kwargs = _get_kwargs( + client=client, + json_body=json_body, + method=method, + name=name, + numrows=numrows, + seed=seed, + ) + + async with httpx.AsyncClient(verify=client.verify_ssl) as _client: + response = await _client.request(**kwargs) + + return _build_response(client=client, response=response) + + +async def asyncio( + *, + client: Client, + json_body: str, + method: PostMockDatasetMethod, + name: Union[Unset, None, str] = UNSET, + numrows: int, + seed: Union[Unset, None, str] = UNSET, +) -> Optional[Union[DataSource, Error]]: + """Request the creation of a mock dataset. + + Args: + method (PostMockDatasetMethod): + name (Union[Unset, None, str]): + numrows (int): + seed (Union[Unset, None, str]): + json_body (str): + + Raises: + errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. + httpx.TimeoutException: If the request takes longer than Client.timeout. 
+ + Returns: + Response[Union[DataSource, Error]] + """ + + return ( + await asyncio_detailed( + client=client, + json_body=json_body, + method=method, + name=name, + numrows=numrows, + seed=seed, + ) + ).parsed diff --git a/src/tuneinsight/api/sdk/api/api_network/get_network_metadata.py b/src/tuneinsight/api/sdk/api/api_network/get_network_metadata.py index dbae585..ee57a18 100644 --- a/src/tuneinsight/api/sdk/api/api_network/get_network_metadata.py +++ b/src/tuneinsight/api/sdk/api/api_network/get_network_metadata.py @@ -7,24 +7,31 @@ from ...client import Client from ...models.error import Error from ...models.get_network_metadata_response_200 import GetNetworkMetadataResponse200 -from ...types import Response +from ...types import UNSET, Response, Unset def _get_kwargs( *, client: Client, + force_network_sync: Union[Unset, None, bool] = False, ) -> Dict[str, Any]: url = "{}/network".format(client.base_url) headers: Dict[str, str] = client.get_headers() cookies: Dict[str, Any] = client.get_cookies() + params: Dict[str, Any] = {} + params["forceNetworkSync"] = force_network_sync + + params = {k: v for k, v in params.items() if v is not UNSET and v is not None} + return { "method": "get", "url": url, "headers": headers, "cookies": cookies, "timeout": client.get_timeout(), + "params": params, } @@ -63,9 +70,13 @@ def _build_response( def sync_detailed( *, client: Client, + force_network_sync: Union[Unset, None, bool] = False, ) -> Response[Union[Error, GetNetworkMetadataResponse200]]: """Get network metadata: local instance configuration and nodes of the network + Args: + force_network_sync (Union[Unset, None, bool]): + Raises: errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. httpx.TimeoutException: If the request takes longer than Client.timeout. @@ -76,6 +87,7 @@ def sync_detailed( kwargs = _get_kwargs( client=client, + force_network_sync=force_network_sync, ) response = httpx.request( @@ -89,9 +101,13 @@ def sync_detailed( def sync( *, client: Client, + force_network_sync: Union[Unset, None, bool] = False, ) -> Optional[Union[Error, GetNetworkMetadataResponse200]]: """Get network metadata: local instance configuration and nodes of the network + Args: + force_network_sync (Union[Unset, None, bool]): + Raises: errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. httpx.TimeoutException: If the request takes longer than Client.timeout. @@ -102,15 +118,20 @@ def sync( return sync_detailed( client=client, + force_network_sync=force_network_sync, ).parsed async def asyncio_detailed( *, client: Client, + force_network_sync: Union[Unset, None, bool] = False, ) -> Response[Union[Error, GetNetworkMetadataResponse200]]: """Get network metadata: local instance configuration and nodes of the network + Args: + force_network_sync (Union[Unset, None, bool]): + Raises: errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. httpx.TimeoutException: If the request takes longer than Client.timeout. 
@@ -121,6 +142,7 @@ async def asyncio_detailed( kwargs = _get_kwargs( client=client, + force_network_sync=force_network_sync, ) async with httpx.AsyncClient(verify=client.verify_ssl) as _client: @@ -132,9 +154,13 @@ async def asyncio_detailed( async def asyncio( *, client: Client, + force_network_sync: Union[Unset, None, bool] = False, ) -> Optional[Union[Error, GetNetworkMetadataResponse200]]: """Get network metadata: local instance configuration and nodes of the network + Args: + force_network_sync (Union[Unset, None, bool]): + Raises: errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. httpx.TimeoutException: If the request takes longer than Client.timeout. @@ -146,5 +172,6 @@ async def asyncio( return ( await asyncio_detailed( client=client, + force_network_sync=force_network_sync, ) ).parsed diff --git a/src/tuneinsight/api/sdk/api/api_project/get_project_status.py b/src/tuneinsight/api/sdk/api/api_project/get_project_status.py index a47f1da..65b5ae0 100644 --- a/src/tuneinsight/api/sdk/api/api_project/get_project_status.py +++ b/src/tuneinsight/api/sdk/api/api_project/get_project_status.py @@ -6,32 +6,39 @@ from ... import errors from ...client import Client from ...models.error import Error -from ...models.participant import Participant -from ...types import Response +from ...models.get_project_status_response_200 import GetProjectStatusResponse200 +from ...types import UNSET, Response, Unset def _get_kwargs( project_id: str, *, client: Client, + remote: Union[Unset, None, bool] = UNSET, ) -> Dict[str, Any]: url = "{}/projects/{projectId}/status".format(client.base_url, projectId=project_id) headers: Dict[str, str] = client.get_headers() cookies: Dict[str, Any] = client.get_cookies() + params: Dict[str, Any] = {} + params["remote"] = remote + + params = {k: v for k, v in params.items() if v is not UNSET and v is not None} + return { "method": "get", "url": url, "headers": headers, "cookies": cookies, "timeout": client.get_timeout(), + "params": params, } -def _parse_response(*, client: Client, response: httpx.Response) -> Optional[Union[Error, Participant]]: +def _parse_response(*, client: Client, response: httpx.Response) -> Optional[Union[Error, GetProjectStatusResponse200]]: if response.status_code == HTTPStatus.OK: - response_200 = Participant.from_dict(response.json()) + response_200 = GetProjectStatusResponse200.from_dict(response.json()) return response_200 if response.status_code == HTTPStatus.FORBIDDEN: @@ -52,7 +59,7 @@ def _parse_response(*, client: Client, response: httpx.Response) -> Optional[Uni return None -def _build_response(*, client: Client, response: httpx.Response) -> Response[Union[Error, Participant]]: +def _build_response(*, client: Client, response: httpx.Response) -> Response[Union[Error, GetProjectStatusResponse200]]: return Response( status_code=HTTPStatus(response.status_code), content=response.content, @@ -65,23 +72,26 @@ def sync_detailed( project_id: str, *, client: Client, -) -> Response[Union[Error, Participant]]: + remote: Union[Unset, None, bool] = UNSET, +) -> Response[Union[Error, GetProjectStatusResponse200]]: """Gets the various statuses of the project Args: project_id (str): + remote (Union[Unset, None, bool]): Raises: errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. httpx.TimeoutException: If the request takes longer than Client.timeout. 
Returns: - Response[Union[Error, Participant]] + Response[Union[Error, GetProjectStatusResponse200]] """ kwargs = _get_kwargs( project_id=project_id, client=client, + remote=remote, ) response = httpx.request( @@ -96,23 +106,26 @@ def sync( project_id: str, *, client: Client, -) -> Optional[Union[Error, Participant]]: + remote: Union[Unset, None, bool] = UNSET, +) -> Optional[Union[Error, GetProjectStatusResponse200]]: """Gets the various statuses of the project Args: project_id (str): + remote (Union[Unset, None, bool]): Raises: errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. httpx.TimeoutException: If the request takes longer than Client.timeout. Returns: - Response[Union[Error, Participant]] + Response[Union[Error, GetProjectStatusResponse200]] """ return sync_detailed( project_id=project_id, client=client, + remote=remote, ).parsed @@ -120,23 +133,26 @@ async def asyncio_detailed( project_id: str, *, client: Client, -) -> Response[Union[Error, Participant]]: + remote: Union[Unset, None, bool] = UNSET, +) -> Response[Union[Error, GetProjectStatusResponse200]]: """Gets the various statuses of the project Args: project_id (str): + remote (Union[Unset, None, bool]): Raises: errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. httpx.TimeoutException: If the request takes longer than Client.timeout. Returns: - Response[Union[Error, Participant]] + Response[Union[Error, GetProjectStatusResponse200]] """ kwargs = _get_kwargs( project_id=project_id, client=client, + remote=remote, ) async with httpx.AsyncClient(verify=client.verify_ssl) as _client: @@ -149,23 +165,26 @@ async def asyncio( project_id: str, *, client: Client, -) -> Optional[Union[Error, Participant]]: + remote: Union[Unset, None, bool] = UNSET, +) -> Optional[Union[Error, GetProjectStatusResponse200]]: """Gets the various statuses of the project Args: project_id (str): + remote (Union[Unset, None, bool]): Raises: errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. httpx.TimeoutException: If the request takes longer than Client.timeout. 
Returns: - Response[Union[Error, Participant]] + Response[Union[Error, GetProjectStatusResponse200]] """ return ( await asyncio_detailed( project_id=project_id, client=client, + remote=remote, ) ).parsed diff --git a/src/tuneinsight/api/sdk/models/__init__.py b/src/tuneinsight/api/sdk/models/__init__.py index 0338f46..0e4550f 100644 --- a/src/tuneinsight/api/sdk/models/__init__.py +++ b/src/tuneinsight/api/sdk/models/__init__.py @@ -38,6 +38,7 @@ from .computation_definition import ComputationDefinition from .computation_error import ComputationError from .computation_error_type import ComputationErrorType +from .computation_list_response import ComputationListResponse from .computation_policy import ComputationPolicy from .computation_preprocessing_parameters import ComputationPreprocessingParameters from .computation_preprocessing_parameters_compound_preprocessing import ( @@ -88,6 +89,7 @@ from .enc_vector import EncVector from .enc_vector_type import EncVectorType from .encrypted_aggregation import EncryptedAggregation +from .encrypted_mean import EncryptedMean from .encrypted_prediction import EncryptedPrediction from .encrypted_regression import EncryptedRegression from .encrypted_regression_params import EncryptedRegressionParams @@ -116,6 +118,7 @@ from .get_project_list_order import GetProjectListOrder from .get_project_list_sort_by import GetProjectListSortBy from .get_project_network_status_response_200_item import GetProjectNetworkStatusResponse200Item +from .get_project_status_response_200 import GetProjectStatusResponse200 from .get_query_list_order import GetQueryListOrder from .get_query_list_sort_by import GetQueryListSortBy from .get_result_list_order import GetResultListOrder @@ -139,6 +142,7 @@ from .matching_column import MatchingColumn from .matching_params import MatchingParams from .measurement import Measurement +from .mock_method import MockMethod from .model import Model from .model_definition import ModelDefinition from .model_metadata import ModelMetadata @@ -154,6 +158,7 @@ from .organization_coordinates import OrganizationCoordinates from .paginated_result import PaginatedResult from .participant import Participant +from .participation_status import ParticipationStatus from .phonetic_encoding import PhoneticEncoding from .post_data_object_json_body import PostDataObjectJsonBody from .post_data_source_query_json_body import PostDataSourceQueryJsonBody @@ -161,6 +166,7 @@ PostDataSourceQueryJsonBodyOutputDataObjectsSharedIDs, ) from .post_data_source_query_json_body_parameters import PostDataSourceQueryJsonBodyParameters +from .post_mock_dataset_method import PostMockDatasetMethod from .post_project_data_json_body import PostProjectDataJsonBody from .post_project_data_source_query_json_body import PostProjectDataSourceQueryJsonBody from .post_project_data_source_query_json_body_aggregation_type import PostProjectDataSourceQueryJsonBodyAggregationType @@ -292,6 +298,7 @@ "ComputationDefinition", "ComputationError", "ComputationErrorType", + "ComputationListResponse", "ComputationPolicy", "ComputationPreprocessingParameters", "ComputationPreprocessingParametersCompoundPreprocessing", @@ -338,6 +345,7 @@ "Dummy", "Duration", "EncryptedAggregation", + "EncryptedMean", "EncryptedPrediction", "EncryptedRegression", "EncryptedRegressionParams", @@ -368,6 +376,7 @@ "GetProjectListOrder", "GetProjectListSortBy", "GetProjectNetworkStatusResponse200Item", + "GetProjectStatusResponse200", "GetQueryListOrder", "GetQueryListSortBy", "GetResultListOrder", @@ -391,6 +400,7 @@ 
"MatchingColumn", "MatchingParams", "Measurement", + "MockMethod", "Model", "ModelDefinition", "ModelMetadata", @@ -406,11 +416,13 @@ "OrganizationCoordinates", "PaginatedResult", "Participant", + "ParticipationStatus", "PhoneticEncoding", "PostDataObjectJsonBody", "PostDataSourceQueryJsonBody", "PostDataSourceQueryJsonBodyOutputDataObjectsSharedIDs", "PostDataSourceQueryJsonBodyParameters", + "PostMockDatasetMethod", "PostProjectDataJsonBody", "PostProjectDataSourceQueryJsonBody", "PostProjectDataSourceQueryJsonBodyAggregationType", diff --git a/src/tuneinsight/api/sdk/models/aggregated_dataset_length.py b/src/tuneinsight/api/sdk/models/aggregated_dataset_length.py index 6b69c7c..cd33124 100644 --- a/src/tuneinsight/api/sdk/models/aggregated_dataset_length.py +++ b/src/tuneinsight/api/sdk/models/aggregated_dataset_length.py @@ -20,83 +20,89 @@ class AggregatedDatasetLength: """ Attributes: type (ComputationType): Type of the computation. - join_id (Union[Unset, str]): Unique identifier of a data object. + local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured + the network) local_input (Union[Unset, LocalInput]): If a local input is provided, the node initiating the computation will use it instead of querying the datasource. This data is *not* shared to other nodes, only used for the duration of the computation. The local input columns/values must be in the form {: [, , ...], ...} - wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. - input_data_object (Union[Unset, str]): Shared identifier of a data object. + preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters + applied to the input retrieved from the datasource, if applicable cohort_id (Union[Unset, str]): Unique identifier of a data object. + end_to_end_encrypted (Union[Unset, bool]): if the end to end encrypted mode is set to true, + then when release results is set to true and the output + is initially encrypted with a network collective key, then it is key switched to + the initiating user's public key. + local_input_id (Union[Unset, str]): Unique identifier of a data object. + wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. + dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various + disclosure prevention mechanisms + join_id (Union[Unset, str]): Unique identifier of a data object. + timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. + encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. + owner (Union[Unset, str]): The username of the end user who requested the computation. project_id (Union[Unset, str]): Unique identifier of a project. release_results (Union[Unset, bool]): flag to set to true if the computation should directly release the output results. If set, then encrypted results are automatically key switched and decrypted and a Result entity is saved - dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various - disclosure prevention mechanisms - encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. 
- local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured - the network) - local_input_id (Union[Unset, str]): Unique identifier of a data object. - owner (Union[Unset, str]): The username of the end user who requested the computation. - preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters - applied to the input retrieved from the datasource, if applicable - timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. data_source_parameters (Union[Unset, ComputationDataSourceParameters]): Parameters used to query the datasource from each node before the computation + input_data_object (Union[Unset, str]): Shared identifier of a data object. features (Union[Unset, str]): Shared identifier of a data object. """ type: ComputationType - join_id: Union[Unset, str] = UNSET + local: Union[Unset, bool] = UNSET local_input: Union[Unset, "LocalInput"] = UNSET - wait: Union[Unset, bool] = UNSET - input_data_object: Union[Unset, str] = UNSET + preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET cohort_id: Union[Unset, str] = UNSET - project_id: Union[Unset, str] = UNSET - release_results: Union[Unset, bool] = UNSET + end_to_end_encrypted: Union[Unset, bool] = UNSET + local_input_id: Union[Unset, str] = UNSET + wait: Union[Unset, bool] = UNSET dp_policy: Union[Unset, "DPPolicy"] = UNSET + join_id: Union[Unset, str] = UNSET + timeout: Union[Unset, int] = UNSET encrypted: Union[Unset, bool] = UNSET - local: Union[Unset, bool] = UNSET - local_input_id: Union[Unset, str] = UNSET owner: Union[Unset, str] = UNSET - preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET - timeout: Union[Unset, int] = UNSET + project_id: Union[Unset, str] = UNSET + release_results: Union[Unset, bool] = UNSET data_source_parameters: Union[Unset, "ComputationDataSourceParameters"] = UNSET + input_data_object: Union[Unset, str] = UNSET features: Union[Unset, str] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: type = self.type.value - join_id = self.join_id + local = self.local local_input: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.local_input, Unset): local_input = self.local_input.to_dict() - wait = self.wait - input_data_object = self.input_data_object + preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.preprocessing_parameters, Unset): + preprocessing_parameters = self.preprocessing_parameters.to_dict() + cohort_id = self.cohort_id - project_id = self.project_id - release_results = self.release_results + end_to_end_encrypted = self.end_to_end_encrypted + local_input_id = self.local_input_id + wait = self.wait dp_policy: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.dp_policy, Unset): dp_policy = self.dp_policy.to_dict() + join_id = self.join_id + timeout = self.timeout encrypted = self.encrypted - local = self.local - local_input_id = self.local_input_id owner = self.owner - preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.preprocessing_parameters, Unset): - preprocessing_parameters = self.preprocessing_parameters.to_dict() - - timeout = self.timeout + project_id = self.project_id + release_results = self.release_results data_source_parameters: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.data_source_parameters, Unset): 
data_source_parameters = self.data_source_parameters.to_dict() + input_data_object = self.input_data_object features = self.features field_dict: Dict[str, Any] = {} @@ -106,36 +112,38 @@ def to_dict(self) -> Dict[str, Any]: "type": type, } ) - if join_id is not UNSET: - field_dict["joinId"] = join_id + if local is not UNSET: + field_dict["local"] = local if local_input is not UNSET: field_dict["localInput"] = local_input - if wait is not UNSET: - field_dict["wait"] = wait - if input_data_object is not UNSET: - field_dict["inputDataObject"] = input_data_object + if preprocessing_parameters is not UNSET: + field_dict["preprocessingParameters"] = preprocessing_parameters if cohort_id is not UNSET: field_dict["cohortId"] = cohort_id - if project_id is not UNSET: - field_dict["projectId"] = project_id - if release_results is not UNSET: - field_dict["releaseResults"] = release_results + if end_to_end_encrypted is not UNSET: + field_dict["endToEndEncrypted"] = end_to_end_encrypted + if local_input_id is not UNSET: + field_dict["localInputID"] = local_input_id + if wait is not UNSET: + field_dict["wait"] = wait if dp_policy is not UNSET: field_dict["DPPolicy"] = dp_policy + if join_id is not UNSET: + field_dict["joinId"] = join_id + if timeout is not UNSET: + field_dict["timeout"] = timeout if encrypted is not UNSET: field_dict["encrypted"] = encrypted - if local is not UNSET: - field_dict["local"] = local - if local_input_id is not UNSET: - field_dict["localInputID"] = local_input_id if owner is not UNSET: field_dict["owner"] = owner - if preprocessing_parameters is not UNSET: - field_dict["preprocessingParameters"] = preprocessing_parameters - if timeout is not UNSET: - field_dict["timeout"] = timeout + if project_id is not UNSET: + field_dict["projectId"] = project_id + if release_results is not UNSET: + field_dict["releaseResults"] = release_results if data_source_parameters is not UNSET: field_dict["dataSourceParameters"] = data_source_parameters + if input_data_object is not UNSET: + field_dict["inputDataObject"] = input_data_object if features is not UNSET: field_dict["features"] = features @@ -151,7 +159,7 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() type = ComputationType(d.pop("type")) - join_id = d.pop("joinId", UNSET) + local = d.pop("local", UNSET) _local_input = d.pop("localInput", UNSET) local_input: Union[Unset, LocalInput] @@ -160,15 +168,20 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: local_input = LocalInput.from_dict(_local_input) - wait = d.pop("wait", UNSET) - - input_data_object = d.pop("inputDataObject", UNSET) + _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) + preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] + if isinstance(_preprocessing_parameters, Unset): + preprocessing_parameters = UNSET + else: + preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) cohort_id = d.pop("cohortId", UNSET) - project_id = d.pop("projectId", UNSET) + end_to_end_encrypted = d.pop("endToEndEncrypted", UNSET) - release_results = d.pop("releaseResults", UNSET) + local_input_id = d.pop("localInputID", UNSET) + + wait = d.pop("wait", UNSET) _dp_policy = d.pop("DPPolicy", UNSET) dp_policy: Union[Unset, DPPolicy] @@ -177,22 +190,17 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: dp_policy = DPPolicy.from_dict(_dp_policy) - encrypted = d.pop("encrypted", UNSET) + join_id = d.pop("joinId", UNSET) - local = d.pop("local", UNSET) 
+ timeout = d.pop("timeout", UNSET) - local_input_id = d.pop("localInputID", UNSET) + encrypted = d.pop("encrypted", UNSET) owner = d.pop("owner", UNSET) - _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) - preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] - if isinstance(_preprocessing_parameters, Unset): - preprocessing_parameters = UNSET - else: - preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) + project_id = d.pop("projectId", UNSET) - timeout = d.pop("timeout", UNSET) + release_results = d.pop("releaseResults", UNSET) _data_source_parameters = d.pop("dataSourceParameters", UNSET) data_source_parameters: Union[Unset, ComputationDataSourceParameters] @@ -201,25 +209,28 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: data_source_parameters = ComputationDataSourceParameters.from_dict(_data_source_parameters) + input_data_object = d.pop("inputDataObject", UNSET) + features = d.pop("features", UNSET) aggregated_dataset_length = cls( type=type, - join_id=join_id, + local=local, local_input=local_input, - wait=wait, - input_data_object=input_data_object, + preprocessing_parameters=preprocessing_parameters, cohort_id=cohort_id, - project_id=project_id, - release_results=release_results, + end_to_end_encrypted=end_to_end_encrypted, + local_input_id=local_input_id, + wait=wait, dp_policy=dp_policy, + join_id=join_id, + timeout=timeout, encrypted=encrypted, - local=local, - local_input_id=local_input_id, owner=owner, - preprocessing_parameters=preprocessing_parameters, - timeout=timeout, + project_id=project_id, + release_results=release_results, data_source_parameters=data_source_parameters, + input_data_object=input_data_object, features=features, ) diff --git a/src/tuneinsight/api/sdk/models/api_connection_info.py b/src/tuneinsight/api/sdk/models/api_connection_info.py index 0b766ff..d6d1153 100644 --- a/src/tuneinsight/api/sdk/models/api_connection_info.py +++ b/src/tuneinsight/api/sdk/models/api_connection_info.py @@ -13,48 +13,45 @@ class APIConnectionInfo: """Connection information for a API data sources Attributes: - api_token (Union[Unset, str]): Token to connect to the API - api_url (Union[Unset, str]): URL of the API cert (Union[Unset, str]): If applicable, name of the certificate to acces the datasource. 
Certificate should be in '/usr/local/share/datasource-certificates/.{crt/key}' type (Union[Unset, APIConnectionInfoType]): Type of API + api_token (Union[Unset, str]): Token to connect to the API + api_url (Union[Unset, str]): URL of the API """ - api_token: Union[Unset, str] = UNSET - api_url: Union[Unset, str] = UNSET cert: Union[Unset, str] = UNSET type: Union[Unset, APIConnectionInfoType] = UNSET + api_token: Union[Unset, str] = UNSET + api_url: Union[Unset, str] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - api_token = self.api_token - api_url = self.api_url cert = self.cert type: Union[Unset, str] = UNSET if not isinstance(self.type, Unset): type = self.type.value + api_token = self.api_token + api_url = self.api_url + field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if api_token is not UNSET: - field_dict["api-token"] = api_token - if api_url is not UNSET: - field_dict["api-url"] = api_url if cert is not UNSET: field_dict["cert"] = cert if type is not UNSET: field_dict["type"] = type + if api_token is not UNSET: + field_dict["api-token"] = api_token + if api_url is not UNSET: + field_dict["api-url"] = api_url return field_dict @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() - api_token = d.pop("api-token", UNSET) - - api_url = d.pop("api-url", UNSET) - cert = d.pop("cert", UNSET) _type = d.pop("type", UNSET) @@ -64,11 +61,15 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: type = APIConnectionInfoType(_type) + api_token = d.pop("api-token", UNSET) + + api_url = d.pop("api-url", UNSET) + api_connection_info = cls( - api_token=api_token, - api_url=api_url, cert=cert, type=type, + api_token=api_token, + api_url=api_url, ) api_connection_info.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/binning_operation.py b/src/tuneinsight/api/sdk/models/binning_operation.py index 8ccf78e..4765895 100644 --- a/src/tuneinsight/api/sdk/models/binning_operation.py +++ b/src/tuneinsight/api/sdk/models/binning_operation.py @@ -17,35 +17,29 @@ class BinningOperation: """Dataset binning operation definition Attributes: - target_column (Union[Unset, str]): column targeted by the binning operation - aggregated_columns (Union[Unset, List[str]]): list of numerical columns to aggregate per bin when binning is - done, if unspecified binning only counts the number of rows - categories (Union[Unset, List[str]]): list of categories when groupByType is 'category' count_columns (Union[Unset, List['CategoricalColumn']]): list of categorical on which to count the number of records per bin per matching value group_by_type (Union[Unset, GroupByType]): type of the groupBy operation specified + keep_non_categorized_items (Union[Unset, bool]): keeps items that do not fall in a specific category and stores + them in the default category "other" Default: True. 
range_values (Union[Unset, List[float]]): list of cuts to use when groupByType is 'range' ([x,y] => creating 3 bins [v < x, x <= v < y, y <= v]) + target_column (Union[Unset, str]): column targeted by the binning operation + aggregated_columns (Union[Unset, List[str]]): list of numerical columns to aggregate per bin when binning is + done, if unspecified binning only counts the number of rows + categories (Union[Unset, List[str]]): list of categories when groupByType is 'category' """ - target_column: Union[Unset, str] = UNSET - aggregated_columns: Union[Unset, List[str]] = UNSET - categories: Union[Unset, List[str]] = UNSET count_columns: Union[Unset, List["CategoricalColumn"]] = UNSET group_by_type: Union[Unset, GroupByType] = UNSET + keep_non_categorized_items: Union[Unset, bool] = True range_values: Union[Unset, List[float]] = UNSET + target_column: Union[Unset, str] = UNSET + aggregated_columns: Union[Unset, List[str]] = UNSET + categories: Union[Unset, List[str]] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - target_column = self.target_column - aggregated_columns: Union[Unset, List[str]] = UNSET - if not isinstance(self.aggregated_columns, Unset): - aggregated_columns = self.aggregated_columns - - categories: Union[Unset, List[str]] = UNSET - if not isinstance(self.categories, Unset): - categories = self.categories - count_columns: Union[Unset, List[Dict[str, Any]]] = UNSET if not isinstance(self.count_columns, Unset): count_columns = [] @@ -58,25 +52,37 @@ def to_dict(self) -> Dict[str, Any]: if not isinstance(self.group_by_type, Unset): group_by_type = self.group_by_type.value + keep_non_categorized_items = self.keep_non_categorized_items range_values: Union[Unset, List[float]] = UNSET if not isinstance(self.range_values, Unset): range_values = self.range_values + target_column = self.target_column + aggregated_columns: Union[Unset, List[str]] = UNSET + if not isinstance(self.aggregated_columns, Unset): + aggregated_columns = self.aggregated_columns + + categories: Union[Unset, List[str]] = UNSET + if not isinstance(self.categories, Unset): + categories = self.categories + field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if target_column is not UNSET: - field_dict["targetColumn"] = target_column - if aggregated_columns is not UNSET: - field_dict["aggregatedColumns"] = aggregated_columns - if categories is not UNSET: - field_dict["categories"] = categories if count_columns is not UNSET: field_dict["countColumns"] = count_columns if group_by_type is not UNSET: field_dict["groupByType"] = group_by_type + if keep_non_categorized_items is not UNSET: + field_dict["keepNonCategorizedItems"] = keep_non_categorized_items if range_values is not UNSET: field_dict["rangeValues"] = range_values + if target_column is not UNSET: + field_dict["targetColumn"] = target_column + if aggregated_columns is not UNSET: + field_dict["aggregatedColumns"] = aggregated_columns + if categories is not UNSET: + field_dict["categories"] = categories return field_dict @@ -85,12 +91,6 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.categorical_column import CategoricalColumn d = src_dict.copy() - target_column = d.pop("targetColumn", UNSET) - - aggregated_columns = cast(List[str], d.pop("aggregatedColumns", UNSET)) - - categories = cast(List[str], d.pop("categories", UNSET)) - count_columns = [] _count_columns = d.pop("countColumns", UNSET) for 
count_columns_item_data in _count_columns or []: @@ -105,15 +105,24 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: group_by_type = GroupByType(_group_by_type) + keep_non_categorized_items = d.pop("keepNonCategorizedItems", UNSET) + range_values = cast(List[float], d.pop("rangeValues", UNSET)) + target_column = d.pop("targetColumn", UNSET) + + aggregated_columns = cast(List[str], d.pop("aggregatedColumns", UNSET)) + + categories = cast(List[str], d.pop("categories", UNSET)) + binning_operation = cls( - target_column=target_column, - aggregated_columns=aggregated_columns, - categories=categories, count_columns=count_columns, group_by_type=group_by_type, + keep_non_categorized_items=keep_non_categorized_items, range_values=range_values, + target_column=target_column, + aggregated_columns=aggregated_columns, + categories=categories, ) binning_operation.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/bootstrap.py b/src/tuneinsight/api/sdk/models/bootstrap.py index bd2d6b0..0ac9f50 100644 --- a/src/tuneinsight/api/sdk/models/bootstrap.py +++ b/src/tuneinsight/api/sdk/models/bootstrap.py @@ -21,83 +21,90 @@ class Bootstrap: Attributes: type (ComputationType): Type of the computation. value (str): Unique identifier of a data object. - join_id (Union[Unset, str]): Unique identifier of a data object. + local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured + the network) local_input (Union[Unset, LocalInput]): If a local input is provided, the node initiating the computation will use it instead of querying the datasource. This data is *not* shared to other nodes, only used for the duration of the computation. The local input columns/values must be in the form {: [, , ...], ...} - wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. - input_data_object (Union[Unset, str]): Shared identifier of a data object. + preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters + applied to the input retrieved from the datasource, if applicable cohort_id (Union[Unset, str]): Unique identifier of a data object. + end_to_end_encrypted (Union[Unset, bool]): if the end to end encrypted mode is set to true, + then when release results is set to true and the output + is initially encrypted with a network collective key, then it is key switched to + the initiating user's public key. + local_input_id (Union[Unset, str]): Unique identifier of a data object. + wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. + dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various + disclosure prevention mechanisms + join_id (Union[Unset, str]): Unique identifier of a data object. + timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. + encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. + owner (Union[Unset, str]): The username of the end user who requested the computation. project_id (Union[Unset, str]): Unique identifier of a project. release_results (Union[Unset, bool]): flag to set to true if the computation should directly release the output results. 
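For context on the BinningOperation changes above, here is a minimal usage sketch. It only uses field names visible in this diff, takes the import path from the file path shown, and leaves the GroupByType enum out because its members are not part of this hunk.

# Sketch only: keyword construction of a BinningOperation and the camelCase wire keys
# emitted by to_dict(). The column names are illustrative.
from tuneinsight.api.sdk.models.binning_operation import BinningOperation

op = BinningOperation(
    target_column="age",              # column targeted by the binning operation
    range_values=[18.0, 65.0],        # [x, y] => 3 bins: v < x, x <= v < y, y <= v
    aggregated_columns=["duration"],  # numerical columns aggregated per bin
    # keep_non_categorized_items defaults to True (field added in this release);
    # range_values is only meaningful when group_by_type is the 'range' variant,
    # which is not constructed here.
)

payload = op.to_dict()
# payload carries the wire names from this diff, e.g.
# {"keepNonCategorizedItems": True, "rangeValues": [18.0, 65.0],
#  "targetColumn": "age", "aggregatedColumns": ["duration"]}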
If set, then encrypted results are automatically key switched and decrypted and a Result entity is saved - dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various - disclosure prevention mechanisms - encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. - local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured - the network) - local_input_id (Union[Unset, str]): Unique identifier of a data object. - owner (Union[Unset, str]): The username of the end user who requested the computation. - preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters - applied to the input retrieved from the datasource, if applicable - timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. data_source_parameters (Union[Unset, ComputationDataSourceParameters]): Parameters used to query the datasource from each node before the computation + input_data_object (Union[Unset, str]): Shared identifier of a data object. """ type: ComputationType value: str - join_id: Union[Unset, str] = UNSET + local: Union[Unset, bool] = UNSET local_input: Union[Unset, "LocalInput"] = UNSET - wait: Union[Unset, bool] = UNSET - input_data_object: Union[Unset, str] = UNSET + preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET cohort_id: Union[Unset, str] = UNSET - project_id: Union[Unset, str] = UNSET - release_results: Union[Unset, bool] = UNSET + end_to_end_encrypted: Union[Unset, bool] = UNSET + local_input_id: Union[Unset, str] = UNSET + wait: Union[Unset, bool] = UNSET dp_policy: Union[Unset, "DPPolicy"] = UNSET + join_id: Union[Unset, str] = UNSET + timeout: Union[Unset, int] = UNSET encrypted: Union[Unset, bool] = UNSET - local: Union[Unset, bool] = UNSET - local_input_id: Union[Unset, str] = UNSET owner: Union[Unset, str] = UNSET - preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET - timeout: Union[Unset, int] = UNSET + project_id: Union[Unset, str] = UNSET + release_results: Union[Unset, bool] = UNSET data_source_parameters: Union[Unset, "ComputationDataSourceParameters"] = UNSET + input_data_object: Union[Unset, str] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: type = self.type.value value = self.value - join_id = self.join_id + local = self.local local_input: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.local_input, Unset): local_input = self.local_input.to_dict() - wait = self.wait - input_data_object = self.input_data_object + preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.preprocessing_parameters, Unset): + preprocessing_parameters = self.preprocessing_parameters.to_dict() + cohort_id = self.cohort_id - project_id = self.project_id - release_results = self.release_results + end_to_end_encrypted = self.end_to_end_encrypted + local_input_id = self.local_input_id + wait = self.wait dp_policy: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.dp_policy, Unset): dp_policy = self.dp_policy.to_dict() + join_id = self.join_id + timeout = self.timeout encrypted = self.encrypted - local = self.local - local_input_id = self.local_input_id owner = self.owner - preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.preprocessing_parameters, 
Unset): - preprocessing_parameters = self.preprocessing_parameters.to_dict() - - timeout = self.timeout + project_id = self.project_id + release_results = self.release_results data_source_parameters: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.data_source_parameters, Unset): data_source_parameters = self.data_source_parameters.to_dict() + input_data_object = self.input_data_object + field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update( @@ -106,36 +113,38 @@ def to_dict(self) -> Dict[str, Any]: "value": value, } ) - if join_id is not UNSET: - field_dict["joinId"] = join_id + if local is not UNSET: + field_dict["local"] = local if local_input is not UNSET: field_dict["localInput"] = local_input - if wait is not UNSET: - field_dict["wait"] = wait - if input_data_object is not UNSET: - field_dict["inputDataObject"] = input_data_object + if preprocessing_parameters is not UNSET: + field_dict["preprocessingParameters"] = preprocessing_parameters if cohort_id is not UNSET: field_dict["cohortId"] = cohort_id - if project_id is not UNSET: - field_dict["projectId"] = project_id - if release_results is not UNSET: - field_dict["releaseResults"] = release_results + if end_to_end_encrypted is not UNSET: + field_dict["endToEndEncrypted"] = end_to_end_encrypted + if local_input_id is not UNSET: + field_dict["localInputID"] = local_input_id + if wait is not UNSET: + field_dict["wait"] = wait if dp_policy is not UNSET: field_dict["DPPolicy"] = dp_policy + if join_id is not UNSET: + field_dict["joinId"] = join_id + if timeout is not UNSET: + field_dict["timeout"] = timeout if encrypted is not UNSET: field_dict["encrypted"] = encrypted - if local is not UNSET: - field_dict["local"] = local - if local_input_id is not UNSET: - field_dict["localInputID"] = local_input_id if owner is not UNSET: field_dict["owner"] = owner - if preprocessing_parameters is not UNSET: - field_dict["preprocessingParameters"] = preprocessing_parameters - if timeout is not UNSET: - field_dict["timeout"] = timeout + if project_id is not UNSET: + field_dict["projectId"] = project_id + if release_results is not UNSET: + field_dict["releaseResults"] = release_results if data_source_parameters is not UNSET: field_dict["dataSourceParameters"] = data_source_parameters + if input_data_object is not UNSET: + field_dict["inputDataObject"] = input_data_object return field_dict @@ -151,7 +160,7 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: value = d.pop("value") - join_id = d.pop("joinId", UNSET) + local = d.pop("local", UNSET) _local_input = d.pop("localInput", UNSET) local_input: Union[Unset, LocalInput] @@ -160,15 +169,20 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: local_input = LocalInput.from_dict(_local_input) - wait = d.pop("wait", UNSET) - - input_data_object = d.pop("inputDataObject", UNSET) + _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) + preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] + if isinstance(_preprocessing_parameters, Unset): + preprocessing_parameters = UNSET + else: + preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) cohort_id = d.pop("cohortId", UNSET) - project_id = d.pop("projectId", UNSET) + end_to_end_encrypted = d.pop("endToEndEncrypted", UNSET) - release_results = d.pop("releaseResults", UNSET) + local_input_id = d.pop("localInputID", UNSET) + + wait = d.pop("wait", UNSET) _dp_policy = d.pop("DPPolicy", UNSET) dp_policy: 
Union[Unset, DPPolicy] @@ -177,22 +191,17 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: dp_policy = DPPolicy.from_dict(_dp_policy) - encrypted = d.pop("encrypted", UNSET) + join_id = d.pop("joinId", UNSET) - local = d.pop("local", UNSET) + timeout = d.pop("timeout", UNSET) - local_input_id = d.pop("localInputID", UNSET) + encrypted = d.pop("encrypted", UNSET) owner = d.pop("owner", UNSET) - _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) - preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] - if isinstance(_preprocessing_parameters, Unset): - preprocessing_parameters = UNSET - else: - preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) + project_id = d.pop("projectId", UNSET) - timeout = d.pop("timeout", UNSET) + release_results = d.pop("releaseResults", UNSET) _data_source_parameters = d.pop("dataSourceParameters", UNSET) data_source_parameters: Union[Unset, ComputationDataSourceParameters] @@ -201,24 +210,27 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: data_source_parameters = ComputationDataSourceParameters.from_dict(_data_source_parameters) + input_data_object = d.pop("inputDataObject", UNSET) + bootstrap = cls( type=type, value=value, - join_id=join_id, + local=local, local_input=local_input, - wait=wait, - input_data_object=input_data_object, + preprocessing_parameters=preprocessing_parameters, cohort_id=cohort_id, - project_id=project_id, - release_results=release_results, + end_to_end_encrypted=end_to_end_encrypted, + local_input_id=local_input_id, + wait=wait, dp_policy=dp_policy, + join_id=join_id, + timeout=timeout, encrypted=encrypted, - local=local, - local_input_id=local_input_id, owner=owner, - preprocessing_parameters=preprocessing_parameters, - timeout=timeout, + project_id=project_id, + release_results=release_results, data_source_parameters=data_source_parameters, + input_data_object=input_data_object, ) bootstrap.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/collective_key_gen.py b/src/tuneinsight/api/sdk/models/collective_key_gen.py index b9dab22..311f7d8 100644 --- a/src/tuneinsight/api/sdk/models/collective_key_gen.py +++ b/src/tuneinsight/api/sdk/models/collective_key_gen.py @@ -20,81 +20,88 @@ class CollectiveKeyGen: """ Attributes: type (ComputationType): Type of the computation. - join_id (Union[Unset, str]): Unique identifier of a data object. + local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured + the network) local_input (Union[Unset, LocalInput]): If a local input is provided, the node initiating the computation will use it instead of querying the datasource. This data is *not* shared to other nodes, only used for the duration of the computation. The local input columns/values must be in the form {: [, , ...], ...} - wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. - input_data_object (Union[Unset, str]): Shared identifier of a data object. + preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters + applied to the input retrieved from the datasource, if applicable cohort_id (Union[Unset, str]): Unique identifier of a data object. 
+ end_to_end_encrypted (Union[Unset, bool]): if the end to end encrypted mode is set to true, + then when release results is set to true and the output + is initially encrypted with a network collective key, then it is key switched to + the initiating user's public key. + local_input_id (Union[Unset, str]): Unique identifier of a data object. + wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. + dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various + disclosure prevention mechanisms + join_id (Union[Unset, str]): Unique identifier of a data object. + timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. + encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. + owner (Union[Unset, str]): The username of the end user who requested the computation. project_id (Union[Unset, str]): Unique identifier of a project. release_results (Union[Unset, bool]): flag to set to true if the computation should directly release the output results. If set, then encrypted results are automatically key switched and decrypted and a Result entity is saved - dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various - disclosure prevention mechanisms - encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. - local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured - the network) - local_input_id (Union[Unset, str]): Unique identifier of a data object. - owner (Union[Unset, str]): The username of the end user who requested the computation. - preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters - applied to the input retrieved from the datasource, if applicable - timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. data_source_parameters (Union[Unset, ComputationDataSourceParameters]): Parameters used to query the datasource from each node before the computation + input_data_object (Union[Unset, str]): Shared identifier of a data object. 
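Every computation model touched by this diff (Bootstrap, CollectiveKeyGen, CollectiveKeySwitch, ComputationDefinition, ...) shares the fields being reordered here. Below is a data-only sketch of the corresponding wire keys, all taken from the field_dict mappings in this diff; the "type" value is a placeholder because ComputationType's string values are not part of this change.

# No request is made here; this only shows the snake_case attribute -> camelCase key mapping.
request_body = {
    "type": "<computation-type>",        # placeholder for a ComputationType value
    "local": False,                      # attribute: local
    "endToEndEncrypted": True,           # attribute: end_to_end_encrypted (new in this release)
    "localInputID": "<data-object-id>",  # attribute: local_input_id
    "wait": True,                        # attribute: wait (block until the result is ready)
    "timeout": 300,                      # attribute: timeout, in seconds
    "releaseResults": True,              # attribute: release_results
}
# e.g. CollectiveKeyGen.from_dict(request_body) rebuilds the model from such a payload,
# provided "type" holds a valid ComputationType string (not shown in this diff).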
""" type: ComputationType - join_id: Union[Unset, str] = UNSET + local: Union[Unset, bool] = UNSET local_input: Union[Unset, "LocalInput"] = UNSET - wait: Union[Unset, bool] = UNSET - input_data_object: Union[Unset, str] = UNSET + preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET cohort_id: Union[Unset, str] = UNSET - project_id: Union[Unset, str] = UNSET - release_results: Union[Unset, bool] = UNSET + end_to_end_encrypted: Union[Unset, bool] = UNSET + local_input_id: Union[Unset, str] = UNSET + wait: Union[Unset, bool] = UNSET dp_policy: Union[Unset, "DPPolicy"] = UNSET + join_id: Union[Unset, str] = UNSET + timeout: Union[Unset, int] = UNSET encrypted: Union[Unset, bool] = UNSET - local: Union[Unset, bool] = UNSET - local_input_id: Union[Unset, str] = UNSET owner: Union[Unset, str] = UNSET - preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET - timeout: Union[Unset, int] = UNSET + project_id: Union[Unset, str] = UNSET + release_results: Union[Unset, bool] = UNSET data_source_parameters: Union[Unset, "ComputationDataSourceParameters"] = UNSET + input_data_object: Union[Unset, str] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: type = self.type.value - join_id = self.join_id + local = self.local local_input: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.local_input, Unset): local_input = self.local_input.to_dict() - wait = self.wait - input_data_object = self.input_data_object + preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.preprocessing_parameters, Unset): + preprocessing_parameters = self.preprocessing_parameters.to_dict() + cohort_id = self.cohort_id - project_id = self.project_id - release_results = self.release_results + end_to_end_encrypted = self.end_to_end_encrypted + local_input_id = self.local_input_id + wait = self.wait dp_policy: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.dp_policy, Unset): dp_policy = self.dp_policy.to_dict() + join_id = self.join_id + timeout = self.timeout encrypted = self.encrypted - local = self.local - local_input_id = self.local_input_id owner = self.owner - preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.preprocessing_parameters, Unset): - preprocessing_parameters = self.preprocessing_parameters.to_dict() - - timeout = self.timeout + project_id = self.project_id + release_results = self.release_results data_source_parameters: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.data_source_parameters, Unset): data_source_parameters = self.data_source_parameters.to_dict() + input_data_object = self.input_data_object + field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update( @@ -102,36 +109,38 @@ def to_dict(self) -> Dict[str, Any]: "type": type, } ) - if join_id is not UNSET: - field_dict["joinId"] = join_id + if local is not UNSET: + field_dict["local"] = local if local_input is not UNSET: field_dict["localInput"] = local_input - if wait is not UNSET: - field_dict["wait"] = wait - if input_data_object is not UNSET: - field_dict["inputDataObject"] = input_data_object + if preprocessing_parameters is not UNSET: + field_dict["preprocessingParameters"] = preprocessing_parameters if cohort_id is not UNSET: field_dict["cohortId"] = cohort_id - if project_id is not UNSET: - field_dict["projectId"] = project_id - if release_results is not UNSET: - 
field_dict["releaseResults"] = release_results + if end_to_end_encrypted is not UNSET: + field_dict["endToEndEncrypted"] = end_to_end_encrypted + if local_input_id is not UNSET: + field_dict["localInputID"] = local_input_id + if wait is not UNSET: + field_dict["wait"] = wait if dp_policy is not UNSET: field_dict["DPPolicy"] = dp_policy + if join_id is not UNSET: + field_dict["joinId"] = join_id + if timeout is not UNSET: + field_dict["timeout"] = timeout if encrypted is not UNSET: field_dict["encrypted"] = encrypted - if local is not UNSET: - field_dict["local"] = local - if local_input_id is not UNSET: - field_dict["localInputID"] = local_input_id if owner is not UNSET: field_dict["owner"] = owner - if preprocessing_parameters is not UNSET: - field_dict["preprocessingParameters"] = preprocessing_parameters - if timeout is not UNSET: - field_dict["timeout"] = timeout + if project_id is not UNSET: + field_dict["projectId"] = project_id + if release_results is not UNSET: + field_dict["releaseResults"] = release_results if data_source_parameters is not UNSET: field_dict["dataSourceParameters"] = data_source_parameters + if input_data_object is not UNSET: + field_dict["inputDataObject"] = input_data_object return field_dict @@ -145,7 +154,7 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() type = ComputationType(d.pop("type")) - join_id = d.pop("joinId", UNSET) + local = d.pop("local", UNSET) _local_input = d.pop("localInput", UNSET) local_input: Union[Unset, LocalInput] @@ -154,15 +163,20 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: local_input = LocalInput.from_dict(_local_input) - wait = d.pop("wait", UNSET) - - input_data_object = d.pop("inputDataObject", UNSET) + _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) + preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] + if isinstance(_preprocessing_parameters, Unset): + preprocessing_parameters = UNSET + else: + preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) cohort_id = d.pop("cohortId", UNSET) - project_id = d.pop("projectId", UNSET) + end_to_end_encrypted = d.pop("endToEndEncrypted", UNSET) - release_results = d.pop("releaseResults", UNSET) + local_input_id = d.pop("localInputID", UNSET) + + wait = d.pop("wait", UNSET) _dp_policy = d.pop("DPPolicy", UNSET) dp_policy: Union[Unset, DPPolicy] @@ -171,22 +185,17 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: dp_policy = DPPolicy.from_dict(_dp_policy) - encrypted = d.pop("encrypted", UNSET) + join_id = d.pop("joinId", UNSET) - local = d.pop("local", UNSET) + timeout = d.pop("timeout", UNSET) - local_input_id = d.pop("localInputID", UNSET) + encrypted = d.pop("encrypted", UNSET) owner = d.pop("owner", UNSET) - _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) - preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] - if isinstance(_preprocessing_parameters, Unset): - preprocessing_parameters = UNSET - else: - preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) + project_id = d.pop("projectId", UNSET) - timeout = d.pop("timeout", UNSET) + release_results = d.pop("releaseResults", UNSET) _data_source_parameters = d.pop("dataSourceParameters", UNSET) data_source_parameters: Union[Unset, ComputationDataSourceParameters] @@ -195,23 +204,26 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: data_source_parameters = 
ComputationDataSourceParameters.from_dict(_data_source_parameters) + input_data_object = d.pop("inputDataObject", UNSET) + collective_key_gen = cls( type=type, - join_id=join_id, + local=local, local_input=local_input, - wait=wait, - input_data_object=input_data_object, + preprocessing_parameters=preprocessing_parameters, cohort_id=cohort_id, - project_id=project_id, - release_results=release_results, + end_to_end_encrypted=end_to_end_encrypted, + local_input_id=local_input_id, + wait=wait, dp_policy=dp_policy, + join_id=join_id, + timeout=timeout, encrypted=encrypted, - local=local, - local_input_id=local_input_id, owner=owner, - preprocessing_parameters=preprocessing_parameters, - timeout=timeout, + project_id=project_id, + release_results=release_results, data_source_parameters=data_source_parameters, + input_data_object=input_data_object, ) collective_key_gen.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/collective_key_switch.py b/src/tuneinsight/api/sdk/models/collective_key_switch.py index 84572dd..bb1ea79 100644 --- a/src/tuneinsight/api/sdk/models/collective_key_switch.py +++ b/src/tuneinsight/api/sdk/models/collective_key_switch.py @@ -21,51 +21,56 @@ class CollectiveKeySwitch: Attributes: type (ComputationType): Type of the computation. cipher_vector (str): Unique identifier of a data object. - join_id (Union[Unset, str]): Unique identifier of a data object. + local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured + the network) local_input (Union[Unset, LocalInput]): If a local input is provided, the node initiating the computation will use it instead of querying the datasource. This data is *not* shared to other nodes, only used for the duration of the computation. The local input columns/values must be in the form {: [, , ...], ...} - wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. - input_data_object (Union[Unset, str]): Shared identifier of a data object. + preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters + applied to the input retrieved from the datasource, if applicable cohort_id (Union[Unset, str]): Unique identifier of a data object. + end_to_end_encrypted (Union[Unset, bool]): if the end to end encrypted mode is set to true, + then when release results is set to true and the output + is initially encrypted with a network collective key, then it is key switched to + the initiating user's public key. + local_input_id (Union[Unset, str]): Unique identifier of a data object. + wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. + dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various + disclosure prevention mechanisms + join_id (Union[Unset, str]): Unique identifier of a data object. + timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. + encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. + owner (Union[Unset, str]): The username of the end user who requested the computation. project_id (Union[Unset, str]): Unique identifier of a project. release_results (Union[Unset, bool]): flag to set to true if the computation should directly release the output results. 
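The CollectiveKeySwitch hunk above keeps two model-specific fields next to the shared ones; for orientation, a data-only sketch of those keys (identifiers are placeholders, nothing is sent).

key_switch_body = {
    "type": "<computation-type>",           # placeholder ComputationType value
    "cipherVector": "<data-object-id>",     # required: ciphertext to key-switch
    "targetPublicKey": "<data-object-id>",  # optional: public key to switch to
    "wait": True,
}
# The new endToEndEncrypted flag automates the same step for released results: per the
# docstrings above, the output is key-switched to the initiating user's public key.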
If set, then encrypted results are automatically key switched and decrypted and a Result entity is saved - dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various - disclosure prevention mechanisms - encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. - local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured - the network) - local_input_id (Union[Unset, str]): Unique identifier of a data object. - owner (Union[Unset, str]): The username of the end user who requested the computation. - preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters - applied to the input retrieved from the datasource, if applicable - timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. data_source_parameters (Union[Unset, ComputationDataSourceParameters]): Parameters used to query the datasource from each node before the computation + input_data_object (Union[Unset, str]): Shared identifier of a data object. target_public_key (Union[Unset, str]): Unique identifier of a data object. """ type: ComputationType cipher_vector: str - join_id: Union[Unset, str] = UNSET + local: Union[Unset, bool] = UNSET local_input: Union[Unset, "LocalInput"] = UNSET - wait: Union[Unset, bool] = UNSET - input_data_object: Union[Unset, str] = UNSET + preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET cohort_id: Union[Unset, str] = UNSET - project_id: Union[Unset, str] = UNSET - release_results: Union[Unset, bool] = UNSET + end_to_end_encrypted: Union[Unset, bool] = UNSET + local_input_id: Union[Unset, str] = UNSET + wait: Union[Unset, bool] = UNSET dp_policy: Union[Unset, "DPPolicy"] = UNSET + join_id: Union[Unset, str] = UNSET + timeout: Union[Unset, int] = UNSET encrypted: Union[Unset, bool] = UNSET - local: Union[Unset, bool] = UNSET - local_input_id: Union[Unset, str] = UNSET owner: Union[Unset, str] = UNSET - preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET - timeout: Union[Unset, int] = UNSET + project_id: Union[Unset, str] = UNSET + release_results: Union[Unset, bool] = UNSET data_source_parameters: Union[Unset, "ComputationDataSourceParameters"] = UNSET + input_data_object: Union[Unset, str] = UNSET target_public_key: Union[Unset, str] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) @@ -73,33 +78,34 @@ def to_dict(self) -> Dict[str, Any]: type = self.type.value cipher_vector = self.cipher_vector - join_id = self.join_id + local = self.local local_input: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.local_input, Unset): local_input = self.local_input.to_dict() - wait = self.wait - input_data_object = self.input_data_object + preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.preprocessing_parameters, Unset): + preprocessing_parameters = self.preprocessing_parameters.to_dict() + cohort_id = self.cohort_id - project_id = self.project_id - release_results = self.release_results + end_to_end_encrypted = self.end_to_end_encrypted + local_input_id = self.local_input_id + wait = self.wait dp_policy: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.dp_policy, Unset): dp_policy = self.dp_policy.to_dict() + join_id = self.join_id + timeout = self.timeout encrypted = self.encrypted - local = self.local - 
local_input_id = self.local_input_id owner = self.owner - preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.preprocessing_parameters, Unset): - preprocessing_parameters = self.preprocessing_parameters.to_dict() - - timeout = self.timeout + project_id = self.project_id + release_results = self.release_results data_source_parameters: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.data_source_parameters, Unset): data_source_parameters = self.data_source_parameters.to_dict() + input_data_object = self.input_data_object target_public_key = self.target_public_key field_dict: Dict[str, Any] = {} @@ -110,36 +116,38 @@ def to_dict(self) -> Dict[str, Any]: "cipherVector": cipher_vector, } ) - if join_id is not UNSET: - field_dict["joinId"] = join_id + if local is not UNSET: + field_dict["local"] = local if local_input is not UNSET: field_dict["localInput"] = local_input - if wait is not UNSET: - field_dict["wait"] = wait - if input_data_object is not UNSET: - field_dict["inputDataObject"] = input_data_object + if preprocessing_parameters is not UNSET: + field_dict["preprocessingParameters"] = preprocessing_parameters if cohort_id is not UNSET: field_dict["cohortId"] = cohort_id - if project_id is not UNSET: - field_dict["projectId"] = project_id - if release_results is not UNSET: - field_dict["releaseResults"] = release_results + if end_to_end_encrypted is not UNSET: + field_dict["endToEndEncrypted"] = end_to_end_encrypted + if local_input_id is not UNSET: + field_dict["localInputID"] = local_input_id + if wait is not UNSET: + field_dict["wait"] = wait if dp_policy is not UNSET: field_dict["DPPolicy"] = dp_policy + if join_id is not UNSET: + field_dict["joinId"] = join_id + if timeout is not UNSET: + field_dict["timeout"] = timeout if encrypted is not UNSET: field_dict["encrypted"] = encrypted - if local is not UNSET: - field_dict["local"] = local - if local_input_id is not UNSET: - field_dict["localInputID"] = local_input_id if owner is not UNSET: field_dict["owner"] = owner - if preprocessing_parameters is not UNSET: - field_dict["preprocessingParameters"] = preprocessing_parameters - if timeout is not UNSET: - field_dict["timeout"] = timeout + if project_id is not UNSET: + field_dict["projectId"] = project_id + if release_results is not UNSET: + field_dict["releaseResults"] = release_results if data_source_parameters is not UNSET: field_dict["dataSourceParameters"] = data_source_parameters + if input_data_object is not UNSET: + field_dict["inputDataObject"] = input_data_object if target_public_key is not UNSET: field_dict["targetPublicKey"] = target_public_key @@ -157,7 +165,7 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: cipher_vector = d.pop("cipherVector") - join_id = d.pop("joinId", UNSET) + local = d.pop("local", UNSET) _local_input = d.pop("localInput", UNSET) local_input: Union[Unset, LocalInput] @@ -166,15 +174,20 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: local_input = LocalInput.from_dict(_local_input) - wait = d.pop("wait", UNSET) - - input_data_object = d.pop("inputDataObject", UNSET) + _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) + preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] + if isinstance(_preprocessing_parameters, Unset): + preprocessing_parameters = UNSET + else: + preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) cohort_id = d.pop("cohortId", UNSET) - project_id = d.pop("projectId", 
UNSET) + end_to_end_encrypted = d.pop("endToEndEncrypted", UNSET) - release_results = d.pop("releaseResults", UNSET) + local_input_id = d.pop("localInputID", UNSET) + + wait = d.pop("wait", UNSET) _dp_policy = d.pop("DPPolicy", UNSET) dp_policy: Union[Unset, DPPolicy] @@ -183,22 +196,17 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: dp_policy = DPPolicy.from_dict(_dp_policy) - encrypted = d.pop("encrypted", UNSET) + join_id = d.pop("joinId", UNSET) - local = d.pop("local", UNSET) + timeout = d.pop("timeout", UNSET) - local_input_id = d.pop("localInputID", UNSET) + encrypted = d.pop("encrypted", UNSET) owner = d.pop("owner", UNSET) - _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) - preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] - if isinstance(_preprocessing_parameters, Unset): - preprocessing_parameters = UNSET - else: - preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) + project_id = d.pop("projectId", UNSET) - timeout = d.pop("timeout", UNSET) + release_results = d.pop("releaseResults", UNSET) _data_source_parameters = d.pop("dataSourceParameters", UNSET) data_source_parameters: Union[Unset, ComputationDataSourceParameters] @@ -207,26 +215,29 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: data_source_parameters = ComputationDataSourceParameters.from_dict(_data_source_parameters) + input_data_object = d.pop("inputDataObject", UNSET) + target_public_key = d.pop("targetPublicKey", UNSET) collective_key_switch = cls( type=type, cipher_vector=cipher_vector, - join_id=join_id, + local=local, local_input=local_input, - wait=wait, - input_data_object=input_data_object, + preprocessing_parameters=preprocessing_parameters, cohort_id=cohort_id, - project_id=project_id, - release_results=release_results, + end_to_end_encrypted=end_to_end_encrypted, + local_input_id=local_input_id, + wait=wait, dp_policy=dp_policy, + join_id=join_id, + timeout=timeout, encrypted=encrypted, - local=local, - local_input_id=local_input_id, owner=owner, - preprocessing_parameters=preprocessing_parameters, - timeout=timeout, + project_id=project_id, + release_results=release_results, data_source_parameters=data_source_parameters, + input_data_object=input_data_object, target_public_key=target_public_key, ) diff --git a/src/tuneinsight/api/sdk/models/column_schema.py b/src/tuneinsight/api/sdk/models/column_schema.py index 642e45d..b792daf 100644 --- a/src/tuneinsight/api/sdk/models/column_schema.py +++ b/src/tuneinsight/api/sdk/models/column_schema.py @@ -15,49 +15,43 @@ class ColumnSchema: """ Attributes: - title (Union[Unset, str]): name given to the column for informative purposes - checks (Union[Unset, ColumnSchemaChecks]): optional additional checks - coerce (Union[Unset, bool]): if set to true, the validation will first coerce the column into the corresponding - dtype - before applying the validation. description (Union[Unset, str]): informative description for the column dtype (Union[Unset, str]): expected data type for the column supported types: https://pandera.readthedocs.io/en/stable/dtype_validation.html#supported-pandas-datatypes nullable (Union[Unset, bool]): whether the column is allowed to contain null values. required (Union[Unset, None, bool]): if set to false, the column will be considered as optional in the dataset. 
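The ColumnSchema model above describes per-column validation (dtype, nullability, optionality). A minimal round-trip sketch using only field names from this diff, with the import path taken from the file path shown:

from tuneinsight.api.sdk.models.column_schema import ColumnSchema

schema = ColumnSchema.from_dict({
    "title": "age",       # informative column name
    "dtype": "int64",     # pandera-supported pandas dtype
    "nullable": False,    # null values not allowed
    "required": True,     # column must be present in the dataset
})

assert schema.dtype == "int64"
assert schema.to_dict()["nullable"] is False  # keys round-trip unchanged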
+ title (Union[Unset, str]): name given to the column for informative purposes + checks (Union[Unset, ColumnSchemaChecks]): optional additional checks + coerce (Union[Unset, bool]): if set to true, the validation will first coerce the column into the corresponding + dtype + before applying the validation. """ - title: Union[Unset, str] = UNSET - checks: Union[Unset, "ColumnSchemaChecks"] = UNSET - coerce: Union[Unset, bool] = UNSET description: Union[Unset, str] = UNSET dtype: Union[Unset, str] = UNSET nullable: Union[Unset, bool] = UNSET required: Union[Unset, None, bool] = UNSET + title: Union[Unset, str] = UNSET + checks: Union[Unset, "ColumnSchemaChecks"] = UNSET + coerce: Union[Unset, bool] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: + description = self.description + dtype = self.dtype + nullable = self.nullable + required = self.required title = self.title checks: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.checks, Unset): checks = self.checks.to_dict() coerce = self.coerce - description = self.description - dtype = self.dtype - nullable = self.nullable - required = self.required field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if title is not UNSET: - field_dict["title"] = title - if checks is not UNSET: - field_dict["checks"] = checks - if coerce is not UNSET: - field_dict["coerce"] = coerce if description is not UNSET: field_dict["description"] = description if dtype is not UNSET: @@ -66,6 +60,12 @@ def to_dict(self) -> Dict[str, Any]: field_dict["nullable"] = nullable if required is not UNSET: field_dict["required"] = required + if title is not UNSET: + field_dict["title"] = title + if checks is not UNSET: + field_dict["checks"] = checks + if coerce is not UNSET: + field_dict["coerce"] = coerce return field_dict @@ -74,6 +74,14 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.column_schema_checks import ColumnSchemaChecks d = src_dict.copy() + description = d.pop("description", UNSET) + + dtype = d.pop("dtype", UNSET) + + nullable = d.pop("nullable", UNSET) + + required = d.pop("required", UNSET) + title = d.pop("title", UNSET) _checks = d.pop("checks", UNSET) @@ -85,22 +93,14 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: coerce = d.pop("coerce", UNSET) - description = d.pop("description", UNSET) - - dtype = d.pop("dtype", UNSET) - - nullable = d.pop("nullable", UNSET) - - required = d.pop("required", UNSET) - column_schema = cls( - title=title, - checks=checks, - coerce=coerce, description=description, dtype=dtype, nullable=nullable, required=required, + title=title, + checks=checks, + coerce=coerce, ) column_schema.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/column_schema_checks.py b/src/tuneinsight/api/sdk/models/column_schema_checks.py index 7ddff2f..6d85236 100644 --- a/src/tuneinsight/api/sdk/models/column_schema_checks.py +++ b/src/tuneinsight/api/sdk/models/column_schema_checks.py @@ -16,33 +16,29 @@ class ColumnSchemaChecks: """optional additional checks Attributes: + in_range (Union[Unset, ColumnSchemaChecksInRange]): + isin (Union[Unset, List[Any]]): lt (Union[Unset, Any]): verifies that all values are less than this value. str_startswith (Union[Unset, str]): + eq (Union[Unset, Any]): verifies that all values are equal to this value. + ge (Union[Unset, Any]): verifies that all values are greater than or equal to this value. 
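ColumnSchemaChecks above carries comparison (lt/le/gt/ge/eq), membership (isin/notin), range and string-prefix checks. The sketch below only uses names from this diff and also shows that keys the model does not declare survive a round trip through additional_properties (from_dict stores leftovers, to_dict writes them back out):

from tuneinsight.api.sdk.models.column_schema_checks import ColumnSchemaChecks

checks = ColumnSchemaChecks.from_dict({
    "ge": 0,                   # all values >= 0
    "lt": 120,                 # all values < 120
    "isin": [0, 1, 2],         # allowed values
    "str_length": {"max": 3},  # hypothetical key, not a declared field of this model
})

out = checks.to_dict()
assert out["ge"] == 0 and out["lt"] == 120
assert out["str_length"] == {"max": 3}  # preserved via additional_properties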
gt (Union[Unset, Any]): verifies that all values are greater than this value. le (Union[Unset, Any]): verifies that all values are less than or equal to this value. - in_range (Union[Unset, ColumnSchemaChecksInRange]): - isin (Union[Unset, List[Any]]): notin (Union[Unset, List[Any]]): - eq (Union[Unset, Any]): verifies that all values are equal to this value. - ge (Union[Unset, Any]): verifies that all values are greater than or equal to this value. """ + in_range: Union[Unset, "ColumnSchemaChecksInRange"] = UNSET + isin: Union[Unset, List[Any]] = UNSET lt: Union[Unset, Any] = UNSET str_startswith: Union[Unset, str] = UNSET + eq: Union[Unset, Any] = UNSET + ge: Union[Unset, Any] = UNSET gt: Union[Unset, Any] = UNSET le: Union[Unset, Any] = UNSET - in_range: Union[Unset, "ColumnSchemaChecksInRange"] = UNSET - isin: Union[Unset, List[Any]] = UNSET notin: Union[Unset, List[Any]] = UNSET - eq: Union[Unset, Any] = UNSET - ge: Union[Unset, Any] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - lt = self.lt - str_startswith = self.str_startswith - gt = self.gt - le = self.le in_range: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.in_range, Unset): in_range = self.in_range.to_dict() @@ -51,34 +47,37 @@ def to_dict(self) -> Dict[str, Any]: if not isinstance(self.isin, Unset): isin = self.isin + lt = self.lt + str_startswith = self.str_startswith + eq = self.eq + ge = self.ge + gt = self.gt + le = self.le notin: Union[Unset, List[Any]] = UNSET if not isinstance(self.notin, Unset): notin = self.notin - eq = self.eq - ge = self.ge - field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) + if in_range is not UNSET: + field_dict["in_range"] = in_range + if isin is not UNSET: + field_dict["isin"] = isin if lt is not UNSET: field_dict["lt"] = lt if str_startswith is not UNSET: field_dict["str_startswith"] = str_startswith + if eq is not UNSET: + field_dict["eq"] = eq + if ge is not UNSET: + field_dict["ge"] = ge if gt is not UNSET: field_dict["gt"] = gt if le is not UNSET: field_dict["le"] = le - if in_range is not UNSET: - field_dict["in_range"] = in_range - if isin is not UNSET: - field_dict["isin"] = isin if notin is not UNSET: field_dict["notin"] = notin - if eq is not UNSET: - field_dict["eq"] = eq - if ge is not UNSET: - field_dict["ge"] = ge return field_dict @@ -87,14 +86,6 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.column_schema_checks_in_range import ColumnSchemaChecksInRange d = src_dict.copy() - lt = d.pop("lt", UNSET) - - str_startswith = d.pop("str_startswith", UNSET) - - gt = d.pop("gt", UNSET) - - le = d.pop("le", UNSET) - _in_range = d.pop("in_range", UNSET) in_range: Union[Unset, ColumnSchemaChecksInRange] if isinstance(_in_range, Unset): @@ -104,22 +95,30 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: isin = cast(List[Any], d.pop("isin", UNSET)) - notin = cast(List[Any], d.pop("notin", UNSET)) + lt = d.pop("lt", UNSET) + + str_startswith = d.pop("str_startswith", UNSET) eq = d.pop("eq", UNSET) ge = d.pop("ge", UNSET) + gt = d.pop("gt", UNSET) + + le = d.pop("le", UNSET) + + notin = cast(List[Any], d.pop("notin", UNSET)) + column_schema_checks = cls( + in_range=in_range, + isin=isin, lt=lt, str_startswith=str_startswith, + eq=eq, + ge=ge, gt=gt, le=le, - in_range=in_range, - isin=isin, notin=notin, - eq=eq, - ge=ge, ) column_schema_checks.additional_properties = d diff --git 
a/src/tuneinsight/api/sdk/models/column_schema_checks_in_range.py b/src/tuneinsight/api/sdk/models/column_schema_checks_in_range.py index c380c95..9e47964 100644 --- a/src/tuneinsight/api/sdk/models/column_schema_checks_in_range.py +++ b/src/tuneinsight/api/sdk/models/column_schema_checks_in_range.py @@ -11,54 +11,54 @@ class ColumnSchemaChecksInRange: """ Attributes: - max_value (Union[Unset, float]): - min_value (Union[Unset, float]): include_max (Union[Unset, bool]): include_min (Union[Unset, bool]): + max_value (Union[Unset, float]): + min_value (Union[Unset, float]): """ - max_value: Union[Unset, float] = UNSET - min_value: Union[Unset, float] = UNSET include_max: Union[Unset, bool] = UNSET include_min: Union[Unset, bool] = UNSET + max_value: Union[Unset, float] = UNSET + min_value: Union[Unset, float] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - max_value = self.max_value - min_value = self.min_value include_max = self.include_max include_min = self.include_min + max_value = self.max_value + min_value = self.min_value field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if max_value is not UNSET: - field_dict["max_value"] = max_value - if min_value is not UNSET: - field_dict["min_value"] = min_value if include_max is not UNSET: field_dict["include_max"] = include_max if include_min is not UNSET: field_dict["include_min"] = include_min + if max_value is not UNSET: + field_dict["max_value"] = max_value + if min_value is not UNSET: + field_dict["min_value"] = min_value return field_dict @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() - max_value = d.pop("max_value", UNSET) - - min_value = d.pop("min_value", UNSET) - include_max = d.pop("include_max", UNSET) include_min = d.pop("include_min", UNSET) + max_value = d.pop("max_value", UNSET) + + min_value = d.pop("min_value", UNSET) + column_schema_checks_in_range = cls( - max_value=max_value, - min_value=min_value, include_max=include_max, include_min=include_min, + max_value=max_value, + min_value=min_value, ) column_schema_checks_in_range.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/computation.py b/src/tuneinsight/api/sdk/models/computation.py index 260c0e0..7425993 100644 --- a/src/tuneinsight/api/sdk/models/computation.py +++ b/src/tuneinsight/api/sdk/models/computation.py @@ -19,67 +19,54 @@ class Computation: """Metadata of a computation. Attributes: - id (str): Identifier of a computation, unique across all computing nodes. - status (ComputationStatus): Status of the computation. definition (ComputationDefinition): Generic computation. - owner (Union[Unset, str]): identifier of the end user that has requested the computation - progress (Union[Unset, int]): - started_at (Union[Unset, str]): - created_at (Union[Unset, str]): - description (Union[Unset, str]): - errors (Union[Unset, List['ComputationError']]): list of errors that occurred during the computation + status (ComputationStatus): Status of the computation. + id (str): Identifier of a computation, unique across all computing nodes. 
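One consequence of the field reordering throughout this diff, illustrated with ColumnSchemaChecksInRange from the hunk above (max_value/min_value now come after include_max/include_min): the generated attrs classes do not validate argument types, so positional construction silently changes meaning across such a reorder, while keyword arguments are unaffected. A short sketch, assuming the generated __init__ takes parameters in field-definition order:

from tuneinsight.api.sdk.models.column_schema_checks_in_range import ColumnSchemaChecksInRange

# Keyword arguments are order-independent and keep working across the reorder.
rng = ColumnSchemaChecksInRange(min_value=0.0, max_value=100.0, include_min=True)
assert rng.to_dict() == {"include_min": True, "max_value": 100.0, "min_value": 0.0}

# A positional call written against the old order (max_value, min_value, ...) now binds
# include_max/include_min instead, so the bounds are silently dropped.
legacy_style = ColumnSchemaChecksInRange(0.0, 100.0)
assert "max_value" not in legacy_style.to_dict()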
measurements (Union[Unset, List['Measurement']]): list of benchmarking measurements done on the computation - ended_at (Union[Unset, str]): - visible (Union[Unset, bool]): False if the computation is internal and should not be displayed to the user by - default - egress (Union[Unset, int]): keeps track of the number of bytes sent during a computation to serve as a bandwidth - measure ingress (Union[Unset, int]): keeps track of the number of bytes received during a computation to serve as a bandwidth measure local (Union[Unset, bool]): + progress (Union[Unset, int]): results (Union[Unset, List[str]]): Identifier(s) of the resulting data object(s). Available only when the status is completed. + visible (Union[Unset, bool]): False if the computation is internal and should not be displayed to the user by + default + errors (Union[Unset, List['ComputationError']]): list of errors that occurred during the computation + description (Union[Unset, str]): + egress (Union[Unset, int]): keeps track of the number of bytes sent during a computation to serve as a bandwidth + measure + owner (Union[Unset, str]): identifier of the end user that has requested the computation + created_at (Union[Unset, str]): + started_at (Union[Unset, str]): updated_at (Union[Unset, str]): + ended_at (Union[Unset, str]): """ - id: str - status: ComputationStatus definition: "ComputationDefinition" - owner: Union[Unset, str] = UNSET - progress: Union[Unset, int] = UNSET - started_at: Union[Unset, str] = UNSET - created_at: Union[Unset, str] = UNSET - description: Union[Unset, str] = UNSET - errors: Union[Unset, List["ComputationError"]] = UNSET + status: ComputationStatus + id: str measurements: Union[Unset, List["Measurement"]] = UNSET - ended_at: Union[Unset, str] = UNSET - visible: Union[Unset, bool] = UNSET - egress: Union[Unset, int] = UNSET ingress: Union[Unset, int] = UNSET local: Union[Unset, bool] = UNSET + progress: Union[Unset, int] = UNSET results: Union[Unset, List[str]] = UNSET + visible: Union[Unset, bool] = UNSET + errors: Union[Unset, List["ComputationError"]] = UNSET + description: Union[Unset, str] = UNSET + egress: Union[Unset, int] = UNSET + owner: Union[Unset, str] = UNSET + created_at: Union[Unset, str] = UNSET + started_at: Union[Unset, str] = UNSET updated_at: Union[Unset, str] = UNSET + ended_at: Union[Unset, str] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - id = self.id - status = self.status.value - definition = self.definition.to_dict() - owner = self.owner - progress = self.progress - started_at = self.started_at - created_at = self.created_at - description = self.description - errors: Union[Unset, List[Dict[str, Any]]] = UNSET - if not isinstance(self.errors, Unset): - errors = [] - for errors_item_data in self.errors: - errors_item = errors_item_data.to_dict() - - errors.append(errors_item) + status = self.status.value + id = self.id measurements: Union[Unset, List[Dict[str, Any]]] = UNSET if not isinstance(self.measurements, Unset): measurements = [] @@ -88,54 +75,67 @@ def to_dict(self) -> Dict[str, Any]: measurements.append(measurements_item) - ended_at = self.ended_at - visible = self.visible - egress = self.egress ingress = self.ingress local = self.local + progress = self.progress results: Union[Unset, List[str]] = UNSET if not isinstance(self.results, Unset): results = self.results + visible = self.visible + errors: Union[Unset, List[Dict[str, Any]]] = UNSET + if not isinstance(self.errors, Unset): + errors = [] 
+ for errors_item_data in self.errors: + errors_item = errors_item_data.to_dict() + + errors.append(errors_item) + + description = self.description + egress = self.egress + owner = self.owner + created_at = self.created_at + started_at = self.started_at updated_at = self.updated_at + ended_at = self.ended_at field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update( { - "id": id, - "status": status, "definition": definition, + "status": status, + "id": id, } ) - if owner is not UNSET: - field_dict["owner"] = owner - if progress is not UNSET: - field_dict["progress"] = progress - if started_at is not UNSET: - field_dict["startedAt"] = started_at - if created_at is not UNSET: - field_dict["createdAt"] = created_at - if description is not UNSET: - field_dict["description"] = description - if errors is not UNSET: - field_dict["errors"] = errors if measurements is not UNSET: field_dict["measurements"] = measurements - if ended_at is not UNSET: - field_dict["endedAt"] = ended_at - if visible is not UNSET: - field_dict["visible"] = visible - if egress is not UNSET: - field_dict["egress"] = egress if ingress is not UNSET: field_dict["ingress"] = ingress if local is not UNSET: field_dict["local"] = local + if progress is not UNSET: + field_dict["progress"] = progress if results is not UNSET: field_dict["results"] = results + if visible is not UNSET: + field_dict["visible"] = visible + if errors is not UNSET: + field_dict["errors"] = errors + if description is not UNSET: + field_dict["description"] = description + if egress is not UNSET: + field_dict["egress"] = egress + if owner is not UNSET: + field_dict["owner"] = owner + if created_at is not UNSET: + field_dict["createdAt"] = created_at + if started_at is not UNSET: + field_dict["startedAt"] = started_at if updated_at is not UNSET: field_dict["updatedAt"] = updated_at + if ended_at is not UNSET: + field_dict["endedAt"] = ended_at return field_dict @@ -146,21 +146,28 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.measurement import Measurement d = src_dict.copy() - id = d.pop("id") + definition = ComputationDefinition.from_dict(d.pop("definition")) status = ComputationStatus(d.pop("status")) - definition = ComputationDefinition.from_dict(d.pop("definition")) + id = d.pop("id") - owner = d.pop("owner", UNSET) + measurements = [] + _measurements = d.pop("measurements", UNSET) + for measurements_item_data in _measurements or []: + measurements_item = Measurement.from_dict(measurements_item_data) - progress = d.pop("progress", UNSET) + measurements.append(measurements_item) - started_at = d.pop("startedAt", UNSET) + ingress = d.pop("ingress", UNSET) - created_at = d.pop("createdAt", UNSET) + local = d.pop("local", UNSET) - description = d.pop("description", UNSET) + progress = d.pop("progress", UNSET) + + results = cast(List[str], d.pop("results", UNSET)) + + visible = d.pop("visible", UNSET) errors = [] _errors = d.pop("errors", UNSET) @@ -169,45 +176,38 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: errors.append(errors_item) - measurements = [] - _measurements = d.pop("measurements", UNSET) - for measurements_item_data in _measurements or []: - measurements_item = Measurement.from_dict(measurements_item_data) - - measurements.append(measurements_item) - - ended_at = d.pop("endedAt", UNSET) - - visible = d.pop("visible", UNSET) + description = d.pop("description", UNSET) egress = d.pop("egress", UNSET) - ingress = d.pop("ingress", UNSET) + owner = d.pop("owner", 
UNSET) - local = d.pop("local", UNSET) + created_at = d.pop("createdAt", UNSET) - results = cast(List[str], d.pop("results", UNSET)) + started_at = d.pop("startedAt", UNSET) updated_at = d.pop("updatedAt", UNSET) + ended_at = d.pop("endedAt", UNSET) + computation = cls( - id=id, - status=status, definition=definition, - owner=owner, - progress=progress, - started_at=started_at, - created_at=created_at, - description=description, - errors=errors, + status=status, + id=id, measurements=measurements, - ended_at=ended_at, - visible=visible, - egress=egress, ingress=ingress, local=local, + progress=progress, results=results, + visible=visible, + errors=errors, + description=description, + egress=egress, + owner=owner, + created_at=created_at, + started_at=started_at, updated_at=updated_at, + ended_at=ended_at, ) computation.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/computation_definition.py b/src/tuneinsight/api/sdk/models/computation_definition.py index 19c6e2d..2da14e5 100644 --- a/src/tuneinsight/api/sdk/models/computation_definition.py +++ b/src/tuneinsight/api/sdk/models/computation_definition.py @@ -21,81 +21,88 @@ class ComputationDefinition: Attributes: type (ComputationType): Type of the computation. - join_id (Union[Unset, str]): Unique identifier of a data object. + local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured + the network) local_input (Union[Unset, LocalInput]): If a local input is provided, the node initiating the computation will use it instead of querying the datasource. This data is *not* shared to other nodes, only used for the duration of the computation. The local input columns/values must be in the form {: [, , ...], ...} - wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. - input_data_object (Union[Unset, str]): Shared identifier of a data object. + preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters + applied to the input retrieved from the datasource, if applicable cohort_id (Union[Unset, str]): Unique identifier of a data object. + end_to_end_encrypted (Union[Unset, bool]): if the end to end encrypted mode is set to true, + then when release results is set to true and the output + is initially encrypted with a network collective key, then it is key switched to + the initiating user's public key. + local_input_id (Union[Unset, str]): Unique identifier of a data object. + wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. + dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various + disclosure prevention mechanisms + join_id (Union[Unset, str]): Unique identifier of a data object. + timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. + encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. + owner (Union[Unset, str]): The username of the end user who requested the computation. project_id (Union[Unset, str]): Unique identifier of a project. release_results (Union[Unset, bool]): flag to set to true if the computation should directly release the output results. 
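For reference, the Computation model a few hunks above is the metadata record tracked for a launched computation. Below is a data-only sketch of its wire keys as regrouped by this diff; the enum string values for "status" and the nested definition "type", as well as the timestamp format, are not part of this change, so they appear as placeholders.

computation_record = {
    "definition": {"type": "<computation-type>"},  # ComputationDefinition payload
    "status": "<computation-status>",              # ComputationStatus value
    "id": "<computation-id>",
    "progress": 100,
    "results": ["<data-object-id>"],    # available once the computation completed
    "egress": 2048,                     # bytes sent, as a bandwidth measure
    "ingress": 4096,                    # bytes received
    "createdAt": "<timestamp>",
    "startedAt": "<timestamp>",
    "endedAt": "<timestamp>",
}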
If set, then encrypted results are automatically key switched and decrypted and a Result entity is saved - dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various - disclosure prevention mechanisms - encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. - local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured - the network) - local_input_id (Union[Unset, str]): Unique identifier of a data object. - owner (Union[Unset, str]): The username of the end user who requested the computation. - preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters - applied to the input retrieved from the datasource, if applicable - timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. data_source_parameters (Union[Unset, ComputationDataSourceParameters]): Parameters used to query the datasource from each node before the computation + input_data_object (Union[Unset, str]): Shared identifier of a data object. """ type: ComputationType - join_id: Union[Unset, str] = UNSET + local: Union[Unset, bool] = UNSET local_input: Union[Unset, "LocalInput"] = UNSET - wait: Union[Unset, bool] = UNSET - input_data_object: Union[Unset, str] = UNSET + preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET cohort_id: Union[Unset, str] = UNSET - project_id: Union[Unset, str] = UNSET - release_results: Union[Unset, bool] = UNSET + end_to_end_encrypted: Union[Unset, bool] = UNSET + local_input_id: Union[Unset, str] = UNSET + wait: Union[Unset, bool] = UNSET dp_policy: Union[Unset, "DPPolicy"] = UNSET + join_id: Union[Unset, str] = UNSET + timeout: Union[Unset, int] = UNSET encrypted: Union[Unset, bool] = UNSET - local: Union[Unset, bool] = UNSET - local_input_id: Union[Unset, str] = UNSET owner: Union[Unset, str] = UNSET - preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET - timeout: Union[Unset, int] = UNSET + project_id: Union[Unset, str] = UNSET + release_results: Union[Unset, bool] = UNSET data_source_parameters: Union[Unset, "ComputationDataSourceParameters"] = UNSET + input_data_object: Union[Unset, str] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: type = self.type.value - join_id = self.join_id + local = self.local local_input: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.local_input, Unset): local_input = self.local_input.to_dict() - wait = self.wait - input_data_object = self.input_data_object + preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.preprocessing_parameters, Unset): + preprocessing_parameters = self.preprocessing_parameters.to_dict() + cohort_id = self.cohort_id - project_id = self.project_id - release_results = self.release_results + end_to_end_encrypted = self.end_to_end_encrypted + local_input_id = self.local_input_id + wait = self.wait dp_policy: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.dp_policy, Unset): dp_policy = self.dp_policy.to_dict() + join_id = self.join_id + timeout = self.timeout encrypted = self.encrypted - local = self.local - local_input_id = self.local_input_id owner = self.owner - preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.preprocessing_parameters, Unset): - 
preprocessing_parameters = self.preprocessing_parameters.to_dict() - - timeout = self.timeout + project_id = self.project_id + release_results = self.release_results data_source_parameters: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.data_source_parameters, Unset): data_source_parameters = self.data_source_parameters.to_dict() + input_data_object = self.input_data_object + field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update( @@ -103,36 +110,38 @@ def to_dict(self) -> Dict[str, Any]: "type": type, } ) - if join_id is not UNSET: - field_dict["joinId"] = join_id + if local is not UNSET: + field_dict["local"] = local if local_input is not UNSET: field_dict["localInput"] = local_input - if wait is not UNSET: - field_dict["wait"] = wait - if input_data_object is not UNSET: - field_dict["inputDataObject"] = input_data_object + if preprocessing_parameters is not UNSET: + field_dict["preprocessingParameters"] = preprocessing_parameters if cohort_id is not UNSET: field_dict["cohortId"] = cohort_id - if project_id is not UNSET: - field_dict["projectId"] = project_id - if release_results is not UNSET: - field_dict["releaseResults"] = release_results + if end_to_end_encrypted is not UNSET: + field_dict["endToEndEncrypted"] = end_to_end_encrypted + if local_input_id is not UNSET: + field_dict["localInputID"] = local_input_id + if wait is not UNSET: + field_dict["wait"] = wait if dp_policy is not UNSET: field_dict["DPPolicy"] = dp_policy + if join_id is not UNSET: + field_dict["joinId"] = join_id + if timeout is not UNSET: + field_dict["timeout"] = timeout if encrypted is not UNSET: field_dict["encrypted"] = encrypted - if local is not UNSET: - field_dict["local"] = local - if local_input_id is not UNSET: - field_dict["localInputID"] = local_input_id if owner is not UNSET: field_dict["owner"] = owner - if preprocessing_parameters is not UNSET: - field_dict["preprocessingParameters"] = preprocessing_parameters - if timeout is not UNSET: - field_dict["timeout"] = timeout + if project_id is not UNSET: + field_dict["projectId"] = project_id + if release_results is not UNSET: + field_dict["releaseResults"] = release_results if data_source_parameters is not UNSET: field_dict["dataSourceParameters"] = data_source_parameters + if input_data_object is not UNSET: + field_dict["inputDataObject"] = input_data_object return field_dict @@ -146,7 +155,7 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() type = ComputationType(d.pop("type")) - join_id = d.pop("joinId", UNSET) + local = d.pop("local", UNSET) _local_input = d.pop("localInput", UNSET) local_input: Union[Unset, LocalInput] @@ -155,15 +164,20 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: local_input = LocalInput.from_dict(_local_input) - wait = d.pop("wait", UNSET) - - input_data_object = d.pop("inputDataObject", UNSET) + _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) + preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] + if isinstance(_preprocessing_parameters, Unset): + preprocessing_parameters = UNSET + else: + preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) cohort_id = d.pop("cohortId", UNSET) - project_id = d.pop("projectId", UNSET) + end_to_end_encrypted = d.pop("endToEndEncrypted", UNSET) - release_results = d.pop("releaseResults", UNSET) + local_input_id = d.pop("localInputID", UNSET) + + wait = d.pop("wait", UNSET) _dp_policy = 
d.pop("DPPolicy", UNSET) dp_policy: Union[Unset, DPPolicy] @@ -172,22 +186,17 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: dp_policy = DPPolicy.from_dict(_dp_policy) - encrypted = d.pop("encrypted", UNSET) + join_id = d.pop("joinId", UNSET) - local = d.pop("local", UNSET) + timeout = d.pop("timeout", UNSET) - local_input_id = d.pop("localInputID", UNSET) + encrypted = d.pop("encrypted", UNSET) owner = d.pop("owner", UNSET) - _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) - preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] - if isinstance(_preprocessing_parameters, Unset): - preprocessing_parameters = UNSET - else: - preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) + project_id = d.pop("projectId", UNSET) - timeout = d.pop("timeout", UNSET) + release_results = d.pop("releaseResults", UNSET) _data_source_parameters = d.pop("dataSourceParameters", UNSET) data_source_parameters: Union[Unset, ComputationDataSourceParameters] @@ -196,23 +205,26 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: data_source_parameters = ComputationDataSourceParameters.from_dict(_data_source_parameters) + input_data_object = d.pop("inputDataObject", UNSET) + computation_definition = cls( type=type, - join_id=join_id, + local=local, local_input=local_input, - wait=wait, - input_data_object=input_data_object, + preprocessing_parameters=preprocessing_parameters, cohort_id=cohort_id, - project_id=project_id, - release_results=release_results, + end_to_end_encrypted=end_to_end_encrypted, + local_input_id=local_input_id, + wait=wait, dp_policy=dp_policy, + join_id=join_id, + timeout=timeout, encrypted=encrypted, - local=local, - local_input_id=local_input_id, owner=owner, - preprocessing_parameters=preprocessing_parameters, - timeout=timeout, + project_id=project_id, + release_results=release_results, data_source_parameters=data_source_parameters, + input_data_object=input_data_object, ) computation_definition.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/computation_error.py b/src/tuneinsight/api/sdk/models/computation_error.py index 07e9249..47423a6 100644 --- a/src/tuneinsight/api/sdk/models/computation_error.py +++ b/src/tuneinsight/api/sdk/models/computation_error.py @@ -13,47 +13,44 @@ class ComputationError: """error that occurred when running a computation Attributes: - message (Union[Unset, str]): the error message - origin (Union[Unset, str]): node instance id that caused the error timestamp (Union[Unset, str]): time at which the error ocurred type (Union[Unset, ComputationErrorType]): error type identifier + message (Union[Unset, str]): the error message + origin (Union[Unset, str]): node instance id that caused the error """ - message: Union[Unset, str] = UNSET - origin: Union[Unset, str] = UNSET timestamp: Union[Unset, str] = UNSET type: Union[Unset, ComputationErrorType] = UNSET + message: Union[Unset, str] = UNSET + origin: Union[Unset, str] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - message = self.message - origin = self.origin timestamp = self.timestamp type: Union[Unset, str] = UNSET if not isinstance(self.type, Unset): type = self.type.value + message = self.message + origin = self.origin + field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if message is not UNSET: - field_dict["message"] = message - if origin is 
not UNSET: - field_dict["origin"] = origin if timestamp is not UNSET: field_dict["timestamp"] = timestamp if type is not UNSET: field_dict["type"] = type + if message is not UNSET: + field_dict["message"] = message + if origin is not UNSET: + field_dict["origin"] = origin return field_dict @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() - message = d.pop("message", UNSET) - - origin = d.pop("origin", UNSET) - timestamp = d.pop("timestamp", UNSET) _type = d.pop("type", UNSET) @@ -63,11 +60,15 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: type = ComputationErrorType(_type) + message = d.pop("message", UNSET) + + origin = d.pop("origin", UNSET) + computation_error = cls( - message=message, - origin=origin, timestamp=timestamp, type=type, + message=message, + origin=origin, ) computation_error.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/computation_list_response.py b/src/tuneinsight/api/sdk/models/computation_list_response.py new file mode 100644 index 0000000..c2cb577 --- /dev/null +++ b/src/tuneinsight/api/sdk/models/computation_list_response.py @@ -0,0 +1,84 @@ +from typing import TYPE_CHECKING, Any, Dict, List, Type, TypeVar, Union + +import attr + +from ..types import UNSET, Unset + +if TYPE_CHECKING: + from ..models.computation import Computation + + +T = TypeVar("T", bound="ComputationListResponse") + + +@attr.s(auto_attribs=True) +class ComputationListResponse: + """List of available computations. + + Attributes: + items (Union[Unset, List['Computation']]): + total (Union[Unset, int]): + """ + + items: Union[Unset, List["Computation"]] = UNSET + total: Union[Unset, int] = UNSET + additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) + + def to_dict(self) -> Dict[str, Any]: + items: Union[Unset, List[Dict[str, Any]]] = UNSET + if not isinstance(self.items, Unset): + items = [] + for items_item_data in self.items: + items_item = items_item_data.to_dict() + + items.append(items_item) + + total = self.total + + field_dict: Dict[str, Any] = {} + field_dict.update(self.additional_properties) + field_dict.update({}) + if items is not UNSET: + field_dict["items"] = items + if total is not UNSET: + field_dict["total"] = total + + return field_dict + + @classmethod + def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: + from ..models.computation import Computation + + d = src_dict.copy() + items = [] + _items = d.pop("items", UNSET) + for items_item_data in _items or []: + items_item = Computation.from_dict(items_item_data) + + items.append(items_item) + + total = d.pop("total", UNSET) + + computation_list_response = cls( + items=items, + total=total, + ) + + computation_list_response.additional_properties = d + return computation_list_response + + @property + def additional_keys(self) -> List[str]: + return list(self.additional_properties.keys()) + + def __getitem__(self, key: str) -> Any: + return self.additional_properties[key] + + def __setitem__(self, key: str, value: Any) -> None: + self.additional_properties[key] = value + + def __delitem__(self, key: str) -> None: + del self.additional_properties[key] + + def __contains__(self, key: str) -> bool: + return key in self.additional_properties diff --git a/src/tuneinsight/api/sdk/models/computation_policy.py b/src/tuneinsight/api/sdk/models/computation_policy.py index db93865..80da139 100644 --- a/src/tuneinsight/api/sdk/models/computation_policy.py +++ b/src/tuneinsight/api/sdk/models/computation_policy.py @@ -19,60 
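ComputationListResponse, added above as a new model, is a thin wrapper around a paginated list of Computation entries. A minimal round-trip sketch, assuming the generated module path tuneinsight.api.sdk.models.computation_list_response and a hypothetical empty listing:

from tuneinsight.api.sdk.models.computation_list_response import ComputationListResponse

# Hypothetical response body for a computation listing endpoint; any entries in
# "items" would themselves have to be valid Computation dicts (with "definition",
# "status" and "id").
payload = {"items": [], "total": 0}

listing = ComputationListResponse.from_dict(payload)
print(listing.total)      # 0
print(listing.to_dict())  # {'items': [], 'total': 0}

Unknown keys in the payload are kept in additional_properties rather than dropped, which matches the behaviour of the other generated models.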
+19,58 @@ class ComputationPolicy: """policy to validate a specific computation Attributes: - flexible_parameters (Union[Unset, List[str]]): when validateParameters is enabled, specifies the set of - parameters for which to ignore validation + authorized_computation_types (Union[Unset, List[ComputationType]]): list of authorized computation types + fixed_parameters (Union[Unset, List[str]]): when validateParameters is enabled, specifies the set of parameters + that cannot be changed if empty, then all parameters are validated template (Union[Unset, ComputationDefinition]): Generic computation. validate_parameters (Union[Unset, bool]): whether or not to validate the parameters with the ones from the template - restrict_data_source_queries (Union[Unset, bool]): whether or not datasource queries should be restricted - restrict_preprocessing_operations (Union[Unset, bool]): whether or not datasource queries should be restricted dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various disclosure prevention mechanisms - authorized_computation_types (Union[Unset, List[ComputationType]]): list of authorized computation types authorized_data_source_queries (Union[Unset, List[str]]): list of authorized datasource queries when restrictDataSourceQueries is set to true authorized_preprocessing_operations (Union[Unset, List[PreprocessingOperationType]]): list of authorized preprocessing operations types when restrictPreprocessingOperations is set to true - fixed_parameters (Union[Unset, List[str]]): when validateParameters is enabled, specifies the set of parameters - that cannot be changed if empty, then all parameters are validated + flexible_parameters (Union[Unset, List[str]]): when validateParameters is enabled, specifies the set of + parameters for which to ignore validation + restrict_data_source_queries (Union[Unset, bool]): whether or not datasource queries should be restricted + restrict_preprocessing_operations (Union[Unset, bool]): whether or not datasource queries should be restricted """ - flexible_parameters: Union[Unset, List[str]] = UNSET + authorized_computation_types: Union[Unset, List[ComputationType]] = UNSET + fixed_parameters: Union[Unset, List[str]] = UNSET template: Union[Unset, "ComputationDefinition"] = UNSET validate_parameters: Union[Unset, bool] = UNSET - restrict_data_source_queries: Union[Unset, bool] = UNSET - restrict_preprocessing_operations: Union[Unset, bool] = UNSET dp_policy: Union[Unset, "DPPolicy"] = UNSET - authorized_computation_types: Union[Unset, List[ComputationType]] = UNSET authorized_data_source_queries: Union[Unset, List[str]] = UNSET authorized_preprocessing_operations: Union[Unset, List[PreprocessingOperationType]] = UNSET - fixed_parameters: Union[Unset, List[str]] = UNSET + flexible_parameters: Union[Unset, List[str]] = UNSET + restrict_data_source_queries: Union[Unset, bool] = UNSET + restrict_preprocessing_operations: Union[Unset, bool] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - flexible_parameters: Union[Unset, List[str]] = UNSET - if not isinstance(self.flexible_parameters, Unset): - flexible_parameters = self.flexible_parameters + authorized_computation_types: Union[Unset, List[str]] = UNSET + if not isinstance(self.authorized_computation_types, Unset): + authorized_computation_types = [] + for authorized_computation_types_item_data in self.authorized_computation_types: + authorized_computation_types_item = 
authorized_computation_types_item_data.value + + authorized_computation_types.append(authorized_computation_types_item) + + fixed_parameters: Union[Unset, List[str]] = UNSET + if not isinstance(self.fixed_parameters, Unset): + fixed_parameters = self.fixed_parameters template: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.template, Unset): template = self.template.to_dict() validate_parameters = self.validate_parameters - restrict_data_source_queries = self.restrict_data_source_queries - restrict_preprocessing_operations = self.restrict_preprocessing_operations dp_policy: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.dp_policy, Unset): dp_policy = self.dp_policy.to_dict() - authorized_computation_types: Union[Unset, List[str]] = UNSET - if not isinstance(self.authorized_computation_types, Unset): - authorized_computation_types = [] - for authorized_computation_types_item_data in self.authorized_computation_types: - authorized_computation_types_item = authorized_computation_types_item_data.value - - authorized_computation_types.append(authorized_computation_types_item) - authorized_data_source_queries: Union[Unset, List[str]] = UNSET if not isinstance(self.authorized_data_source_queries, Unset): authorized_data_source_queries = self.authorized_data_source_queries @@ -85,33 +83,36 @@ def to_dict(self) -> Dict[str, Any]: authorized_preprocessing_operations.append(authorized_preprocessing_operations_item) - fixed_parameters: Union[Unset, List[str]] = UNSET - if not isinstance(self.fixed_parameters, Unset): - fixed_parameters = self.fixed_parameters + flexible_parameters: Union[Unset, List[str]] = UNSET + if not isinstance(self.flexible_parameters, Unset): + flexible_parameters = self.flexible_parameters + + restrict_data_source_queries = self.restrict_data_source_queries + restrict_preprocessing_operations = self.restrict_preprocessing_operations field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if flexible_parameters is not UNSET: - field_dict["flexibleParameters"] = flexible_parameters + if authorized_computation_types is not UNSET: + field_dict["authorizedComputationTypes"] = authorized_computation_types + if fixed_parameters is not UNSET: + field_dict["fixedParameters"] = fixed_parameters if template is not UNSET: field_dict["template"] = template if validate_parameters is not UNSET: field_dict["validateParameters"] = validate_parameters - if restrict_data_source_queries is not UNSET: - field_dict["restrictDataSourceQueries"] = restrict_data_source_queries - if restrict_preprocessing_operations is not UNSET: - field_dict["restrictPreprocessingOperations"] = restrict_preprocessing_operations if dp_policy is not UNSET: field_dict["DPPolicy"] = dp_policy - if authorized_computation_types is not UNSET: - field_dict["authorizedComputationTypes"] = authorized_computation_types if authorized_data_source_queries is not UNSET: field_dict["authorizedDataSourceQueries"] = authorized_data_source_queries if authorized_preprocessing_operations is not UNSET: field_dict["authorizedPreprocessingOperations"] = authorized_preprocessing_operations - if fixed_parameters is not UNSET: - field_dict["fixedParameters"] = fixed_parameters + if flexible_parameters is not UNSET: + field_dict["flexibleParameters"] = flexible_parameters + if restrict_data_source_queries is not UNSET: + field_dict["restrictDataSourceQueries"] = restrict_data_source_queries + if restrict_preprocessing_operations is not UNSET: + 
field_dict["restrictPreprocessingOperations"] = restrict_preprocessing_operations return field_dict @@ -121,7 +122,14 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.dp_policy import DPPolicy d = src_dict.copy() - flexible_parameters = cast(List[str], d.pop("flexibleParameters", UNSET)) + authorized_computation_types = [] + _authorized_computation_types = d.pop("authorizedComputationTypes", UNSET) + for authorized_computation_types_item_data in _authorized_computation_types or []: + authorized_computation_types_item = ComputationType(authorized_computation_types_item_data) + + authorized_computation_types.append(authorized_computation_types_item) + + fixed_parameters = cast(List[str], d.pop("fixedParameters", UNSET)) _template = d.pop("template", UNSET) template: Union[Unset, ComputationDefinition] @@ -132,10 +140,6 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: validate_parameters = d.pop("validateParameters", UNSET) - restrict_data_source_queries = d.pop("restrictDataSourceQueries", UNSET) - - restrict_preprocessing_operations = d.pop("restrictPreprocessingOperations", UNSET) - _dp_policy = d.pop("DPPolicy", UNSET) dp_policy: Union[Unset, DPPolicy] if isinstance(_dp_policy, Unset): @@ -143,13 +147,6 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: dp_policy = DPPolicy.from_dict(_dp_policy) - authorized_computation_types = [] - _authorized_computation_types = d.pop("authorizedComputationTypes", UNSET) - for authorized_computation_types_item_data in _authorized_computation_types or []: - authorized_computation_types_item = ComputationType(authorized_computation_types_item_data) - - authorized_computation_types.append(authorized_computation_types_item) - authorized_data_source_queries = cast(List[str], d.pop("authorizedDataSourceQueries", UNSET)) authorized_preprocessing_operations = [] @@ -161,19 +158,23 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: authorized_preprocessing_operations.append(authorized_preprocessing_operations_item) - fixed_parameters = cast(List[str], d.pop("fixedParameters", UNSET)) + flexible_parameters = cast(List[str], d.pop("flexibleParameters", UNSET)) + + restrict_data_source_queries = d.pop("restrictDataSourceQueries", UNSET) + + restrict_preprocessing_operations = d.pop("restrictPreprocessingOperations", UNSET) computation_policy = cls( - flexible_parameters=flexible_parameters, + authorized_computation_types=authorized_computation_types, + fixed_parameters=fixed_parameters, template=template, validate_parameters=validate_parameters, - restrict_data_source_queries=restrict_data_source_queries, - restrict_preprocessing_operations=restrict_preprocessing_operations, dp_policy=dp_policy, - authorized_computation_types=authorized_computation_types, authorized_data_source_queries=authorized_data_source_queries, authorized_preprocessing_operations=authorized_preprocessing_operations, - fixed_parameters=fixed_parameters, + flexible_parameters=flexible_parameters, + restrict_data_source_queries=restrict_data_source_queries, + restrict_preprocessing_operations=restrict_preprocessing_operations, ) computation_policy.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/computation_preprocessing_parameters.py b/src/tuneinsight/api/sdk/models/computation_preprocessing_parameters.py index 16ad382..b05ea93 100644 --- a/src/tuneinsight/api/sdk/models/computation_preprocessing_parameters.py +++ b/src/tuneinsight/api/sdk/models/computation_preprocessing_parameters.py @@ -22,24 +22,32 
@@ class ComputationPreprocessingParameters: """dataframe pre-processing parameters applied to the input retrieved from the datasource, if applicable Attributes: + global_preprocessing (Union[Unset, PreprocessingChain]): Chain of preprocessing operations applied to the input + dataframe + select (Union[Unset, Select]): compound_preprocessing (Union[Unset, ComputationPreprocessingParametersCompoundPreprocessing]): preprocessing to be applied for each node dataset_schema (Union[Unset, DatasetSchema]): dataset schema definition used to validate input datasets. filters (Union[Unset, List['LogicalFormula']]): list of filters to apply to the input dataframe (applied after the preprocessing is run) - global_preprocessing (Union[Unset, PreprocessingChain]): Chain of preprocessing operations applied to the input - dataframe - select (Union[Unset, Select]): """ + global_preprocessing: Union[Unset, "PreprocessingChain"] = UNSET + select: Union[Unset, "Select"] = UNSET compound_preprocessing: Union[Unset, "ComputationPreprocessingParametersCompoundPreprocessing"] = UNSET dataset_schema: Union[Unset, "DatasetSchema"] = UNSET filters: Union[Unset, List["LogicalFormula"]] = UNSET - global_preprocessing: Union[Unset, "PreprocessingChain"] = UNSET - select: Union[Unset, "Select"] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: + global_preprocessing: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.global_preprocessing, Unset): + global_preprocessing = self.global_preprocessing.to_dict() + + select: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.select, Unset): + select = self.select.to_dict() + compound_preprocessing: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.compound_preprocessing, Unset): compound_preprocessing = self.compound_preprocessing.to_dict() @@ -56,27 +64,19 @@ def to_dict(self) -> Dict[str, Any]: filters.append(filters_item) - global_preprocessing: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.global_preprocessing, Unset): - global_preprocessing = self.global_preprocessing.to_dict() - - select: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.select, Unset): - select = self.select.to_dict() - field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) + if global_preprocessing is not UNSET: + field_dict["globalPreprocessing"] = global_preprocessing + if select is not UNSET: + field_dict["select"] = select if compound_preprocessing is not UNSET: field_dict["compoundPreprocessing"] = compound_preprocessing if dataset_schema is not UNSET: field_dict["datasetSchema"] = dataset_schema if filters is not UNSET: field_dict["filters"] = filters - if global_preprocessing is not UNSET: - field_dict["globalPreprocessing"] = global_preprocessing - if select is not UNSET: - field_dict["select"] = select return field_dict @@ -91,6 +91,20 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.select import Select d = src_dict.copy() + _global_preprocessing = d.pop("globalPreprocessing", UNSET) + global_preprocessing: Union[Unset, PreprocessingChain] + if isinstance(_global_preprocessing, Unset): + global_preprocessing = UNSET + else: + global_preprocessing = PreprocessingChain.from_dict(_global_preprocessing) + + _select = d.pop("select", UNSET) + select: Union[Unset, Select] + if isinstance(_select, Unset): + select = UNSET + else: + select = Select.from_dict(_select) + _compound_preprocessing = 
d.pop("compoundPreprocessing", UNSET) compound_preprocessing: Union[Unset, ComputationPreprocessingParametersCompoundPreprocessing] if isinstance(_compound_preprocessing, Unset): @@ -114,26 +128,12 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: filters.append(filters_item) - _global_preprocessing = d.pop("globalPreprocessing", UNSET) - global_preprocessing: Union[Unset, PreprocessingChain] - if isinstance(_global_preprocessing, Unset): - global_preprocessing = UNSET - else: - global_preprocessing = PreprocessingChain.from_dict(_global_preprocessing) - - _select = d.pop("select", UNSET) - select: Union[Unset, Select] - if isinstance(_select, Unset): - select = UNSET - else: - select = Select.from_dict(_select) - computation_preprocessing_parameters = cls( + global_preprocessing=global_preprocessing, + select=select, compound_preprocessing=compound_preprocessing, dataset_schema=dataset_schema, filters=filters, - global_preprocessing=global_preprocessing, - select=select, ) computation_preprocessing_parameters.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/computation_type.py b/src/tuneinsight/api/sdk/models/computation_type.py index d71aa78..90bdf2d 100644 --- a/src/tuneinsight/api/sdk/models/computation_type.py +++ b/src/tuneinsight/api/sdk/models/computation_type.py @@ -25,6 +25,7 @@ class ComputationType(str, Enum): DATASETSTATISTICS = "datasetStatistics" PRIVATESEARCH = "privateSearch" PRIVATESEARCHSETUP = "privateSearchSetup" + ENCRYPTEDMEAN = "encryptedMean" def __str__(self) -> str: return str(self.value) diff --git a/src/tuneinsight/api/sdk/models/credentials.py b/src/tuneinsight/api/sdk/models/credentials.py index 7a2302e..e5ede35 100644 --- a/src/tuneinsight/api/sdk/models/credentials.py +++ b/src/tuneinsight/api/sdk/models/credentials.py @@ -12,54 +12,54 @@ class Credentials: """The credentials needed to access the data source. 
Attributes: - password (Union[Unset, str]): - username (Union[Unset, str]): connection_string (Union[Unset, str]): id (Union[Unset, str]): + password (Union[Unset, str]): + username (Union[Unset, str]): """ - password: Union[Unset, str] = UNSET - username: Union[Unset, str] = UNSET connection_string: Union[Unset, str] = UNSET id: Union[Unset, str] = UNSET + password: Union[Unset, str] = UNSET + username: Union[Unset, str] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - password = self.password - username = self.username connection_string = self.connection_string id = self.id + password = self.password + username = self.username field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if password is not UNSET: - field_dict["password"] = password - if username is not UNSET: - field_dict["username"] = username if connection_string is not UNSET: field_dict["connectionString"] = connection_string if id is not UNSET: field_dict["id"] = id + if password is not UNSET: + field_dict["password"] = password + if username is not UNSET: + field_dict["username"] = username return field_dict @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() - password = d.pop("password", UNSET) - - username = d.pop("username", UNSET) - connection_string = d.pop("connectionString", UNSET) id = d.pop("id", UNSET) + password = d.pop("password", UNSET) + + username = d.pop("username", UNSET) + credentials = cls( - password=password, - username=username, connection_string=connection_string, id=id, + password=password, + username=username, ) credentials.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/cut.py b/src/tuneinsight/api/sdk/models/cut.py index 13c44cb..202b682 100644 --- a/src/tuneinsight/api/sdk/models/cut.py +++ b/src/tuneinsight/api/sdk/models/cut.py @@ -13,32 +13,32 @@ class Cut: """ Attributes: type (PreprocessingOperationType): type of preprocessing operation - cuts (Union[Unset, List[float]]): cuts to use - input_ (Union[Unset, str]): column to use as input labels (Union[Unset, List[str]]): labels to use for the cuts output (Union[Unset, str]): column to use as output + cuts (Union[Unset, List[float]]): cuts to use + input_ (Union[Unset, str]): column to use as input """ type: PreprocessingOperationType - cuts: Union[Unset, List[float]] = UNSET - input_: Union[Unset, str] = UNSET labels: Union[Unset, List[str]] = UNSET output: Union[Unset, str] = UNSET + cuts: Union[Unset, List[float]] = UNSET + input_: Union[Unset, str] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: type = self.type.value - cuts: Union[Unset, List[float]] = UNSET - if not isinstance(self.cuts, Unset): - cuts = self.cuts - - input_ = self.input_ labels: Union[Unset, List[str]] = UNSET if not isinstance(self.labels, Unset): labels = self.labels output = self.output + cuts: Union[Unset, List[float]] = UNSET + if not isinstance(self.cuts, Unset): + cuts = self.cuts + + input_ = self.input_ field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) @@ -47,14 +47,14 @@ def to_dict(self) -> Dict[str, Any]: "type": type, } ) - if cuts is not UNSET: - field_dict["cuts"] = cuts - if input_ is not UNSET: - field_dict["input"] = input_ if labels is not UNSET: field_dict["labels"] = labels if output is not UNSET: field_dict["output"] = output + if cuts is not UNSET: + field_dict["cuts"] = 
cuts + if input_ is not UNSET: + field_dict["input"] = input_ return field_dict @@ -63,20 +63,20 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() type = PreprocessingOperationType(d.pop("type")) - cuts = cast(List[float], d.pop("cuts", UNSET)) - - input_ = d.pop("input", UNSET) - labels = cast(List[str], d.pop("labels", UNSET)) output = d.pop("output", UNSET) + cuts = cast(List[float], d.pop("cuts", UNSET)) + + input_ = d.pop("input", UNSET) + cut = cls( type=type, - cuts=cuts, - input_=input_, labels=labels, output=output, + cuts=cuts, + input_=input_, ) cut.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/data_object.py b/src/tuneinsight/api/sdk/models/data_object.py index 8b17fd0..dca2b79 100644 --- a/src/tuneinsight/api/sdk/models/data_object.py +++ b/src/tuneinsight/api/sdk/models/data_object.py @@ -14,25 +14,27 @@ class DataObject: """A data object definition. Attributes: + encrypted (Union[Unset, bool]): + has_data (Union[Unset, bool]): whether the dataobject's data has been set session_id (Union[Unset, str]): Unique identifier of a session shared_id (Union[Unset, str]): Shared identifier of a data object. type (Union[Unset, DataObjectType]): type of the dataobject unique_id (Union[Unset, str]): Unique identifier of a data object. visibility_status (Union[Unset, DataObjectVisibilityStatus]): type of visibility set to the dataobject - encrypted (Union[Unset, bool]): - has_data (Union[Unset, bool]): whether the dataobject's data has been set """ + encrypted: Union[Unset, bool] = UNSET + has_data: Union[Unset, bool] = UNSET session_id: Union[Unset, str] = UNSET shared_id: Union[Unset, str] = UNSET type: Union[Unset, DataObjectType] = UNSET unique_id: Union[Unset, str] = UNSET visibility_status: Union[Unset, DataObjectVisibilityStatus] = UNSET - encrypted: Union[Unset, bool] = UNSET - has_data: Union[Unset, bool] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: + encrypted = self.encrypted + has_data = self.has_data session_id = self.session_id shared_id = self.shared_id type: Union[Unset, str] = UNSET @@ -44,12 +46,13 @@ def to_dict(self) -> Dict[str, Any]: if not isinstance(self.visibility_status, Unset): visibility_status = self.visibility_status.value - encrypted = self.encrypted - has_data = self.has_data - field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) + if encrypted is not UNSET: + field_dict["encrypted"] = encrypted + if has_data is not UNSET: + field_dict["hasData"] = has_data if session_id is not UNSET: field_dict["sessionId"] = session_id if shared_id is not UNSET: @@ -60,16 +63,16 @@ def to_dict(self) -> Dict[str, Any]: field_dict["uniqueId"] = unique_id if visibility_status is not UNSET: field_dict["visibilityStatus"] = visibility_status - if encrypted is not UNSET: - field_dict["encrypted"] = encrypted - if has_data is not UNSET: - field_dict["hasData"] = has_data return field_dict @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() + encrypted = d.pop("encrypted", UNSET) + + has_data = d.pop("hasData", UNSET) + session_id = d.pop("sessionId", UNSET) shared_id = d.pop("sharedId", UNSET) @@ -90,18 +93,14 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: visibility_status = DataObjectVisibilityStatus(_visibility_status) - encrypted = d.pop("encrypted", UNSET) - - has_data = d.pop("hasData", UNSET) - data_object = cls( + encrypted=encrypted, 
+ has_data=has_data, session_id=session_id, shared_id=shared_id, type=type, unique_id=unique_id, visibility_status=visibility_status, - encrypted=encrypted, - has_data=has_data, ) data_object.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/data_source.py b/src/tuneinsight/api/sdk/models/data_source.py index a752719..70857ae 100644 --- a/src/tuneinsight/api/sdk/models/data_source.py +++ b/src/tuneinsight/api/sdk/models/data_source.py @@ -18,38 +18,34 @@ class DataSource: """ Attributes: - access_scope (Union[Unset, AccessScope]): defines the scope of access given to a resource attributes (Union[Unset, List[str]]): authorized_users (Union[Unset, List[str]]): consent_type (Union[Unset, DataSourceConsentType]): Consent type given to the data source. name (Union[Unset, str]): type (Union[Unset, str]): unique_id (Union[Unset, None, str]): Unique identifier of a data source. + access_scope (Union[Unset, AccessScope]): defines the scope of access given to a resource + updated_at (Union[Unset, str]): created_at (Union[Unset, str]): metadata (Union[Unset, DataSourceMetadata]): metadata about a datasource selections (Union[Unset, List['LocalDataSelection']]): list of local data selections associated with the data source - updated_at (Union[Unset, str]): """ - access_scope: Union[Unset, AccessScope] = UNSET attributes: Union[Unset, List[str]] = UNSET authorized_users: Union[Unset, List[str]] = UNSET consent_type: Union[Unset, DataSourceConsentType] = UNSET name: Union[Unset, str] = UNSET type: Union[Unset, str] = UNSET unique_id: Union[Unset, None, str] = UNSET + access_scope: Union[Unset, AccessScope] = UNSET + updated_at: Union[Unset, str] = UNSET created_at: Union[Unset, str] = UNSET metadata: Union[Unset, "DataSourceMetadata"] = UNSET selections: Union[Unset, List["LocalDataSelection"]] = UNSET - updated_at: Union[Unset, str] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - access_scope: Union[Unset, str] = UNSET - if not isinstance(self.access_scope, Unset): - access_scope = self.access_scope.value - attributes: Union[Unset, List[str]] = UNSET if not isinstance(self.attributes, Unset): attributes = self.attributes @@ -65,6 +61,11 @@ def to_dict(self) -> Dict[str, Any]: name = self.name type = self.type unique_id = self.unique_id + access_scope: Union[Unset, str] = UNSET + if not isinstance(self.access_scope, Unset): + access_scope = self.access_scope.value + + updated_at = self.updated_at created_at = self.created_at metadata: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.metadata, Unset): @@ -78,13 +79,9 @@ def to_dict(self) -> Dict[str, Any]: selections.append(selections_item) - updated_at = self.updated_at - field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if access_scope is not UNSET: - field_dict["accessScope"] = access_scope if attributes is not UNSET: field_dict["attributes"] = attributes if authorized_users is not UNSET: @@ -97,14 +94,16 @@ def to_dict(self) -> Dict[str, Any]: field_dict["type"] = type if unique_id is not UNSET: field_dict["uniqueId"] = unique_id + if access_scope is not UNSET: + field_dict["accessScope"] = access_scope + if updated_at is not UNSET: + field_dict["updatedAt"] = updated_at if created_at is not UNSET: field_dict["createdAt"] = created_at if metadata is not UNSET: field_dict["metadata"] = metadata if selections is not UNSET: field_dict["selections"] = selections - if updated_at is not UNSET: - 
field_dict["updatedAt"] = updated_at return field_dict @@ -114,13 +113,6 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.local_data_selection import LocalDataSelection d = src_dict.copy() - _access_scope = d.pop("accessScope", UNSET) - access_scope: Union[Unset, AccessScope] - if isinstance(_access_scope, Unset): - access_scope = UNSET - else: - access_scope = AccessScope(_access_scope) - attributes = cast(List[str], d.pop("attributes", UNSET)) authorized_users = cast(List[str], d.pop("authorizedUsers", UNSET)) @@ -138,6 +130,15 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: unique_id = d.pop("uniqueId", UNSET) + _access_scope = d.pop("accessScope", UNSET) + access_scope: Union[Unset, AccessScope] + if isinstance(_access_scope, Unset): + access_scope = UNSET + else: + access_scope = AccessScope(_access_scope) + + updated_at = d.pop("updatedAt", UNSET) + created_at = d.pop("createdAt", UNSET) _metadata = d.pop("metadata", UNSET) @@ -154,20 +155,18 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: selections.append(selections_item) - updated_at = d.pop("updatedAt", UNSET) - data_source = cls( - access_scope=access_scope, attributes=attributes, authorized_users=authorized_users, consent_type=consent_type, name=name, type=type, unique_id=unique_id, + access_scope=access_scope, + updated_at=updated_at, created_at=created_at, metadata=metadata, selections=selections, - updated_at=updated_at, ) data_source.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/data_source_base.py b/src/tuneinsight/api/sdk/models/data_source_base.py index a093314..fe01218 100644 --- a/src/tuneinsight/api/sdk/models/data_source_base.py +++ b/src/tuneinsight/api/sdk/models/data_source_base.py @@ -14,29 +14,25 @@ class DataSourceBase: """Common fields for a data source GET/POST Attributes: - access_scope (Union[Unset, AccessScope]): defines the scope of access given to a resource attributes (Union[Unset, List[str]]): authorized_users (Union[Unset, List[str]]): consent_type (Union[Unset, DataSourceConsentType]): Consent type given to the data source. name (Union[Unset, str]): type (Union[Unset, str]): unique_id (Union[Unset, None, str]): Unique identifier of a data source. 
+ access_scope (Union[Unset, AccessScope]): defines the scope of access given to a resource """ - access_scope: Union[Unset, AccessScope] = UNSET attributes: Union[Unset, List[str]] = UNSET authorized_users: Union[Unset, List[str]] = UNSET consent_type: Union[Unset, DataSourceConsentType] = UNSET name: Union[Unset, str] = UNSET type: Union[Unset, str] = UNSET unique_id: Union[Unset, None, str] = UNSET + access_scope: Union[Unset, AccessScope] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - access_scope: Union[Unset, str] = UNSET - if not isinstance(self.access_scope, Unset): - access_scope = self.access_scope.value - attributes: Union[Unset, List[str]] = UNSET if not isinstance(self.attributes, Unset): attributes = self.attributes @@ -52,12 +48,13 @@ def to_dict(self) -> Dict[str, Any]: name = self.name type = self.type unique_id = self.unique_id + access_scope: Union[Unset, str] = UNSET + if not isinstance(self.access_scope, Unset): + access_scope = self.access_scope.value field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if access_scope is not UNSET: - field_dict["accessScope"] = access_scope if attributes is not UNSET: field_dict["attributes"] = attributes if authorized_users is not UNSET: @@ -70,19 +67,14 @@ def to_dict(self) -> Dict[str, Any]: field_dict["type"] = type if unique_id is not UNSET: field_dict["uniqueId"] = unique_id + if access_scope is not UNSET: + field_dict["accessScope"] = access_scope return field_dict @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() - _access_scope = d.pop("accessScope", UNSET) - access_scope: Union[Unset, AccessScope] - if isinstance(_access_scope, Unset): - access_scope = UNSET - else: - access_scope = AccessScope(_access_scope) - attributes = cast(List[str], d.pop("attributes", UNSET)) authorized_users = cast(List[str], d.pop("authorizedUsers", UNSET)) @@ -100,14 +92,21 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: unique_id = d.pop("uniqueId", UNSET) + _access_scope = d.pop("accessScope", UNSET) + access_scope: Union[Unset, AccessScope] + if isinstance(_access_scope, Unset): + access_scope = UNSET + else: + access_scope = AccessScope(_access_scope) + data_source_base = cls( - access_scope=access_scope, attributes=attributes, authorized_users=authorized_users, consent_type=consent_type, name=name, type=type, unique_id=unique_id, + access_scope=access_scope, ) data_source_base.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/data_source_column.py b/src/tuneinsight/api/sdk/models/data_source_column.py index 9a2d3a4..d0ff02a 100644 --- a/src/tuneinsight/api/sdk/models/data_source_column.py +++ b/src/tuneinsight/api/sdk/models/data_source_column.py @@ -13,40 +13,43 @@ class DataSourceColumn: """column of a datasource includes name and type Attributes: - type_group (Union[Unset, ColumnTypeGroup]): represents a type group indicating the way the data may actually be - processed name (Union[Unset, str]): type (Union[Unset, str]): + type_group (Union[Unset, ColumnTypeGroup]): represents a type group indicating the way the data may actually be + processed """ - type_group: Union[Unset, ColumnTypeGroup] = UNSET name: Union[Unset, str] = UNSET type: Union[Unset, str] = UNSET + type_group: Union[Unset, ColumnTypeGroup] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: + name = self.name 
+ type = self.type type_group: Union[Unset, str] = UNSET if not isinstance(self.type_group, Unset): type_group = self.type_group.value - name = self.name - type = self.type - field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if type_group is not UNSET: - field_dict["typeGroup"] = type_group if name is not UNSET: field_dict["name"] = name if type is not UNSET: field_dict["type"] = type + if type_group is not UNSET: + field_dict["typeGroup"] = type_group return field_dict @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() + name = d.pop("name", UNSET) + + type = d.pop("type", UNSET) + _type_group = d.pop("typeGroup", UNSET) type_group: Union[Unset, ColumnTypeGroup] if isinstance(_type_group, Unset): @@ -54,14 +57,10 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: type_group = ColumnTypeGroup(_type_group) - name = d.pop("name", UNSET) - - type = d.pop("type", UNSET) - data_source_column = cls( - type_group=type_group, name=name, type=type, + type_group=type_group, ) data_source_column.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/data_source_definition.py b/src/tuneinsight/api/sdk/models/data_source_definition.py index 7ad6cdf..78a225d 100644 --- a/src/tuneinsight/api/sdk/models/data_source_definition.py +++ b/src/tuneinsight/api/sdk/models/data_source_definition.py @@ -18,36 +18,32 @@ class DataSourceDefinition: """ Attributes: - access_scope (Union[Unset, AccessScope]): defines the scope of access given to a resource attributes (Union[Unset, List[str]]): authorized_users (Union[Unset, List[str]]): consent_type (Union[Unset, DataSourceConsentType]): Consent type given to the data source. name (Union[Unset, str]): type (Union[Unset, str]): unique_id (Union[Unset, None, str]): Unique identifier of a data source. + access_scope (Union[Unset, AccessScope]): defines the scope of access given to a resource clear_if_exists (Union[Unset, bool]): If true and a data source with the same name already exists, delete it. config (Union[Unset, DataSourceConfig]): Configuration of data source that depends on the type. credentials_provider (Union[Unset, CredentialsProvider]): The provider of the credentials needed to access the data source. 
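DataSourceDefinition likewise drops unset fields from the serialized payload, and clear_if_exists defaults to False. A minimal sketch with hypothetical name and type values, assuming the generated module path tuneinsight.api.sdk.models.data_source_definition:

from tuneinsight.api.sdk.models.data_source_definition import DataSourceDefinition

# Hypothetical local data source named "patients"; accessScope, config and
# credentialsProvider are left UNSET and do not appear in the output.
definition = DataSourceDefinition(name="patients", type="local", clear_if_exists=True)
print(definition.to_dict())
# {'name': 'patients', 'type': 'local', 'clearIfExists': True}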
""" - access_scope: Union[Unset, AccessScope] = UNSET attributes: Union[Unset, List[str]] = UNSET authorized_users: Union[Unset, List[str]] = UNSET consent_type: Union[Unset, DataSourceConsentType] = UNSET name: Union[Unset, str] = UNSET type: Union[Unset, str] = UNSET unique_id: Union[Unset, None, str] = UNSET + access_scope: Union[Unset, AccessScope] = UNSET clear_if_exists: Union[Unset, bool] = False config: Union[Unset, "DataSourceConfig"] = UNSET credentials_provider: Union[Unset, "CredentialsProvider"] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - access_scope: Union[Unset, str] = UNSET - if not isinstance(self.access_scope, Unset): - access_scope = self.access_scope.value - attributes: Union[Unset, List[str]] = UNSET if not isinstance(self.attributes, Unset): attributes = self.attributes @@ -63,6 +59,10 @@ def to_dict(self) -> Dict[str, Any]: name = self.name type = self.type unique_id = self.unique_id + access_scope: Union[Unset, str] = UNSET + if not isinstance(self.access_scope, Unset): + access_scope = self.access_scope.value + clear_if_exists = self.clear_if_exists config: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.config, Unset): @@ -75,8 +75,6 @@ def to_dict(self) -> Dict[str, Any]: field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if access_scope is not UNSET: - field_dict["accessScope"] = access_scope if attributes is not UNSET: field_dict["attributes"] = attributes if authorized_users is not UNSET: @@ -89,6 +87,8 @@ def to_dict(self) -> Dict[str, Any]: field_dict["type"] = type if unique_id is not UNSET: field_dict["uniqueId"] = unique_id + if access_scope is not UNSET: + field_dict["accessScope"] = access_scope if clear_if_exists is not UNSET: field_dict["clearIfExists"] = clear_if_exists if config is not UNSET: @@ -104,13 +104,6 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.data_source_config import DataSourceConfig d = src_dict.copy() - _access_scope = d.pop("accessScope", UNSET) - access_scope: Union[Unset, AccessScope] - if isinstance(_access_scope, Unset): - access_scope = UNSET - else: - access_scope = AccessScope(_access_scope) - attributes = cast(List[str], d.pop("attributes", UNSET)) authorized_users = cast(List[str], d.pop("authorizedUsers", UNSET)) @@ -128,6 +121,13 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: unique_id = d.pop("uniqueId", UNSET) + _access_scope = d.pop("accessScope", UNSET) + access_scope: Union[Unset, AccessScope] + if isinstance(_access_scope, Unset): + access_scope = UNSET + else: + access_scope = AccessScope(_access_scope) + clear_if_exists = d.pop("clearIfExists", UNSET) _config = d.pop("config", UNSET) @@ -145,13 +145,13 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: credentials_provider = CredentialsProvider.from_dict(_credentials_provider) data_source_definition = cls( - access_scope=access_scope, attributes=attributes, authorized_users=authorized_users, consent_type=consent_type, name=name, type=type, unique_id=unique_id, + access_scope=access_scope, clear_if_exists=clear_if_exists, config=config, credentials_provider=credentials_provider, diff --git a/src/tuneinsight/api/sdk/models/data_source_metadata.py b/src/tuneinsight/api/sdk/models/data_source_metadata.py index a8823b3..77482a4 100644 --- a/src/tuneinsight/api/sdk/models/data_source_metadata.py +++ b/src/tuneinsight/api/sdk/models/data_source_metadata.py @@ -16,15 +16,16 
@@ class DataSourceMetadata: """metadata about a datasource Attributes: - tables (Union[Unset, List['DataSourceTable']]): metadata_available (Union[Unset, bool]): whether or not the datasource supports returning metadata + tables (Union[Unset, List['DataSourceTable']]): """ - tables: Union[Unset, List["DataSourceTable"]] = UNSET metadata_available: Union[Unset, bool] = UNSET + tables: Union[Unset, List["DataSourceTable"]] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: + metadata_available = self.metadata_available tables: Union[Unset, List[Dict[str, Any]]] = UNSET if not isinstance(self.tables, Unset): tables = [] @@ -33,15 +34,13 @@ def to_dict(self) -> Dict[str, Any]: tables.append(tables_item) - metadata_available = self.metadata_available - field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if tables is not UNSET: - field_dict["tables"] = tables if metadata_available is not UNSET: field_dict["metadataAvailable"] = metadata_available + if tables is not UNSET: + field_dict["tables"] = tables return field_dict @@ -50,6 +49,8 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.data_source_table import DataSourceTable d = src_dict.copy() + metadata_available = d.pop("metadataAvailable", UNSET) + tables = [] _tables = d.pop("tables", UNSET) for tables_item_data in _tables or []: @@ -57,11 +58,9 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: tables.append(tables_item) - metadata_available = d.pop("metadataAvailable", UNSET) - data_source_metadata = cls( - tables=tables, metadata_available=metadata_available, + tables=tables, ) data_source_metadata.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/data_source_query.py b/src/tuneinsight/api/sdk/models/data_source_query.py index 2b36752..719fafd 100644 --- a/src/tuneinsight/api/sdk/models/data_source_query.py +++ b/src/tuneinsight/api/sdk/models/data_source_query.py @@ -16,6 +16,8 @@ class DataSourceQuery: """schema used for the query Attributes: + api_json_path (Union[Unset, str]): JSONPath used for API data sources (if given, will be used to parse the API + response) api_path_query (Union[Unset, str]): Query path for the API data source URL (e.g. 
https://example.com+{apiPathQuery}) api_request_body (Union[Unset, str]): request body used for API data sources (if given, the request will use @@ -23,18 +25,17 @@ class DataSourceQuery: database_query (Union[Unset, str]): query used to retrieve data from a database data source (typically in SQL format) select (Union[Unset, Select]): - api_json_path (Union[Unset, str]): JSONPath used for API data sources (if given, will be used to parse the API - response) """ + api_json_path: Union[Unset, str] = UNSET api_path_query: Union[Unset, str] = UNSET api_request_body: Union[Unset, str] = UNSET database_query: Union[Unset, str] = UNSET select: Union[Unset, "Select"] = UNSET - api_json_path: Union[Unset, str] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: + api_json_path = self.api_json_path api_path_query = self.api_path_query api_request_body = self.api_request_body database_query = self.database_query @@ -42,11 +43,11 @@ def to_dict(self) -> Dict[str, Any]: if not isinstance(self.select, Unset): select = self.select.to_dict() - api_json_path = self.api_json_path - field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) + if api_json_path is not UNSET: + field_dict["apiJsonPath"] = api_json_path if api_path_query is not UNSET: field_dict["apiPathQuery"] = api_path_query if api_request_body is not UNSET: @@ -55,8 +56,6 @@ def to_dict(self) -> Dict[str, Any]: field_dict["databaseQuery"] = database_query if select is not UNSET: field_dict["select"] = select - if api_json_path is not UNSET: - field_dict["apiJsonPath"] = api_json_path return field_dict @@ -65,6 +64,8 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.select import Select d = src_dict.copy() + api_json_path = d.pop("apiJsonPath", UNSET) + api_path_query = d.pop("apiPathQuery", UNSET) api_request_body = d.pop("apiRequestBody", UNSET) @@ -78,14 +79,12 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: select = Select.from_dict(_select) - api_json_path = d.pop("apiJsonPath", UNSET) - data_source_query = cls( + api_json_path=api_json_path, api_path_query=api_path_query, api_request_body=api_request_body, database_query=database_query, select=select, - api_json_path=api_json_path, ) data_source_query.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/data_source_types_info.py b/src/tuneinsight/api/sdk/models/data_source_types_info.py index 3b8c882..bd728e8 100644 --- a/src/tuneinsight/api/sdk/models/data_source_types_info.py +++ b/src/tuneinsight/api/sdk/models/data_source_types_info.py @@ -13,21 +13,17 @@ class DataSourceTypesInfo: """information about the available datasources Attributes: - data_source_types (Union[Unset, List[str]]): list of available datasource types database_types (Union[Unset, List[DatabaseType]]): list of supported database types local_formats (Union[Unset, List[str]]): list of supported format for local datasources + data_source_types (Union[Unset, List[str]]): list of available datasource types """ - data_source_types: Union[Unset, List[str]] = UNSET database_types: Union[Unset, List[DatabaseType]] = UNSET local_formats: Union[Unset, List[str]] = UNSET + data_source_types: Union[Unset, List[str]] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - data_source_types: Union[Unset, List[str]] = UNSET - if not isinstance(self.data_source_types, Unset): - data_source_types = 
self.data_source_types - database_types: Union[Unset, List[str]] = UNSET if not isinstance(self.database_types, Unset): database_types = [] @@ -40,23 +36,25 @@ def to_dict(self) -> Dict[str, Any]: if not isinstance(self.local_formats, Unset): local_formats = self.local_formats + data_source_types: Union[Unset, List[str]] = UNSET + if not isinstance(self.data_source_types, Unset): + data_source_types = self.data_source_types + field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if data_source_types is not UNSET: - field_dict["dataSourceTypes"] = data_source_types if database_types is not UNSET: field_dict["databaseTypes"] = database_types if local_formats is not UNSET: field_dict["localFormats"] = local_formats + if data_source_types is not UNSET: + field_dict["dataSourceTypes"] = data_source_types return field_dict @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() - data_source_types = cast(List[str], d.pop("dataSourceTypes", UNSET)) - database_types = [] _database_types = d.pop("databaseTypes", UNSET) for database_types_item_data in _database_types or []: @@ -66,10 +64,12 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: local_formats = cast(List[str], d.pop("localFormats", UNSET)) + data_source_types = cast(List[str], d.pop("dataSourceTypes", UNSET)) + data_source_types_info = cls( - data_source_types=data_source_types, database_types=database_types, local_formats=local_formats, + data_source_types=data_source_types, ) data_source_types_info.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/dataset_statistics.py b/src/tuneinsight/api/sdk/models/dataset_statistics.py index 49bcbb4..05f177b 100644 --- a/src/tuneinsight/api/sdk/models/dataset_statistics.py +++ b/src/tuneinsight/api/sdk/models/dataset_statistics.py @@ -21,83 +21,89 @@ class DatasetStatistics: """ Attributes: type (ComputationType): Type of the computation. - join_id (Union[Unset, str]): Unique identifier of a data object. + local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured + the network) local_input (Union[Unset, LocalInput]): If a local input is provided, the node initiating the computation will use it instead of querying the datasource. This data is *not* shared to other nodes, only used for the duration of the computation. The local input columns/values must be in the form {: [, , ...], ...} - wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. - input_data_object (Union[Unset, str]): Shared identifier of a data object. + preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters + applied to the input retrieved from the datasource, if applicable cohort_id (Union[Unset, str]): Unique identifier of a data object. + end_to_end_encrypted (Union[Unset, bool]): if the end to end encrypted mode is set to true, + then when release results is set to true and the output + is initially encrypted with a network collective key, then it is key switched to + the initiating user's public key. + local_input_id (Union[Unset, str]): Unique identifier of a data object. + wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. + dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various + disclosure prevention mechanisms + join_id (Union[Unset, str]): Unique identifier of a data object. 
+ timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. + encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. + owner (Union[Unset, str]): The username of the end user who requested the computation. project_id (Union[Unset, str]): Unique identifier of a project. release_results (Union[Unset, bool]): flag to set to true if the computation should directly release the output results. If set, then encrypted results are automatically key switched and decrypted and a Result entity is saved - dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various - disclosure prevention mechanisms - encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. - local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured - the network) - local_input_id (Union[Unset, str]): Unique identifier of a data object. - owner (Union[Unset, str]): The username of the end user who requested the computation. - preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters - applied to the input retrieved from the datasource, if applicable - timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. data_source_parameters (Union[Unset, ComputationDataSourceParameters]): Parameters used to query the datasource from each node before the computation + input_data_object (Union[Unset, str]): Shared identifier of a data object. statistics (Union[Unset, List['StatisticDefinition']]): list of statistics to be extracted from the dataset """ type: ComputationType - join_id: Union[Unset, str] = UNSET + local: Union[Unset, bool] = UNSET local_input: Union[Unset, "LocalInput"] = UNSET - wait: Union[Unset, bool] = UNSET - input_data_object: Union[Unset, str] = UNSET + preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET cohort_id: Union[Unset, str] = UNSET - project_id: Union[Unset, str] = UNSET - release_results: Union[Unset, bool] = UNSET + end_to_end_encrypted: Union[Unset, bool] = UNSET + local_input_id: Union[Unset, str] = UNSET + wait: Union[Unset, bool] = UNSET dp_policy: Union[Unset, "DPPolicy"] = UNSET + join_id: Union[Unset, str] = UNSET + timeout: Union[Unset, int] = UNSET encrypted: Union[Unset, bool] = UNSET - local: Union[Unset, bool] = UNSET - local_input_id: Union[Unset, str] = UNSET owner: Union[Unset, str] = UNSET - preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET - timeout: Union[Unset, int] = UNSET + project_id: Union[Unset, str] = UNSET + release_results: Union[Unset, bool] = UNSET data_source_parameters: Union[Unset, "ComputationDataSourceParameters"] = UNSET + input_data_object: Union[Unset, str] = UNSET statistics: Union[Unset, List["StatisticDefinition"]] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: type = self.type.value - join_id = self.join_id + local = self.local local_input: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.local_input, Unset): local_input = self.local_input.to_dict() - wait = self.wait - input_data_object = self.input_data_object + preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.preprocessing_parameters, Unset): + preprocessing_parameters = 
self.preprocessing_parameters.to_dict() + cohort_id = self.cohort_id - project_id = self.project_id - release_results = self.release_results + end_to_end_encrypted = self.end_to_end_encrypted + local_input_id = self.local_input_id + wait = self.wait dp_policy: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.dp_policy, Unset): dp_policy = self.dp_policy.to_dict() + join_id = self.join_id + timeout = self.timeout encrypted = self.encrypted - local = self.local - local_input_id = self.local_input_id owner = self.owner - preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.preprocessing_parameters, Unset): - preprocessing_parameters = self.preprocessing_parameters.to_dict() - - timeout = self.timeout + project_id = self.project_id + release_results = self.release_results data_source_parameters: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.data_source_parameters, Unset): data_source_parameters = self.data_source_parameters.to_dict() + input_data_object = self.input_data_object statistics: Union[Unset, List[Dict[str, Any]]] = UNSET if not isinstance(self.statistics, Unset): statistics = [] @@ -113,36 +119,38 @@ def to_dict(self) -> Dict[str, Any]: "type": type, } ) - if join_id is not UNSET: - field_dict["joinId"] = join_id + if local is not UNSET: + field_dict["local"] = local if local_input is not UNSET: field_dict["localInput"] = local_input - if wait is not UNSET: - field_dict["wait"] = wait - if input_data_object is not UNSET: - field_dict["inputDataObject"] = input_data_object + if preprocessing_parameters is not UNSET: + field_dict["preprocessingParameters"] = preprocessing_parameters if cohort_id is not UNSET: field_dict["cohortId"] = cohort_id - if project_id is not UNSET: - field_dict["projectId"] = project_id - if release_results is not UNSET: - field_dict["releaseResults"] = release_results + if end_to_end_encrypted is not UNSET: + field_dict["endToEndEncrypted"] = end_to_end_encrypted + if local_input_id is not UNSET: + field_dict["localInputID"] = local_input_id + if wait is not UNSET: + field_dict["wait"] = wait if dp_policy is not UNSET: field_dict["DPPolicy"] = dp_policy + if join_id is not UNSET: + field_dict["joinId"] = join_id + if timeout is not UNSET: + field_dict["timeout"] = timeout if encrypted is not UNSET: field_dict["encrypted"] = encrypted - if local is not UNSET: - field_dict["local"] = local - if local_input_id is not UNSET: - field_dict["localInputID"] = local_input_id if owner is not UNSET: field_dict["owner"] = owner - if preprocessing_parameters is not UNSET: - field_dict["preprocessingParameters"] = preprocessing_parameters - if timeout is not UNSET: - field_dict["timeout"] = timeout + if project_id is not UNSET: + field_dict["projectId"] = project_id + if release_results is not UNSET: + field_dict["releaseResults"] = release_results if data_source_parameters is not UNSET: field_dict["dataSourceParameters"] = data_source_parameters + if input_data_object is not UNSET: + field_dict["inputDataObject"] = input_data_object if statistics is not UNSET: field_dict["statistics"] = statistics @@ -159,7 +167,7 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() type = ComputationType(d.pop("type")) - join_id = d.pop("joinId", UNSET) + local = d.pop("local", UNSET) _local_input = d.pop("localInput", UNSET) local_input: Union[Unset, LocalInput] @@ -168,15 +176,20 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: local_input = LocalInput.from_dict(_local_input) - 
wait = d.pop("wait", UNSET) - - input_data_object = d.pop("inputDataObject", UNSET) + _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) + preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] + if isinstance(_preprocessing_parameters, Unset): + preprocessing_parameters = UNSET + else: + preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) cohort_id = d.pop("cohortId", UNSET) - project_id = d.pop("projectId", UNSET) + end_to_end_encrypted = d.pop("endToEndEncrypted", UNSET) - release_results = d.pop("releaseResults", UNSET) + local_input_id = d.pop("localInputID", UNSET) + + wait = d.pop("wait", UNSET) _dp_policy = d.pop("DPPolicy", UNSET) dp_policy: Union[Unset, DPPolicy] @@ -185,22 +198,17 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: dp_policy = DPPolicy.from_dict(_dp_policy) - encrypted = d.pop("encrypted", UNSET) + join_id = d.pop("joinId", UNSET) - local = d.pop("local", UNSET) + timeout = d.pop("timeout", UNSET) - local_input_id = d.pop("localInputID", UNSET) + encrypted = d.pop("encrypted", UNSET) owner = d.pop("owner", UNSET) - _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) - preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] - if isinstance(_preprocessing_parameters, Unset): - preprocessing_parameters = UNSET - else: - preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) + project_id = d.pop("projectId", UNSET) - timeout = d.pop("timeout", UNSET) + release_results = d.pop("releaseResults", UNSET) _data_source_parameters = d.pop("dataSourceParameters", UNSET) data_source_parameters: Union[Unset, ComputationDataSourceParameters] @@ -209,6 +217,8 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: data_source_parameters = ComputationDataSourceParameters.from_dict(_data_source_parameters) + input_data_object = d.pop("inputDataObject", UNSET) + statistics = [] _statistics = d.pop("statistics", UNSET) for statistics_item_data in _statistics or []: @@ -218,21 +228,22 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: dataset_statistics = cls( type=type, - join_id=join_id, + local=local, local_input=local_input, - wait=wait, - input_data_object=input_data_object, + preprocessing_parameters=preprocessing_parameters, cohort_id=cohort_id, - project_id=project_id, - release_results=release_results, + end_to_end_encrypted=end_to_end_encrypted, + local_input_id=local_input_id, + wait=wait, dp_policy=dp_policy, + join_id=join_id, + timeout=timeout, encrypted=encrypted, - local=local, - local_input_id=local_input_id, owner=owner, - preprocessing_parameters=preprocessing_parameters, - timeout=timeout, + project_id=project_id, + release_results=release_results, data_source_parameters=data_source_parameters, + input_data_object=input_data_object, statistics=statistics, ) diff --git a/src/tuneinsight/api/sdk/models/distributed_join.py b/src/tuneinsight/api/sdk/models/distributed_join.py index 0dee47f..6547c5b 100644 --- a/src/tuneinsight/api/sdk/models/distributed_join.py +++ b/src/tuneinsight/api/sdk/models/distributed_join.py @@ -20,91 +20,93 @@ class DistributedJoin: """ Attributes: type (ComputationType): Type of the computation. - join_id (Union[Unset, str]): Unique identifier of a data object. 
+ local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured + the network) local_input (Union[Unset, LocalInput]): If a local input is provided, the node initiating the computation will use it instead of querying the datasource. This data is *not* shared to other nodes, only used for the duration of the computation. The local input columns/values must be in the form {: [, , ...], ...} - wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. - input_data_object (Union[Unset, str]): Shared identifier of a data object. + preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters + applied to the input retrieved from the datasource, if applicable cohort_id (Union[Unset, str]): Unique identifier of a data object. + end_to_end_encrypted (Union[Unset, bool]): if the end to end encrypted mode is set to true, + then when release results is set to true and the output + is initially encrypted with a network collective key, then it is key switched to + the initiating user's public key. + local_input_id (Union[Unset, str]): Unique identifier of a data object. + wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. + dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various + disclosure prevention mechanisms + join_id (Union[Unset, str]): Unique identifier of a data object. + timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. + encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. + owner (Union[Unset, str]): The username of the end user who requested the computation. project_id (Union[Unset, str]): Unique identifier of a project. release_results (Union[Unset, bool]): flag to set to true if the computation should directly release the output results. If set, then encrypted results are automatically key switched and decrypted and a Result entity is saved - dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various - disclosure prevention mechanisms - encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. - local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured - the network) - local_input_id (Union[Unset, str]): Unique identifier of a data object. - owner (Union[Unset, str]): The username of the end user who requested the computation. - preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters - applied to the input retrieved from the datasource, if applicable - timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. data_source_parameters (Union[Unset, ComputationDataSourceParameters]): Parameters used to query the datasource from each node before the computation - target_columns (Union[Unset, List[str]]): + input_data_object (Union[Unset, str]): Shared identifier of a data object. 
join_columns (Union[Unset, List[str]]): missing_patterns (Union[Unset, List[str]]): + target_columns (Union[Unset, List[str]]): """ type: ComputationType - join_id: Union[Unset, str] = UNSET + local: Union[Unset, bool] = UNSET local_input: Union[Unset, "LocalInput"] = UNSET - wait: Union[Unset, bool] = UNSET - input_data_object: Union[Unset, str] = UNSET + preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET cohort_id: Union[Unset, str] = UNSET - project_id: Union[Unset, str] = UNSET - release_results: Union[Unset, bool] = UNSET + end_to_end_encrypted: Union[Unset, bool] = UNSET + local_input_id: Union[Unset, str] = UNSET + wait: Union[Unset, bool] = UNSET dp_policy: Union[Unset, "DPPolicy"] = UNSET + join_id: Union[Unset, str] = UNSET + timeout: Union[Unset, int] = UNSET encrypted: Union[Unset, bool] = UNSET - local: Union[Unset, bool] = UNSET - local_input_id: Union[Unset, str] = UNSET owner: Union[Unset, str] = UNSET - preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET - timeout: Union[Unset, int] = UNSET + project_id: Union[Unset, str] = UNSET + release_results: Union[Unset, bool] = UNSET data_source_parameters: Union[Unset, "ComputationDataSourceParameters"] = UNSET - target_columns: Union[Unset, List[str]] = UNSET + input_data_object: Union[Unset, str] = UNSET join_columns: Union[Unset, List[str]] = UNSET missing_patterns: Union[Unset, List[str]] = UNSET + target_columns: Union[Unset, List[str]] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: type = self.type.value - join_id = self.join_id + local = self.local local_input: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.local_input, Unset): local_input = self.local_input.to_dict() - wait = self.wait - input_data_object = self.input_data_object + preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.preprocessing_parameters, Unset): + preprocessing_parameters = self.preprocessing_parameters.to_dict() + cohort_id = self.cohort_id - project_id = self.project_id - release_results = self.release_results + end_to_end_encrypted = self.end_to_end_encrypted + local_input_id = self.local_input_id + wait = self.wait dp_policy: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.dp_policy, Unset): dp_policy = self.dp_policy.to_dict() + join_id = self.join_id + timeout = self.timeout encrypted = self.encrypted - local = self.local - local_input_id = self.local_input_id owner = self.owner - preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.preprocessing_parameters, Unset): - preprocessing_parameters = self.preprocessing_parameters.to_dict() - - timeout = self.timeout + project_id = self.project_id + release_results = self.release_results data_source_parameters: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.data_source_parameters, Unset): data_source_parameters = self.data_source_parameters.to_dict() - target_columns: Union[Unset, List[str]] = UNSET - if not isinstance(self.target_columns, Unset): - target_columns = self.target_columns - + input_data_object = self.input_data_object join_columns: Union[Unset, List[str]] = UNSET if not isinstance(self.join_columns, Unset): join_columns = self.join_columns @@ -113,6 +115,10 @@ def to_dict(self) -> Dict[str, Any]: if not isinstance(self.missing_patterns, Unset): missing_patterns = self.missing_patterns + target_columns: Union[Unset, List[str]] = UNSET + if not 
isinstance(self.target_columns, Unset): + target_columns = self.target_columns + field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update( @@ -120,42 +126,44 @@ def to_dict(self) -> Dict[str, Any]: "type": type, } ) - if join_id is not UNSET: - field_dict["joinId"] = join_id + if local is not UNSET: + field_dict["local"] = local if local_input is not UNSET: field_dict["localInput"] = local_input - if wait is not UNSET: - field_dict["wait"] = wait - if input_data_object is not UNSET: - field_dict["inputDataObject"] = input_data_object + if preprocessing_parameters is not UNSET: + field_dict["preprocessingParameters"] = preprocessing_parameters if cohort_id is not UNSET: field_dict["cohortId"] = cohort_id - if project_id is not UNSET: - field_dict["projectId"] = project_id - if release_results is not UNSET: - field_dict["releaseResults"] = release_results + if end_to_end_encrypted is not UNSET: + field_dict["endToEndEncrypted"] = end_to_end_encrypted + if local_input_id is not UNSET: + field_dict["localInputID"] = local_input_id + if wait is not UNSET: + field_dict["wait"] = wait if dp_policy is not UNSET: field_dict["DPPolicy"] = dp_policy + if join_id is not UNSET: + field_dict["joinId"] = join_id + if timeout is not UNSET: + field_dict["timeout"] = timeout if encrypted is not UNSET: field_dict["encrypted"] = encrypted - if local is not UNSET: - field_dict["local"] = local - if local_input_id is not UNSET: - field_dict["localInputID"] = local_input_id if owner is not UNSET: field_dict["owner"] = owner - if preprocessing_parameters is not UNSET: - field_dict["preprocessingParameters"] = preprocessing_parameters - if timeout is not UNSET: - field_dict["timeout"] = timeout + if project_id is not UNSET: + field_dict["projectId"] = project_id + if release_results is not UNSET: + field_dict["releaseResults"] = release_results if data_source_parameters is not UNSET: field_dict["dataSourceParameters"] = data_source_parameters - if target_columns is not UNSET: - field_dict["targetColumns"] = target_columns + if input_data_object is not UNSET: + field_dict["inputDataObject"] = input_data_object if join_columns is not UNSET: field_dict["joinColumns"] = join_columns if missing_patterns is not UNSET: field_dict["missingPatterns"] = missing_patterns + if target_columns is not UNSET: + field_dict["targetColumns"] = target_columns return field_dict @@ -169,7 +177,7 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() type = ComputationType(d.pop("type")) - join_id = d.pop("joinId", UNSET) + local = d.pop("local", UNSET) _local_input = d.pop("localInput", UNSET) local_input: Union[Unset, LocalInput] @@ -178,15 +186,20 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: local_input = LocalInput.from_dict(_local_input) - wait = d.pop("wait", UNSET) - - input_data_object = d.pop("inputDataObject", UNSET) + _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) + preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] + if isinstance(_preprocessing_parameters, Unset): + preprocessing_parameters = UNSET + else: + preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) cohort_id = d.pop("cohortId", UNSET) - project_id = d.pop("projectId", UNSET) + end_to_end_encrypted = d.pop("endToEndEncrypted", UNSET) - release_results = d.pop("releaseResults", UNSET) + local_input_id = d.pop("localInputID", UNSET) + + wait = d.pop("wait", UNSET) _dp_policy = 
d.pop("DPPolicy", UNSET) dp_policy: Union[Unset, DPPolicy] @@ -195,22 +208,17 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: dp_policy = DPPolicy.from_dict(_dp_policy) - encrypted = d.pop("encrypted", UNSET) + join_id = d.pop("joinId", UNSET) - local = d.pop("local", UNSET) + timeout = d.pop("timeout", UNSET) - local_input_id = d.pop("localInputID", UNSET) + encrypted = d.pop("encrypted", UNSET) owner = d.pop("owner", UNSET) - _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) - preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] - if isinstance(_preprocessing_parameters, Unset): - preprocessing_parameters = UNSET - else: - preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) + project_id = d.pop("projectId", UNSET) - timeout = d.pop("timeout", UNSET) + release_results = d.pop("releaseResults", UNSET) _data_source_parameters = d.pop("dataSourceParameters", UNSET) data_source_parameters: Union[Unset, ComputationDataSourceParameters] @@ -219,32 +227,35 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: data_source_parameters = ComputationDataSourceParameters.from_dict(_data_source_parameters) - target_columns = cast(List[str], d.pop("targetColumns", UNSET)) + input_data_object = d.pop("inputDataObject", UNSET) join_columns = cast(List[str], d.pop("joinColumns", UNSET)) missing_patterns = cast(List[str], d.pop("missingPatterns", UNSET)) + target_columns = cast(List[str], d.pop("targetColumns", UNSET)) + distributed_join = cls( type=type, - join_id=join_id, + local=local, local_input=local_input, - wait=wait, - input_data_object=input_data_object, + preprocessing_parameters=preprocessing_parameters, cohort_id=cohort_id, - project_id=project_id, - release_results=release_results, + end_to_end_encrypted=end_to_end_encrypted, + local_input_id=local_input_id, + wait=wait, dp_policy=dp_policy, + join_id=join_id, + timeout=timeout, encrypted=encrypted, - local=local, - local_input_id=local_input_id, owner=owner, - preprocessing_parameters=preprocessing_parameters, - timeout=timeout, + project_id=project_id, + release_results=release_results, data_source_parameters=data_source_parameters, - target_columns=target_columns, + input_data_object=input_data_object, join_columns=join_columns, missing_patterns=missing_patterns, + target_columns=target_columns, ) distributed_join.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/dp_policy.py b/src/tuneinsight/api/sdk/models/dp_policy.py index 692cea7..63e5ed7 100644 --- a/src/tuneinsight/api/sdk/models/dp_policy.py +++ b/src/tuneinsight/api/sdk/models/dp_policy.py @@ -18,16 +18,17 @@ class DPPolicy: """represents the disclosure prevention policy that enables toggling various disclosure prevention mechanisms Attributes: - max_factors (Union[Unset, Threshold]): represents a threshold, which can be made relative of the dataset size - noisy_global_size (Union[Unset, bool]): when computing the global size, whether noise is used or not. If so, - each node adds discrete noise to its input to the encrypted aggregation + privacy_budget_parameters (Union[Unset, PrivacyBudgetParameters]): Differential privacy budget settings. + The unit of the privacy budget is in terms of epsilon value (ϵ). + More precisely, if a computation adds noise that is equivalent ϵ=0.1 then 0.1 of the privacy budget is used. 
+ min_frequencies (Union[Unset, Threshold]): represents a threshold, which can be made relative of the dataset + size min_global_dataset_size (Union[Unset, int]): minimum size of the global / collective dataset. It is collectively computed using the encrypted aggregation noise_parameters (Union[Unset, NoiseParameters]): parameters for adding differential privacy noise to the computation's encrypted output - privacy_budget_parameters (Union[Unset, PrivacyBudgetParameters]): Differential privacy budget settings. - The unit of the privacy budget is in terms of epsilon value (ϵ). - More precisely, if a computation adds noise that is equivalent ϵ=0.1 then 0.1 of the privacy budget is used. + noisy_global_size (Union[Unset, bool]): when computing the global size, whether noise is used or not. If so, + each node adds discrete noise to its input to the encrypted aggregation authorized_variables (Union[Unset, List[str]]): constraint on the set of variables that can be used as input, in order to prevent misuse of variables that are out of context of the project. if > 0 variables are defined here, then the dataset will automatically drop any variables that do not belong to @@ -37,38 +38,37 @@ class DPPolicy: returned variables cannot be aliased (for example using aliases in SQL SELECT statements) to evade this trap. max_column_count (Union[Unset, Threshold]): represents a threshold, which can be made relative of the dataset size + max_factors (Union[Unset, Threshold]): represents a threshold, which can be made relative of the dataset size min_dataset_size (Union[Unset, int]): minimum size of the dataset used as local input (checked both before and after the preprocessing operations are run) - min_frequencies (Union[Unset, Threshold]): represents a threshold, which can be made relative of the dataset - size """ - max_factors: Union[Unset, "Threshold"] = UNSET - noisy_global_size: Union[Unset, bool] = UNSET + privacy_budget_parameters: Union[Unset, "PrivacyBudgetParameters"] = UNSET + min_frequencies: Union[Unset, "Threshold"] = UNSET min_global_dataset_size: Union[Unset, int] = UNSET noise_parameters: Union[Unset, "NoiseParameters"] = UNSET - privacy_budget_parameters: Union[Unset, "PrivacyBudgetParameters"] = UNSET + noisy_global_size: Union[Unset, bool] = UNSET authorized_variables: Union[Unset, List[str]] = UNSET max_column_count: Union[Unset, "Threshold"] = UNSET + max_factors: Union[Unset, "Threshold"] = UNSET min_dataset_size: Union[Unset, int] = UNSET - min_frequencies: Union[Unset, "Threshold"] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - max_factors: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.max_factors, Unset): - max_factors = self.max_factors.to_dict() + privacy_budget_parameters: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.privacy_budget_parameters, Unset): + privacy_budget_parameters = self.privacy_budget_parameters.to_dict() + + min_frequencies: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.min_frequencies, Unset): + min_frequencies = self.min_frequencies.to_dict() - noisy_global_size = self.noisy_global_size min_global_dataset_size = self.min_global_dataset_size noise_parameters: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.noise_parameters, Unset): noise_parameters = self.noise_parameters.to_dict() - privacy_budget_parameters: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.privacy_budget_parameters, Unset): - 
privacy_budget_parameters = self.privacy_budget_parameters.to_dict() - + noisy_global_size = self.noisy_global_size authorized_variables: Union[Unset, List[str]] = UNSET if not isinstance(self.authorized_variables, Unset): authorized_variables = self.authorized_variables @@ -77,32 +77,33 @@ def to_dict(self) -> Dict[str, Any]: if not isinstance(self.max_column_count, Unset): max_column_count = self.max_column_count.to_dict() + max_factors: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.max_factors, Unset): + max_factors = self.max_factors.to_dict() + min_dataset_size = self.min_dataset_size - min_frequencies: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.min_frequencies, Unset): - min_frequencies = self.min_frequencies.to_dict() field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if max_factors is not UNSET: - field_dict["maxFactors"] = max_factors - if noisy_global_size is not UNSET: - field_dict["noisyGlobalSize"] = noisy_global_size + if privacy_budget_parameters is not UNSET: + field_dict["privacyBudgetParameters"] = privacy_budget_parameters + if min_frequencies is not UNSET: + field_dict["minFrequencies"] = min_frequencies if min_global_dataset_size is not UNSET: field_dict["minGlobalDatasetSize"] = min_global_dataset_size if noise_parameters is not UNSET: field_dict["noiseParameters"] = noise_parameters - if privacy_budget_parameters is not UNSET: - field_dict["privacyBudgetParameters"] = privacy_budget_parameters + if noisy_global_size is not UNSET: + field_dict["noisyGlobalSize"] = noisy_global_size if authorized_variables is not UNSET: field_dict["authorizedVariables"] = authorized_variables if max_column_count is not UNSET: field_dict["maxColumnCount"] = max_column_count + if max_factors is not UNSET: + field_dict["maxFactors"] = max_factors if min_dataset_size is not UNSET: field_dict["minDatasetSize"] = min_dataset_size - if min_frequencies is not UNSET: - field_dict["minFrequencies"] = min_frequencies return field_dict @@ -113,14 +114,19 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.threshold import Threshold d = src_dict.copy() - _max_factors = d.pop("maxFactors", UNSET) - max_factors: Union[Unset, Threshold] - if isinstance(_max_factors, Unset): - max_factors = UNSET + _privacy_budget_parameters = d.pop("privacyBudgetParameters", UNSET) + privacy_budget_parameters: Union[Unset, PrivacyBudgetParameters] + if isinstance(_privacy_budget_parameters, Unset): + privacy_budget_parameters = UNSET else: - max_factors = Threshold.from_dict(_max_factors) + privacy_budget_parameters = PrivacyBudgetParameters.from_dict(_privacy_budget_parameters) - noisy_global_size = d.pop("noisyGlobalSize", UNSET) + _min_frequencies = d.pop("minFrequencies", UNSET) + min_frequencies: Union[Unset, Threshold] + if isinstance(_min_frequencies, Unset): + min_frequencies = UNSET + else: + min_frequencies = Threshold.from_dict(_min_frequencies) min_global_dataset_size = d.pop("minGlobalDatasetSize", UNSET) @@ -131,12 +137,7 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: noise_parameters = NoiseParameters.from_dict(_noise_parameters) - _privacy_budget_parameters = d.pop("privacyBudgetParameters", UNSET) - privacy_budget_parameters: Union[Unset, PrivacyBudgetParameters] - if isinstance(_privacy_budget_parameters, Unset): - privacy_budget_parameters = UNSET - else: - privacy_budget_parameters = PrivacyBudgetParameters.from_dict(_privacy_budget_parameters) + noisy_global_size = 
d.pop("noisyGlobalSize", UNSET) authorized_variables = cast(List[str], d.pop("authorizedVariables", UNSET)) @@ -147,25 +148,25 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: max_column_count = Threshold.from_dict(_max_column_count) - min_dataset_size = d.pop("minDatasetSize", UNSET) - - _min_frequencies = d.pop("minFrequencies", UNSET) - min_frequencies: Union[Unset, Threshold] - if isinstance(_min_frequencies, Unset): - min_frequencies = UNSET + _max_factors = d.pop("maxFactors", UNSET) + max_factors: Union[Unset, Threshold] + if isinstance(_max_factors, Unset): + max_factors = UNSET else: - min_frequencies = Threshold.from_dict(_min_frequencies) + max_factors = Threshold.from_dict(_max_factors) + + min_dataset_size = d.pop("minDatasetSize", UNSET) dp_policy = cls( - max_factors=max_factors, - noisy_global_size=noisy_global_size, + privacy_budget_parameters=privacy_budget_parameters, + min_frequencies=min_frequencies, min_global_dataset_size=min_global_dataset_size, noise_parameters=noise_parameters, - privacy_budget_parameters=privacy_budget_parameters, + noisy_global_size=noisy_global_size, authorized_variables=authorized_variables, max_column_count=max_column_count, + max_factors=max_factors, min_dataset_size=min_dataset_size, - min_frequencies=min_frequencies, ) dp_policy.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/dummy.py b/src/tuneinsight/api/sdk/models/dummy.py index ee36488..781c0dd 100644 --- a/src/tuneinsight/api/sdk/models/dummy.py +++ b/src/tuneinsight/api/sdk/models/dummy.py @@ -20,93 +20,99 @@ class Dummy: """ Attributes: type (ComputationType): Type of the computation. - join_id (Union[Unset, str]): Unique identifier of a data object. + local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured + the network) local_input (Union[Unset, LocalInput]): If a local input is provided, the node initiating the computation will use it instead of querying the datasource. This data is *not* shared to other nodes, only used for the duration of the computation. The local input columns/values must be in the form {: [, , ...], ...} - wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. - input_data_object (Union[Unset, str]): Shared identifier of a data object. + preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters + applied to the input retrieved from the datasource, if applicable cohort_id (Union[Unset, str]): Unique identifier of a data object. + end_to_end_encrypted (Union[Unset, bool]): if the end to end encrypted mode is set to true, + then when release results is set to true and the output + is initially encrypted with a network collective key, then it is key switched to + the initiating user's public key. + local_input_id (Union[Unset, str]): Unique identifier of a data object. + wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. + dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various + disclosure prevention mechanisms + join_id (Union[Unset, str]): Unique identifier of a data object. + timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. + encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. + owner (Union[Unset, str]): The username of the end user who requested the computation. 
project_id (Union[Unset, str]): Unique identifier of a project. release_results (Union[Unset, bool]): flag to set to true if the computation should directly release the output results. If set, then encrypted results are automatically key switched and decrypted and a Result entity is saved - dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various - disclosure prevention mechanisms - encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. - local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured - the network) - local_input_id (Union[Unset, str]): Unique identifier of a data object. - owner (Union[Unset, str]): The username of the end user who requested the computation. - preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters - applied to the input retrieved from the datasource, if applicable - timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. data_source_parameters (Union[Unset, ComputationDataSourceParameters]): Parameters used to query the datasource from each node before the computation - error_in_constructor (Union[Unset, bool]): - error_in_start (Union[Unset, bool]): + input_data_object (Union[Unset, str]): Shared identifier of a data object. panic_in_constructor (Union[Unset, bool]): panic_in_start (Union[Unset, bool]): + error_in_constructor (Union[Unset, bool]): + error_in_start (Union[Unset, bool]): """ type: ComputationType - join_id: Union[Unset, str] = UNSET + local: Union[Unset, bool] = UNSET local_input: Union[Unset, "LocalInput"] = UNSET - wait: Union[Unset, bool] = UNSET - input_data_object: Union[Unset, str] = UNSET + preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET cohort_id: Union[Unset, str] = UNSET - project_id: Union[Unset, str] = UNSET - release_results: Union[Unset, bool] = UNSET + end_to_end_encrypted: Union[Unset, bool] = UNSET + local_input_id: Union[Unset, str] = UNSET + wait: Union[Unset, bool] = UNSET dp_policy: Union[Unset, "DPPolicy"] = UNSET + join_id: Union[Unset, str] = UNSET + timeout: Union[Unset, int] = UNSET encrypted: Union[Unset, bool] = UNSET - local: Union[Unset, bool] = UNSET - local_input_id: Union[Unset, str] = UNSET owner: Union[Unset, str] = UNSET - preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET - timeout: Union[Unset, int] = UNSET + project_id: Union[Unset, str] = UNSET + release_results: Union[Unset, bool] = UNSET data_source_parameters: Union[Unset, "ComputationDataSourceParameters"] = UNSET - error_in_constructor: Union[Unset, bool] = UNSET - error_in_start: Union[Unset, bool] = UNSET + input_data_object: Union[Unset, str] = UNSET panic_in_constructor: Union[Unset, bool] = UNSET panic_in_start: Union[Unset, bool] = UNSET + error_in_constructor: Union[Unset, bool] = UNSET + error_in_start: Union[Unset, bool] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: type = self.type.value - join_id = self.join_id + local = self.local local_input: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.local_input, Unset): local_input = self.local_input.to_dict() - wait = self.wait - input_data_object = self.input_data_object + preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET + if not 
isinstance(self.preprocessing_parameters, Unset): + preprocessing_parameters = self.preprocessing_parameters.to_dict() + cohort_id = self.cohort_id - project_id = self.project_id - release_results = self.release_results + end_to_end_encrypted = self.end_to_end_encrypted + local_input_id = self.local_input_id + wait = self.wait dp_policy: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.dp_policy, Unset): dp_policy = self.dp_policy.to_dict() + join_id = self.join_id + timeout = self.timeout encrypted = self.encrypted - local = self.local - local_input_id = self.local_input_id owner = self.owner - preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.preprocessing_parameters, Unset): - preprocessing_parameters = self.preprocessing_parameters.to_dict() - - timeout = self.timeout + project_id = self.project_id + release_results = self.release_results data_source_parameters: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.data_source_parameters, Unset): data_source_parameters = self.data_source_parameters.to_dict() - error_in_constructor = self.error_in_constructor - error_in_start = self.error_in_start + input_data_object = self.input_data_object panic_in_constructor = self.panic_in_constructor panic_in_start = self.panic_in_start + error_in_constructor = self.error_in_constructor + error_in_start = self.error_in_start field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) @@ -115,44 +121,46 @@ def to_dict(self) -> Dict[str, Any]: "type": type, } ) - if join_id is not UNSET: - field_dict["joinId"] = join_id + if local is not UNSET: + field_dict["local"] = local if local_input is not UNSET: field_dict["localInput"] = local_input - if wait is not UNSET: - field_dict["wait"] = wait - if input_data_object is not UNSET: - field_dict["inputDataObject"] = input_data_object + if preprocessing_parameters is not UNSET: + field_dict["preprocessingParameters"] = preprocessing_parameters if cohort_id is not UNSET: field_dict["cohortId"] = cohort_id - if project_id is not UNSET: - field_dict["projectId"] = project_id - if release_results is not UNSET: - field_dict["releaseResults"] = release_results + if end_to_end_encrypted is not UNSET: + field_dict["endToEndEncrypted"] = end_to_end_encrypted + if local_input_id is not UNSET: + field_dict["localInputID"] = local_input_id + if wait is not UNSET: + field_dict["wait"] = wait if dp_policy is not UNSET: field_dict["DPPolicy"] = dp_policy + if join_id is not UNSET: + field_dict["joinId"] = join_id + if timeout is not UNSET: + field_dict["timeout"] = timeout if encrypted is not UNSET: field_dict["encrypted"] = encrypted - if local is not UNSET: - field_dict["local"] = local - if local_input_id is not UNSET: - field_dict["localInputID"] = local_input_id if owner is not UNSET: field_dict["owner"] = owner - if preprocessing_parameters is not UNSET: - field_dict["preprocessingParameters"] = preprocessing_parameters - if timeout is not UNSET: - field_dict["timeout"] = timeout + if project_id is not UNSET: + field_dict["projectId"] = project_id + if release_results is not UNSET: + field_dict["releaseResults"] = release_results if data_source_parameters is not UNSET: field_dict["dataSourceParameters"] = data_source_parameters - if error_in_constructor is not UNSET: - field_dict["errorInConstructor"] = error_in_constructor - if error_in_start is not UNSET: - field_dict["errorInStart"] = error_in_start + if input_data_object is not UNSET: + field_dict["inputDataObject"] = input_data_object if 
panic_in_constructor is not UNSET: field_dict["panicInConstructor"] = panic_in_constructor if panic_in_start is not UNSET: field_dict["panicInStart"] = panic_in_start + if error_in_constructor is not UNSET: + field_dict["errorInConstructor"] = error_in_constructor + if error_in_start is not UNSET: + field_dict["errorInStart"] = error_in_start return field_dict @@ -166,7 +174,7 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() type = ComputationType(d.pop("type")) - join_id = d.pop("joinId", UNSET) + local = d.pop("local", UNSET) _local_input = d.pop("localInput", UNSET) local_input: Union[Unset, LocalInput] @@ -175,15 +183,20 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: local_input = LocalInput.from_dict(_local_input) - wait = d.pop("wait", UNSET) - - input_data_object = d.pop("inputDataObject", UNSET) + _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) + preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] + if isinstance(_preprocessing_parameters, Unset): + preprocessing_parameters = UNSET + else: + preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) cohort_id = d.pop("cohortId", UNSET) - project_id = d.pop("projectId", UNSET) + end_to_end_encrypted = d.pop("endToEndEncrypted", UNSET) - release_results = d.pop("releaseResults", UNSET) + local_input_id = d.pop("localInputID", UNSET) + + wait = d.pop("wait", UNSET) _dp_policy = d.pop("DPPolicy", UNSET) dp_policy: Union[Unset, DPPolicy] @@ -192,22 +205,17 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: dp_policy = DPPolicy.from_dict(_dp_policy) - encrypted = d.pop("encrypted", UNSET) + join_id = d.pop("joinId", UNSET) - local = d.pop("local", UNSET) + timeout = d.pop("timeout", UNSET) - local_input_id = d.pop("localInputID", UNSET) + encrypted = d.pop("encrypted", UNSET) owner = d.pop("owner", UNSET) - _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) - preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] - if isinstance(_preprocessing_parameters, Unset): - preprocessing_parameters = UNSET - else: - preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) + project_id = d.pop("projectId", UNSET) - timeout = d.pop("timeout", UNSET) + release_results = d.pop("releaseResults", UNSET) _data_source_parameters = d.pop("dataSourceParameters", UNSET) data_source_parameters: Union[Unset, ComputationDataSourceParameters] @@ -216,35 +224,38 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: data_source_parameters = ComputationDataSourceParameters.from_dict(_data_source_parameters) - error_in_constructor = d.pop("errorInConstructor", UNSET) - - error_in_start = d.pop("errorInStart", UNSET) + input_data_object = d.pop("inputDataObject", UNSET) panic_in_constructor = d.pop("panicInConstructor", UNSET) panic_in_start = d.pop("panicInStart", UNSET) + error_in_constructor = d.pop("errorInConstructor", UNSET) + + error_in_start = d.pop("errorInStart", UNSET) + dummy = cls( type=type, - join_id=join_id, + local=local, local_input=local_input, - wait=wait, - input_data_object=input_data_object, + preprocessing_parameters=preprocessing_parameters, cohort_id=cohort_id, - project_id=project_id, - release_results=release_results, + end_to_end_encrypted=end_to_end_encrypted, + local_input_id=local_input_id, + wait=wait, dp_policy=dp_policy, + join_id=join_id, + timeout=timeout, encrypted=encrypted, - 
local=local, - local_input_id=local_input_id, owner=owner, - preprocessing_parameters=preprocessing_parameters, - timeout=timeout, + project_id=project_id, + release_results=release_results, data_source_parameters=data_source_parameters, - error_in_constructor=error_in_constructor, - error_in_start=error_in_start, + input_data_object=input_data_object, panic_in_constructor=panic_in_constructor, panic_in_start=panic_in_start, + error_in_constructor=error_in_constructor, + error_in_start=error_in_start, ) dummy.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/encrypted_aggregation.py b/src/tuneinsight/api/sdk/models/encrypted_aggregation.py index a6e6746..6151555 100644 --- a/src/tuneinsight/api/sdk/models/encrypted_aggregation.py +++ b/src/tuneinsight/api/sdk/models/encrypted_aggregation.py @@ -20,96 +20,102 @@ class EncryptedAggregation: """ Attributes: type (ComputationType): Type of the computation. - join_id (Union[Unset, str]): Unique identifier of a data object. + local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured + the network) local_input (Union[Unset, LocalInput]): If a local input is provided, the node initiating the computation will use it instead of querying the datasource. This data is *not* shared to other nodes, only used for the duration of the computation. The local input columns/values must be in the form {: [, , ...], ...} - wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. - input_data_object (Union[Unset, str]): Shared identifier of a data object. + preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters + applied to the input retrieved from the datasource, if applicable cohort_id (Union[Unset, str]): Unique identifier of a data object. + end_to_end_encrypted (Union[Unset, bool]): if the end to end encrypted mode is set to true, + then when release results is set to true and the output + is initially encrypted with a network collective key, then it is key switched to + the initiating user's public key. + local_input_id (Union[Unset, str]): Unique identifier of a data object. + wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. + dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various + disclosure prevention mechanisms + join_id (Union[Unset, str]): Unique identifier of a data object. + timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. + encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. + owner (Union[Unset, str]): The username of the end user who requested the computation. project_id (Union[Unset, str]): Unique identifier of a project. release_results (Union[Unset, bool]): flag to set to true if the computation should directly release the output results. If set, then encrypted results are automatically key switched and decrypted and a Result entity is saved - dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various - disclosure prevention mechanisms - encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. 
- local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured - the network) - local_input_id (Union[Unset, str]): Unique identifier of a data object. - owner (Union[Unset, str]): The username of the end user who requested the computation. - preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters - applied to the input retrieved from the datasource, if applicable - timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. data_source_parameters (Union[Unset, ComputationDataSourceParameters]): Parameters used to query the datasource from each node before the computation + input_data_object (Union[Unset, str]): Shared identifier of a data object. + nb_features (Union[Unset, int]): Number of columns of the dataset aggregate_columns (Union[Unset, List[str]]): The columns on which the data should be aggregated aggregate_features (Union[Unset, bool]): If true, sum the columns together into one number features (Union[Unset, str]): Shared identifier of a data object. - nb_features (Union[Unset, int]): Number of columns of the dataset """ type: ComputationType - join_id: Union[Unset, str] = UNSET + local: Union[Unset, bool] = UNSET local_input: Union[Unset, "LocalInput"] = UNSET - wait: Union[Unset, bool] = UNSET - input_data_object: Union[Unset, str] = UNSET + preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET cohort_id: Union[Unset, str] = UNSET - project_id: Union[Unset, str] = UNSET - release_results: Union[Unset, bool] = UNSET + end_to_end_encrypted: Union[Unset, bool] = UNSET + local_input_id: Union[Unset, str] = UNSET + wait: Union[Unset, bool] = UNSET dp_policy: Union[Unset, "DPPolicy"] = UNSET + join_id: Union[Unset, str] = UNSET + timeout: Union[Unset, int] = UNSET encrypted: Union[Unset, bool] = UNSET - local: Union[Unset, bool] = UNSET - local_input_id: Union[Unset, str] = UNSET owner: Union[Unset, str] = UNSET - preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET - timeout: Union[Unset, int] = UNSET + project_id: Union[Unset, str] = UNSET + release_results: Union[Unset, bool] = UNSET data_source_parameters: Union[Unset, "ComputationDataSourceParameters"] = UNSET + input_data_object: Union[Unset, str] = UNSET + nb_features: Union[Unset, int] = UNSET aggregate_columns: Union[Unset, List[str]] = UNSET aggregate_features: Union[Unset, bool] = UNSET features: Union[Unset, str] = UNSET - nb_features: Union[Unset, int] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: type = self.type.value - join_id = self.join_id + local = self.local local_input: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.local_input, Unset): local_input = self.local_input.to_dict() - wait = self.wait - input_data_object = self.input_data_object + preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.preprocessing_parameters, Unset): + preprocessing_parameters = self.preprocessing_parameters.to_dict() + cohort_id = self.cohort_id - project_id = self.project_id - release_results = self.release_results + end_to_end_encrypted = self.end_to_end_encrypted + local_input_id = self.local_input_id + wait = self.wait dp_policy: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.dp_policy, Unset): dp_policy = self.dp_policy.to_dict() + join_id = self.join_id + timeout = self.timeout encrypted = 
self.encrypted - local = self.local - local_input_id = self.local_input_id owner = self.owner - preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.preprocessing_parameters, Unset): - preprocessing_parameters = self.preprocessing_parameters.to_dict() - - timeout = self.timeout + project_id = self.project_id + release_results = self.release_results data_source_parameters: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.data_source_parameters, Unset): data_source_parameters = self.data_source_parameters.to_dict() + input_data_object = self.input_data_object + nb_features = self.nb_features aggregate_columns: Union[Unset, List[str]] = UNSET if not isinstance(self.aggregate_columns, Unset): aggregate_columns = self.aggregate_columns aggregate_features = self.aggregate_features features = self.features - nb_features = self.nb_features field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) @@ -118,44 +124,46 @@ def to_dict(self) -> Dict[str, Any]: "type": type, } ) - if join_id is not UNSET: - field_dict["joinId"] = join_id + if local is not UNSET: + field_dict["local"] = local if local_input is not UNSET: field_dict["localInput"] = local_input - if wait is not UNSET: - field_dict["wait"] = wait - if input_data_object is not UNSET: - field_dict["inputDataObject"] = input_data_object + if preprocessing_parameters is not UNSET: + field_dict["preprocessingParameters"] = preprocessing_parameters if cohort_id is not UNSET: field_dict["cohortId"] = cohort_id - if project_id is not UNSET: - field_dict["projectId"] = project_id - if release_results is not UNSET: - field_dict["releaseResults"] = release_results + if end_to_end_encrypted is not UNSET: + field_dict["endToEndEncrypted"] = end_to_end_encrypted + if local_input_id is not UNSET: + field_dict["localInputID"] = local_input_id + if wait is not UNSET: + field_dict["wait"] = wait if dp_policy is not UNSET: field_dict["DPPolicy"] = dp_policy + if join_id is not UNSET: + field_dict["joinId"] = join_id + if timeout is not UNSET: + field_dict["timeout"] = timeout if encrypted is not UNSET: field_dict["encrypted"] = encrypted - if local is not UNSET: - field_dict["local"] = local - if local_input_id is not UNSET: - field_dict["localInputID"] = local_input_id if owner is not UNSET: field_dict["owner"] = owner - if preprocessing_parameters is not UNSET: - field_dict["preprocessingParameters"] = preprocessing_parameters - if timeout is not UNSET: - field_dict["timeout"] = timeout + if project_id is not UNSET: + field_dict["projectId"] = project_id + if release_results is not UNSET: + field_dict["releaseResults"] = release_results if data_source_parameters is not UNSET: field_dict["dataSourceParameters"] = data_source_parameters + if input_data_object is not UNSET: + field_dict["inputDataObject"] = input_data_object + if nb_features is not UNSET: + field_dict["nbFeatures"] = nb_features if aggregate_columns is not UNSET: field_dict["aggregateColumns"] = aggregate_columns if aggregate_features is not UNSET: field_dict["aggregateFeatures"] = aggregate_features if features is not UNSET: field_dict["features"] = features - if nb_features is not UNSET: - field_dict["nbFeatures"] = nb_features return field_dict @@ -169,7 +177,7 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() type = ComputationType(d.pop("type")) - join_id = d.pop("joinId", UNSET) + local = d.pop("local", UNSET) _local_input = d.pop("localInput", UNSET) local_input: Union[Unset, LocalInput] @@ 
-178,15 +186,20 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: local_input = LocalInput.from_dict(_local_input) - wait = d.pop("wait", UNSET) - - input_data_object = d.pop("inputDataObject", UNSET) + _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) + preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] + if isinstance(_preprocessing_parameters, Unset): + preprocessing_parameters = UNSET + else: + preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) cohort_id = d.pop("cohortId", UNSET) - project_id = d.pop("projectId", UNSET) + end_to_end_encrypted = d.pop("endToEndEncrypted", UNSET) - release_results = d.pop("releaseResults", UNSET) + local_input_id = d.pop("localInputID", UNSET) + + wait = d.pop("wait", UNSET) _dp_policy = d.pop("DPPolicy", UNSET) dp_policy: Union[Unset, DPPolicy] @@ -195,22 +208,17 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: dp_policy = DPPolicy.from_dict(_dp_policy) - encrypted = d.pop("encrypted", UNSET) + join_id = d.pop("joinId", UNSET) - local = d.pop("local", UNSET) + timeout = d.pop("timeout", UNSET) - local_input_id = d.pop("localInputID", UNSET) + encrypted = d.pop("encrypted", UNSET) owner = d.pop("owner", UNSET) - _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) - preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] - if isinstance(_preprocessing_parameters, Unset): - preprocessing_parameters = UNSET - else: - preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) + project_id = d.pop("projectId", UNSET) - timeout = d.pop("timeout", UNSET) + release_results = d.pop("releaseResults", UNSET) _data_source_parameters = d.pop("dataSourceParameters", UNSET) data_source_parameters: Union[Unset, ComputationDataSourceParameters] @@ -219,35 +227,38 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: data_source_parameters = ComputationDataSourceParameters.from_dict(_data_source_parameters) + input_data_object = d.pop("inputDataObject", UNSET) + + nb_features = d.pop("nbFeatures", UNSET) + aggregate_columns = cast(List[str], d.pop("aggregateColumns", UNSET)) aggregate_features = d.pop("aggregateFeatures", UNSET) features = d.pop("features", UNSET) - nb_features = d.pop("nbFeatures", UNSET) - encrypted_aggregation = cls( type=type, - join_id=join_id, + local=local, local_input=local_input, - wait=wait, - input_data_object=input_data_object, + preprocessing_parameters=preprocessing_parameters, cohort_id=cohort_id, - project_id=project_id, - release_results=release_results, + end_to_end_encrypted=end_to_end_encrypted, + local_input_id=local_input_id, + wait=wait, dp_policy=dp_policy, + join_id=join_id, + timeout=timeout, encrypted=encrypted, - local=local, - local_input_id=local_input_id, owner=owner, - preprocessing_parameters=preprocessing_parameters, - timeout=timeout, + project_id=project_id, + release_results=release_results, data_source_parameters=data_source_parameters, + input_data_object=input_data_object, + nb_features=nb_features, aggregate_columns=aggregate_columns, aggregate_features=aggregate_features, features=features, - nb_features=nb_features, ) encrypted_aggregation.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/encrypted_mean.py b/src/tuneinsight/api/sdk/models/encrypted_mean.py new file mode 100644 index 0000000..f36b6d8 --- /dev/null +++ b/src/tuneinsight/api/sdk/models/encrypted_mean.py @@ -0,0 
+1,299 @@ +from typing import TYPE_CHECKING, Any, Dict, List, Type, TypeVar, Union, cast + +import attr + +from ..models.computation_type import ComputationType +from ..types import UNSET, Unset + +if TYPE_CHECKING: + from ..models.computation_data_source_parameters import ComputationDataSourceParameters + from ..models.computation_preprocessing_parameters import ComputationPreprocessingParameters + from ..models.dp_policy import DPPolicy + from ..models.local_input import LocalInput + + +T = TypeVar("T", bound="EncryptedMean") + + +@attr.s(auto_attribs=True) +class EncryptedMean: + """ + Attributes: + type (ComputationType): Type of the computation. + local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured + the network) + local_input (Union[Unset, LocalInput]): If a local input is provided, the node initiating the computation will + use it instead of querying the datasource. This data is *not* shared to other nodes, only used for the duration + of the computation. The local input columns/values must be in the form {: [, , ...], + ...} + preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters + applied to the input retrieved from the datasource, if applicable + cohort_id (Union[Unset, str]): Unique identifier of a data object. + end_to_end_encrypted (Union[Unset, bool]): if the end to end encrypted mode is set to true, + then when release results is set to true and the output + is initially encrypted with a network collective key, then it is key switched to + the initiating user's public key. + local_input_id (Union[Unset, str]): Unique identifier of a data object. + wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. + dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various + disclosure prevention mechanisms + join_id (Union[Unset, str]): Unique identifier of a data object. + timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. + encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. + owner (Union[Unset, str]): The username of the end user who requested the computation. + project_id (Union[Unset, str]): Unique identifier of a project. + release_results (Union[Unset, bool]): flag to set to true if the computation should directly release the output + results. + If set, then encrypted results are automatically key switched and decrypted + and a Result entity is saved + data_source_parameters (Union[Unset, ComputationDataSourceParameters]): Parameters used to query the datasource + from each node before the computation + input_data_object (Union[Unset, str]): Shared identifier of a data object. + grouping_keys (Union[Unset, List[str]]): This parameter is used to specify the composite keys for grouping the + aggregated values. + For example, when the groupingKeys are set to [id, name], the aggregation will be performed separately + for values where the attributes id and name differ. + This parameter is essential for grouping the data before applying the aggregation function, + allowing the aggregation to be performed on distinct groups based on the specified keys. + min_participants (Union[Unset, int]): defines the minimum number of participating organizations for each + aggregated group. + outlier_threshold (Union[Unset, float]): the threshold T to use to filter out outlier values. 
+ A value x will be considered an outlier if abs(x - mean) > T * STD. + participant (Union[Unset, str]): optional column that is used to identify the name of the participant. + If empty, the name of the instance will be used instead. + variables (Union[Unset, List[str]]): list of variables to compute the mean on. + """ + + type: ComputationType + local: Union[Unset, bool] = UNSET + local_input: Union[Unset, "LocalInput"] = UNSET + preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET + cohort_id: Union[Unset, str] = UNSET + end_to_end_encrypted: Union[Unset, bool] = UNSET + local_input_id: Union[Unset, str] = UNSET + wait: Union[Unset, bool] = UNSET + dp_policy: Union[Unset, "DPPolicy"] = UNSET + join_id: Union[Unset, str] = UNSET + timeout: Union[Unset, int] = UNSET + encrypted: Union[Unset, bool] = UNSET + owner: Union[Unset, str] = UNSET + project_id: Union[Unset, str] = UNSET + release_results: Union[Unset, bool] = UNSET + data_source_parameters: Union[Unset, "ComputationDataSourceParameters"] = UNSET + input_data_object: Union[Unset, str] = UNSET + grouping_keys: Union[Unset, List[str]] = UNSET + min_participants: Union[Unset, int] = UNSET + outlier_threshold: Union[Unset, float] = UNSET + participant: Union[Unset, str] = UNSET + variables: Union[Unset, List[str]] = UNSET + additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) + + def to_dict(self) -> Dict[str, Any]: + type = self.type.value + + local = self.local + local_input: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.local_input, Unset): + local_input = self.local_input.to_dict() + + preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.preprocessing_parameters, Unset): + preprocessing_parameters = self.preprocessing_parameters.to_dict() + + cohort_id = self.cohort_id + end_to_end_encrypted = self.end_to_end_encrypted + local_input_id = self.local_input_id + wait = self.wait + dp_policy: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.dp_policy, Unset): + dp_policy = self.dp_policy.to_dict() + + join_id = self.join_id + timeout = self.timeout + encrypted = self.encrypted + owner = self.owner + project_id = self.project_id + release_results = self.release_results + data_source_parameters: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.data_source_parameters, Unset): + data_source_parameters = self.data_source_parameters.to_dict() + + input_data_object = self.input_data_object + grouping_keys: Union[Unset, List[str]] = UNSET + if not isinstance(self.grouping_keys, Unset): + grouping_keys = self.grouping_keys + + min_participants = self.min_participants + outlier_threshold = self.outlier_threshold + participant = self.participant + variables: Union[Unset, List[str]] = UNSET + if not isinstance(self.variables, Unset): + variables = self.variables + + field_dict: Dict[str, Any] = {} + field_dict.update(self.additional_properties) + field_dict.update( + { + "type": type, + } + ) + if local is not UNSET: + field_dict["local"] = local + if local_input is not UNSET: + field_dict["localInput"] = local_input + if preprocessing_parameters is not UNSET: + field_dict["preprocessingParameters"] = preprocessing_parameters + if cohort_id is not UNSET: + field_dict["cohortId"] = cohort_id + if end_to_end_encrypted is not UNSET: + field_dict["endToEndEncrypted"] = end_to_end_encrypted + if local_input_id is not UNSET: + field_dict["localInputID"] = local_input_id + if wait is not UNSET: + field_dict["wait"] = wait + if 
dp_policy is not UNSET: + field_dict["DPPolicy"] = dp_policy + if join_id is not UNSET: + field_dict["joinId"] = join_id + if timeout is not UNSET: + field_dict["timeout"] = timeout + if encrypted is not UNSET: + field_dict["encrypted"] = encrypted + if owner is not UNSET: + field_dict["owner"] = owner + if project_id is not UNSET: + field_dict["projectId"] = project_id + if release_results is not UNSET: + field_dict["releaseResults"] = release_results + if data_source_parameters is not UNSET: + field_dict["dataSourceParameters"] = data_source_parameters + if input_data_object is not UNSET: + field_dict["inputDataObject"] = input_data_object + if grouping_keys is not UNSET: + field_dict["groupingKeys"] = grouping_keys + if min_participants is not UNSET: + field_dict["minParticipants"] = min_participants + if outlier_threshold is not UNSET: + field_dict["outlierThreshold"] = outlier_threshold + if participant is not UNSET: + field_dict["participant"] = participant + if variables is not UNSET: + field_dict["variables"] = variables + + return field_dict + + @classmethod + def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: + from ..models.computation_data_source_parameters import ComputationDataSourceParameters + from ..models.computation_preprocessing_parameters import ComputationPreprocessingParameters + from ..models.dp_policy import DPPolicy + from ..models.local_input import LocalInput + + d = src_dict.copy() + type = ComputationType(d.pop("type")) + + local = d.pop("local", UNSET) + + _local_input = d.pop("localInput", UNSET) + local_input: Union[Unset, LocalInput] + if isinstance(_local_input, Unset): + local_input = UNSET + else: + local_input = LocalInput.from_dict(_local_input) + + _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) + preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] + if isinstance(_preprocessing_parameters, Unset): + preprocessing_parameters = UNSET + else: + preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) + + cohort_id = d.pop("cohortId", UNSET) + + end_to_end_encrypted = d.pop("endToEndEncrypted", UNSET) + + local_input_id = d.pop("localInputID", UNSET) + + wait = d.pop("wait", UNSET) + + _dp_policy = d.pop("DPPolicy", UNSET) + dp_policy: Union[Unset, DPPolicy] + if isinstance(_dp_policy, Unset): + dp_policy = UNSET + else: + dp_policy = DPPolicy.from_dict(_dp_policy) + + join_id = d.pop("joinId", UNSET) + + timeout = d.pop("timeout", UNSET) + + encrypted = d.pop("encrypted", UNSET) + + owner = d.pop("owner", UNSET) + + project_id = d.pop("projectId", UNSET) + + release_results = d.pop("releaseResults", UNSET) + + _data_source_parameters = d.pop("dataSourceParameters", UNSET) + data_source_parameters: Union[Unset, ComputationDataSourceParameters] + if isinstance(_data_source_parameters, Unset): + data_source_parameters = UNSET + else: + data_source_parameters = ComputationDataSourceParameters.from_dict(_data_source_parameters) + + input_data_object = d.pop("inputDataObject", UNSET) + + grouping_keys = cast(List[str], d.pop("groupingKeys", UNSET)) + + min_participants = d.pop("minParticipants", UNSET) + + outlier_threshold = d.pop("outlierThreshold", UNSET) + + participant = d.pop("participant", UNSET) + + variables = cast(List[str], d.pop("variables", UNSET)) + + encrypted_mean = cls( + type=type, + local=local, + local_input=local_input, + preprocessing_parameters=preprocessing_parameters, + cohort_id=cohort_id, + end_to_end_encrypted=end_to_end_encrypted, + 
local_input_id=local_input_id, + wait=wait, + dp_policy=dp_policy, + join_id=join_id, + timeout=timeout, + encrypted=encrypted, + owner=owner, + project_id=project_id, + release_results=release_results, + data_source_parameters=data_source_parameters, + input_data_object=input_data_object, + grouping_keys=grouping_keys, + min_participants=min_participants, + outlier_threshold=outlier_threshold, + participant=participant, + variables=variables, + ) + + encrypted_mean.additional_properties = d + return encrypted_mean + + @property + def additional_keys(self) -> List[str]: + return list(self.additional_properties.keys()) + + def __getitem__(self, key: str) -> Any: + return self.additional_properties[key] + + def __setitem__(self, key: str, value: Any) -> None: + self.additional_properties[key] = value + + def __delitem__(self, key: str) -> None: + del self.additional_properties[key] + + def __contains__(self, key: str) -> bool: + return key in self.additional_properties diff --git a/src/tuneinsight/api/sdk/models/encrypted_prediction.py b/src/tuneinsight/api/sdk/models/encrypted_prediction.py index b701074..5cc3200 100644 --- a/src/tuneinsight/api/sdk/models/encrypted_prediction.py +++ b/src/tuneinsight/api/sdk/models/encrypted_prediction.py @@ -20,34 +20,35 @@ class EncryptedPrediction: """ Attributes: type (ComputationType): Type of the computation. - join_id (Union[Unset, str]): Unique identifier of a data object. + local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured + the network) local_input (Union[Unset, LocalInput]): If a local input is provided, the node initiating the computation will use it instead of querying the datasource. This data is *not* shared to other nodes, only used for the duration of the computation. The local input columns/values must be in the form {: [, , ...], ...} - wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. - input_data_object (Union[Unset, str]): Shared identifier of a data object. + preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters + applied to the input retrieved from the datasource, if applicable cohort_id (Union[Unset, str]): Unique identifier of a data object. + end_to_end_encrypted (Union[Unset, bool]): if the end to end encrypted mode is set to true, + then when release results is set to true and the output + is initially encrypted with a network collective key, then it is key switched to + the initiating user's public key. + local_input_id (Union[Unset, str]): Unique identifier of a data object. + wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. + dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various + disclosure prevention mechanisms + join_id (Union[Unset, str]): Unique identifier of a data object. + timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. + encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. + owner (Union[Unset, str]): The username of the end user who requested the computation. project_id (Union[Unset, str]): Unique identifier of a project. release_results (Union[Unset, bool]): flag to set to true if the computation should directly release the output results. 
If set, then encrypted results are automatically key switched and decrypted and a Result entity is saved - dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various - disclosure prevention mechanisms - encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. - local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured - the network) - local_input_id (Union[Unset, str]): Unique identifier of a data object. - owner (Union[Unset, str]): The username of the end user who requested the computation. - preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters - applied to the input retrieved from the datasource, if applicable - timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. data_source_parameters (Union[Unset, ComputationDataSourceParameters]): Parameters used to query the datasource from each node before the computation - data (Union[Unset, str]): Unique identifier of a data object. - feature_columns (Union[Unset, List[str]]): specified features columns, not required, if not specified, all - columns from the input dataset are used + input_data_object (Union[Unset, str]): Shared identifier of a data object. include_ground_truth_labels (Union[Unset, bool]): if true, then the result should contain the associated ground truth labels label_columns (Union[Unset, List[str]]): specified label columns of the original dataset if the computation @@ -55,67 +56,67 @@ class EncryptedPrediction: model (Union[Unset, str]): Unique identifier of a data object. only_root_prediction (Union[Unset, bool]): if true and the computation is not local, then the prediction is only computed by the root node and the ct is broadcast to other nodes for later key switch + data (Union[Unset, str]): Unique identifier of a data object. 
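Stepping back from the per-field reordering for a moment: the new encrypted_mean.py model added earlier in this diff follows the same attrs/to_dict/from_dict pattern as every other generated computation model, so it can be constructed and serialized the same way. A minimal sketch of that, assuming the module path shown in the diff and an ENCRYPTED_MEAN member on the ComputationType enum (the member name and the column names are assumptions for illustration, not taken from this diff):

    # Hedged sketch: build the new EncryptedMean computation definition and
    # round-trip it through the generated to_dict()/from_dict() helpers.
    from tuneinsight.api.sdk.models.computation_type import ComputationType
    from tuneinsight.api.sdk.models.encrypted_mean import EncryptedMean

    mean_def = EncryptedMean(
        type=ComputationType.ENCRYPTED_MEAN,  # assumed enum member; check the generated computation_type.py
        variables=["age", "bmi"],              # columns to compute the mean on
        grouping_keys=["site"],                # aggregate separately per distinct key value
        min_participants=3,                    # minimum participating organizations per aggregated group
        outlier_threshold=3.0,                 # drop x when abs(x - mean) > 3 * STD
        end_to_end_encrypted=True,             # key-switch released results to the requester's key
        release_results=True,
        wait=True,
    )

    payload = mean_def.to_dict()               # camelCase keys: "groupingKeys", "minParticipants", ...
    restored = EncryptedMean.from_dict(payload)
    assert restored.variables == ["age", "bmi"]

Fields left unset (joinId, timeout, DPPolicy, and so on) are simply omitted from the payload, which matches how the other computation models behave.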
+ feature_columns (Union[Unset, List[str]]): specified features columns, not required, if not specified, all + columns from the input dataset are used """ type: ComputationType - join_id: Union[Unset, str] = UNSET + local: Union[Unset, bool] = UNSET local_input: Union[Unset, "LocalInput"] = UNSET - wait: Union[Unset, bool] = UNSET - input_data_object: Union[Unset, str] = UNSET + preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET cohort_id: Union[Unset, str] = UNSET - project_id: Union[Unset, str] = UNSET - release_results: Union[Unset, bool] = UNSET + end_to_end_encrypted: Union[Unset, bool] = UNSET + local_input_id: Union[Unset, str] = UNSET + wait: Union[Unset, bool] = UNSET dp_policy: Union[Unset, "DPPolicy"] = UNSET + join_id: Union[Unset, str] = UNSET + timeout: Union[Unset, int] = UNSET encrypted: Union[Unset, bool] = UNSET - local: Union[Unset, bool] = UNSET - local_input_id: Union[Unset, str] = UNSET owner: Union[Unset, str] = UNSET - preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET - timeout: Union[Unset, int] = UNSET + project_id: Union[Unset, str] = UNSET + release_results: Union[Unset, bool] = UNSET data_source_parameters: Union[Unset, "ComputationDataSourceParameters"] = UNSET - data: Union[Unset, str] = UNSET - feature_columns: Union[Unset, List[str]] = UNSET + input_data_object: Union[Unset, str] = UNSET include_ground_truth_labels: Union[Unset, bool] = UNSET label_columns: Union[Unset, List[str]] = UNSET model: Union[Unset, str] = UNSET only_root_prediction: Union[Unset, bool] = UNSET + data: Union[Unset, str] = UNSET + feature_columns: Union[Unset, List[str]] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: type = self.type.value - join_id = self.join_id + local = self.local local_input: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.local_input, Unset): local_input = self.local_input.to_dict() - wait = self.wait - input_data_object = self.input_data_object + preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.preprocessing_parameters, Unset): + preprocessing_parameters = self.preprocessing_parameters.to_dict() + cohort_id = self.cohort_id - project_id = self.project_id - release_results = self.release_results + end_to_end_encrypted = self.end_to_end_encrypted + local_input_id = self.local_input_id + wait = self.wait dp_policy: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.dp_policy, Unset): dp_policy = self.dp_policy.to_dict() + join_id = self.join_id + timeout = self.timeout encrypted = self.encrypted - local = self.local - local_input_id = self.local_input_id owner = self.owner - preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.preprocessing_parameters, Unset): - preprocessing_parameters = self.preprocessing_parameters.to_dict() - - timeout = self.timeout + project_id = self.project_id + release_results = self.release_results data_source_parameters: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.data_source_parameters, Unset): data_source_parameters = self.data_source_parameters.to_dict() - data = self.data - feature_columns: Union[Unset, List[str]] = UNSET - if not isinstance(self.feature_columns, Unset): - feature_columns = self.feature_columns - + input_data_object = self.input_data_object include_ground_truth_labels = self.include_ground_truth_labels label_columns: Union[Unset, List[str]] = UNSET if not 
isinstance(self.label_columns, Unset): @@ -123,6 +124,10 @@ def to_dict(self) -> Dict[str, Any]: model = self.model only_root_prediction = self.only_root_prediction + data = self.data + feature_columns: Union[Unset, List[str]] = UNSET + if not isinstance(self.feature_columns, Unset): + feature_columns = self.feature_columns field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) @@ -131,40 +136,38 @@ def to_dict(self) -> Dict[str, Any]: "type": type, } ) - if join_id is not UNSET: - field_dict["joinId"] = join_id + if local is not UNSET: + field_dict["local"] = local if local_input is not UNSET: field_dict["localInput"] = local_input - if wait is not UNSET: - field_dict["wait"] = wait - if input_data_object is not UNSET: - field_dict["inputDataObject"] = input_data_object + if preprocessing_parameters is not UNSET: + field_dict["preprocessingParameters"] = preprocessing_parameters if cohort_id is not UNSET: field_dict["cohortId"] = cohort_id - if project_id is not UNSET: - field_dict["projectId"] = project_id - if release_results is not UNSET: - field_dict["releaseResults"] = release_results + if end_to_end_encrypted is not UNSET: + field_dict["endToEndEncrypted"] = end_to_end_encrypted + if local_input_id is not UNSET: + field_dict["localInputID"] = local_input_id + if wait is not UNSET: + field_dict["wait"] = wait if dp_policy is not UNSET: field_dict["DPPolicy"] = dp_policy + if join_id is not UNSET: + field_dict["joinId"] = join_id + if timeout is not UNSET: + field_dict["timeout"] = timeout if encrypted is not UNSET: field_dict["encrypted"] = encrypted - if local is not UNSET: - field_dict["local"] = local - if local_input_id is not UNSET: - field_dict["localInputID"] = local_input_id if owner is not UNSET: field_dict["owner"] = owner - if preprocessing_parameters is not UNSET: - field_dict["preprocessingParameters"] = preprocessing_parameters - if timeout is not UNSET: - field_dict["timeout"] = timeout + if project_id is not UNSET: + field_dict["projectId"] = project_id + if release_results is not UNSET: + field_dict["releaseResults"] = release_results if data_source_parameters is not UNSET: field_dict["dataSourceParameters"] = data_source_parameters - if data is not UNSET: - field_dict["data"] = data - if feature_columns is not UNSET: - field_dict["featureColumns"] = feature_columns + if input_data_object is not UNSET: + field_dict["inputDataObject"] = input_data_object if include_ground_truth_labels is not UNSET: field_dict["includeGroundTruthLabels"] = include_ground_truth_labels if label_columns is not UNSET: @@ -173,6 +176,10 @@ def to_dict(self) -> Dict[str, Any]: field_dict["model"] = model if only_root_prediction is not UNSET: field_dict["onlyRootPrediction"] = only_root_prediction + if data is not UNSET: + field_dict["data"] = data + if feature_columns is not UNSET: + field_dict["featureColumns"] = feature_columns return field_dict @@ -186,7 +193,7 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() type = ComputationType(d.pop("type")) - join_id = d.pop("joinId", UNSET) + local = d.pop("local", UNSET) _local_input = d.pop("localInput", UNSET) local_input: Union[Unset, LocalInput] @@ -195,15 +202,20 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: local_input = LocalInput.from_dict(_local_input) - wait = d.pop("wait", UNSET) - - input_data_object = d.pop("inputDataObject", UNSET) + _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) + preprocessing_parameters: Union[Unset, 
ComputationPreprocessingParameters] + if isinstance(_preprocessing_parameters, Unset): + preprocessing_parameters = UNSET + else: + preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) cohort_id = d.pop("cohortId", UNSET) - project_id = d.pop("projectId", UNSET) + end_to_end_encrypted = d.pop("endToEndEncrypted", UNSET) - release_results = d.pop("releaseResults", UNSET) + local_input_id = d.pop("localInputID", UNSET) + + wait = d.pop("wait", UNSET) _dp_policy = d.pop("DPPolicy", UNSET) dp_policy: Union[Unset, DPPolicy] @@ -212,22 +224,17 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: dp_policy = DPPolicy.from_dict(_dp_policy) - encrypted = d.pop("encrypted", UNSET) + join_id = d.pop("joinId", UNSET) - local = d.pop("local", UNSET) + timeout = d.pop("timeout", UNSET) - local_input_id = d.pop("localInputID", UNSET) + encrypted = d.pop("encrypted", UNSET) owner = d.pop("owner", UNSET) - _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) - preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] - if isinstance(_preprocessing_parameters, Unset): - preprocessing_parameters = UNSET - else: - preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) + project_id = d.pop("projectId", UNSET) - timeout = d.pop("timeout", UNSET) + release_results = d.pop("releaseResults", UNSET) _data_source_parameters = d.pop("dataSourceParameters", UNSET) data_source_parameters: Union[Unset, ComputationDataSourceParameters] @@ -236,9 +243,7 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: data_source_parameters = ComputationDataSourceParameters.from_dict(_data_source_parameters) - data = d.pop("data", UNSET) - - feature_columns = cast(List[str], d.pop("featureColumns", UNSET)) + input_data_object = d.pop("inputDataObject", UNSET) include_ground_truth_labels = d.pop("includeGroundTruthLabels", UNSET) @@ -248,29 +253,34 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: only_root_prediction = d.pop("onlyRootPrediction", UNSET) + data = d.pop("data", UNSET) + + feature_columns = cast(List[str], d.pop("featureColumns", UNSET)) + encrypted_prediction = cls( type=type, - join_id=join_id, + local=local, local_input=local_input, - wait=wait, - input_data_object=input_data_object, + preprocessing_parameters=preprocessing_parameters, cohort_id=cohort_id, - project_id=project_id, - release_results=release_results, + end_to_end_encrypted=end_to_end_encrypted, + local_input_id=local_input_id, + wait=wait, dp_policy=dp_policy, + join_id=join_id, + timeout=timeout, encrypted=encrypted, - local=local, - local_input_id=local_input_id, owner=owner, - preprocessing_parameters=preprocessing_parameters, - timeout=timeout, + project_id=project_id, + release_results=release_results, data_source_parameters=data_source_parameters, - data=data, - feature_columns=feature_columns, + input_data_object=input_data_object, include_ground_truth_labels=include_ground_truth_labels, label_columns=label_columns, model=model, only_root_prediction=only_root_prediction, + data=data, + feature_columns=feature_columns, ) encrypted_prediction.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/encrypted_regression.py b/src/tuneinsight/api/sdk/models/encrypted_regression.py index fe401d5..8113dc4 100644 --- a/src/tuneinsight/api/sdk/models/encrypted_regression.py +++ b/src/tuneinsight/api/sdk/models/encrypted_regression.py @@ -21,90 +21,97 @@ class 
EncryptedRegression: """ Attributes: type (ComputationType): Type of the computation. - join_id (Union[Unset, str]): Unique identifier of a data object. + local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured + the network) local_input (Union[Unset, LocalInput]): If a local input is provided, the node initiating the computation will use it instead of querying the datasource. This data is *not* shared to other nodes, only used for the duration of the computation. The local input columns/values must be in the form {: [, , ...], ...} - wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. - input_data_object (Union[Unset, str]): Shared identifier of a data object. + preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters + applied to the input retrieved from the datasource, if applicable cohort_id (Union[Unset, str]): Unique identifier of a data object. + end_to_end_encrypted (Union[Unset, bool]): if the end to end encrypted mode is set to true, + then when release results is set to true and the output + is initially encrypted with a network collective key, then it is key switched to + the initiating user's public key. + local_input_id (Union[Unset, str]): Unique identifier of a data object. + wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. + dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various + disclosure prevention mechanisms + join_id (Union[Unset, str]): Unique identifier of a data object. + timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. + encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. + owner (Union[Unset, str]): The username of the end user who requested the computation. project_id (Union[Unset, str]): Unique identifier of a project. release_results (Union[Unset, bool]): flag to set to true if the computation should directly release the output results. If set, then encrypted results are automatically key switched and decrypted and a Result entity is saved - dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various - disclosure prevention mechanisms - encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. - local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured - the network) - local_input_id (Union[Unset, str]): Unique identifier of a data object. - owner (Union[Unset, str]): The username of the end user who requested the computation. - preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters - applied to the input retrieved from the datasource, if applicable - timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. data_source_parameters (Union[Unset, ComputationDataSourceParameters]): Parameters used to query the datasource from each node before the computation + input_data_object (Union[Unset, str]): Shared identifier of a data object. 
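The EncryptedRegression hunks here, and the encrypted_regression_params.py hunks further down, only reorder attributes; the field names and the documented defaults (elastic rate 0.85, learning rate 0.02, local batch size 64, momentum 0.92, iteration counts 1) are unchanged, so keyword-argument construction keeps working. A rough sketch under that assumption (the ComputationType member name and the column names are illustrative, not taken from this diff):

    # Hedged sketch: nested parameter models still serialize recursively via to_dict().
    from tuneinsight.api.sdk.models.computation_type import ComputationType
    from tuneinsight.api.sdk.models.encrypted_regression import EncryptedRegression
    from tuneinsight.api.sdk.models.encrypted_regression_params import EncryptedRegressionParams

    params = EncryptedRegressionParams(
        learning_rate=0.05,       # overrides the 0.02 default
        local_batch_size=32,      # overrides the 64 default
    )

    regression = EncryptedRegression(
        type=ComputationType.ENCRYPTED_REGRESSION,  # assumed enum member; check the generated computation_type.py
        feature_columns=["age", "bmi"],
        label_columns=["outcome"],
        target_model_name="demo-model",
        params=params,
    )

    body = regression.to_dict()
    assert body["params"]["learningRate"] == 0.05   # nested model serialized in place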
+ target_model_name (Union[Unset, str]): common name to give to the output model feature_columns (Union[Unset, List[str]]): specified columns from the input dataset corresponding to the features label_columns (Union[Unset, List[str]]): specified columns from the input dataset corresponding to the labels params (Union[Unset, EncryptedRegressionParams]): Parameters for the encrypted regression. - target_model_name (Union[Unset, str]): common name to give to the output model """ type: ComputationType - join_id: Union[Unset, str] = UNSET + local: Union[Unset, bool] = UNSET local_input: Union[Unset, "LocalInput"] = UNSET - wait: Union[Unset, bool] = UNSET - input_data_object: Union[Unset, str] = UNSET + preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET cohort_id: Union[Unset, str] = UNSET - project_id: Union[Unset, str] = UNSET - release_results: Union[Unset, bool] = UNSET + end_to_end_encrypted: Union[Unset, bool] = UNSET + local_input_id: Union[Unset, str] = UNSET + wait: Union[Unset, bool] = UNSET dp_policy: Union[Unset, "DPPolicy"] = UNSET + join_id: Union[Unset, str] = UNSET + timeout: Union[Unset, int] = UNSET encrypted: Union[Unset, bool] = UNSET - local: Union[Unset, bool] = UNSET - local_input_id: Union[Unset, str] = UNSET owner: Union[Unset, str] = UNSET - preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET - timeout: Union[Unset, int] = UNSET + project_id: Union[Unset, str] = UNSET + release_results: Union[Unset, bool] = UNSET data_source_parameters: Union[Unset, "ComputationDataSourceParameters"] = UNSET + input_data_object: Union[Unset, str] = UNSET + target_model_name: Union[Unset, str] = UNSET feature_columns: Union[Unset, List[str]] = UNSET label_columns: Union[Unset, List[str]] = UNSET params: Union[Unset, "EncryptedRegressionParams"] = UNSET - target_model_name: Union[Unset, str] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: type = self.type.value - join_id = self.join_id + local = self.local local_input: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.local_input, Unset): local_input = self.local_input.to_dict() - wait = self.wait - input_data_object = self.input_data_object + preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.preprocessing_parameters, Unset): + preprocessing_parameters = self.preprocessing_parameters.to_dict() + cohort_id = self.cohort_id - project_id = self.project_id - release_results = self.release_results + end_to_end_encrypted = self.end_to_end_encrypted + local_input_id = self.local_input_id + wait = self.wait dp_policy: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.dp_policy, Unset): dp_policy = self.dp_policy.to_dict() + join_id = self.join_id + timeout = self.timeout encrypted = self.encrypted - local = self.local - local_input_id = self.local_input_id owner = self.owner - preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.preprocessing_parameters, Unset): - preprocessing_parameters = self.preprocessing_parameters.to_dict() - - timeout = self.timeout + project_id = self.project_id + release_results = self.release_results data_source_parameters: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.data_source_parameters, Unset): data_source_parameters = self.data_source_parameters.to_dict() + input_data_object = self.input_data_object + target_model_name = self.target_model_name feature_columns: 
Union[Unset, List[str]] = UNSET if not isinstance(self.feature_columns, Unset): feature_columns = self.feature_columns @@ -117,8 +124,6 @@ def to_dict(self) -> Dict[str, Any]: if not isinstance(self.params, Unset): params = self.params.to_dict() - target_model_name = self.target_model_name - field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update( @@ -126,44 +131,46 @@ def to_dict(self) -> Dict[str, Any]: "type": type, } ) - if join_id is not UNSET: - field_dict["joinId"] = join_id + if local is not UNSET: + field_dict["local"] = local if local_input is not UNSET: field_dict["localInput"] = local_input - if wait is not UNSET: - field_dict["wait"] = wait - if input_data_object is not UNSET: - field_dict["inputDataObject"] = input_data_object + if preprocessing_parameters is not UNSET: + field_dict["preprocessingParameters"] = preprocessing_parameters if cohort_id is not UNSET: field_dict["cohortId"] = cohort_id - if project_id is not UNSET: - field_dict["projectId"] = project_id - if release_results is not UNSET: - field_dict["releaseResults"] = release_results + if end_to_end_encrypted is not UNSET: + field_dict["endToEndEncrypted"] = end_to_end_encrypted + if local_input_id is not UNSET: + field_dict["localInputID"] = local_input_id + if wait is not UNSET: + field_dict["wait"] = wait if dp_policy is not UNSET: field_dict["DPPolicy"] = dp_policy + if join_id is not UNSET: + field_dict["joinId"] = join_id + if timeout is not UNSET: + field_dict["timeout"] = timeout if encrypted is not UNSET: field_dict["encrypted"] = encrypted - if local is not UNSET: - field_dict["local"] = local - if local_input_id is not UNSET: - field_dict["localInputID"] = local_input_id if owner is not UNSET: field_dict["owner"] = owner - if preprocessing_parameters is not UNSET: - field_dict["preprocessingParameters"] = preprocessing_parameters - if timeout is not UNSET: - field_dict["timeout"] = timeout + if project_id is not UNSET: + field_dict["projectId"] = project_id + if release_results is not UNSET: + field_dict["releaseResults"] = release_results if data_source_parameters is not UNSET: field_dict["dataSourceParameters"] = data_source_parameters + if input_data_object is not UNSET: + field_dict["inputDataObject"] = input_data_object + if target_model_name is not UNSET: + field_dict["targetModelName"] = target_model_name if feature_columns is not UNSET: field_dict["featureColumns"] = feature_columns if label_columns is not UNSET: field_dict["labelColumns"] = label_columns if params is not UNSET: field_dict["params"] = params - if target_model_name is not UNSET: - field_dict["targetModelName"] = target_model_name return field_dict @@ -178,7 +185,7 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() type = ComputationType(d.pop("type")) - join_id = d.pop("joinId", UNSET) + local = d.pop("local", UNSET) _local_input = d.pop("localInput", UNSET) local_input: Union[Unset, LocalInput] @@ -187,15 +194,20 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: local_input = LocalInput.from_dict(_local_input) - wait = d.pop("wait", UNSET) - - input_data_object = d.pop("inputDataObject", UNSET) + _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) + preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] + if isinstance(_preprocessing_parameters, Unset): + preprocessing_parameters = UNSET + else: + preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) 
cohort_id = d.pop("cohortId", UNSET) - project_id = d.pop("projectId", UNSET) + end_to_end_encrypted = d.pop("endToEndEncrypted", UNSET) - release_results = d.pop("releaseResults", UNSET) + local_input_id = d.pop("localInputID", UNSET) + + wait = d.pop("wait", UNSET) _dp_policy = d.pop("DPPolicy", UNSET) dp_policy: Union[Unset, DPPolicy] @@ -204,22 +216,17 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: dp_policy = DPPolicy.from_dict(_dp_policy) - encrypted = d.pop("encrypted", UNSET) + join_id = d.pop("joinId", UNSET) - local = d.pop("local", UNSET) + timeout = d.pop("timeout", UNSET) - local_input_id = d.pop("localInputID", UNSET) + encrypted = d.pop("encrypted", UNSET) owner = d.pop("owner", UNSET) - _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) - preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] - if isinstance(_preprocessing_parameters, Unset): - preprocessing_parameters = UNSET - else: - preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) + project_id = d.pop("projectId", UNSET) - timeout = d.pop("timeout", UNSET) + release_results = d.pop("releaseResults", UNSET) _data_source_parameters = d.pop("dataSourceParameters", UNSET) data_source_parameters: Union[Unset, ComputationDataSourceParameters] @@ -228,6 +235,10 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: data_source_parameters = ComputationDataSourceParameters.from_dict(_data_source_parameters) + input_data_object = d.pop("inputDataObject", UNSET) + + target_model_name = d.pop("targetModelName", UNSET) + feature_columns = cast(List[str], d.pop("featureColumns", UNSET)) label_columns = cast(List[str], d.pop("labelColumns", UNSET)) @@ -239,29 +250,28 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: params = EncryptedRegressionParams.from_dict(_params) - target_model_name = d.pop("targetModelName", UNSET) - encrypted_regression = cls( type=type, - join_id=join_id, + local=local, local_input=local_input, - wait=wait, - input_data_object=input_data_object, + preprocessing_parameters=preprocessing_parameters, cohort_id=cohort_id, - project_id=project_id, - release_results=release_results, + end_to_end_encrypted=end_to_end_encrypted, + local_input_id=local_input_id, + wait=wait, dp_policy=dp_policy, + join_id=join_id, + timeout=timeout, encrypted=encrypted, - local=local, - local_input_id=local_input_id, owner=owner, - preprocessing_parameters=preprocessing_parameters, - timeout=timeout, + project_id=project_id, + release_results=release_results, data_source_parameters=data_source_parameters, + input_data_object=input_data_object, + target_model_name=target_model_name, feature_columns=feature_columns, label_columns=label_columns, params=params, - target_model_name=target_model_name, ) encrypted_regression.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/encrypted_regression_params.py b/src/tuneinsight/api/sdk/models/encrypted_regression_params.py index 38f875c..4ef0986 100644 --- a/src/tuneinsight/api/sdk/models/encrypted_regression_params.py +++ b/src/tuneinsight/api/sdk/models/encrypted_regression_params.py @@ -18,74 +18,73 @@ class EncryptedRegressionParams: """Parameters for the encrypted regression. Attributes: - approximation_params (Union[Unset, ApproximationParams]): parameters for polynomial approximation - local_iteration_count (Union[Unset, int]): The maximum number of local iterations. Default: 1. 
- seed (Union[Unset, float]): The seed to sample the initial weights. - type (Union[Unset, RegressionType]): type of the regression elastic_rate (Union[Unset, float]): The elastic rate of the regression. Default: 0.85. learning_rate (Union[Unset, float]): The learning rate of the regression. Default: 0.02. + local_iteration_count (Union[Unset, int]): The maximum number of local iterations. Default: 1. + network_iteration_count (Union[Unset, int]): The global maximum number of iteration. Default: 1. + seed (Union[Unset, float]): The seed to sample the initial weights. + approximation_params (Union[Unset, ApproximationParams]): parameters for polynomial approximation linear (Union[Unset, EncryptedRegressionParamsLinear]): Parameters specific for the linear regression. local_batch_size (Union[Unset, int]): The batch size in each local iteration. Default: 64. momentum (Union[Unset, float]): The momentum rate of the regression. Default: 0.92. - network_iteration_count (Union[Unset, int]): The global maximum number of iteration. Default: 1. + type (Union[Unset, RegressionType]): type of the regression """ - approximation_params: Union[Unset, "ApproximationParams"] = UNSET - local_iteration_count: Union[Unset, int] = 1 - seed: Union[Unset, float] = 0.0 - type: Union[Unset, RegressionType] = UNSET elastic_rate: Union[Unset, float] = 0.85 learning_rate: Union[Unset, float] = 0.02 + local_iteration_count: Union[Unset, int] = 1 + network_iteration_count: Union[Unset, int] = 1 + seed: Union[Unset, float] = 0.0 + approximation_params: Union[Unset, "ApproximationParams"] = UNSET linear: Union[Unset, "EncryptedRegressionParamsLinear"] = UNSET local_batch_size: Union[Unset, int] = 64 momentum: Union[Unset, float] = 0.92 - network_iteration_count: Union[Unset, int] = 1 + type: Union[Unset, RegressionType] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: + elastic_rate = self.elastic_rate + learning_rate = self.learning_rate + local_iteration_count = self.local_iteration_count + network_iteration_count = self.network_iteration_count + seed = self.seed approximation_params: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.approximation_params, Unset): approximation_params = self.approximation_params.to_dict() - local_iteration_count = self.local_iteration_count - seed = self.seed - type: Union[Unset, str] = UNSET - if not isinstance(self.type, Unset): - type = self.type.value - - elastic_rate = self.elastic_rate - learning_rate = self.learning_rate linear: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.linear, Unset): linear = self.linear.to_dict() local_batch_size = self.local_batch_size momentum = self.momentum - network_iteration_count = self.network_iteration_count + type: Union[Unset, str] = UNSET + if not isinstance(self.type, Unset): + type = self.type.value field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if approximation_params is not UNSET: - field_dict["approximationParams"] = approximation_params - if local_iteration_count is not UNSET: - field_dict["localIterationCount"] = local_iteration_count - if seed is not UNSET: - field_dict["seed"] = seed - if type is not UNSET: - field_dict["type"] = type if elastic_rate is not UNSET: field_dict["elasticRate"] = elastic_rate if learning_rate is not UNSET: field_dict["learningRate"] = learning_rate + if local_iteration_count is not UNSET: + field_dict["localIterationCount"] = local_iteration_count + if 
network_iteration_count is not UNSET: + field_dict["networkIterationCount"] = network_iteration_count + if seed is not UNSET: + field_dict["seed"] = seed + if approximation_params is not UNSET: + field_dict["approximationParams"] = approximation_params if linear is not UNSET: field_dict["linear"] = linear if local_batch_size is not UNSET: field_dict["localBatchSize"] = local_batch_size if momentum is not UNSET: field_dict["momentum"] = momentum - if network_iteration_count is not UNSET: - field_dict["networkIterationCount"] = network_iteration_count + if type is not UNSET: + field_dict["type"] = type return field_dict @@ -95,27 +94,22 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.encrypted_regression_params_linear import EncryptedRegressionParamsLinear d = src_dict.copy() - _approximation_params = d.pop("approximationParams", UNSET) - approximation_params: Union[Unset, ApproximationParams] - if isinstance(_approximation_params, Unset): - approximation_params = UNSET - else: - approximation_params = ApproximationParams.from_dict(_approximation_params) + elastic_rate = d.pop("elasticRate", UNSET) + + learning_rate = d.pop("learningRate", UNSET) local_iteration_count = d.pop("localIterationCount", UNSET) + network_iteration_count = d.pop("networkIterationCount", UNSET) + seed = d.pop("seed", UNSET) - _type = d.pop("type", UNSET) - type: Union[Unset, RegressionType] - if isinstance(_type, Unset): - type = UNSET + _approximation_params = d.pop("approximationParams", UNSET) + approximation_params: Union[Unset, ApproximationParams] + if isinstance(_approximation_params, Unset): + approximation_params = UNSET else: - type = RegressionType(_type) - - elastic_rate = d.pop("elasticRate", UNSET) - - learning_rate = d.pop("learningRate", UNSET) + approximation_params = ApproximationParams.from_dict(_approximation_params) _linear = d.pop("linear", UNSET) linear: Union[Unset, EncryptedRegressionParamsLinear] @@ -128,19 +122,24 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: momentum = d.pop("momentum", UNSET) - network_iteration_count = d.pop("networkIterationCount", UNSET) + _type = d.pop("type", UNSET) + type: Union[Unset, RegressionType] + if isinstance(_type, Unset): + type = UNSET + else: + type = RegressionType(_type) encrypted_regression_params = cls( - approximation_params=approximation_params, - local_iteration_count=local_iteration_count, - seed=seed, - type=type, elastic_rate=elastic_rate, learning_rate=learning_rate, + local_iteration_count=local_iteration_count, + network_iteration_count=network_iteration_count, + seed=seed, + approximation_params=approximation_params, linear=linear, local_batch_size=local_batch_size, momentum=momentum, - network_iteration_count=network_iteration_count, + type=type, ) encrypted_regression_params.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/external_ml_history.py b/src/tuneinsight/api/sdk/models/external_ml_history.py index 2ffa0c3..29c92f0 100644 --- a/src/tuneinsight/api/sdk/models/external_ml_history.py +++ b/src/tuneinsight/api/sdk/models/external_ml_history.py @@ -12,28 +12,22 @@ class ExternalMlHistory: """Training history of external ML containing the evolution of the metrics during training Attributes: - end_timestamps (List[List[float]]): Ending timestamps of local training epochs in unix milliseconds timestamps metrics (List[str]): Metrics at each round at each local epoch start_timestamps (List[List[float]]): Starting timestamps of local training epochs in unix milliseconds 
timestamps + end_timestamps (List[List[float]]): Ending timestamps of local training epochs in unix milliseconds timestamps init_metrics (Union[Unset, List[str]]): Metrics at each round before local training init_timestamps (Union[Unset, List[float]]): Init timestamps of local training in unix milliseconds timestamps """ - end_timestamps: List[List[float]] metrics: List[str] start_timestamps: List[List[float]] + end_timestamps: List[List[float]] init_metrics: Union[Unset, List[str]] = UNSET init_timestamps: Union[Unset, List[float]] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - end_timestamps = [] - for end_timestamps_item_data in self.end_timestamps: - end_timestamps_item = end_timestamps_item_data - - end_timestamps.append(end_timestamps_item) - metrics = self.metrics start_timestamps = [] @@ -42,6 +36,12 @@ def to_dict(self) -> Dict[str, Any]: start_timestamps.append(start_timestamps_item) + end_timestamps = [] + for end_timestamps_item_data in self.end_timestamps: + end_timestamps_item = end_timestamps_item_data + + end_timestamps.append(end_timestamps_item) + init_metrics: Union[Unset, List[str]] = UNSET if not isinstance(self.init_metrics, Unset): init_metrics = self.init_metrics @@ -54,9 +54,9 @@ def to_dict(self) -> Dict[str, Any]: field_dict.update(self.additional_properties) field_dict.update( { - "endTimestamps": end_timestamps, "metrics": metrics, "startTimestamps": start_timestamps, + "endTimestamps": end_timestamps, } ) if init_metrics is not UNSET: @@ -69,13 +69,6 @@ def to_dict(self) -> Dict[str, Any]: @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() - end_timestamps = [] - _end_timestamps = d.pop("endTimestamps") - for end_timestamps_item_data in _end_timestamps: - end_timestamps_item = cast(List[float], end_timestamps_item_data) - - end_timestamps.append(end_timestamps_item) - metrics = cast(List[str], d.pop("metrics")) start_timestamps = [] @@ -85,14 +78,21 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: start_timestamps.append(start_timestamps_item) + end_timestamps = [] + _end_timestamps = d.pop("endTimestamps") + for end_timestamps_item_data in _end_timestamps: + end_timestamps_item = cast(List[float], end_timestamps_item_data) + + end_timestamps.append(end_timestamps_item) + init_metrics = cast(List[str], d.pop("initMetrics", UNSET)) init_timestamps = cast(List[float], d.pop("initTimestamps", UNSET)) external_ml_history = cls( - end_timestamps=end_timestamps, metrics=metrics, start_timestamps=start_timestamps, + end_timestamps=end_timestamps, init_metrics=init_metrics, init_timestamps=init_timestamps, ) diff --git a/src/tuneinsight/api/sdk/models/extract_dict_field.py b/src/tuneinsight/api/sdk/models/extract_dict_field.py index cc09675..e6a1a1f 100644 --- a/src/tuneinsight/api/sdk/models/extract_dict_field.py +++ b/src/tuneinsight/api/sdk/models/extract_dict_field.py @@ -14,29 +14,29 @@ class ExtractDictField: Attributes: type (PreprocessingOperationType): type of preprocessing operation field (str): name of the dictionary field to extract + cols (Union[Unset, List[str]]): cols from which to extract field names (Union[Unset, List[str]]): names of new columns with extracted fields (if none, no new columns are created) - cols (Union[Unset, List[str]]): cols from which to extract field """ type: PreprocessingOperationType field: str - names: Union[Unset, List[str]] = UNSET cols: Union[Unset, List[str]] = UNSET + names: Union[Unset, 
List[str]] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: type = self.type.value field = self.field - names: Union[Unset, List[str]] = UNSET - if not isinstance(self.names, Unset): - names = self.names - cols: Union[Unset, List[str]] = UNSET if not isinstance(self.cols, Unset): cols = self.cols + names: Union[Unset, List[str]] = UNSET + if not isinstance(self.names, Unset): + names = self.names + field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update( @@ -45,10 +45,10 @@ def to_dict(self) -> Dict[str, Any]: "field": field, } ) - if names is not UNSET: - field_dict["names"] = names if cols is not UNSET: field_dict["cols"] = cols + if names is not UNSET: + field_dict["names"] = names return field_dict @@ -59,15 +59,15 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: field = d.pop("field") - names = cast(List[str], d.pop("names", UNSET)) - cols = cast(List[str], d.pop("cols", UNSET)) + names = cast(List[str], d.pop("names", UNSET)) + extract_dict_field = cls( type=type, field=field, - names=names, cols=cols, + names=names, ) extract_dict_field.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/float_matrix.py b/src/tuneinsight/api/sdk/models/float_matrix.py index 4bc40e1..deb17ad 100644 --- a/src/tuneinsight/api/sdk/models/float_matrix.py +++ b/src/tuneinsight/api/sdk/models/float_matrix.py @@ -20,16 +20,16 @@ class FloatMatrix: columns (List[str]): Name of the columns of the matrix data (List[List[float]]): 2d array of float values contextual_info (Union[Unset, ResultContextualInfo]): contextual information about the content retrieved - row_count (Union[Unset, int]): column_count (Union[Unset, int]): + row_count (Union[Unset, int]): """ type: ContentType columns: List[str] data: List[List[float]] contextual_info: Union[Unset, "ResultContextualInfo"] = UNSET - row_count: Union[Unset, int] = UNSET column_count: Union[Unset, int] = UNSET + row_count: Union[Unset, int] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: @@ -47,8 +47,8 @@ def to_dict(self) -> Dict[str, Any]: if not isinstance(self.contextual_info, Unset): contextual_info = self.contextual_info.to_dict() - row_count = self.row_count column_count = self.column_count + row_count = self.row_count field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) @@ -61,10 +61,10 @@ def to_dict(self) -> Dict[str, Any]: ) if contextual_info is not UNSET: field_dict["contextualInfo"] = contextual_info - if row_count is not UNSET: - field_dict["rowCount"] = row_count if column_count is not UNSET: field_dict["columnCount"] = column_count + if row_count is not UNSET: + field_dict["rowCount"] = row_count return field_dict @@ -91,17 +91,17 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: contextual_info = ResultContextualInfo.from_dict(_contextual_info) - row_count = d.pop("rowCount", UNSET) - column_count = d.pop("columnCount", UNSET) + row_count = d.pop("rowCount", UNSET) + float_matrix = cls( type=type, columns=columns, data=data, contextual_info=contextual_info, - row_count=row_count, column_count=column_count, + row_count=row_count, ) float_matrix.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/get_network_metadata_response_200.py b/src/tuneinsight/api/sdk/models/get_network_metadata_response_200.py index ac08a41..16a0ba3 100644 --- 
a/src/tuneinsight/api/sdk/models/get_network_metadata_response_200.py +++ b/src/tuneinsight/api/sdk/models/get_network_metadata_response_200.py @@ -16,25 +16,23 @@ class GetNetworkMetadataResponse200: """ Attributes: + dpo_authorization_enabled (Union[Unset, bool]): Indicates if collective projects require authorization. + networks (Union[Unset, List['Network']]): + nodes (Union[Unset, List['Node']]): compound_queries_enabled (Union[Unset, bool]): Indicates if compound queries are enabled. If true, the data source queries can be composed of multiple queries. default_topology (Union[Unset, str]): Indicates the default topology of the network used when creating a project. Values can be "star" or "tree". - dpo_authorization_enabled (Union[Unset, bool]): Indicates if collective projects require authorization. - networks (Union[Unset, List['Network']]): - nodes (Union[Unset, List['Node']]): """ - compound_queries_enabled: Union[Unset, bool] = UNSET - default_topology: Union[Unset, str] = UNSET dpo_authorization_enabled: Union[Unset, bool] = UNSET networks: Union[Unset, List["Network"]] = UNSET nodes: Union[Unset, List["Node"]] = UNSET + compound_queries_enabled: Union[Unset, bool] = UNSET + default_topology: Union[Unset, str] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - compound_queries_enabled = self.compound_queries_enabled - default_topology = self.default_topology dpo_authorization_enabled = self.dpo_authorization_enabled networks: Union[Unset, List[Dict[str, Any]]] = UNSET if not isinstance(self.networks, Unset): @@ -52,19 +50,22 @@ def to_dict(self) -> Dict[str, Any]: nodes.append(nodes_item) + compound_queries_enabled = self.compound_queries_enabled + default_topology = self.default_topology + field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if compound_queries_enabled is not UNSET: - field_dict["compoundQueriesEnabled"] = compound_queries_enabled - if default_topology is not UNSET: - field_dict["default-topology"] = default_topology if dpo_authorization_enabled is not UNSET: field_dict["dpoAuthorizationEnabled"] = dpo_authorization_enabled if networks is not UNSET: field_dict["networks"] = networks if nodes is not UNSET: field_dict["nodes"] = nodes + if compound_queries_enabled is not UNSET: + field_dict["compoundQueriesEnabled"] = compound_queries_enabled + if default_topology is not UNSET: + field_dict["default-topology"] = default_topology return field_dict @@ -74,10 +75,6 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.node import Node d = src_dict.copy() - compound_queries_enabled = d.pop("compoundQueriesEnabled", UNSET) - - default_topology = d.pop("default-topology", UNSET) - dpo_authorization_enabled = d.pop("dpoAuthorizationEnabled", UNSET) networks = [] @@ -94,12 +91,16 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: nodes.append(nodes_item) + compound_queries_enabled = d.pop("compoundQueriesEnabled", UNSET) + + default_topology = d.pop("default-topology", UNSET) + get_network_metadata_response_200 = cls( - compound_queries_enabled=compound_queries_enabled, - default_topology=default_topology, dpo_authorization_enabled=dpo_authorization_enabled, networks=networks, nodes=nodes, + compound_queries_enabled=compound_queries_enabled, + default_topology=default_topology, ) get_network_metadata_response_200.additional_properties = d diff --git 
a/src/tuneinsight/api/sdk/models/get_project_network_status_response_200_item.py b/src/tuneinsight/api/sdk/models/get_project_network_status_response_200_item.py index 0384fab..c77157b 100644 --- a/src/tuneinsight/api/sdk/models/get_project_network_status_response_200_item.py +++ b/src/tuneinsight/api/sdk/models/get_project_network_status_response_200_item.py @@ -15,15 +15,16 @@ class GetProjectNetworkStatusResponse200Item: """ Attributes: - statuses (Union[Unset, List['NodeStatus']]): from_ (Union[Unset, str]): + statuses (Union[Unset, List['NodeStatus']]): """ - statuses: Union[Unset, List["NodeStatus"]] = UNSET from_: Union[Unset, str] = UNSET + statuses: Union[Unset, List["NodeStatus"]] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: + from_ = self.from_ statuses: Union[Unset, List[Dict[str, Any]]] = UNSET if not isinstance(self.statuses, Unset): statuses = [] @@ -32,15 +33,13 @@ def to_dict(self) -> Dict[str, Any]: statuses.append(statuses_item) - from_ = self.from_ - field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if statuses is not UNSET: - field_dict["statuses"] = statuses if from_ is not UNSET: field_dict["from"] = from_ + if statuses is not UNSET: + field_dict["statuses"] = statuses return field_dict @@ -49,6 +48,8 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.node_status import NodeStatus d = src_dict.copy() + from_ = d.pop("from", UNSET) + statuses = [] _statuses = d.pop("statuses", UNSET) for statuses_item_data in _statuses or []: @@ -56,11 +57,9 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: statuses.append(statuses_item) - from_ = d.pop("from", UNSET) - get_project_network_status_response_200_item = cls( - statuses=statuses, from_=from_, + statuses=statuses, ) get_project_network_status_response_200_item.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/get_project_status_response_200.py b/src/tuneinsight/api/sdk/models/get_project_status_response_200.py new file mode 100644 index 0000000..56d0e5c --- /dev/null +++ b/src/tuneinsight/api/sdk/models/get_project_status_response_200.py @@ -0,0 +1,90 @@ +from typing import TYPE_CHECKING, Any, Dict, List, Type, TypeVar, Union + +import attr + +from ..types import UNSET, Unset + +if TYPE_CHECKING: + from ..models.participant import Participant + + +T = TypeVar("T", bound="GetProjectStatusResponse200") + + +@attr.s(auto_attribs=True) +class GetProjectStatusResponse200: + """ + Attributes: + remote_participants (Union[Unset, List['Participant']]): + participant (Union[Unset, Participant]): Node participating in a project + """ + + remote_participants: Union[Unset, List["Participant"]] = UNSET + participant: Union[Unset, "Participant"] = UNSET + additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) + + def to_dict(self) -> Dict[str, Any]: + remote_participants: Union[Unset, List[Dict[str, Any]]] = UNSET + if not isinstance(self.remote_participants, Unset): + remote_participants = [] + for remote_participants_item_data in self.remote_participants: + remote_participants_item = remote_participants_item_data.to_dict() + + remote_participants.append(remote_participants_item) + + participant: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.participant, Unset): + participant = self.participant.to_dict() + + field_dict: Dict[str, Any] = {} + field_dict.update(self.additional_properties) + field_dict.update({}) + if 
remote_participants is not UNSET: + field_dict["remoteParticipants"] = remote_participants + if participant is not UNSET: + field_dict["participant"] = participant + + return field_dict + + @classmethod + def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: + from ..models.participant import Participant + + d = src_dict.copy() + remote_participants = [] + _remote_participants = d.pop("remoteParticipants", UNSET) + for remote_participants_item_data in _remote_participants or []: + remote_participants_item = Participant.from_dict(remote_participants_item_data) + + remote_participants.append(remote_participants_item) + + _participant = d.pop("participant", UNSET) + participant: Union[Unset, Participant] + if isinstance(_participant, Unset): + participant = UNSET + else: + participant = Participant.from_dict(_participant) + + get_project_status_response_200 = cls( + remote_participants=remote_participants, + participant=participant, + ) + + get_project_status_response_200.additional_properties = d + return get_project_status_response_200 + + @property + def additional_keys(self) -> List[str]: + return list(self.additional_properties.keys()) + + def __getitem__(self, key: str) -> Any: + return self.additional_properties[key] + + def __setitem__(self, key: str, value: Any) -> None: + self.additional_properties[key] = value + + def __delitem__(self, key: str) -> None: + del self.additional_properties[key] + + def __contains__(self, key: str) -> bool: + return key in self.additional_properties diff --git a/src/tuneinsight/api/sdk/models/gwas.py b/src/tuneinsight/api/sdk/models/gwas.py index bd7b2ac..c1c813d 100644 --- a/src/tuneinsight/api/sdk/models/gwas.py +++ b/src/tuneinsight/api/sdk/models/gwas.py @@ -22,31 +22,35 @@ class GWAS: """ Attributes: type (ComputationType): Type of the computation. - join_id (Union[Unset, str]): Unique identifier of a data object. + local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured + the network) local_input (Union[Unset, LocalInput]): If a local input is provided, the node initiating the computation will use it instead of querying the datasource. This data is *not* shared to other nodes, only used for the duration of the computation. The local input columns/values must be in the form {: [, , ...], ...} - wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. - input_data_object (Union[Unset, str]): Shared identifier of a data object. + preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters + applied to the input retrieved from the datasource, if applicable cohort_id (Union[Unset, str]): Unique identifier of a data object. + end_to_end_encrypted (Union[Unset, bool]): if the end to end encrypted mode is set to true, + then when release results is set to true and the output + is initially encrypted with a network collective key, then it is key switched to + the initiating user's public key. + local_input_id (Union[Unset, str]): Unique identifier of a data object. + wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. + dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various + disclosure prevention mechanisms + join_id (Union[Unset, str]): Unique identifier of a data object. + timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. 
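Editor's note, a minimal sketch only: the common computation flags documented in this attribute list (and repeated in the other computation models below) keep their serialized names after the reordering. The dict below illustrates the JSON body that GWAS.to_dict() would emit for the new endToEndEncrypted flag together with the existing release/wait/timeout fields; the key names are taken from this diff, the "type" value and the concrete values are placeholders.

# Illustrative only; "type" must be a valid ComputationType string (not shown in this diff).
gwas_body = {
    "type": "...",                # ComputationType value, elided here
    "local": False,               # False: run across the network, not only on local data
    "endToEndEncrypted": True,    # new flag: released results are key-switched to the caller's public key
    "releaseResults": True,       # decrypt/key-switch and persist a Result entity
    "wait": True,                 # block until the computation result is available
    "timeout": 300,               # maximum run time in seconds
}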
+ encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. + owner (Union[Unset, str]): The username of the end user who requested the computation. project_id (Union[Unset, str]): Unique identifier of a project. release_results (Union[Unset, bool]): flag to set to true if the computation should directly release the output results. If set, then encrypted results are automatically key switched and decrypted and a Result entity is saved - dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various - disclosure prevention mechanisms - encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. - local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured - the network) - local_input_id (Union[Unset, str]): Unique identifier of a data object. - owner (Union[Unset, str]): The username of the end user who requested the computation. - preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters - applied to the input retrieved from the datasource, if applicable - timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. data_source_parameters (Union[Unset, ComputationDataSourceParameters]): Parameters used to query the datasource from each node before the computation + input_data_object (Union[Unset, str]): Shared identifier of a data object. matching_params (Union[Unset, MatchingParams]): parameters relevant for matching target_label (Union[Unset, str]): target to use from the clinical datasets variants_organization (Union[Unset, str]): organization holding the variants @@ -55,21 +59,22 @@ class GWAS: """ type: ComputationType - join_id: Union[Unset, str] = UNSET + local: Union[Unset, bool] = UNSET local_input: Union[Unset, "LocalInput"] = UNSET - wait: Union[Unset, bool] = UNSET - input_data_object: Union[Unset, str] = UNSET + preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET cohort_id: Union[Unset, str] = UNSET - project_id: Union[Unset, str] = UNSET - release_results: Union[Unset, bool] = UNSET + end_to_end_encrypted: Union[Unset, bool] = UNSET + local_input_id: Union[Unset, str] = UNSET + wait: Union[Unset, bool] = UNSET dp_policy: Union[Unset, "DPPolicy"] = UNSET + join_id: Union[Unset, str] = UNSET + timeout: Union[Unset, int] = UNSET encrypted: Union[Unset, bool] = UNSET - local: Union[Unset, bool] = UNSET - local_input_id: Union[Unset, str] = UNSET owner: Union[Unset, str] = UNSET - preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET - timeout: Union[Unset, int] = UNSET + project_id: Union[Unset, str] = UNSET + release_results: Union[Unset, bool] = UNSET data_source_parameters: Union[Unset, "ComputationDataSourceParameters"] = UNSET + input_data_object: Union[Unset, str] = UNSET matching_params: Union[Unset, "MatchingParams"] = UNSET target_label: Union[Unset, str] = UNSET variants_organization: Union[Unset, str] = UNSET @@ -80,33 +85,34 @@ class GWAS: def to_dict(self) -> Dict[str, Any]: type = self.type.value - join_id = self.join_id + local = self.local local_input: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.local_input, Unset): local_input = self.local_input.to_dict() - wait = self.wait - input_data_object = self.input_data_object + preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET + if not 
isinstance(self.preprocessing_parameters, Unset): + preprocessing_parameters = self.preprocessing_parameters.to_dict() + cohort_id = self.cohort_id - project_id = self.project_id - release_results = self.release_results + end_to_end_encrypted = self.end_to_end_encrypted + local_input_id = self.local_input_id + wait = self.wait dp_policy: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.dp_policy, Unset): dp_policy = self.dp_policy.to_dict() + join_id = self.join_id + timeout = self.timeout encrypted = self.encrypted - local = self.local - local_input_id = self.local_input_id owner = self.owner - preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.preprocessing_parameters, Unset): - preprocessing_parameters = self.preprocessing_parameters.to_dict() - - timeout = self.timeout + project_id = self.project_id + release_results = self.release_results data_source_parameters: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.data_source_parameters, Unset): data_source_parameters = self.data_source_parameters.to_dict() + input_data_object = self.input_data_object matching_params: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.matching_params, Unset): matching_params = self.matching_params.to_dict() @@ -128,36 +134,38 @@ def to_dict(self) -> Dict[str, Any]: "type": type, } ) - if join_id is not UNSET: - field_dict["joinId"] = join_id + if local is not UNSET: + field_dict["local"] = local if local_input is not UNSET: field_dict["localInput"] = local_input - if wait is not UNSET: - field_dict["wait"] = wait - if input_data_object is not UNSET: - field_dict["inputDataObject"] = input_data_object + if preprocessing_parameters is not UNSET: + field_dict["preprocessingParameters"] = preprocessing_parameters if cohort_id is not UNSET: field_dict["cohortId"] = cohort_id - if project_id is not UNSET: - field_dict["projectId"] = project_id - if release_results is not UNSET: - field_dict["releaseResults"] = release_results + if end_to_end_encrypted is not UNSET: + field_dict["endToEndEncrypted"] = end_to_end_encrypted + if local_input_id is not UNSET: + field_dict["localInputID"] = local_input_id + if wait is not UNSET: + field_dict["wait"] = wait if dp_policy is not UNSET: field_dict["DPPolicy"] = dp_policy + if join_id is not UNSET: + field_dict["joinId"] = join_id + if timeout is not UNSET: + field_dict["timeout"] = timeout if encrypted is not UNSET: field_dict["encrypted"] = encrypted - if local is not UNSET: - field_dict["local"] = local - if local_input_id is not UNSET: - field_dict["localInputID"] = local_input_id if owner is not UNSET: field_dict["owner"] = owner - if preprocessing_parameters is not UNSET: - field_dict["preprocessingParameters"] = preprocessing_parameters - if timeout is not UNSET: - field_dict["timeout"] = timeout + if project_id is not UNSET: + field_dict["projectId"] = project_id + if release_results is not UNSET: + field_dict["releaseResults"] = release_results if data_source_parameters is not UNSET: field_dict["dataSourceParameters"] = data_source_parameters + if input_data_object is not UNSET: + field_dict["inputDataObject"] = input_data_object if matching_params is not UNSET: field_dict["matchingParams"] = matching_params if target_label is not UNSET: @@ -183,7 +191,7 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() type = ComputationType(d.pop("type")) - join_id = d.pop("joinId", UNSET) + local = d.pop("local", UNSET) _local_input = d.pop("localInput", UNSET) local_input: 
Union[Unset, LocalInput] @@ -192,15 +200,20 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: local_input = LocalInput.from_dict(_local_input) - wait = d.pop("wait", UNSET) - - input_data_object = d.pop("inputDataObject", UNSET) + _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) + preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] + if isinstance(_preprocessing_parameters, Unset): + preprocessing_parameters = UNSET + else: + preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) cohort_id = d.pop("cohortId", UNSET) - project_id = d.pop("projectId", UNSET) + end_to_end_encrypted = d.pop("endToEndEncrypted", UNSET) - release_results = d.pop("releaseResults", UNSET) + local_input_id = d.pop("localInputID", UNSET) + + wait = d.pop("wait", UNSET) _dp_policy = d.pop("DPPolicy", UNSET) dp_policy: Union[Unset, DPPolicy] @@ -209,22 +222,17 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: dp_policy = DPPolicy.from_dict(_dp_policy) - encrypted = d.pop("encrypted", UNSET) + join_id = d.pop("joinId", UNSET) - local = d.pop("local", UNSET) + timeout = d.pop("timeout", UNSET) - local_input_id = d.pop("localInputID", UNSET) + encrypted = d.pop("encrypted", UNSET) owner = d.pop("owner", UNSET) - _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) - preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] - if isinstance(_preprocessing_parameters, Unset): - preprocessing_parameters = UNSET - else: - preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) + project_id = d.pop("projectId", UNSET) - timeout = d.pop("timeout", UNSET) + release_results = d.pop("releaseResults", UNSET) _data_source_parameters = d.pop("dataSourceParameters", UNSET) data_source_parameters: Union[Unset, ComputationDataSourceParameters] @@ -233,6 +241,8 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: data_source_parameters = ComputationDataSourceParameters.from_dict(_data_source_parameters) + input_data_object = d.pop("inputDataObject", UNSET) + _matching_params = d.pop("matchingParams", UNSET) matching_params: Union[Unset, MatchingParams] if isinstance(_matching_params, Unset): @@ -255,21 +265,22 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: gwas = cls( type=type, - join_id=join_id, + local=local, local_input=local_input, - wait=wait, - input_data_object=input_data_object, + preprocessing_parameters=preprocessing_parameters, cohort_id=cohort_id, - project_id=project_id, - release_results=release_results, + end_to_end_encrypted=end_to_end_encrypted, + local_input_id=local_input_id, + wait=wait, dp_policy=dp_policy, + join_id=join_id, + timeout=timeout, encrypted=encrypted, - local=local, - local_input_id=local_input_id, owner=owner, - preprocessing_parameters=preprocessing_parameters, - timeout=timeout, + project_id=project_id, + release_results=release_results, data_source_parameters=data_source_parameters, + input_data_object=input_data_object, matching_params=matching_params, target_label=target_label, variants_organization=variants_organization, diff --git a/src/tuneinsight/api/sdk/models/hybrid_fl.py b/src/tuneinsight/api/sdk/models/hybrid_fl.py index 2978447..1ef7f32 100644 --- a/src/tuneinsight/api/sdk/models/hybrid_fl.py +++ b/src/tuneinsight/api/sdk/models/hybrid_fl.py @@ -21,52 +21,57 @@ class HybridFL: """ Attributes: type (ComputationType): Type of the computation. 
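Picking up the new get_project_status_response_200.py file added a few hunks above: the generated from_dict tolerates missing optional keys and leaves them as the UNSET sentinel. A minimal sketch, assuming the module path mirrors the file layout shown in this diff:

from tuneinsight.api.sdk.models.get_project_status_response_200 import GetProjectStatusResponse200
from tuneinsight.api.sdk.types import UNSET

# "participant" is absent, so it stays UNSET; unknown keys would land in additional_properties.
status = GetProjectStatusResponse200.from_dict({"remoteParticipants": []})
assert status.remote_participants == []
assert status.participant is UNSET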
- join_id (Union[Unset, str]): Unique identifier of a data object. + local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured + the network) local_input (Union[Unset, LocalInput]): If a local input is provided, the node initiating the computation will use it instead of querying the datasource. This data is *not* shared to other nodes, only used for the duration of the computation. The local input columns/values must be in the form {: [, , ...], ...} - wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. - input_data_object (Union[Unset, str]): Shared identifier of a data object. + preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters + applied to the input retrieved from the datasource, if applicable cohort_id (Union[Unset, str]): Unique identifier of a data object. + end_to_end_encrypted (Union[Unset, bool]): if the end to end encrypted mode is set to true, + then when release results is set to true and the output + is initially encrypted with a network collective key, then it is key switched to + the initiating user's public key. + local_input_id (Union[Unset, str]): Unique identifier of a data object. + wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. + dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various + disclosure prevention mechanisms + join_id (Union[Unset, str]): Unique identifier of a data object. + timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. + encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. + owner (Union[Unset, str]): The username of the end user who requested the computation. project_id (Union[Unset, str]): Unique identifier of a project. release_results (Union[Unset, bool]): flag to set to true if the computation should directly release the output results. If set, then encrypted results are automatically key switched and decrypted and a Result entity is saved - dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various - disclosure prevention mechanisms - encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. - local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured - the network) - local_input_id (Union[Unset, str]): Unique identifier of a data object. - owner (Union[Unset, str]): The username of the end user who requested the computation. - preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters - applied to the input retrieved from the datasource, if applicable - timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. data_source_parameters (Union[Unset, ComputationDataSourceParameters]): Parameters used to query the datasource from each node before the computation + input_data_object (Union[Unset, str]): Shared identifier of a data object. 
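The column/value template in the local_input description above lost its placeholders in rendering; from the surrounding text, the intended shape is a mapping from column name to a list of values. A small illustrative sketch with invented column names:

# Hypothetical local input: one list of values per column, keyed by column name.
local_input = {
    "age": [34, 51, 29],
    "diagnosis": ["E11", "I10", "E11"],
}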
learning_params (Union[Unset, HybridFLLearningParams]): Hyperparameters for the Hybrid Federated Learning task_def (Union[Unset, str]): task_id (Union[Unset, str]): """ type: ComputationType - join_id: Union[Unset, str] = UNSET + local: Union[Unset, bool] = UNSET local_input: Union[Unset, "LocalInput"] = UNSET - wait: Union[Unset, bool] = UNSET - input_data_object: Union[Unset, str] = UNSET + preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET cohort_id: Union[Unset, str] = UNSET - project_id: Union[Unset, str] = UNSET - release_results: Union[Unset, bool] = UNSET + end_to_end_encrypted: Union[Unset, bool] = UNSET + local_input_id: Union[Unset, str] = UNSET + wait: Union[Unset, bool] = UNSET dp_policy: Union[Unset, "DPPolicy"] = UNSET + join_id: Union[Unset, str] = UNSET + timeout: Union[Unset, int] = UNSET encrypted: Union[Unset, bool] = UNSET - local: Union[Unset, bool] = UNSET - local_input_id: Union[Unset, str] = UNSET owner: Union[Unset, str] = UNSET - preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET - timeout: Union[Unset, int] = UNSET + project_id: Union[Unset, str] = UNSET + release_results: Union[Unset, bool] = UNSET data_source_parameters: Union[Unset, "ComputationDataSourceParameters"] = UNSET + input_data_object: Union[Unset, str] = UNSET learning_params: Union[Unset, "HybridFLLearningParams"] = UNSET task_def: Union[Unset, str] = UNSET task_id: Union[Unset, str] = UNSET @@ -75,33 +80,34 @@ class HybridFL: def to_dict(self) -> Dict[str, Any]: type = self.type.value - join_id = self.join_id + local = self.local local_input: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.local_input, Unset): local_input = self.local_input.to_dict() - wait = self.wait - input_data_object = self.input_data_object + preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.preprocessing_parameters, Unset): + preprocessing_parameters = self.preprocessing_parameters.to_dict() + cohort_id = self.cohort_id - project_id = self.project_id - release_results = self.release_results + end_to_end_encrypted = self.end_to_end_encrypted + local_input_id = self.local_input_id + wait = self.wait dp_policy: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.dp_policy, Unset): dp_policy = self.dp_policy.to_dict() + join_id = self.join_id + timeout = self.timeout encrypted = self.encrypted - local = self.local - local_input_id = self.local_input_id owner = self.owner - preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.preprocessing_parameters, Unset): - preprocessing_parameters = self.preprocessing_parameters.to_dict() - - timeout = self.timeout + project_id = self.project_id + release_results = self.release_results data_source_parameters: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.data_source_parameters, Unset): data_source_parameters = self.data_source_parameters.to_dict() + input_data_object = self.input_data_object learning_params: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.learning_params, Unset): learning_params = self.learning_params.to_dict() @@ -116,36 +122,38 @@ def to_dict(self) -> Dict[str, Any]: "type": type, } ) - if join_id is not UNSET: - field_dict["joinId"] = join_id + if local is not UNSET: + field_dict["local"] = local if local_input is not UNSET: field_dict["localInput"] = local_input - if wait is not UNSET: - field_dict["wait"] = wait - if input_data_object is not UNSET: - field_dict["inputDataObject"] = 
input_data_object + if preprocessing_parameters is not UNSET: + field_dict["preprocessingParameters"] = preprocessing_parameters if cohort_id is not UNSET: field_dict["cohortId"] = cohort_id - if project_id is not UNSET: - field_dict["projectId"] = project_id - if release_results is not UNSET: - field_dict["releaseResults"] = release_results + if end_to_end_encrypted is not UNSET: + field_dict["endToEndEncrypted"] = end_to_end_encrypted + if local_input_id is not UNSET: + field_dict["localInputID"] = local_input_id + if wait is not UNSET: + field_dict["wait"] = wait if dp_policy is not UNSET: field_dict["DPPolicy"] = dp_policy + if join_id is not UNSET: + field_dict["joinId"] = join_id + if timeout is not UNSET: + field_dict["timeout"] = timeout if encrypted is not UNSET: field_dict["encrypted"] = encrypted - if local is not UNSET: - field_dict["local"] = local - if local_input_id is not UNSET: - field_dict["localInputID"] = local_input_id if owner is not UNSET: field_dict["owner"] = owner - if preprocessing_parameters is not UNSET: - field_dict["preprocessingParameters"] = preprocessing_parameters - if timeout is not UNSET: - field_dict["timeout"] = timeout + if project_id is not UNSET: + field_dict["projectId"] = project_id + if release_results is not UNSET: + field_dict["releaseResults"] = release_results if data_source_parameters is not UNSET: field_dict["dataSourceParameters"] = data_source_parameters + if input_data_object is not UNSET: + field_dict["inputDataObject"] = input_data_object if learning_params is not UNSET: field_dict["learningParams"] = learning_params if task_def is not UNSET: @@ -166,7 +174,7 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() type = ComputationType(d.pop("type")) - join_id = d.pop("joinId", UNSET) + local = d.pop("local", UNSET) _local_input = d.pop("localInput", UNSET) local_input: Union[Unset, LocalInput] @@ -175,15 +183,20 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: local_input = LocalInput.from_dict(_local_input) - wait = d.pop("wait", UNSET) - - input_data_object = d.pop("inputDataObject", UNSET) + _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) + preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] + if isinstance(_preprocessing_parameters, Unset): + preprocessing_parameters = UNSET + else: + preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) cohort_id = d.pop("cohortId", UNSET) - project_id = d.pop("projectId", UNSET) + end_to_end_encrypted = d.pop("endToEndEncrypted", UNSET) - release_results = d.pop("releaseResults", UNSET) + local_input_id = d.pop("localInputID", UNSET) + + wait = d.pop("wait", UNSET) _dp_policy = d.pop("DPPolicy", UNSET) dp_policy: Union[Unset, DPPolicy] @@ -192,22 +205,17 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: dp_policy = DPPolicy.from_dict(_dp_policy) - encrypted = d.pop("encrypted", UNSET) + join_id = d.pop("joinId", UNSET) - local = d.pop("local", UNSET) + timeout = d.pop("timeout", UNSET) - local_input_id = d.pop("localInputID", UNSET) + encrypted = d.pop("encrypted", UNSET) owner = d.pop("owner", UNSET) - _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) - preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] - if isinstance(_preprocessing_parameters, Unset): - preprocessing_parameters = UNSET - else: - preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) + 
project_id = d.pop("projectId", UNSET) - timeout = d.pop("timeout", UNSET) + release_results = d.pop("releaseResults", UNSET) _data_source_parameters = d.pop("dataSourceParameters", UNSET) data_source_parameters: Union[Unset, ComputationDataSourceParameters] @@ -216,6 +224,8 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: data_source_parameters = ComputationDataSourceParameters.from_dict(_data_source_parameters) + input_data_object = d.pop("inputDataObject", UNSET) + _learning_params = d.pop("learningParams", UNSET) learning_params: Union[Unset, HybridFLLearningParams] if isinstance(_learning_params, Unset): @@ -229,21 +239,22 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: hybrid_fl = cls( type=type, - join_id=join_id, + local=local, local_input=local_input, - wait=wait, - input_data_object=input_data_object, + preprocessing_parameters=preprocessing_parameters, cohort_id=cohort_id, - project_id=project_id, - release_results=release_results, + end_to_end_encrypted=end_to_end_encrypted, + local_input_id=local_input_id, + wait=wait, dp_policy=dp_policy, + join_id=join_id, + timeout=timeout, encrypted=encrypted, - local=local, - local_input_id=local_input_id, owner=owner, - preprocessing_parameters=preprocessing_parameters, - timeout=timeout, + project_id=project_id, + release_results=release_results, data_source_parameters=data_source_parameters, + input_data_object=input_data_object, learning_params=learning_params, task_def=task_def, task_id=task_id, diff --git a/src/tuneinsight/api/sdk/models/hybrid_fl_learning_params.py b/src/tuneinsight/api/sdk/models/hybrid_fl_learning_params.py index 46dd8d1..51ca075 100644 --- a/src/tuneinsight/api/sdk/models/hybrid_fl_learning_params.py +++ b/src/tuneinsight/api/sdk/models/hybrid_fl_learning_params.py @@ -13,98 +13,110 @@ class HybridFLLearningParams: """Hyperparameters for the Hybrid Federated Learning Attributes: + learning_rate (Union[Unset, float]): Learning rate of the optimizer in the python-server + local_epochs (Union[Unset, int]): Number of local epochs of the Hybrid FL between aggregations + batch_size (Union[Unset, int]): Batch size for the training in the python-server + delta (Union[Unset, float]): Delta parameter of the differential privacy in HybridFL epsilon (Union[Unset, float]): Epsilon parameter of the differential privacy in HybridFL fl_rounds (Union[Unset, int]): Number of federated rounds of the Hybrid FL + gradient_clipping (Union[Unset, float]): Gradient clipping to apply for the training and the noise computation momentum (Union[Unset, float]): Momentum of the optimizer in the python-server + num_workers (Union[Unset, int]): Number of workers loading the data for training in the python-server strategy (Union[Unset, AggregationStrategy]): weighting aggregation strategy Default: AggregationStrategy.CONSTANT. - use_clipping_factor (Union[Unset, bool]): If set to true, gradient clipping is adjusted specifically at each - layer Default: True. - batch_size (Union[Unset, int]): Batch size for the training in the python-server - delta (Union[Unset, float]): Delta parameter of the differential privacy in HybridFL + add_noise (Union[Unset, bool]): Whether to add differential privacy or not to the HybridFL Default: True. encrypt_aggregation (Union[Unset, bool]): Whether to to the aggregation encrypted or not in HybridFL Default: True. 
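The hyperparameters listed above are plain keyword arguments on the generated model, so only their declaration order changed here; keyword construction and serialization are unaffected. A minimal sketch, assuming the module path mirrors the file layout shown in this diff:

from tuneinsight.api.sdk.models.hybrid_fl_learning_params import HybridFLLearningParams

params = HybridFLLearningParams(
    learning_rate=0.01,
    local_epochs=2,
    batch_size=32,
    fl_rounds=10,
    epsilon=1.0,   # differential-privacy epsilon (addNoise defaults to True)
    delta=1e-5,    # differential-privacy delta
)
print(params.to_dict())  # keys are camelCase: "learningRate", "localEpochs", "batchSize", ...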
- gradient_clipping (Union[Unset, float]): Gradient clipping to apply for the training and the noise computation - learning_rate (Union[Unset, float]): Learning rate of the optimizer in the python-server - local_epochs (Union[Unset, int]): Number of local epochs of the Hybrid FL between aggregations - num_workers (Union[Unset, int]): Number of workers loading the data for training in the python-server - add_noise (Union[Unset, bool]): Whether to add differential privacy or not to the HybridFL Default: True. + use_clipping_factor (Union[Unset, bool]): If set to true, gradient clipping is adjusted specifically at each + layer Default: True. """ - epsilon: Union[Unset, float] = UNSET - fl_rounds: Union[Unset, int] = UNSET - momentum: Union[Unset, float] = UNSET - strategy: Union[Unset, AggregationStrategy] = AggregationStrategy.CONSTANT - use_clipping_factor: Union[Unset, bool] = True + learning_rate: Union[Unset, float] = UNSET + local_epochs: Union[Unset, int] = UNSET batch_size: Union[Unset, int] = UNSET delta: Union[Unset, float] = UNSET - encrypt_aggregation: Union[Unset, bool] = True + epsilon: Union[Unset, float] = UNSET + fl_rounds: Union[Unset, int] = UNSET gradient_clipping: Union[Unset, float] = UNSET - learning_rate: Union[Unset, float] = UNSET - local_epochs: Union[Unset, int] = UNSET + momentum: Union[Unset, float] = UNSET num_workers: Union[Unset, int] = UNSET + strategy: Union[Unset, AggregationStrategy] = AggregationStrategy.CONSTANT add_noise: Union[Unset, bool] = True + encrypt_aggregation: Union[Unset, bool] = True + use_clipping_factor: Union[Unset, bool] = True additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: + learning_rate = self.learning_rate + local_epochs = self.local_epochs + batch_size = self.batch_size + delta = self.delta epsilon = self.epsilon fl_rounds = self.fl_rounds + gradient_clipping = self.gradient_clipping momentum = self.momentum + num_workers = self.num_workers strategy: Union[Unset, str] = UNSET if not isinstance(self.strategy, Unset): strategy = self.strategy.value - use_clipping_factor = self.use_clipping_factor - batch_size = self.batch_size - delta = self.delta - encrypt_aggregation = self.encrypt_aggregation - gradient_clipping = self.gradient_clipping - learning_rate = self.learning_rate - local_epochs = self.local_epochs - num_workers = self.num_workers add_noise = self.add_noise + encrypt_aggregation = self.encrypt_aggregation + use_clipping_factor = self.use_clipping_factor field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if epsilon is not UNSET: - field_dict["epsilon"] = epsilon - if fl_rounds is not UNSET: - field_dict["flRounds"] = fl_rounds - if momentum is not UNSET: - field_dict["momentum"] = momentum - if strategy is not UNSET: - field_dict["strategy"] = strategy - if use_clipping_factor is not UNSET: - field_dict["useClippingFactor"] = use_clipping_factor + if learning_rate is not UNSET: + field_dict["learningRate"] = learning_rate + if local_epochs is not UNSET: + field_dict["localEpochs"] = local_epochs if batch_size is not UNSET: field_dict["batchSize"] = batch_size if delta is not UNSET: field_dict["delta"] = delta - if encrypt_aggregation is not UNSET: - field_dict["encryptAggregation"] = encrypt_aggregation + if epsilon is not UNSET: + field_dict["epsilon"] = epsilon + if fl_rounds is not UNSET: + field_dict["flRounds"] = fl_rounds if gradient_clipping is not UNSET: field_dict["gradientClipping"] = 
gradient_clipping - if learning_rate is not UNSET: - field_dict["learningRate"] = learning_rate - if local_epochs is not UNSET: - field_dict["localEpochs"] = local_epochs + if momentum is not UNSET: + field_dict["momentum"] = momentum if num_workers is not UNSET: field_dict["numWorkers"] = num_workers + if strategy is not UNSET: + field_dict["strategy"] = strategy if add_noise is not UNSET: field_dict["addNoise"] = add_noise + if encrypt_aggregation is not UNSET: + field_dict["encryptAggregation"] = encrypt_aggregation + if use_clipping_factor is not UNSET: + field_dict["useClippingFactor"] = use_clipping_factor return field_dict @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() + learning_rate = d.pop("learningRate", UNSET) + + local_epochs = d.pop("localEpochs", UNSET) + + batch_size = d.pop("batchSize", UNSET) + + delta = d.pop("delta", UNSET) + epsilon = d.pop("epsilon", UNSET) fl_rounds = d.pop("flRounds", UNSET) + gradient_clipping = d.pop("gradientClipping", UNSET) + momentum = d.pop("momentum", UNSET) + num_workers = d.pop("numWorkers", UNSET) + _strategy = d.pop("strategy", UNSET) strategy: Union[Unset, AggregationStrategy] if isinstance(_strategy, Unset): @@ -112,38 +124,26 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: strategy = AggregationStrategy(_strategy) - use_clipping_factor = d.pop("useClippingFactor", UNSET) - - batch_size = d.pop("batchSize", UNSET) - - delta = d.pop("delta", UNSET) + add_noise = d.pop("addNoise", UNSET) encrypt_aggregation = d.pop("encryptAggregation", UNSET) - gradient_clipping = d.pop("gradientClipping", UNSET) - - learning_rate = d.pop("learningRate", UNSET) - - local_epochs = d.pop("localEpochs", UNSET) - - num_workers = d.pop("numWorkers", UNSET) - - add_noise = d.pop("addNoise", UNSET) + use_clipping_factor = d.pop("useClippingFactor", UNSET) hybrid_fl_learning_params = cls( - epsilon=epsilon, - fl_rounds=fl_rounds, - momentum=momentum, - strategy=strategy, - use_clipping_factor=use_clipping_factor, + learning_rate=learning_rate, + local_epochs=local_epochs, batch_size=batch_size, delta=delta, - encrypt_aggregation=encrypt_aggregation, + epsilon=epsilon, + fl_rounds=fl_rounds, gradient_clipping=gradient_clipping, - learning_rate=learning_rate, - local_epochs=local_epochs, + momentum=momentum, num_workers=num_workers, + strategy=strategy, add_noise=add_noise, + encrypt_aggregation=encrypt_aggregation, + use_clipping_factor=use_clipping_factor, ) hybrid_fl_learning_params.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/key_switched_computation.py b/src/tuneinsight/api/sdk/models/key_switched_computation.py index 6068005..b6fee30 100644 --- a/src/tuneinsight/api/sdk/models/key_switched_computation.py +++ b/src/tuneinsight/api/sdk/models/key_switched_computation.py @@ -21,97 +21,103 @@ class KeySwitchedComputation: """ Attributes: type (ComputationType): Type of the computation. - join_id (Union[Unset, str]): Unique identifier of a data object. + local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured + the network) local_input (Union[Unset, LocalInput]): If a local input is provided, the node initiating the computation will use it instead of querying the datasource. This data is *not* shared to other nodes, only used for the duration of the computation. The local input columns/values must be in the form {: [, , ...], ...} - wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. 
- input_data_object (Union[Unset, str]): Shared identifier of a data object. + preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters + applied to the input retrieved from the datasource, if applicable cohort_id (Union[Unset, str]): Unique identifier of a data object. + end_to_end_encrypted (Union[Unset, bool]): if the end to end encrypted mode is set to true, + then when release results is set to true and the output + is initially encrypted with a network collective key, then it is key switched to + the initiating user's public key. + local_input_id (Union[Unset, str]): Unique identifier of a data object. + wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. + dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various + disclosure prevention mechanisms + join_id (Union[Unset, str]): Unique identifier of a data object. + timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. + encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. + owner (Union[Unset, str]): The username of the end user who requested the computation. project_id (Union[Unset, str]): Unique identifier of a project. release_results (Union[Unset, bool]): flag to set to true if the computation should directly release the output results. If set, then encrypted results are automatically key switched and decrypted and a Result entity is saved - dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various - disclosure prevention mechanisms - encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. - local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured - the network) - local_input_id (Union[Unset, str]): Unique identifier of a data object. - owner (Union[Unset, str]): The username of the end user who requested the computation. - preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters - applied to the input retrieved from the datasource, if applicable - timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. data_source_parameters (Union[Unset, ComputationDataSourceParameters]): Parameters used to query the datasource from each node before the computation + input_data_object (Union[Unset, str]): Shared identifier of a data object. + target_public_key (Union[Unset, str]): Unique identifier of a data object. computation (Union[Unset, ComputationDefinition]): Generic computation. decrypt_results (Union[Unset, bool]): if true, the key-switched results are decrypted using either the specified secret key or the secret key from the session secret_key (Union[Unset, str]): Unique identifier of a data object. - target_public_key (Union[Unset, str]): Unique identifier of a data object. 
""" type: ComputationType - join_id: Union[Unset, str] = UNSET + local: Union[Unset, bool] = UNSET local_input: Union[Unset, "LocalInput"] = UNSET - wait: Union[Unset, bool] = UNSET - input_data_object: Union[Unset, str] = UNSET + preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET cohort_id: Union[Unset, str] = UNSET - project_id: Union[Unset, str] = UNSET - release_results: Union[Unset, bool] = UNSET + end_to_end_encrypted: Union[Unset, bool] = UNSET + local_input_id: Union[Unset, str] = UNSET + wait: Union[Unset, bool] = UNSET dp_policy: Union[Unset, "DPPolicy"] = UNSET + join_id: Union[Unset, str] = UNSET + timeout: Union[Unset, int] = UNSET encrypted: Union[Unset, bool] = UNSET - local: Union[Unset, bool] = UNSET - local_input_id: Union[Unset, str] = UNSET owner: Union[Unset, str] = UNSET - preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET - timeout: Union[Unset, int] = UNSET + project_id: Union[Unset, str] = UNSET + release_results: Union[Unset, bool] = UNSET data_source_parameters: Union[Unset, "ComputationDataSourceParameters"] = UNSET + input_data_object: Union[Unset, str] = UNSET + target_public_key: Union[Unset, str] = UNSET computation: Union[Unset, "ComputationDefinition"] = UNSET decrypt_results: Union[Unset, bool] = UNSET secret_key: Union[Unset, str] = UNSET - target_public_key: Union[Unset, str] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: type = self.type.value - join_id = self.join_id + local = self.local local_input: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.local_input, Unset): local_input = self.local_input.to_dict() - wait = self.wait - input_data_object = self.input_data_object + preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.preprocessing_parameters, Unset): + preprocessing_parameters = self.preprocessing_parameters.to_dict() + cohort_id = self.cohort_id - project_id = self.project_id - release_results = self.release_results + end_to_end_encrypted = self.end_to_end_encrypted + local_input_id = self.local_input_id + wait = self.wait dp_policy: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.dp_policy, Unset): dp_policy = self.dp_policy.to_dict() + join_id = self.join_id + timeout = self.timeout encrypted = self.encrypted - local = self.local - local_input_id = self.local_input_id owner = self.owner - preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.preprocessing_parameters, Unset): - preprocessing_parameters = self.preprocessing_parameters.to_dict() - - timeout = self.timeout + project_id = self.project_id + release_results = self.release_results data_source_parameters: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.data_source_parameters, Unset): data_source_parameters = self.data_source_parameters.to_dict() + input_data_object = self.input_data_object + target_public_key = self.target_public_key computation: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.computation, Unset): computation = self.computation.to_dict() decrypt_results = self.decrypt_results secret_key = self.secret_key - target_public_key = self.target_public_key field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) @@ -120,44 +126,46 @@ def to_dict(self) -> Dict[str, Any]: "type": type, } ) - if join_id is not UNSET: - field_dict["joinId"] = join_id + if local is not UNSET: + field_dict["local"] = local 
if local_input is not UNSET: field_dict["localInput"] = local_input - if wait is not UNSET: - field_dict["wait"] = wait - if input_data_object is not UNSET: - field_dict["inputDataObject"] = input_data_object + if preprocessing_parameters is not UNSET: + field_dict["preprocessingParameters"] = preprocessing_parameters if cohort_id is not UNSET: field_dict["cohortId"] = cohort_id - if project_id is not UNSET: - field_dict["projectId"] = project_id - if release_results is not UNSET: - field_dict["releaseResults"] = release_results + if end_to_end_encrypted is not UNSET: + field_dict["endToEndEncrypted"] = end_to_end_encrypted + if local_input_id is not UNSET: + field_dict["localInputID"] = local_input_id + if wait is not UNSET: + field_dict["wait"] = wait if dp_policy is not UNSET: field_dict["DPPolicy"] = dp_policy + if join_id is not UNSET: + field_dict["joinId"] = join_id + if timeout is not UNSET: + field_dict["timeout"] = timeout if encrypted is not UNSET: field_dict["encrypted"] = encrypted - if local is not UNSET: - field_dict["local"] = local - if local_input_id is not UNSET: - field_dict["localInputID"] = local_input_id if owner is not UNSET: field_dict["owner"] = owner - if preprocessing_parameters is not UNSET: - field_dict["preprocessingParameters"] = preprocessing_parameters - if timeout is not UNSET: - field_dict["timeout"] = timeout + if project_id is not UNSET: + field_dict["projectId"] = project_id + if release_results is not UNSET: + field_dict["releaseResults"] = release_results if data_source_parameters is not UNSET: field_dict["dataSourceParameters"] = data_source_parameters + if input_data_object is not UNSET: + field_dict["inputDataObject"] = input_data_object + if target_public_key is not UNSET: + field_dict["targetPublicKey"] = target_public_key if computation is not UNSET: field_dict["computation"] = computation if decrypt_results is not UNSET: field_dict["decryptResults"] = decrypt_results if secret_key is not UNSET: field_dict["secretKey"] = secret_key - if target_public_key is not UNSET: - field_dict["targetPublicKey"] = target_public_key return field_dict @@ -172,7 +180,7 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() type = ComputationType(d.pop("type")) - join_id = d.pop("joinId", UNSET) + local = d.pop("local", UNSET) _local_input = d.pop("localInput", UNSET) local_input: Union[Unset, LocalInput] @@ -181,15 +189,20 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: local_input = LocalInput.from_dict(_local_input) - wait = d.pop("wait", UNSET) - - input_data_object = d.pop("inputDataObject", UNSET) + _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) + preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] + if isinstance(_preprocessing_parameters, Unset): + preprocessing_parameters = UNSET + else: + preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) cohort_id = d.pop("cohortId", UNSET) - project_id = d.pop("projectId", UNSET) + end_to_end_encrypted = d.pop("endToEndEncrypted", UNSET) - release_results = d.pop("releaseResults", UNSET) + local_input_id = d.pop("localInputID", UNSET) + + wait = d.pop("wait", UNSET) _dp_policy = d.pop("DPPolicy", UNSET) dp_policy: Union[Unset, DPPolicy] @@ -198,22 +211,17 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: dp_policy = DPPolicy.from_dict(_dp_policy) - encrypted = d.pop("encrypted", UNSET) + join_id = d.pop("joinId", UNSET) - local = d.pop("local", UNSET) + 
timeout = d.pop("timeout", UNSET) - local_input_id = d.pop("localInputID", UNSET) + encrypted = d.pop("encrypted", UNSET) owner = d.pop("owner", UNSET) - _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) - preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] - if isinstance(_preprocessing_parameters, Unset): - preprocessing_parameters = UNSET - else: - preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) + project_id = d.pop("projectId", UNSET) - timeout = d.pop("timeout", UNSET) + release_results = d.pop("releaseResults", UNSET) _data_source_parameters = d.pop("dataSourceParameters", UNSET) data_source_parameters: Union[Unset, ComputationDataSourceParameters] @@ -222,6 +230,10 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: data_source_parameters = ComputationDataSourceParameters.from_dict(_data_source_parameters) + input_data_object = d.pop("inputDataObject", UNSET) + + target_public_key = d.pop("targetPublicKey", UNSET) + _computation = d.pop("computation", UNSET) computation: Union[Unset, ComputationDefinition] if isinstance(_computation, Unset): @@ -233,29 +245,28 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: secret_key = d.pop("secretKey", UNSET) - target_public_key = d.pop("targetPublicKey", UNSET) - key_switched_computation = cls( type=type, - join_id=join_id, + local=local, local_input=local_input, - wait=wait, - input_data_object=input_data_object, + preprocessing_parameters=preprocessing_parameters, cohort_id=cohort_id, - project_id=project_id, - release_results=release_results, + end_to_end_encrypted=end_to_end_encrypted, + local_input_id=local_input_id, + wait=wait, dp_policy=dp_policy, + join_id=join_id, + timeout=timeout, encrypted=encrypted, - local=local, - local_input_id=local_input_id, owner=owner, - preprocessing_parameters=preprocessing_parameters, - timeout=timeout, + project_id=project_id, + release_results=release_results, data_source_parameters=data_source_parameters, + input_data_object=input_data_object, + target_public_key=target_public_key, computation=computation, decrypt_results=decrypt_results, secret_key=secret_key, - target_public_key=target_public_key, ) key_switched_computation.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/logical_formula.py b/src/tuneinsight/api/sdk/models/logical_formula.py index 67725bf..3a8d23f 100644 --- a/src/tuneinsight/api/sdk/models/logical_formula.py +++ b/src/tuneinsight/api/sdk/models/logical_formula.py @@ -17,27 +17,19 @@ class LogicalFormula: """logical formula composing filters Attributes: - right_formula (Union[Unset, LogicalFormula]): logical formula composing filters - single_filter (Union[Unset, Filter]): left_formula (Union[Unset, LogicalFormula]): logical formula composing filters operator (Union[Unset, LogicalFormulaOperator]): + right_formula (Union[Unset, LogicalFormula]): logical formula composing filters + single_filter (Union[Unset, Filter]): """ - right_formula: Union[Unset, "LogicalFormula"] = UNSET - single_filter: Union[Unset, "Filter"] = UNSET left_formula: Union[Unset, "LogicalFormula"] = UNSET operator: Union[Unset, LogicalFormulaOperator] = UNSET + right_formula: Union[Unset, "LogicalFormula"] = UNSET + single_filter: Union[Unset, "Filter"] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - right_formula: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.right_formula, Unset): 
- right_formula = self.right_formula.to_dict() - - single_filter: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.single_filter, Unset): - single_filter = self.single_filter.to_dict() - left_formula: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.left_formula, Unset): left_formula = self.left_formula.to_dict() @@ -46,17 +38,25 @@ def to_dict(self) -> Dict[str, Any]: if not isinstance(self.operator, Unset): operator = self.operator.value + right_formula: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.right_formula, Unset): + right_formula = self.right_formula.to_dict() + + single_filter: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.single_filter, Unset): + single_filter = self.single_filter.to_dict() + field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if right_formula is not UNSET: - field_dict["rightFormula"] = right_formula - if single_filter is not UNSET: - field_dict["singleFilter"] = single_filter if left_formula is not UNSET: field_dict["leftFormula"] = left_formula if operator is not UNSET: field_dict["operator"] = operator + if right_formula is not UNSET: + field_dict["rightFormula"] = right_formula + if single_filter is not UNSET: + field_dict["singleFilter"] = single_filter return field_dict @@ -65,20 +65,6 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.filter_ import Filter d = src_dict.copy() - _right_formula = d.pop("rightFormula", UNSET) - right_formula: Union[Unset, LogicalFormula] - if isinstance(_right_formula, Unset): - right_formula = UNSET - else: - right_formula = LogicalFormula.from_dict(_right_formula) - - _single_filter = d.pop("singleFilter", UNSET) - single_filter: Union[Unset, Filter] - if isinstance(_single_filter, Unset): - single_filter = UNSET - else: - single_filter = Filter.from_dict(_single_filter) - _left_formula = d.pop("leftFormula", UNSET) left_formula: Union[Unset, LogicalFormula] if isinstance(_left_formula, Unset): @@ -93,11 +79,25 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: operator = LogicalFormulaOperator(_operator) + _right_formula = d.pop("rightFormula", UNSET) + right_formula: Union[Unset, LogicalFormula] + if isinstance(_right_formula, Unset): + right_formula = UNSET + else: + right_formula = LogicalFormula.from_dict(_right_formula) + + _single_filter = d.pop("singleFilter", UNSET) + single_filter: Union[Unset, Filter] + if isinstance(_single_filter, Unset): + single_filter = UNSET + else: + single_filter = Filter.from_dict(_single_filter) + logical_formula = cls( - right_formula=right_formula, - single_filter=single_filter, left_formula=left_formula, operator=operator, + right_formula=right_formula, + single_filter=single_filter, ) logical_formula.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/mock_method.py b/src/tuneinsight/api/sdk/models/mock_method.py new file mode 100644 index 0000000..ed1f63a --- /dev/null +++ b/src/tuneinsight/api/sdk/models/mock_method.py @@ -0,0 +1,15 @@ +from enum import Enum + + +class MockMethod(str, Enum): + PATIENTS = "patients" + ALERTS = "alerts" + NEUROLOGY_OBSERVATIONS = "neurology_observations" + GENERIC = "generic" + PRICES = "prices" + PERSONS = "persons" + SKUS = "skus" + CUSTOM_FUNCTION = "custom_function" + + def __str__(self) -> str: + return str(self.value) diff --git a/src/tuneinsight/api/sdk/models/model.py b/src/tuneinsight/api/sdk/models/model.py index 2d8a587..48e5f1a 100644 --- 
a/src/tuneinsight/api/sdk/models/model.py +++ b/src/tuneinsight/api/sdk/models/model.py @@ -20,79 +20,79 @@ class Model: """Machine learning model metadata definition Attributes: - computation_id (Union[Unset, str]): Computation that created this model if collective model - model_params (Union[Unset, ModelParams]): detailed parameters about the model, only returned when getting - specific model - training_algorithm (Union[Unset, TrainingAlgorithm]): the algorithm used to train the model - type (Union[Unset, ModelType]): whether the model is local (plaintext) or collective (ciphertext) - updated_at (Union[Unset, str]): created_at (Union[Unset, str]): data_object (Union[Unset, DataObject]): A data object definition. + type (Union[Unset, ModelType]): whether the model is local (plaintext) or collective (ciphertext) + updated_at (Union[Unset, str]): + training_algorithm (Union[Unset, TrainingAlgorithm]): the algorithm used to train the model + computation_id (Union[Unset, str]): Computation that created this model if collective model metadata (Union[Unset, ModelMetadata]): public metadata about the model model_id (Union[Unset, str]): Unique identifier of a model. + model_params (Union[Unset, ModelParams]): detailed parameters about the model, only returned when getting + specific model name (Union[Unset, str]): common name for the model """ - computation_id: Union[Unset, str] = UNSET - model_params: Union[Unset, "ModelParams"] = UNSET - training_algorithm: Union[Unset, TrainingAlgorithm] = UNSET - type: Union[Unset, ModelType] = UNSET - updated_at: Union[Unset, str] = UNSET created_at: Union[Unset, str] = UNSET data_object: Union[Unset, "DataObject"] = UNSET + type: Union[Unset, ModelType] = UNSET + updated_at: Union[Unset, str] = UNSET + training_algorithm: Union[Unset, TrainingAlgorithm] = UNSET + computation_id: Union[Unset, str] = UNSET metadata: Union[Unset, "ModelMetadata"] = UNSET model_id: Union[Unset, str] = UNSET + model_params: Union[Unset, "ModelParams"] = UNSET name: Union[Unset, str] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - computation_id = self.computation_id - model_params: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.model_params, Unset): - model_params = self.model_params.to_dict() - - training_algorithm: Union[Unset, str] = UNSET - if not isinstance(self.training_algorithm, Unset): - training_algorithm = self.training_algorithm.value + created_at = self.created_at + data_object: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.data_object, Unset): + data_object = self.data_object.to_dict() type: Union[Unset, str] = UNSET if not isinstance(self.type, Unset): type = self.type.value updated_at = self.updated_at - created_at = self.created_at - data_object: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.data_object, Unset): - data_object = self.data_object.to_dict() + training_algorithm: Union[Unset, str] = UNSET + if not isinstance(self.training_algorithm, Unset): + training_algorithm = self.training_algorithm.value + computation_id = self.computation_id metadata: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.metadata, Unset): metadata = self.metadata.to_dict() model_id = self.model_id + model_params: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.model_params, Unset): + model_params = self.model_params.to_dict() + name = self.name field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) 
field_dict.update({}) - if computation_id is not UNSET: - field_dict["computationId"] = computation_id - if model_params is not UNSET: - field_dict["modelParams"] = model_params - if training_algorithm is not UNSET: - field_dict["trainingAlgorithm"] = training_algorithm - if type is not UNSET: - field_dict["type"] = type - if updated_at is not UNSET: - field_dict["updatedAt"] = updated_at if created_at is not UNSET: field_dict["createdAt"] = created_at if data_object is not UNSET: field_dict["dataObject"] = data_object + if type is not UNSET: + field_dict["type"] = type + if updated_at is not UNSET: + field_dict["updatedAt"] = updated_at + if training_algorithm is not UNSET: + field_dict["trainingAlgorithm"] = training_algorithm + if computation_id is not UNSET: + field_dict["computationId"] = computation_id if metadata is not UNSET: field_dict["metadata"] = metadata if model_id is not UNSET: field_dict["modelID"] = model_id + if model_params is not UNSET: + field_dict["modelParams"] = model_params if name is not UNSET: field_dict["name"] = name @@ -105,21 +105,14 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.model_params import ModelParams d = src_dict.copy() - computation_id = d.pop("computationId", UNSET) - - _model_params = d.pop("modelParams", UNSET) - model_params: Union[Unset, ModelParams] - if isinstance(_model_params, Unset): - model_params = UNSET - else: - model_params = ModelParams.from_dict(_model_params) + created_at = d.pop("createdAt", UNSET) - _training_algorithm = d.pop("trainingAlgorithm", UNSET) - training_algorithm: Union[Unset, TrainingAlgorithm] - if isinstance(_training_algorithm, Unset): - training_algorithm = UNSET + _data_object = d.pop("dataObject", UNSET) + data_object: Union[Unset, DataObject] + if isinstance(_data_object, Unset): + data_object = UNSET else: - training_algorithm = TrainingAlgorithm(_training_algorithm) + data_object = DataObject.from_dict(_data_object) _type = d.pop("type", UNSET) type: Union[Unset, ModelType] @@ -130,14 +123,14 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: updated_at = d.pop("updatedAt", UNSET) - created_at = d.pop("createdAt", UNSET) - - _data_object = d.pop("dataObject", UNSET) - data_object: Union[Unset, DataObject] - if isinstance(_data_object, Unset): - data_object = UNSET + _training_algorithm = d.pop("trainingAlgorithm", UNSET) + training_algorithm: Union[Unset, TrainingAlgorithm] + if isinstance(_training_algorithm, Unset): + training_algorithm = UNSET else: - data_object = DataObject.from_dict(_data_object) + training_algorithm = TrainingAlgorithm(_training_algorithm) + + computation_id = d.pop("computationId", UNSET) _metadata = d.pop("metadata", UNSET) metadata: Union[Unset, ModelMetadata] @@ -148,18 +141,25 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: model_id = d.pop("modelID", UNSET) + _model_params = d.pop("modelParams", UNSET) + model_params: Union[Unset, ModelParams] + if isinstance(_model_params, Unset): + model_params = UNSET + else: + model_params = ModelParams.from_dict(_model_params) + name = d.pop("name", UNSET) model = cls( - computation_id=computation_id, - model_params=model_params, - training_algorithm=training_algorithm, - type=type, - updated_at=updated_at, created_at=created_at, data_object=data_object, + type=type, + updated_at=updated_at, + training_algorithm=training_algorithm, + computation_id=computation_id, metadata=metadata, model_id=model_id, + model_params=model_params, name=name, ) diff --git 
a/src/tuneinsight/api/sdk/models/model_definition.py b/src/tuneinsight/api/sdk/models/model_definition.py index 0503cbf..e6ca752 100644 --- a/src/tuneinsight/api/sdk/models/model_definition.py +++ b/src/tuneinsight/api/sdk/models/model_definition.py @@ -17,22 +17,21 @@ class ModelDefinition: """Definition of a model to upload Attributes: - name (str): common name to give to the model prediction_params (PredictionParams): subset of parameters required for only the prediction weights (List[List[float]]): Plaintext weights of the model as a float matrix - metadata (Union[Unset, ModelMetadata]): public metadata about the model + name (str): common name to give to the model project_id (Union[Unset, str]): Unique identifier of a project. + metadata (Union[Unset, ModelMetadata]): public metadata about the model """ - name: str prediction_params: "PredictionParams" weights: List[List[float]] - metadata: Union[Unset, "ModelMetadata"] = UNSET + name: str project_id: Union[Unset, str] = UNSET + metadata: Union[Unset, "ModelMetadata"] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - name = self.name prediction_params = self.prediction_params.to_dict() weights = [] @@ -41,25 +40,25 @@ def to_dict(self) -> Dict[str, Any]: weights.append(weights_item) + name = self.name + project_id = self.project_id metadata: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.metadata, Unset): metadata = self.metadata.to_dict() - project_id = self.project_id - field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update( { - "name": name, "predictionParams": prediction_params, "weights": weights, + "name": name, } ) - if metadata is not UNSET: - field_dict["metadata"] = metadata if project_id is not UNSET: field_dict["projectId"] = project_id + if metadata is not UNSET: + field_dict["metadata"] = metadata return field_dict @@ -69,8 +68,6 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.prediction_params import PredictionParams d = src_dict.copy() - name = d.pop("name") - prediction_params = PredictionParams.from_dict(d.pop("predictionParams")) weights = [] @@ -80,6 +77,10 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: weights.append(weights_item) + name = d.pop("name") + + project_id = d.pop("projectId", UNSET) + _metadata = d.pop("metadata", UNSET) metadata: Union[Unset, ModelMetadata] if isinstance(_metadata, Unset): @@ -87,14 +88,12 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: metadata = ModelMetadata.from_dict(_metadata) - project_id = d.pop("projectId", UNSET) - model_definition = cls( - name=name, prediction_params=prediction_params, weights=weights, - metadata=metadata, + name=name, project_id=project_id, + metadata=metadata, ) model_definition.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/model_metadata.py b/src/tuneinsight/api/sdk/models/model_metadata.py index 048eb87..a46b302 100644 --- a/src/tuneinsight/api/sdk/models/model_metadata.py +++ b/src/tuneinsight/api/sdk/models/model_metadata.py @@ -12,25 +12,21 @@ class ModelMetadata: """public metadata about the model Attributes: - classes (Union[Unset, List[str]]): optional labels for classes description (Union[Unset, str]): optional description for the model features (Union[Unset, List[str]]): optional labels for features num_classes (Union[Unset, int]): number of classes num_features (Union[Unset, int]): number of features + classes (Union[Unset,
List[str]]): optional labels for classes """ - classes: Union[Unset, List[str]] = UNSET description: Union[Unset, str] = UNSET features: Union[Unset, List[str]] = UNSET num_classes: Union[Unset, int] = UNSET num_features: Union[Unset, int] = UNSET + classes: Union[Unset, List[str]] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - classes: Union[Unset, List[str]] = UNSET - if not isinstance(self.classes, Unset): - classes = self.classes - description = self.description features: Union[Unset, List[str]] = UNSET if not isinstance(self.features, Unset): @@ -38,12 +34,13 @@ def to_dict(self) -> Dict[str, Any]: num_classes = self.num_classes num_features = self.num_features + classes: Union[Unset, List[str]] = UNSET + if not isinstance(self.classes, Unset): + classes = self.classes field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if classes is not UNSET: - field_dict["classes"] = classes if description is not UNSET: field_dict["description"] = description if features is not UNSET: @@ -52,14 +49,14 @@ def to_dict(self) -> Dict[str, Any]: field_dict["numClasses"] = num_classes if num_features is not UNSET: field_dict["numFeatures"] = num_features + if classes is not UNSET: + field_dict["classes"] = classes return field_dict @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() - classes = cast(List[str], d.pop("classes", UNSET)) - description = d.pop("description", UNSET) features = cast(List[str], d.pop("features", UNSET)) @@ -68,12 +65,14 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: num_features = d.pop("numFeatures", UNSET) + classes = cast(List[str], d.pop("classes", UNSET)) + model_metadata = cls( - classes=classes, description=description, features=features, num_classes=num_classes, num_features=num_features, + classes=classes, ) model_metadata.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/model_params.py b/src/tuneinsight/api/sdk/models/model_params.py index 727fcb0..45fc347 100644 --- a/src/tuneinsight/api/sdk/models/model_params.py +++ b/src/tuneinsight/api/sdk/models/model_params.py @@ -12,38 +12,38 @@ class ModelParams: """detailed parameters about the model, only returned when getting specific model Attributes: - cryptolib_params (Union[Unset, str]): cryptolib.Parameters marshaled and encoded in base64 for client operations prediction_params (Union[Unset, str]): base64 encoded prediction parameters + cryptolib_params (Union[Unset, str]): cryptolib.Parameters marshaled and encoded in base64 for client operations """ - cryptolib_params: Union[Unset, str] = UNSET prediction_params: Union[Unset, str] = UNSET + cryptolib_params: Union[Unset, str] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - cryptolib_params = self.cryptolib_params prediction_params = self.prediction_params + cryptolib_params = self.cryptolib_params field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if cryptolib_params is not UNSET: - field_dict["cryptolibParams"] = cryptolib_params if prediction_params is not UNSET: field_dict["predictionParams"] = prediction_params + if cryptolib_params is not UNSET: + field_dict["cryptolibParams"] = cryptolib_params return field_dict @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() - cryptolib_params = 
d.pop("cryptolibParams", UNSET) - prediction_params = d.pop("predictionParams", UNSET) + cryptolib_params = d.pop("cryptolibParams", UNSET) + model_params = cls( - cryptolib_params=cryptolib_params, prediction_params=prediction_params, + cryptolib_params=cryptolib_params, ) model_params.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/network.py b/src/tuneinsight/api/sdk/models/network.py index 9bf05b9..999cc73 100644 --- a/src/tuneinsight/api/sdk/models/network.py +++ b/src/tuneinsight/api/sdk/models/network.py @@ -18,22 +18,21 @@ class Network: """Network that represents a set of nodes Attributes: - name (Union[Unset, str]): nodes (Union[Unset, List['Node']]): topology (Union[Unset, Topology]): Network Topologies. 'star' or 'tree'. In star topology all nodes are connected to a central node. In tree topology all nodes are connected and aware of each other. visibility_type (Union[Unset, NetworkVisibilityType]): represents the type of visibility leaf nodes have in a network + name (Union[Unset, str]): """ - name: Union[Unset, str] = UNSET nodes: Union[Unset, List["Node"]] = UNSET topology: Union[Unset, Topology] = UNSET visibility_type: Union[Unset, NetworkVisibilityType] = UNSET + name: Union[Unset, str] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - name = self.name nodes: Union[Unset, List[Dict[str, Any]]] = UNSET if not isinstance(self.nodes, Unset): nodes = [] @@ -50,17 +49,19 @@ def to_dict(self) -> Dict[str, Any]: if not isinstance(self.visibility_type, Unset): visibility_type = self.visibility_type.value + name = self.name + field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if name is not UNSET: - field_dict["name"] = name if nodes is not UNSET: field_dict["nodes"] = nodes if topology is not UNSET: field_dict["topology"] = topology if visibility_type is not UNSET: field_dict["visibilityType"] = visibility_type + if name is not UNSET: + field_dict["name"] = name return field_dict @@ -69,8 +70,6 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.node import Node d = src_dict.copy() - name = d.pop("name", UNSET) - nodes = [] _nodes = d.pop("nodes", UNSET) for nodes_item_data in _nodes or []: @@ -92,11 +91,13 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: visibility_type = NetworkVisibilityType(_visibility_type) + name = d.pop("name", UNSET) + network = cls( - name=name, nodes=nodes, topology=topology, visibility_type=visibility_type, + name=name, ) network.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/node.py b/src/tuneinsight/api/sdk/models/node.py index 4de3733..1c0bad2 100644 --- a/src/tuneinsight/api/sdk/models/node.py +++ b/src/tuneinsight/api/sdk/models/node.py @@ -16,63 +16,63 @@ class Node: """Node or agent of the network Attributes: + has_user_management (Union[Unset, bool]): True if the node has the user management APIs enabled. is_contributor (Union[Unset, bool]): Indicates if this instance does contribute data. + url (Union[Unset, str]): name (Union[Unset, str]): organization (Union[Unset, Organization]): Organization taking part in a project - url (Union[Unset, str]): - certificate (Union[Unset, str]): Certificate of the node, in base64-encoded DER format. - has_user_management (Union[Unset, bool]): True if the node has the user management APIs enabled. - is_root (Union[Unset, bool]): True if the node is the root node in a tree topology network. 
api_path (Union[Unset, str]): + certificate (Union[Unset, str]): Certificate of the node, in base64-encoded DER format. current (Union[Unset, bool]): True if this node is the current one (root node). + is_root (Union[Unset, bool]): True if the node is the root node in a tree topology network. """ + has_user_management: Union[Unset, bool] = UNSET is_contributor: Union[Unset, bool] = UNSET + url: Union[Unset, str] = UNSET name: Union[Unset, str] = UNSET organization: Union[Unset, "Organization"] = UNSET - url: Union[Unset, str] = UNSET - certificate: Union[Unset, str] = UNSET - has_user_management: Union[Unset, bool] = UNSET - is_root: Union[Unset, bool] = UNSET api_path: Union[Unset, str] = UNSET + certificate: Union[Unset, str] = UNSET current: Union[Unset, bool] = UNSET + is_root: Union[Unset, bool] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: + has_user_management = self.has_user_management is_contributor = self.is_contributor + url = self.url name = self.name organization: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.organization, Unset): organization = self.organization.to_dict() - url = self.url - certificate = self.certificate - has_user_management = self.has_user_management - is_root = self.is_root api_path = self.api_path + certificate = self.certificate current = self.current + is_root = self.is_root field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) + if has_user_management is not UNSET: + field_dict["hasUserManagement"] = has_user_management if is_contributor is not UNSET: field_dict["isContributor"] = is_contributor + if url is not UNSET: + field_dict["url"] = url if name is not UNSET: field_dict["name"] = name if organization is not UNSET: field_dict["organization"] = organization - if url is not UNSET: - field_dict["url"] = url - if certificate is not UNSET: - field_dict["certificate"] = certificate - if has_user_management is not UNSET: - field_dict["hasUserManagement"] = has_user_management - if is_root is not UNSET: - field_dict["isRoot"] = is_root if api_path is not UNSET: field_dict["apiPath"] = api_path + if certificate is not UNSET: + field_dict["certificate"] = certificate if current is not UNSET: field_dict["current"] = current + if is_root is not UNSET: + field_dict["isRoot"] = is_root return field_dict @@ -81,8 +81,12 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.organization import Organization d = src_dict.copy() + has_user_management = d.pop("hasUserManagement", UNSET) + is_contributor = d.pop("isContributor", UNSET) + url = d.pop("url", UNSET) + name = d.pop("name", UNSET) _organization = d.pop("organization", UNSET) @@ -92,28 +96,24 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: organization = Organization.from_dict(_organization) - url = d.pop("url", UNSET) + api_path = d.pop("apiPath", UNSET) certificate = d.pop("certificate", UNSET) - has_user_management = d.pop("hasUserManagement", UNSET) + current = d.pop("current", UNSET) is_root = d.pop("isRoot", UNSET) - api_path = d.pop("apiPath", UNSET) - - current = d.pop("current", UNSET) - node = cls( + has_user_management=has_user_management, is_contributor=is_contributor, + url=url, name=name, organization=organization, - url=url, - certificate=certificate, - has_user_management=has_user_management, - is_root=is_root, api_path=api_path, + certificate=certificate, current=current, + is_root=is_root, ) 
node.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/node_status.py b/src/tuneinsight/api/sdk/models/node_status.py index 372a180..101856d 100644 --- a/src/tuneinsight/api/sdk/models/node_status.py +++ b/src/tuneinsight/api/sdk/models/node_status.py @@ -12,46 +12,46 @@ class NodeStatus: """Network Status of a node Attributes: + node (Union[Unset, str]): URL of the node status (Union[Unset, str]): Status (ok/nok) version (Union[Unset, str]): Version of the node - node (Union[Unset, str]): URL of the node """ + node: Union[Unset, str] = UNSET status: Union[Unset, str] = UNSET version: Union[Unset, str] = UNSET - node: Union[Unset, str] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: + node = self.node status = self.status version = self.version - node = self.node field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) + if node is not UNSET: + field_dict["node"] = node if status is not UNSET: field_dict["status"] = status if version is not UNSET: field_dict["version"] = version - if node is not UNSET: - field_dict["node"] = node return field_dict @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() + node = d.pop("node", UNSET) + status = d.pop("status", UNSET) version = d.pop("version", UNSET) - node = d.pop("node", UNSET) - node_status = cls( + node=node, status=status, version=version, - node=node, ) node_status.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/noise_parameters.py b/src/tuneinsight/api/sdk/models/noise_parameters.py index 2234670..7799e7e 100644 --- a/src/tuneinsight/api/sdk/models/noise_parameters.py +++ b/src/tuneinsight/api/sdk/models/noise_parameters.py @@ -12,54 +12,54 @@ class NoiseParameters: """parameters for adding differential privacy noise to the computation's encrypted output Attributes: - delta (Union[Unset, float]): probability of privacy leakage Default: 0.0001. - discrete (Union[Unset, bool]): whether to sample discrete noise or not Default: True. epsilon (Union[Unset, float]): the privacy budget Default: 0.2. sensitivity (Union[Unset, float]): sensitivity of the function applied Default: 1.0. + delta (Union[Unset, float]): probability of privacy leakage Default: 0.0001. + discrete (Union[Unset, bool]): whether to sample discrete noise or not Default: True. 
""" - delta: Union[Unset, float] = 0.0001 - discrete: Union[Unset, bool] = True epsilon: Union[Unset, float] = 0.2 sensitivity: Union[Unset, float] = 1.0 + delta: Union[Unset, float] = 0.0001 + discrete: Union[Unset, bool] = True additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - delta = self.delta - discrete = self.discrete epsilon = self.epsilon sensitivity = self.sensitivity + delta = self.delta + discrete = self.discrete field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if delta is not UNSET: - field_dict["delta"] = delta - if discrete is not UNSET: - field_dict["discrete"] = discrete if epsilon is not UNSET: field_dict["epsilon"] = epsilon if sensitivity is not UNSET: field_dict["sensitivity"] = sensitivity + if delta is not UNSET: + field_dict["delta"] = delta + if discrete is not UNSET: + field_dict["discrete"] = discrete return field_dict @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() - delta = d.pop("delta", UNSET) - - discrete = d.pop("discrete", UNSET) - epsilon = d.pop("epsilon", UNSET) sensitivity = d.pop("sensitivity", UNSET) + delta = d.pop("delta", UNSET) + + discrete = d.pop("discrete", UNSET) + noise_parameters = cls( - delta=delta, - discrete=discrete, epsilon=epsilon, sensitivity=sensitivity, + delta=delta, + discrete=discrete, ) noise_parameters.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/organization.py b/src/tuneinsight/api/sdk/models/organization.py index b18f92a..91c6eed 100644 --- a/src/tuneinsight/api/sdk/models/organization.py +++ b/src/tuneinsight/api/sdk/models/organization.py @@ -17,25 +17,23 @@ class Organization: """Organization taking part in a project Attributes: - group (Union[Unset, str]): Name of the corresponding keycloak group - name (Union[Unset, str]): Name of the organization authorization_status (Union[Unset, AuthorizationStatus]): Authorization status of the project coordinates (Union[Unset, OrganizationCoordinates]): Coordinates of the organization. (Decimal degrees, WGS84) country (Union[Unset, str]): Country code of the organization. 
(Lower case two-letter ISO 3166-1 alpha-2) data_officer (Union[Unset, str]): Name of the data officer in charge in the organization + group (Union[Unset, str]): Name of the corresponding keycloak group + name (Union[Unset, str]): Name of the organization """ - group: Union[Unset, str] = UNSET - name: Union[Unset, str] = UNSET authorization_status: Union[Unset, AuthorizationStatus] = UNSET coordinates: Union[Unset, "OrganizationCoordinates"] = UNSET country: Union[Unset, str] = UNSET data_officer: Union[Unset, str] = UNSET + group: Union[Unset, str] = UNSET + name: Union[Unset, str] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - group = self.group - name = self.name authorization_status: Union[Unset, str] = UNSET if not isinstance(self.authorization_status, Unset): authorization_status = self.authorization_status.value @@ -46,14 +44,12 @@ def to_dict(self) -> Dict[str, Any]: country = self.country data_officer = self.data_officer + group = self.group + name = self.name field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if group is not UNSET: - field_dict["group"] = group - if name is not UNSET: - field_dict["name"] = name if authorization_status is not UNSET: field_dict["authorizationStatus"] = authorization_status if coordinates is not UNSET: @@ -62,6 +58,10 @@ def to_dict(self) -> Dict[str, Any]: field_dict["country"] = country if data_officer is not UNSET: field_dict["dataOfficer"] = data_officer + if group is not UNSET: + field_dict["group"] = group + if name is not UNSET: + field_dict["name"] = name return field_dict @@ -70,10 +70,6 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.organization_coordinates import OrganizationCoordinates d = src_dict.copy() - group = d.pop("group", UNSET) - - name = d.pop("name", UNSET) - _authorization_status = d.pop("authorizationStatus", UNSET) authorization_status: Union[Unset, AuthorizationStatus] if isinstance(_authorization_status, Unset): @@ -92,13 +88,17 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: data_officer = d.pop("dataOfficer", UNSET) + group = d.pop("group", UNSET) + + name = d.pop("name", UNSET) + organization = cls( - group=group, - name=name, authorization_status=authorization_status, coordinates=coordinates, country=country, data_officer=data_officer, + group=group, + name=name, ) organization.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/organization_coordinates.py b/src/tuneinsight/api/sdk/models/organization_coordinates.py index 0da918f..d640851 100644 --- a/src/tuneinsight/api/sdk/models/organization_coordinates.py +++ b/src/tuneinsight/api/sdk/models/organization_coordinates.py @@ -12,38 +12,38 @@ class OrganizationCoordinates: """Coordinates of the organization. 
(Decimal degrees, WGS84) Attributes: - latitude (Union[Unset, float]): longitude (Union[Unset, float]): + latitude (Union[Unset, float]): """ - latitude: Union[Unset, float] = UNSET longitude: Union[Unset, float] = UNSET + latitude: Union[Unset, float] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - latitude = self.latitude longitude = self.longitude + latitude = self.latitude field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if latitude is not UNSET: - field_dict["latitude"] = latitude if longitude is not UNSET: field_dict["longitude"] = longitude + if latitude is not UNSET: + field_dict["latitude"] = latitude return field_dict @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() - latitude = d.pop("latitude", UNSET) - longitude = d.pop("longitude", UNSET) + latitude = d.pop("latitude", UNSET) + organization_coordinates = cls( - latitude=latitude, longitude=longitude, + latitude=latitude, ) organization_coordinates.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/paginated_result.py b/src/tuneinsight/api/sdk/models/paginated_result.py index 7ad02b0..15e8f1a 100644 --- a/src/tuneinsight/api/sdk/models/paginated_result.py +++ b/src/tuneinsight/api/sdk/models/paginated_result.py @@ -11,40 +11,41 @@ class PaginatedResult: """ Attributes: - total (Union[Unset, int]): items (Union[Unset, List[Any]]): + total (Union[Unset, int]): """ - total: Union[Unset, int] = UNSET items: Union[Unset, List[Any]] = UNSET + total: Union[Unset, int] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - total = self.total items: Union[Unset, List[Any]] = UNSET if not isinstance(self.items, Unset): items = self.items + total = self.total + field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if total is not UNSET: - field_dict["total"] = total if items is not UNSET: field_dict["items"] = items + if total is not UNSET: + field_dict["total"] = total return field_dict @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() - total = d.pop("total", UNSET) - items = cast(List[Any], d.pop("items", UNSET)) + total = d.pop("total", UNSET) + paginated_result = cls( - total=total, items=items, + total=total, ) paginated_result.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/participant.py b/src/tuneinsight/api/sdk/models/participant.py index 23fb228..7fb917d 100644 --- a/src/tuneinsight/api/sdk/models/participant.py +++ b/src/tuneinsight/api/sdk/models/participant.py @@ -3,6 +3,7 @@ import attr from ..models.authorization_status import AuthorizationStatus +from ..models.participation_status import ParticipationStatus from ..models.project_status import ProjectStatus from ..types import UNSET, Unset @@ -19,21 +20,27 @@ class Participant: """Node participating in a project Attributes: + authorization_status (Union[Unset, AuthorizationStatus]): Authorization status of the project input_metadata (Union[Unset, DataSourceMetadata]): metadata about a datasource is_contributor (Union[Unset, None, bool]): node (Union[Unset, Node]): Node or agent of the network + participation_status (Union[Unset, ParticipationStatus]): participation state of a project's participant status (Union[Unset, ProjectStatus]): Stages of a project workflow - authorization_status (Union[Unset, 
AuthorizationStatus]): Authorization status of the project """ + authorization_status: Union[Unset, AuthorizationStatus] = UNSET input_metadata: Union[Unset, "DataSourceMetadata"] = UNSET is_contributor: Union[Unset, None, bool] = UNSET node: Union[Unset, "Node"] = UNSET + participation_status: Union[Unset, ParticipationStatus] = UNSET status: Union[Unset, ProjectStatus] = UNSET - authorization_status: Union[Unset, AuthorizationStatus] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: + authorization_status: Union[Unset, str] = UNSET + if not isinstance(self.authorization_status, Unset): + authorization_status = self.authorization_status.value + input_metadata: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.input_metadata, Unset): input_metadata = self.input_metadata.to_dict() @@ -43,27 +50,29 @@ def to_dict(self) -> Dict[str, Any]: if not isinstance(self.node, Unset): node = self.node.to_dict() + participation_status: Union[Unset, str] = UNSET + if not isinstance(self.participation_status, Unset): + participation_status = self.participation_status.value + status: Union[Unset, str] = UNSET if not isinstance(self.status, Unset): status = self.status.value - authorization_status: Union[Unset, str] = UNSET - if not isinstance(self.authorization_status, Unset): - authorization_status = self.authorization_status.value - field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) + if authorization_status is not UNSET: + field_dict["authorizationStatus"] = authorization_status if input_metadata is not UNSET: field_dict["inputMetadata"] = input_metadata if is_contributor is not UNSET: field_dict["isContributor"] = is_contributor if node is not UNSET: field_dict["node"] = node + if participation_status is not UNSET: + field_dict["participationStatus"] = participation_status if status is not UNSET: field_dict["status"] = status - if authorization_status is not UNSET: - field_dict["authorizationStatus"] = authorization_status return field_dict @@ -73,6 +82,13 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.node import Node d = src_dict.copy() + _authorization_status = d.pop("authorizationStatus", UNSET) + authorization_status: Union[Unset, AuthorizationStatus] + if isinstance(_authorization_status, Unset): + authorization_status = UNSET + else: + authorization_status = AuthorizationStatus(_authorization_status) + _input_metadata = d.pop("inputMetadata", UNSET) input_metadata: Union[Unset, DataSourceMetadata] if isinstance(_input_metadata, Unset): @@ -89,6 +105,13 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: node = Node.from_dict(_node) + _participation_status = d.pop("participationStatus", UNSET) + participation_status: Union[Unset, ParticipationStatus] + if isinstance(_participation_status, Unset): + participation_status = UNSET + else: + participation_status = ParticipationStatus(_participation_status) + _status = d.pop("status", UNSET) status: Union[Unset, ProjectStatus] if isinstance(_status, Unset): @@ -96,19 +119,13 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: status = ProjectStatus(_status) - _authorization_status = d.pop("authorizationStatus", UNSET) - authorization_status: Union[Unset, AuthorizationStatus] - if isinstance(_authorization_status, Unset): - authorization_status = UNSET - else: - authorization_status = AuthorizationStatus(_authorization_status) - participant = cls( + 
authorization_status=authorization_status, input_metadata=input_metadata, is_contributor=is_contributor, node=node, + participation_status=participation_status, status=status, - authorization_status=authorization_status, ) participant.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/participation_status.py b/src/tuneinsight/api/sdk/models/participation_status.py new file mode 100644 index 0000000..5bc02d8 --- /dev/null +++ b/src/tuneinsight/api/sdk/models/participation_status.py @@ -0,0 +1,10 @@ +from enum import Enum + + +class ParticipationStatus(str, Enum): + PARTICIPATING = "participating" + PENDING = "pending" + UNAVAILABLE = "unavailable" + + def __str__(self) -> str: + return str(self.value) diff --git a/src/tuneinsight/api/sdk/models/post_data_object_json_body.py b/src/tuneinsight/api/sdk/models/post_data_object_json_body.py index 8fd9261..1833ea6 100644 --- a/src/tuneinsight/api/sdk/models/post_data_object_json_body.py +++ b/src/tuneinsight/api/sdk/models/post_data_object_json_body.py @@ -18,110 +18,110 @@ class PostDataObjectJsonBody: """ Attributes: - data_object_id (Union[Unset, str]): Unique identifier of a data object. - project_id (Union[Unset, str]): Unique identifier of a project. columns (Union[Unset, List[str]]): - data_source_id (Union[Unset, str]): Data source adapting into data object - encrypted (Union[Unset, bool]): indicator whether or not the uploaded dataobject is encrypted + data_object_id (Union[Unset, str]): Unique identifier of a data object. + shared (Union[Unset, bool]): whether the dataobject is meant to be used as a collective input + visibility_status (Union[Unset, DataObjectVisibilityStatus]): type of visibility set to the dataobject + data_object_shared_id (Union[Unset, str]): Shared identifier of a data object. json_path (Union[Unset, str]): - key_info (Union[Unset, KeyInfo]): information about keys + private_key (Union[Unset, str]): Unique identifier of a data object. public_key (Union[Unset, str]): Unique identifier of a data object. + query (Union[Unset, str]): + session_id (Union[Unset, str]): Unique identifier of a session type (Union[Unset, DataObjectType]): type of the dataobject - data_object_shared_id (Union[Unset, str]): Shared identifier of a data object. + encrypted (Union[Unset, bool]): indicator whether or not the uploaded dataobject is encrypted method (Union[Unset, DataObjectCreationMethod]): Method of creation: from a data source or by encrypting/decrypting a data object, or simply create a new one - private_key (Union[Unset, str]): Unique identifier of a data object. - session_id (Union[Unset, str]): Unique identifier of a session - shared (Union[Unset, bool]): whether the dataobject is meant to be used as a collective input - query (Union[Unset, str]): - visibility_status (Union[Unset, DataObjectVisibilityStatus]): type of visibility set to the dataobject + project_id (Union[Unset, str]): Unique identifier of a project. 
+ data_source_id (Union[Unset, str]): Data source adapting into data object + key_info (Union[Unset, KeyInfo]): information about keys """ - data_object_id: Union[Unset, str] = UNSET - project_id: Union[Unset, str] = UNSET columns: Union[Unset, List[str]] = UNSET - data_source_id: Union[Unset, str] = UNSET - encrypted: Union[Unset, bool] = UNSET + data_object_id: Union[Unset, str] = UNSET + shared: Union[Unset, bool] = UNSET + visibility_status: Union[Unset, DataObjectVisibilityStatus] = UNSET + data_object_shared_id: Union[Unset, str] = UNSET json_path: Union[Unset, str] = UNSET - key_info: Union[Unset, "KeyInfo"] = UNSET + private_key: Union[Unset, str] = UNSET public_key: Union[Unset, str] = UNSET + query: Union[Unset, str] = UNSET + session_id: Union[Unset, str] = UNSET type: Union[Unset, DataObjectType] = UNSET - data_object_shared_id: Union[Unset, str] = UNSET + encrypted: Union[Unset, bool] = UNSET method: Union[Unset, DataObjectCreationMethod] = UNSET - private_key: Union[Unset, str] = UNSET - session_id: Union[Unset, str] = UNSET - shared: Union[Unset, bool] = UNSET - query: Union[Unset, str] = UNSET - visibility_status: Union[Unset, DataObjectVisibilityStatus] = UNSET + project_id: Union[Unset, str] = UNSET + data_source_id: Union[Unset, str] = UNSET + key_info: Union[Unset, "KeyInfo"] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - data_object_id = self.data_object_id - project_id = self.project_id columns: Union[Unset, List[str]] = UNSET if not isinstance(self.columns, Unset): columns = self.columns - data_source_id = self.data_source_id - encrypted = self.encrypted - json_path = self.json_path - key_info: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.key_info, Unset): - key_info = self.key_info.to_dict() + data_object_id = self.data_object_id + shared = self.shared + visibility_status: Union[Unset, str] = UNSET + if not isinstance(self.visibility_status, Unset): + visibility_status = self.visibility_status.value + data_object_shared_id = self.data_object_shared_id + json_path = self.json_path + private_key = self.private_key public_key = self.public_key + query = self.query + session_id = self.session_id type: Union[Unset, str] = UNSET if not isinstance(self.type, Unset): type = self.type.value - data_object_shared_id = self.data_object_shared_id + encrypted = self.encrypted method: Union[Unset, str] = UNSET if not isinstance(self.method, Unset): method = self.method.value - private_key = self.private_key - session_id = self.session_id - shared = self.shared - query = self.query - visibility_status: Union[Unset, str] = UNSET - if not isinstance(self.visibility_status, Unset): - visibility_status = self.visibility_status.value + project_id = self.project_id + data_source_id = self.data_source_id + key_info: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.key_info, Unset): + key_info = self.key_info.to_dict() field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if data_object_id is not UNSET: - field_dict["dataObjectId"] = data_object_id - if project_id is not UNSET: - field_dict["projectId"] = project_id if columns is not UNSET: field_dict["columns"] = columns - if data_source_id is not UNSET: - field_dict["dataSourceId"] = data_source_id - if encrypted is not UNSET: - field_dict["encrypted"] = encrypted + if data_object_id is not UNSET: + field_dict["dataObjectId"] = data_object_id + if shared is not UNSET: + 
field_dict["shared"] = shared + if visibility_status is not UNSET: + field_dict["visibilityStatus"] = visibility_status + if data_object_shared_id is not UNSET: + field_dict["dataObjectSharedId"] = data_object_shared_id if json_path is not UNSET: field_dict["jsonPath"] = json_path - if key_info is not UNSET: - field_dict["keyInfo"] = key_info + if private_key is not UNSET: + field_dict["privateKey"] = private_key if public_key is not UNSET: field_dict["publicKey"] = public_key + if query is not UNSET: + field_dict["query"] = query + if session_id is not UNSET: + field_dict["sessionId"] = session_id if type is not UNSET: field_dict["type"] = type - if data_object_shared_id is not UNSET: - field_dict["dataObjectSharedId"] = data_object_shared_id + if encrypted is not UNSET: + field_dict["encrypted"] = encrypted if method is not UNSET: field_dict["method"] = method - if private_key is not UNSET: - field_dict["privateKey"] = private_key - if session_id is not UNSET: - field_dict["sessionId"] = session_id - if shared is not UNSET: - field_dict["shared"] = shared - if query is not UNSET: - field_dict["query"] = query - if visibility_status is not UNSET: - field_dict["visibilityStatus"] = visibility_status + if project_id is not UNSET: + field_dict["projectId"] = project_id + if data_source_id is not UNSET: + field_dict["dataSourceId"] = data_source_id + if key_info is not UNSET: + field_dict["keyInfo"] = key_info return field_dict @@ -130,27 +130,31 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.key_info import KeyInfo d = src_dict.copy() - data_object_id = d.pop("dataObjectId", UNSET) + columns = cast(List[str], d.pop("columns", UNSET)) - project_id = d.pop("projectId", UNSET) + data_object_id = d.pop("dataObjectId", UNSET) - columns = cast(List[str], d.pop("columns", UNSET)) + shared = d.pop("shared", UNSET) - data_source_id = d.pop("dataSourceId", UNSET) + _visibility_status = d.pop("visibilityStatus", UNSET) + visibility_status: Union[Unset, DataObjectVisibilityStatus] + if isinstance(_visibility_status, Unset): + visibility_status = UNSET + else: + visibility_status = DataObjectVisibilityStatus(_visibility_status) - encrypted = d.pop("encrypted", UNSET) + data_object_shared_id = d.pop("dataObjectSharedId", UNSET) json_path = d.pop("jsonPath", UNSET) - _key_info = d.pop("keyInfo", UNSET) - key_info: Union[Unset, KeyInfo] - if isinstance(_key_info, Unset): - key_info = UNSET - else: - key_info = KeyInfo.from_dict(_key_info) + private_key = d.pop("privateKey", UNSET) public_key = d.pop("publicKey", UNSET) + query = d.pop("query", UNSET) + + session_id = d.pop("sessionId", UNSET) + _type = d.pop("type", UNSET) type: Union[Unset, DataObjectType] if isinstance(_type, Unset): @@ -158,7 +162,7 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: type = DataObjectType(_type) - data_object_shared_id = d.pop("dataObjectSharedId", UNSET) + encrypted = d.pop("encrypted", UNSET) _method = d.pop("method", UNSET) method: Union[Unset, DataObjectCreationMethod] @@ -167,38 +171,34 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: method = DataObjectCreationMethod(_method) - private_key = d.pop("privateKey", UNSET) - - session_id = d.pop("sessionId", UNSET) - - shared = d.pop("shared", UNSET) + project_id = d.pop("projectId", UNSET) - query = d.pop("query", UNSET) + data_source_id = d.pop("dataSourceId", UNSET) - _visibility_status = d.pop("visibilityStatus", UNSET) - visibility_status: Union[Unset, DataObjectVisibilityStatus] - if 
isinstance(_visibility_status, Unset): - visibility_status = UNSET + _key_info = d.pop("keyInfo", UNSET) + key_info: Union[Unset, KeyInfo] + if isinstance(_key_info, Unset): + key_info = UNSET else: - visibility_status = DataObjectVisibilityStatus(_visibility_status) + key_info = KeyInfo.from_dict(_key_info) post_data_object_json_body = cls( - data_object_id=data_object_id, - project_id=project_id, columns=columns, - data_source_id=data_source_id, - encrypted=encrypted, + data_object_id=data_object_id, + shared=shared, + visibility_status=visibility_status, + data_object_shared_id=data_object_shared_id, json_path=json_path, - key_info=key_info, + private_key=private_key, public_key=public_key, + query=query, + session_id=session_id, type=type, - data_object_shared_id=data_object_shared_id, + encrypted=encrypted, method=method, - private_key=private_key, - session_id=session_id, - shared=shared, - query=query, - visibility_status=visibility_status, + project_id=project_id, + data_source_id=data_source_id, + key_info=key_info, ) post_data_object_json_body.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/post_data_source_query_json_body.py b/src/tuneinsight/api/sdk/models/post_data_source_query_json_body.py index 988b70b..b19ac03 100644 --- a/src/tuneinsight/api/sdk/models/post_data_source_query_json_body.py +++ b/src/tuneinsight/api/sdk/models/post_data_source_query_json_body.py @@ -18,47 +18,48 @@ class PostDataSourceQueryJsonBody: """ Attributes: + operation (Union[Unset, str]): + output_data_objects_shared_i_ds (Union[Unset, PostDataSourceQueryJsonBodyOutputDataObjectsSharedIDs]): Map with + of key/value pairs containing the shared IDs of the output data objects for the requested operation. parameters (Union[Unset, PostDataSourceQueryJsonBodyParameters]): Parameters for the requested operation. target_public_key (Union[Unset, str]): If specified, b64 encoded public key to encrypt the data objects with. target_public_key_id (Union[Unset, str]): If specified, id of the dataobject of the public key to encrypt the data objects with. - operation (Union[Unset, str]): - output_data_objects_shared_i_ds (Union[Unset, PostDataSourceQueryJsonBodyOutputDataObjectsSharedIDs]): Map with - of key/value pairs containing the shared IDs of the output data objects for the requested operation.
""" + operation: Union[Unset, str] = UNSET + output_data_objects_shared_i_ds: Union[Unset, "PostDataSourceQueryJsonBodyOutputDataObjectsSharedIDs"] = UNSET parameters: Union[Unset, "PostDataSourceQueryJsonBodyParameters"] = UNSET target_public_key: Union[Unset, str] = UNSET target_public_key_id: Union[Unset, str] = UNSET - operation: Union[Unset, str] = UNSET - output_data_objects_shared_i_ds: Union[Unset, "PostDataSourceQueryJsonBodyOutputDataObjectsSharedIDs"] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: + operation = self.operation + output_data_objects_shared_i_ds: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.output_data_objects_shared_i_ds, Unset): + output_data_objects_shared_i_ds = self.output_data_objects_shared_i_ds.to_dict() + parameters: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.parameters, Unset): parameters = self.parameters.to_dict() target_public_key = self.target_public_key target_public_key_id = self.target_public_key_id - operation = self.operation - output_data_objects_shared_i_ds: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.output_data_objects_shared_i_ds, Unset): - output_data_objects_shared_i_ds = self.output_data_objects_shared_i_ds.to_dict() field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) + if operation is not UNSET: + field_dict["operation"] = operation + if output_data_objects_shared_i_ds is not UNSET: + field_dict["outputDataObjectsSharedIDs"] = output_data_objects_shared_i_ds if parameters is not UNSET: field_dict["parameters"] = parameters if target_public_key is not UNSET: field_dict["targetPublicKey"] = target_public_key if target_public_key_id is not UNSET: field_dict["targetPublicKeyId"] = target_public_key_id - if operation is not UNSET: - field_dict["operation"] = operation - if output_data_objects_shared_i_ds is not UNSET: - field_dict["outputDataObjectsSharedIDs"] = output_data_objects_shared_i_ds return field_dict @@ -70,17 +71,6 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.post_data_source_query_json_body_parameters import PostDataSourceQueryJsonBodyParameters d = src_dict.copy() - _parameters = d.pop("parameters", UNSET) - parameters: Union[Unset, PostDataSourceQueryJsonBodyParameters] - if isinstance(_parameters, Unset): - parameters = UNSET - else: - parameters = PostDataSourceQueryJsonBodyParameters.from_dict(_parameters) - - target_public_key = d.pop("targetPublicKey", UNSET) - - target_public_key_id = d.pop("targetPublicKeyId", UNSET) - operation = d.pop("operation", UNSET) _output_data_objects_shared_i_ds = d.pop("outputDataObjectsSharedIDs", UNSET) @@ -92,12 +82,23 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: _output_data_objects_shared_i_ds ) + _parameters = d.pop("parameters", UNSET) + parameters: Union[Unset, PostDataSourceQueryJsonBodyParameters] + if isinstance(_parameters, Unset): + parameters = UNSET + else: + parameters = PostDataSourceQueryJsonBodyParameters.from_dict(_parameters) + + target_public_key = d.pop("targetPublicKey", UNSET) + + target_public_key_id = d.pop("targetPublicKeyId", UNSET) + post_data_source_query_json_body = cls( + operation=operation, + output_data_objects_shared_i_ds=output_data_objects_shared_i_ds, parameters=parameters, target_public_key=target_public_key, target_public_key_id=target_public_key_id, - operation=operation, - 
output_data_objects_shared_i_ds=output_data_objects_shared_i_ds, ) post_data_source_query_json_body.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/post_mock_dataset_method.py b/src/tuneinsight/api/sdk/models/post_mock_dataset_method.py new file mode 100644 index 0000000..3503569 --- /dev/null +++ b/src/tuneinsight/api/sdk/models/post_mock_dataset_method.py @@ -0,0 +1,15 @@ +from enum import Enum + + +class PostMockDatasetMethod(str, Enum): + PATIENTS = "patients" + ALERTS = "alerts" + NEUROLOGY_OBSERVATIONS = "neurology_observations" + GENERIC = "generic" + PRICES = "prices" + PERSONS = "persons" + SKUS = "skus" + CUSTOM_FUNCTION = "custom_function" + + def __str__(self) -> str: + return str(self.value) diff --git a/src/tuneinsight/api/sdk/models/post_project_data_source_query_json_body.py b/src/tuneinsight/api/sdk/models/post_project_data_source_query_json_body.py index cd08013..7c3ec5f 100644 --- a/src/tuneinsight/api/sdk/models/post_project_data_source_query_json_body.py +++ b/src/tuneinsight/api/sdk/models/post_project_data_source_query_json_body.py @@ -23,79 +23,79 @@ class PostProjectDataSourceQueryJsonBody: """ Attributes: - output_data_objects_names (Union[Unset, List[str]]): (Only for client) List of output data object names for the - requested operation. It should match the specific data source requirements. (e.g. ["count", "patientList"]) - parameters (Union[Unset, PostProjectDataSourceQueryJsonBodyParameters]): Parameters for the requested operation. - target_public_key (Union[Unset, str]): if provided, the results are key switched to this public key (should be - encoded in base64 from its bytes representation) and the resulting ciphertables returned target_public_key_id (Union[Unset, str]): If specified, id of the dataobject of the public key to encrypt the data objects with. aggregation_type (Union[Unset, PostProjectDataSourceQueryJsonBodyAggregationType]): Requests if and how results should be aggregated across the nodes broadcast (Union[Unset, bool]): Temporary field. Always set to false. Only used for server-server communication - operation (Union[Unset, str]): Is the string describing the type of operation to run in the data source output_data_objects_shared_i_ds (Union[Unset, PostProjectDataSourceQueryJsonBodyOutputDataObjectsSharedIDs]): (Only for node-to-node) Map with of key/value pairs containing the shared IDs of the output data objects for the requested operation. + parameters (Union[Unset, PostProjectDataSourceQueryJsonBodyParameters]): Parameters for the requested operation. + target_public_key (Union[Unset, str]): if provided, the results are key switched to this public key (should be + encoded in base64 from its bytes representation) and the resulting ciphertables returned + operation (Union[Unset, str]): Is the string describing the type of operation to run in the data source + output_data_objects_names (Union[Unset, List[str]]): (Only for client) List of output data object names for the - requested operation. It should match the specific data source requirements. (e.g. ["count", "patientList"]) wait (Union[Unset, bool]): If true, the request will wait for the result (synchronous). If false, the request will return immediately with a query id (asynchronous). Default: True.
""" - output_data_objects_names: Union[Unset, List[str]] = UNSET - parameters: Union[Unset, "PostProjectDataSourceQueryJsonBodyParameters"] = UNSET - target_public_key: Union[Unset, str] = UNSET target_public_key_id: Union[Unset, str] = UNSET aggregation_type: Union[Unset, PostProjectDataSourceQueryJsonBodyAggregationType] = UNSET broadcast: Union[Unset, bool] = UNSET - operation: Union[Unset, str] = UNSET output_data_objects_shared_i_ds: Union[ Unset, "PostProjectDataSourceQueryJsonBodyOutputDataObjectsSharedIDs" ] = UNSET + parameters: Union[Unset, "PostProjectDataSourceQueryJsonBodyParameters"] = UNSET + target_public_key: Union[Unset, str] = UNSET + operation: Union[Unset, str] = UNSET + output_data_objects_names: Union[Unset, List[str]] = UNSET wait: Union[Unset, bool] = True additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - output_data_objects_names: Union[Unset, List[str]] = UNSET - if not isinstance(self.output_data_objects_names, Unset): - output_data_objects_names = self.output_data_objects_names - - parameters: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.parameters, Unset): - parameters = self.parameters.to_dict() - - target_public_key = self.target_public_key target_public_key_id = self.target_public_key_id aggregation_type: Union[Unset, str] = UNSET if not isinstance(self.aggregation_type, Unset): aggregation_type = self.aggregation_type.value broadcast = self.broadcast - operation = self.operation output_data_objects_shared_i_ds: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.output_data_objects_shared_i_ds, Unset): output_data_objects_shared_i_ds = self.output_data_objects_shared_i_ds.to_dict() + parameters: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.parameters, Unset): + parameters = self.parameters.to_dict() + + target_public_key = self.target_public_key + operation = self.operation + output_data_objects_names: Union[Unset, List[str]] = UNSET + if not isinstance(self.output_data_objects_names, Unset): + output_data_objects_names = self.output_data_objects_names + wait = self.wait field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if output_data_objects_names is not UNSET: - field_dict["outputDataObjectsNames"] = output_data_objects_names - if parameters is not UNSET: - field_dict["parameters"] = parameters - if target_public_key is not UNSET: - field_dict["targetPublicKey"] = target_public_key if target_public_key_id is not UNSET: field_dict["targetPublicKeyId"] = target_public_key_id if aggregation_type is not UNSET: field_dict["aggregationType"] = aggregation_type if broadcast is not UNSET: field_dict["broadcast"] = broadcast - if operation is not UNSET: - field_dict["operation"] = operation if output_data_objects_shared_i_ds is not UNSET: field_dict["outputDataObjectsSharedIDs"] = output_data_objects_shared_i_ds + if parameters is not UNSET: + field_dict["parameters"] = parameters + if target_public_key is not UNSET: + field_dict["targetPublicKey"] = target_public_key + if operation is not UNSET: + field_dict["operation"] = operation + if output_data_objects_names is not UNSET: + field_dict["outputDataObjectsNames"] = output_data_objects_names if wait is not UNSET: field_dict["wait"] = wait @@ -111,17 +111,6 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: ) d = src_dict.copy() - output_data_objects_names = cast(List[str], d.pop("outputDataObjectsNames", UNSET)) - - _parameters = 
d.pop("parameters", UNSET) - parameters: Union[Unset, PostProjectDataSourceQueryJsonBodyParameters] - if isinstance(_parameters, Unset): - parameters = UNSET - else: - parameters = PostProjectDataSourceQueryJsonBodyParameters.from_dict(_parameters) - - target_public_key = d.pop("targetPublicKey", UNSET) - target_public_key_id = d.pop("targetPublicKeyId", UNSET) _aggregation_type = d.pop("aggregationType", UNSET) @@ -133,8 +122,6 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: broadcast = d.pop("broadcast", UNSET) - operation = d.pop("operation", UNSET) - _output_data_objects_shared_i_ds = d.pop("outputDataObjectsSharedIDs", UNSET) output_data_objects_shared_i_ds: Union[Unset, PostProjectDataSourceQueryJsonBodyOutputDataObjectsSharedIDs] if isinstance(_output_data_objects_shared_i_ds, Unset): @@ -144,17 +131,30 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: _output_data_objects_shared_i_ds ) + _parameters = d.pop("parameters", UNSET) + parameters: Union[Unset, PostProjectDataSourceQueryJsonBodyParameters] + if isinstance(_parameters, Unset): + parameters = UNSET + else: + parameters = PostProjectDataSourceQueryJsonBodyParameters.from_dict(_parameters) + + target_public_key = d.pop("targetPublicKey", UNSET) + + operation = d.pop("operation", UNSET) + + output_data_objects_names = cast(List[str], d.pop("outputDataObjectsNames", UNSET)) + wait = d.pop("wait", UNSET) post_project_data_source_query_json_body = cls( - output_data_objects_names=output_data_objects_names, - parameters=parameters, - target_public_key=target_public_key, target_public_key_id=target_public_key_id, aggregation_type=aggregation_type, broadcast=broadcast, - operation=operation, output_data_objects_shared_i_ds=output_data_objects_shared_i_ds, + parameters=parameters, + target_public_key=target_public_key, + operation=operation, + output_data_objects_names=output_data_objects_names, wait=wait, ) diff --git a/src/tuneinsight/api/sdk/models/privacy_budget_parameters.py b/src/tuneinsight/api/sdk/models/privacy_budget_parameters.py index 3aa6357..02ae7e3 100644 --- a/src/tuneinsight/api/sdk/models/privacy_budget_parameters.py +++ b/src/tuneinsight/api/sdk/models/privacy_budget_parameters.py @@ -21,23 +21,28 @@ class PrivacyBudgetParameters: More precisely, if a computation adds noise that is equivalent ϵ=0.1 then 0.1 of the privacy budget is used. Attributes: + allocation (Union[Unset, float]): budget allocated initially. + allocation_interval (Union[Unset, Duration]): definition of a date-independent time interval increment (Union[Unset, float]): value incremented after each allocation interval max_allocation (Union[Unset, float]): maximum value that can be taken by the privacy budget scope (Union[Unset, PrivacyBudgetParametersScope]): scope of the budget start (Union[Unset, datetime.datetime]): date time at which the budget is effective - allocation (Union[Unset, float]): budget allocated initially. 
- allocation_interval (Union[Unset, Duration]): definition of a date-independent time interval """ + allocation: Union[Unset, float] = UNSET + allocation_interval: Union[Unset, "Duration"] = UNSET increment: Union[Unset, float] = UNSET max_allocation: Union[Unset, float] = UNSET scope: Union[Unset, PrivacyBudgetParametersScope] = UNSET start: Union[Unset, datetime.datetime] = UNSET - allocation: Union[Unset, float] = UNSET - allocation_interval: Union[Unset, "Duration"] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: + allocation = self.allocation + allocation_interval: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.allocation_interval, Unset): + allocation_interval = self.allocation_interval.to_dict() + increment = self.increment max_allocation = self.max_allocation scope: Union[Unset, str] = UNSET @@ -48,14 +53,13 @@ def to_dict(self) -> Dict[str, Any]: if not isinstance(self.start, Unset): start = self.start.isoformat() - allocation = self.allocation - allocation_interval: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.allocation_interval, Unset): - allocation_interval = self.allocation_interval.to_dict() - field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) + if allocation is not UNSET: + field_dict["allocation"] = allocation + if allocation_interval is not UNSET: + field_dict["allocationInterval"] = allocation_interval if increment is not UNSET: field_dict["increment"] = increment if max_allocation is not UNSET: @@ -64,10 +68,6 @@ def to_dict(self) -> Dict[str, Any]: field_dict["scope"] = scope if start is not UNSET: field_dict["start"] = start - if allocation is not UNSET: - field_dict["allocation"] = allocation - if allocation_interval is not UNSET: - field_dict["allocationInterval"] = allocation_interval return field_dict @@ -76,6 +76,15 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.duration import Duration d = src_dict.copy() + allocation = d.pop("allocation", UNSET) + + _allocation_interval = d.pop("allocationInterval", UNSET) + allocation_interval: Union[Unset, Duration] + if isinstance(_allocation_interval, Unset): + allocation_interval = UNSET + else: + allocation_interval = Duration.from_dict(_allocation_interval) + increment = d.pop("increment", UNSET) max_allocation = d.pop("maxAllocation", UNSET) @@ -94,22 +103,13 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: start = isoparse(_start) - allocation = d.pop("allocation", UNSET) - - _allocation_interval = d.pop("allocationInterval", UNSET) - allocation_interval: Union[Unset, Duration] - if isinstance(_allocation_interval, Unset): - allocation_interval = UNSET - else: - allocation_interval = Duration.from_dict(_allocation_interval) - privacy_budget_parameters = cls( + allocation=allocation, + allocation_interval=allocation_interval, increment=increment, max_allocation=max_allocation, scope=scope, start=start, - allocation=allocation, - allocation_interval=allocation_interval, ) privacy_budget_parameters.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/privacy_summary.py b/src/tuneinsight/api/sdk/models/privacy_summary.py index 2990afd..3a50a98 100644 --- a/src/tuneinsight/api/sdk/models/privacy_summary.py +++ b/src/tuneinsight/api/sdk/models/privacy_summary.py @@ -19,28 +19,20 @@ class PrivacySummary: """Privacy summary for a project Attributes: - privacy_budget (Union[Unset, PrivacyBudget]): stores 
information about the status of the privacy budget - authorization_status (Union[Unset, AuthorizationStatus]): Authorization status of the project computation (Union[Unset, PrivacySummaryComputation]): Description of the computation that will be run for the project data_source (Union[Unset, DataSource]): + privacy_budget (Union[Unset, PrivacyBudget]): stores information about the status of the privacy budget + authorization_status (Union[Unset, AuthorizationStatus]): Authorization status of the project """ - privacy_budget: Union[Unset, "PrivacyBudget"] = UNSET - authorization_status: Union[Unset, AuthorizationStatus] = UNSET computation: Union[Unset, "PrivacySummaryComputation"] = UNSET data_source: Union[Unset, "DataSource"] = UNSET + privacy_budget: Union[Unset, "PrivacyBudget"] = UNSET + authorization_status: Union[Unset, AuthorizationStatus] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - privacy_budget: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.privacy_budget, Unset): - privacy_budget = self.privacy_budget.to_dict() - - authorization_status: Union[Unset, str] = UNSET - if not isinstance(self.authorization_status, Unset): - authorization_status = self.authorization_status.value - computation: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.computation, Unset): computation = self.computation.to_dict() @@ -49,17 +41,25 @@ def to_dict(self) -> Dict[str, Any]: if not isinstance(self.data_source, Unset): data_source = self.data_source.to_dict() + privacy_budget: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.privacy_budget, Unset): + privacy_budget = self.privacy_budget.to_dict() + + authorization_status: Union[Unset, str] = UNSET + if not isinstance(self.authorization_status, Unset): + authorization_status = self.authorization_status.value + field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if privacy_budget is not UNSET: - field_dict["privacyBudget"] = privacy_budget - if authorization_status is not UNSET: - field_dict["authorizationStatus"] = authorization_status if computation is not UNSET: field_dict["computation"] = computation if data_source is not UNSET: field_dict["dataSource"] = data_source + if privacy_budget is not UNSET: + field_dict["privacyBudget"] = privacy_budget + if authorization_status is not UNSET: + field_dict["authorizationStatus"] = authorization_status return field_dict @@ -70,20 +70,6 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.privacy_summary_computation import PrivacySummaryComputation d = src_dict.copy() - _privacy_budget = d.pop("privacyBudget", UNSET) - privacy_budget: Union[Unset, PrivacyBudget] - if isinstance(_privacy_budget, Unset): - privacy_budget = UNSET - else: - privacy_budget = PrivacyBudget.from_dict(_privacy_budget) - - _authorization_status = d.pop("authorizationStatus", UNSET) - authorization_status: Union[Unset, AuthorizationStatus] - if isinstance(_authorization_status, Unset): - authorization_status = UNSET - else: - authorization_status = AuthorizationStatus(_authorization_status) - _computation = d.pop("computation", UNSET) computation: Union[Unset, PrivacySummaryComputation] if isinstance(_computation, Unset): @@ -98,11 +84,25 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: data_source = DataSource.from_dict(_data_source) + _privacy_budget = d.pop("privacyBudget", UNSET) + privacy_budget: Union[Unset, PrivacyBudget] + 
if isinstance(_privacy_budget, Unset): + privacy_budget = UNSET + else: + privacy_budget = PrivacyBudget.from_dict(_privacy_budget) + + _authorization_status = d.pop("authorizationStatus", UNSET) + authorization_status: Union[Unset, AuthorizationStatus] + if isinstance(_authorization_status, Unset): + authorization_status = UNSET + else: + authorization_status = AuthorizationStatus(_authorization_status) + privacy_summary = cls( - privacy_budget=privacy_budget, - authorization_status=authorization_status, computation=computation, data_source=data_source, + privacy_budget=privacy_budget, + authorization_status=authorization_status, ) privacy_summary.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/privacy_summary_computation.py b/src/tuneinsight/api/sdk/models/privacy_summary_computation.py index d8ac29c..874d60b 100644 --- a/src/tuneinsight/api/sdk/models/privacy_summary_computation.py +++ b/src/tuneinsight/api/sdk/models/privacy_summary_computation.py @@ -12,38 +12,38 @@ class PrivacySummaryComputation: """Description of the computation that will be run for the project Attributes: - description (Union[Unset, str]): name (Union[Unset, str]): + description (Union[Unset, str]): """ - description: Union[Unset, str] = UNSET name: Union[Unset, str] = UNSET + description: Union[Unset, str] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - description = self.description name = self.name + description = self.description field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if description is not UNSET: - field_dict["description"] = description if name is not UNSET: field_dict["name"] = name + if description is not UNSET: + field_dict["description"] = description return field_dict @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() - description = d.pop("description", UNSET) - name = d.pop("name", UNSET) + description = d.pop("description", UNSET) + privacy_summary_computation = cls( - description=description, name=name, + description=description, ) privacy_summary_computation.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/private_search.py b/src/tuneinsight/api/sdk/models/private_search.py index e284b1f..b7fc342 100644 --- a/src/tuneinsight/api/sdk/models/private_search.py +++ b/src/tuneinsight/api/sdk/models/private_search.py @@ -20,51 +20,56 @@ class PrivateSearch: """ Attributes: type (ComputationType): Type of the computation. - join_id (Union[Unset, str]): Unique identifier of a data object. + local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured + the network) local_input (Union[Unset, LocalInput]): If a local input is provided, the node initiating the computation will use it instead of querying the datasource. This data is *not* shared to other nodes, only used for the duration of the computation. The local input columns/values must be in the form {: [, , ...], ...} - wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. - input_data_object (Union[Unset, str]): Shared identifier of a data object. + preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters + applied to the input retrieved from the datasource, if applicable cohort_id (Union[Unset, str]): Unique identifier of a data object. 
+ end_to_end_encrypted (Union[Unset, bool]): if the end to end encrypted mode is set to true, + then when release results is set to true and the output + is initially encrypted with a network collective key, then it is key switched to + the initiating user's public key. + local_input_id (Union[Unset, str]): Unique identifier of a data object. + wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. + dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various + disclosure prevention mechanisms + join_id (Union[Unset, str]): Unique identifier of a data object. + timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. + encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. + owner (Union[Unset, str]): The username of the end user who requested the computation. project_id (Union[Unset, str]): Unique identifier of a project. release_results (Union[Unset, bool]): flag to set to true if the computation should directly release the output results. If set, then encrypted results are automatically key switched and decrypted and a Result entity is saved - dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various - disclosure prevention mechanisms - encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. - local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured - the network) - local_input_id (Union[Unset, str]): Unique identifier of a data object. - owner (Union[Unset, str]): The username of the end user who requested the computation. - preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters - applied to the input retrieved from the datasource, if applicable - timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. data_source_parameters (Union[Unset, ComputationDataSourceParameters]): Parameters used to query the datasource from each node before the computation + input_data_object (Union[Unset, str]): Shared identifier of a data object. pir_dataset_object_id (Union[Unset, str]): Unique identifier of a data object. pir_search_object_id (Union[Unset, str]): Unique identifier of a data object. 
""" type: ComputationType - join_id: Union[Unset, str] = UNSET + local: Union[Unset, bool] = UNSET local_input: Union[Unset, "LocalInput"] = UNSET - wait: Union[Unset, bool] = UNSET - input_data_object: Union[Unset, str] = UNSET + preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET cohort_id: Union[Unset, str] = UNSET - project_id: Union[Unset, str] = UNSET - release_results: Union[Unset, bool] = UNSET + end_to_end_encrypted: Union[Unset, bool] = UNSET + local_input_id: Union[Unset, str] = UNSET + wait: Union[Unset, bool] = UNSET dp_policy: Union[Unset, "DPPolicy"] = UNSET + join_id: Union[Unset, str] = UNSET + timeout: Union[Unset, int] = UNSET encrypted: Union[Unset, bool] = UNSET - local: Union[Unset, bool] = UNSET - local_input_id: Union[Unset, str] = UNSET owner: Union[Unset, str] = UNSET - preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET - timeout: Union[Unset, int] = UNSET + project_id: Union[Unset, str] = UNSET + release_results: Union[Unset, bool] = UNSET data_source_parameters: Union[Unset, "ComputationDataSourceParameters"] = UNSET + input_data_object: Union[Unset, str] = UNSET pir_dataset_object_id: Union[Unset, str] = UNSET pir_search_object_id: Union[Unset, str] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) @@ -72,33 +77,34 @@ class PrivateSearch: def to_dict(self) -> Dict[str, Any]: type = self.type.value - join_id = self.join_id + local = self.local local_input: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.local_input, Unset): local_input = self.local_input.to_dict() - wait = self.wait - input_data_object = self.input_data_object + preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.preprocessing_parameters, Unset): + preprocessing_parameters = self.preprocessing_parameters.to_dict() + cohort_id = self.cohort_id - project_id = self.project_id - release_results = self.release_results + end_to_end_encrypted = self.end_to_end_encrypted + local_input_id = self.local_input_id + wait = self.wait dp_policy: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.dp_policy, Unset): dp_policy = self.dp_policy.to_dict() + join_id = self.join_id + timeout = self.timeout encrypted = self.encrypted - local = self.local - local_input_id = self.local_input_id owner = self.owner - preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.preprocessing_parameters, Unset): - preprocessing_parameters = self.preprocessing_parameters.to_dict() - - timeout = self.timeout + project_id = self.project_id + release_results = self.release_results data_source_parameters: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.data_source_parameters, Unset): data_source_parameters = self.data_source_parameters.to_dict() + input_data_object = self.input_data_object pir_dataset_object_id = self.pir_dataset_object_id pir_search_object_id = self.pir_search_object_id @@ -109,36 +115,38 @@ def to_dict(self) -> Dict[str, Any]: "type": type, } ) - if join_id is not UNSET: - field_dict["joinId"] = join_id + if local is not UNSET: + field_dict["local"] = local if local_input is not UNSET: field_dict["localInput"] = local_input - if wait is not UNSET: - field_dict["wait"] = wait - if input_data_object is not UNSET: - field_dict["inputDataObject"] = input_data_object + if preprocessing_parameters is not UNSET: + field_dict["preprocessingParameters"] = preprocessing_parameters if cohort_id is not UNSET: 
field_dict["cohortId"] = cohort_id - if project_id is not UNSET: - field_dict["projectId"] = project_id - if release_results is not UNSET: - field_dict["releaseResults"] = release_results + if end_to_end_encrypted is not UNSET: + field_dict["endToEndEncrypted"] = end_to_end_encrypted + if local_input_id is not UNSET: + field_dict["localInputID"] = local_input_id + if wait is not UNSET: + field_dict["wait"] = wait if dp_policy is not UNSET: field_dict["DPPolicy"] = dp_policy + if join_id is not UNSET: + field_dict["joinId"] = join_id + if timeout is not UNSET: + field_dict["timeout"] = timeout if encrypted is not UNSET: field_dict["encrypted"] = encrypted - if local is not UNSET: - field_dict["local"] = local - if local_input_id is not UNSET: - field_dict["localInputID"] = local_input_id if owner is not UNSET: field_dict["owner"] = owner - if preprocessing_parameters is not UNSET: - field_dict["preprocessingParameters"] = preprocessing_parameters - if timeout is not UNSET: - field_dict["timeout"] = timeout + if project_id is not UNSET: + field_dict["projectId"] = project_id + if release_results is not UNSET: + field_dict["releaseResults"] = release_results if data_source_parameters is not UNSET: field_dict["dataSourceParameters"] = data_source_parameters + if input_data_object is not UNSET: + field_dict["inputDataObject"] = input_data_object if pir_dataset_object_id is not UNSET: field_dict["pirDatasetObjectId"] = pir_dataset_object_id if pir_search_object_id is not UNSET: @@ -156,7 +164,7 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() type = ComputationType(d.pop("type")) - join_id = d.pop("joinId", UNSET) + local = d.pop("local", UNSET) _local_input = d.pop("localInput", UNSET) local_input: Union[Unset, LocalInput] @@ -165,15 +173,20 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: local_input = LocalInput.from_dict(_local_input) - wait = d.pop("wait", UNSET) - - input_data_object = d.pop("inputDataObject", UNSET) + _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) + preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] + if isinstance(_preprocessing_parameters, Unset): + preprocessing_parameters = UNSET + else: + preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) cohort_id = d.pop("cohortId", UNSET) - project_id = d.pop("projectId", UNSET) + end_to_end_encrypted = d.pop("endToEndEncrypted", UNSET) - release_results = d.pop("releaseResults", UNSET) + local_input_id = d.pop("localInputID", UNSET) + + wait = d.pop("wait", UNSET) _dp_policy = d.pop("DPPolicy", UNSET) dp_policy: Union[Unset, DPPolicy] @@ -182,22 +195,17 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: dp_policy = DPPolicy.from_dict(_dp_policy) - encrypted = d.pop("encrypted", UNSET) + join_id = d.pop("joinId", UNSET) - local = d.pop("local", UNSET) + timeout = d.pop("timeout", UNSET) - local_input_id = d.pop("localInputID", UNSET) + encrypted = d.pop("encrypted", UNSET) owner = d.pop("owner", UNSET) - _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) - preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] - if isinstance(_preprocessing_parameters, Unset): - preprocessing_parameters = UNSET - else: - preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) + project_id = d.pop("projectId", UNSET) - timeout = d.pop("timeout", UNSET) + release_results = d.pop("releaseResults", UNSET) 
_data_source_parameters = d.pop("dataSourceParameters", UNSET) data_source_parameters: Union[Unset, ComputationDataSourceParameters] @@ -206,27 +214,30 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: data_source_parameters = ComputationDataSourceParameters.from_dict(_data_source_parameters) + input_data_object = d.pop("inputDataObject", UNSET) + pir_dataset_object_id = d.pop("pirDatasetObjectId", UNSET) pir_search_object_id = d.pop("pirSearchObjectId", UNSET) private_search = cls( type=type, - join_id=join_id, + local=local, local_input=local_input, - wait=wait, - input_data_object=input_data_object, + preprocessing_parameters=preprocessing_parameters, cohort_id=cohort_id, - project_id=project_id, - release_results=release_results, + end_to_end_encrypted=end_to_end_encrypted, + local_input_id=local_input_id, + wait=wait, dp_policy=dp_policy, + join_id=join_id, + timeout=timeout, encrypted=encrypted, - local=local, - local_input_id=local_input_id, owner=owner, - preprocessing_parameters=preprocessing_parameters, - timeout=timeout, + project_id=project_id, + release_results=release_results, data_source_parameters=data_source_parameters, + input_data_object=input_data_object, pir_dataset_object_id=pir_dataset_object_id, pir_search_object_id=pir_search_object_id, ) diff --git a/src/tuneinsight/api/sdk/models/private_search_setup.py b/src/tuneinsight/api/sdk/models/private_search_setup.py index 05fa713..4f29839 100644 --- a/src/tuneinsight/api/sdk/models/private_search_setup.py +++ b/src/tuneinsight/api/sdk/models/private_search_setup.py @@ -21,31 +21,35 @@ class PrivateSearchSetup: Attributes: type (ComputationType): Type of the computation. keys (str): (required) name of the column from the dataset which stores the keys of the database - join_id (Union[Unset, str]): Unique identifier of a data object. + local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured + the network) local_input (Union[Unset, LocalInput]): If a local input is provided, the node initiating the computation will use it instead of querying the datasource. This data is *not* shared to other nodes, only used for the duration of the computation. The local input columns/values must be in the form {: [, , ...], ...} - wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. - input_data_object (Union[Unset, str]): Shared identifier of a data object. + preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters + applied to the input retrieved from the datasource, if applicable cohort_id (Union[Unset, str]): Unique identifier of a data object. + end_to_end_encrypted (Union[Unset, bool]): if the end to end encrypted mode is set to true, + then when release results is set to true and the output + is initially encrypted with a network collective key, then it is key switched to + the initiating user's public key. + local_input_id (Union[Unset, str]): Unique identifier of a data object. + wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. + dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various + disclosure prevention mechanisms + join_id (Union[Unset, str]): Unique identifier of a data object. + timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. 
+ encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. + owner (Union[Unset, str]): The username of the end user who requested the computation. project_id (Union[Unset, str]): Unique identifier of a project. release_results (Union[Unset, bool]): flag to set to true if the computation should directly release the output results. If set, then encrypted results are automatically key switched and decrypted and a Result entity is saved - dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various - disclosure prevention mechanisms - encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. - local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured - the network) - local_input_id (Union[Unset, str]): Unique identifier of a data object. - owner (Union[Unset, str]): The username of the end user who requested the computation. - preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters - applied to the input retrieved from the datasource, if applicable - timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. data_source_parameters (Union[Unset, ComputationDataSourceParameters]): Parameters used to query the datasource from each node before the computation + input_data_object (Union[Unset, str]): Shared identifier of a data object. values (Union[Unset, List[str]]): name of the columns from the dataset which stores the values of the database. If empty, the computation will set this parameter to the column names of the dataset after dropping the keys column. 
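The hunks above only reorder the fields of the generated PrivateSearch and PrivateSearchSetup models; the camelCase JSON keys they serialize to are unchanged. As a minimal sketch of how these two models fit together (a setup computation builds the PIR database from a keys column plus optional value columns, and a later search references the resulting data objects), the snippet below constructs and serializes both. The ComputationType module name, its member values, the column names, and the object IDs are assumptions to check against the generated SDK; only the model fields and to_dict() come from the code above.

from tuneinsight.api.sdk.models.private_search_setup import PrivateSearchSetup
from tuneinsight.api.sdk.models.private_search import PrivateSearch
from tuneinsight.api.sdk.models.computation_type import ComputationType  # assumed module name

# 1. Set up the PIR database: "keys" names the column holding the lookup keys,
#    "values" the columns returned on a match (if empty, all remaining columns are used).
setup = PrivateSearchSetup(
    type=ComputationType("privateSearchSetup"),  # placeholder; use the actual enum member
    keys="patientId",                            # hypothetical column name
    values=["diagnosis", "lastVisit"],           # hypothetical column names
    wait=True,
)

# 2. Query the database later, referencing the data objects produced by the setup step.
search = PrivateSearch(
    type=ComputationType("privateSearch"),       # placeholder; use the actual enum member
    pir_dataset_object_id="<dataset-object-id>", # hypothetical identifiers
    pir_search_object_id="<search-object-id>",
    release_results=True,
)

# Both models serialize to the camelCase request bodies used by the compute endpoint,
# e.g. search.to_dict() contains "pirDatasetObjectId" and "pirSearchObjectId".
print(setup.to_dict())
print(search.to_dict())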
@@ -53,21 +57,22 @@ class PrivateSearchSetup: type: ComputationType keys: str - join_id: Union[Unset, str] = UNSET + local: Union[Unset, bool] = UNSET local_input: Union[Unset, "LocalInput"] = UNSET - wait: Union[Unset, bool] = UNSET - input_data_object: Union[Unset, str] = UNSET + preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET cohort_id: Union[Unset, str] = UNSET - project_id: Union[Unset, str] = UNSET - release_results: Union[Unset, bool] = UNSET + end_to_end_encrypted: Union[Unset, bool] = UNSET + local_input_id: Union[Unset, str] = UNSET + wait: Union[Unset, bool] = UNSET dp_policy: Union[Unset, "DPPolicy"] = UNSET + join_id: Union[Unset, str] = UNSET + timeout: Union[Unset, int] = UNSET encrypted: Union[Unset, bool] = UNSET - local: Union[Unset, bool] = UNSET - local_input_id: Union[Unset, str] = UNSET owner: Union[Unset, str] = UNSET - preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET - timeout: Union[Unset, int] = UNSET + project_id: Union[Unset, str] = UNSET + release_results: Union[Unset, bool] = UNSET data_source_parameters: Union[Unset, "ComputationDataSourceParameters"] = UNSET + input_data_object: Union[Unset, str] = UNSET values: Union[Unset, List[str]] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) @@ -75,33 +80,34 @@ def to_dict(self) -> Dict[str, Any]: type = self.type.value keys = self.keys - join_id = self.join_id + local = self.local local_input: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.local_input, Unset): local_input = self.local_input.to_dict() - wait = self.wait - input_data_object = self.input_data_object + preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.preprocessing_parameters, Unset): + preprocessing_parameters = self.preprocessing_parameters.to_dict() + cohort_id = self.cohort_id - project_id = self.project_id - release_results = self.release_results + end_to_end_encrypted = self.end_to_end_encrypted + local_input_id = self.local_input_id + wait = self.wait dp_policy: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.dp_policy, Unset): dp_policy = self.dp_policy.to_dict() + join_id = self.join_id + timeout = self.timeout encrypted = self.encrypted - local = self.local - local_input_id = self.local_input_id owner = self.owner - preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.preprocessing_parameters, Unset): - preprocessing_parameters = self.preprocessing_parameters.to_dict() - - timeout = self.timeout + project_id = self.project_id + release_results = self.release_results data_source_parameters: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.data_source_parameters, Unset): data_source_parameters = self.data_source_parameters.to_dict() + input_data_object = self.input_data_object values: Union[Unset, List[str]] = UNSET if not isinstance(self.values, Unset): values = self.values @@ -114,36 +120,38 @@ def to_dict(self) -> Dict[str, Any]: "keys": keys, } ) - if join_id is not UNSET: - field_dict["joinId"] = join_id + if local is not UNSET: + field_dict["local"] = local if local_input is not UNSET: field_dict["localInput"] = local_input - if wait is not UNSET: - field_dict["wait"] = wait - if input_data_object is not UNSET: - field_dict["inputDataObject"] = input_data_object + if preprocessing_parameters is not UNSET: + field_dict["preprocessingParameters"] = preprocessing_parameters if cohort_id is not UNSET: field_dict["cohortId"] = 
cohort_id - if project_id is not UNSET: - field_dict["projectId"] = project_id - if release_results is not UNSET: - field_dict["releaseResults"] = release_results + if end_to_end_encrypted is not UNSET: + field_dict["endToEndEncrypted"] = end_to_end_encrypted + if local_input_id is not UNSET: + field_dict["localInputID"] = local_input_id + if wait is not UNSET: + field_dict["wait"] = wait if dp_policy is not UNSET: field_dict["DPPolicy"] = dp_policy + if join_id is not UNSET: + field_dict["joinId"] = join_id + if timeout is not UNSET: + field_dict["timeout"] = timeout if encrypted is not UNSET: field_dict["encrypted"] = encrypted - if local is not UNSET: - field_dict["local"] = local - if local_input_id is not UNSET: - field_dict["localInputID"] = local_input_id if owner is not UNSET: field_dict["owner"] = owner - if preprocessing_parameters is not UNSET: - field_dict["preprocessingParameters"] = preprocessing_parameters - if timeout is not UNSET: - field_dict["timeout"] = timeout + if project_id is not UNSET: + field_dict["projectId"] = project_id + if release_results is not UNSET: + field_dict["releaseResults"] = release_results if data_source_parameters is not UNSET: field_dict["dataSourceParameters"] = data_source_parameters + if input_data_object is not UNSET: + field_dict["inputDataObject"] = input_data_object if values is not UNSET: field_dict["values"] = values @@ -161,7 +169,7 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: keys = d.pop("keys") - join_id = d.pop("joinId", UNSET) + local = d.pop("local", UNSET) _local_input = d.pop("localInput", UNSET) local_input: Union[Unset, LocalInput] @@ -170,15 +178,20 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: local_input = LocalInput.from_dict(_local_input) - wait = d.pop("wait", UNSET) - - input_data_object = d.pop("inputDataObject", UNSET) + _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) + preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] + if isinstance(_preprocessing_parameters, Unset): + preprocessing_parameters = UNSET + else: + preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) cohort_id = d.pop("cohortId", UNSET) - project_id = d.pop("projectId", UNSET) + end_to_end_encrypted = d.pop("endToEndEncrypted", UNSET) - release_results = d.pop("releaseResults", UNSET) + local_input_id = d.pop("localInputID", UNSET) + + wait = d.pop("wait", UNSET) _dp_policy = d.pop("DPPolicy", UNSET) dp_policy: Union[Unset, DPPolicy] @@ -187,22 +200,17 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: dp_policy = DPPolicy.from_dict(_dp_policy) - encrypted = d.pop("encrypted", UNSET) + join_id = d.pop("joinId", UNSET) - local = d.pop("local", UNSET) + timeout = d.pop("timeout", UNSET) - local_input_id = d.pop("localInputID", UNSET) + encrypted = d.pop("encrypted", UNSET) owner = d.pop("owner", UNSET) - _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) - preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] - if isinstance(_preprocessing_parameters, Unset): - preprocessing_parameters = UNSET - else: - preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) + project_id = d.pop("projectId", UNSET) - timeout = d.pop("timeout", UNSET) + release_results = d.pop("releaseResults", UNSET) _data_source_parameters = d.pop("dataSourceParameters", UNSET) data_source_parameters: Union[Unset, ComputationDataSourceParameters] @@ -211,26 
+219,29 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: data_source_parameters = ComputationDataSourceParameters.from_dict(_data_source_parameters) + input_data_object = d.pop("inputDataObject", UNSET) + values = cast(List[str], d.pop("values", UNSET)) private_search_setup = cls( type=type, keys=keys, - join_id=join_id, + local=local, local_input=local_input, - wait=wait, - input_data_object=input_data_object, + preprocessing_parameters=preprocessing_parameters, cohort_id=cohort_id, - project_id=project_id, - release_results=release_results, + end_to_end_encrypted=end_to_end_encrypted, + local_input_id=local_input_id, + wait=wait, dp_policy=dp_policy, + join_id=join_id, + timeout=timeout, encrypted=encrypted, - local=local, - local_input_id=local_input_id, owner=owner, - preprocessing_parameters=preprocessing_parameters, - timeout=timeout, + project_id=project_id, + release_results=release_results, data_source_parameters=data_source_parameters, + input_data_object=input_data_object, values=values, ) diff --git a/src/tuneinsight/api/sdk/models/project.py b/src/tuneinsight/api/sdk/models/project.py index f0397fe..b0a17e7 100644 --- a/src/tuneinsight/api/sdk/models/project.py +++ b/src/tuneinsight/api/sdk/models/project.py @@ -27,46 +27,49 @@ class Project: """Project entity definition. Attributes: - data_source_auto_match (Union[Unset, bool]): whether or not to automatically assign the first matching - datasource when the project is shared with other nodes - name (Union[Unset, str]): - query (Union[Unset, DataSourceQuery]): schema used for the query - run_async (Union[Unset, bool]): flag indicating if computation should be run asynchronously - topology (Union[Unset, Topology]): Network Topologies. 'star' or 'tree'. In star topology all nodes are - connected to a central node. In tree topology all nodes are connected and aware of each other. - allow_clear_query (Union[Unset, bool]): [Dangerous, can lead to cross code data share] True if it is allowed for - a client to query the data source all participants of the project and return the clear text result - workflow_json (Union[Unset, str]): JSON representation of the workflow UI in the frontend - query_timeout (Union[Unset, int]): Timeout for the data source queries Default: 30. - authorization_status (Union[Unset, AuthorizationStatus]): Authorization status of the project - compute_only (Union[Unset, bool]): indicates that the current project participant only computes but does not - contribute any data - created_with_client (Union[Unset, Client]): Type of client that communicates with the agent API - description (Union[Unset, None, str]): - local (Union[Unset, None, bool]): True if the project's computation should run only with local data (not - configured the network) - created_by_node (Union[Unset, str]): ID of node where the project was first created - workflow_type (Union[Unset, ProjectBaseWorkflowType]): type of the workflow UI in the frontend + unique_id (Union[Unset, str]): Unique identifier of a project. + unrestricted_access (Union[Unset, None, bool]): when set to true, then all users from the same organization are + authorized to access the project (view / edit depends on the roles) authorized_users (Union[Unset, List[str]]): The IDs of the users who can run the project - locked (Union[Unset, None, bool]): True if the project is read-only (likely because it has already been shared) + name (Union[Unset, str]): allow_shared_edit (Union[Unset, bool]): True if this project can be modified after being shared. 
Modifications of a shared project will be broadcasted to the network local_data_selection_definition (Union[Unset, LocalDataSelectionDefinition]): datasource selection definition. A selection is a "query" or data selection definition to run on the datasource + created_by_node (Union[Unset, str]): ID of node where the project was first created + data_source_auto_match (Union[Unset, bool]): whether or not to automatically assign the first matching + datasource when the project is shared with other nodes min_contributors (Union[Unset, None, int]): minimum number of participants that contribute with their data required to run computations within this project network_id (Union[Unset, str]): id to uniquely identify the network - policy (Union[Unset, ComputationPolicy]): policy to validate a specific computation + allow_clear_query (Union[Unset, bool]): [Dangerous, can lead to cross code data share] True if it is allowed for + a client to query the data source all participants of the project and return the clear text result + authorization_status (Union[Unset, AuthorizationStatus]): Authorization status of the project + query_timeout (Union[Unset, int]): Timeout for the data source queries Default: 30. + end_to_end_encrypted (Union[Unset, None, bool]): whether results are always end to end encrypted and decrypted + on the client side + workflow_json (Union[Unset, str]): JSON representation of the workflow UI in the frontend computation_definition (Union[Unset, ComputationDefinition]): Generic computation. - created_by_user (Union[Unset, str]): ID of user who created the project data_source_id (Union[Unset, None, str]): Unique identifier of a data source. + locked (Union[Unset, None, bool]): True if the project is read-only (likely because it has already been shared) + non_contributor (Union[Unset, None, bool]): indicates that the current project participant takes part in the + distributed computations but does not have any input data. + By default this field is set according to the instance's configuration. dpia (Union[Unset, str]): + run_async (Union[Unset, bool]): flag indicating if computation should be run asynchronously shared (Union[Unset, bool]): True if the project has once been shared across the participants - unique_id (Union[Unset, str]): Unique identifier of a project. - workflow_description (Union[Unset, str]): dynamically generated markdown description of the distributed workflow - that is currently configured with the project. - Not to be confused with the project description which is set by the user that has created the project for - informative purposes. + topology (Union[Unset, Topology]): Network Topologies. 'star' or 'tree'. In star topology all nodes are + connected to a central node. In tree topology all nodes are connected and aware of each other. 
+ created_by_user (Union[Unset, str]): ID of user who created the project + description (Union[Unset, None, str]): + local (Union[Unset, None, bool]): True if the project's computation should run only with local data (not + configured the network) + policy (Union[Unset, ComputationPolicy]): policy to validate a specific computation + query (Union[Unset, DataSourceQuery]): schema used for the query + workflow_type (Union[Unset, ProjectBaseWorkflowType]): type of the workflow UI in the frontend + created_with_client (Union[Unset, Client]): Type of client that communicates with the agent API + hide_leaf_participants (Union[Unset, None, bool]): whether leaf project participants are not shown to other leaf + participants when the project is in a star topology. computations (Union[Unset, List['Computation']]): List of computations of the project created_at (Union[Unset, str]): error (Union[Unset, str]): Description of a potential error that happened during the project lifespan @@ -74,37 +77,43 @@ class Project: privacy_summary (Union[Unset, PrivacySummary]): Privacy summary for a project status (Union[Unset, ProjectStatus]): Stages of a project workflow updated_at (Union[Unset, str]): + workflow_description (Union[Unset, str]): dynamically generated markdown description of the distributed workflow + that is currently configured with the project. + Not to be confused with the project description which is set by the user that has created the project for + informative purposes. """ - data_source_auto_match: Union[Unset, bool] = UNSET - name: Union[Unset, str] = UNSET - query: Union[Unset, "DataSourceQuery"] = UNSET - run_async: Union[Unset, bool] = UNSET - topology: Union[Unset, Topology] = UNSET - allow_clear_query: Union[Unset, bool] = UNSET - workflow_json: Union[Unset, str] = UNSET - query_timeout: Union[Unset, int] = 30 - authorization_status: Union[Unset, AuthorizationStatus] = UNSET - compute_only: Union[Unset, bool] = UNSET - created_with_client: Union[Unset, Client] = UNSET - description: Union[Unset, None, str] = UNSET - local: Union[Unset, None, bool] = UNSET - created_by_node: Union[Unset, str] = UNSET - workflow_type: Union[Unset, ProjectBaseWorkflowType] = UNSET + unique_id: Union[Unset, str] = UNSET + unrestricted_access: Union[Unset, None, bool] = UNSET authorized_users: Union[Unset, List[str]] = UNSET - locked: Union[Unset, None, bool] = UNSET + name: Union[Unset, str] = UNSET allow_shared_edit: Union[Unset, bool] = UNSET local_data_selection_definition: Union[Unset, "LocalDataSelectionDefinition"] = UNSET + created_by_node: Union[Unset, str] = UNSET + data_source_auto_match: Union[Unset, bool] = UNSET min_contributors: Union[Unset, None, int] = UNSET network_id: Union[Unset, str] = UNSET - policy: Union[Unset, "ComputationPolicy"] = UNSET + allow_clear_query: Union[Unset, bool] = UNSET + authorization_status: Union[Unset, AuthorizationStatus] = UNSET + query_timeout: Union[Unset, int] = 30 + end_to_end_encrypted: Union[Unset, None, bool] = UNSET + workflow_json: Union[Unset, str] = UNSET computation_definition: Union[Unset, "ComputationDefinition"] = UNSET - created_by_user: Union[Unset, str] = UNSET data_source_id: Union[Unset, None, str] = UNSET + locked: Union[Unset, None, bool] = UNSET + non_contributor: Union[Unset, None, bool] = UNSET dpia: Union[Unset, str] = UNSET + run_async: Union[Unset, bool] = UNSET shared: Union[Unset, bool] = UNSET - unique_id: Union[Unset, str] = UNSET - workflow_description: Union[Unset, str] = UNSET + topology: Union[Unset, Topology] = UNSET + 
created_by_user: Union[Unset, str] = UNSET + description: Union[Unset, None, str] = UNSET + local: Union[Unset, None, bool] = UNSET + policy: Union[Unset, "ComputationPolicy"] = UNSET + query: Union[Unset, "DataSourceQuery"] = UNSET + workflow_type: Union[Unset, ProjectBaseWorkflowType] = UNSET + created_with_client: Union[Unset, Client] = UNSET + hide_leaf_participants: Union[Unset, None, bool] = UNSET computations: Union[Unset, List["Computation"]] = UNSET created_at: Union[Unset, str] = UNSET error: Union[Unset, str] = UNSET @@ -112,65 +121,68 @@ class Project: privacy_summary: Union[Unset, "PrivacySummary"] = UNSET status: Union[Unset, ProjectStatus] = UNSET updated_at: Union[Unset, str] = UNSET + workflow_description: Union[Unset, str] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - data_source_auto_match = self.data_source_auto_match - name = self.name - query: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.query, Unset): - query = self.query.to_dict() - - run_async = self.run_async - topology: Union[Unset, str] = UNSET - if not isinstance(self.topology, Unset): - topology = self.topology.value - - allow_clear_query = self.allow_clear_query - workflow_json = self.workflow_json - query_timeout = self.query_timeout - authorization_status: Union[Unset, str] = UNSET - if not isinstance(self.authorization_status, Unset): - authorization_status = self.authorization_status.value - - compute_only = self.compute_only - created_with_client: Union[Unset, str] = UNSET - if not isinstance(self.created_with_client, Unset): - created_with_client = self.created_with_client.value - - description = self.description - local = self.local - created_by_node = self.created_by_node - workflow_type: Union[Unset, str] = UNSET - if not isinstance(self.workflow_type, Unset): - workflow_type = self.workflow_type.value - + unique_id = self.unique_id + unrestricted_access = self.unrestricted_access authorized_users: Union[Unset, List[str]] = UNSET if not isinstance(self.authorized_users, Unset): authorized_users = self.authorized_users - locked = self.locked + name = self.name allow_shared_edit = self.allow_shared_edit local_data_selection_definition: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.local_data_selection_definition, Unset): local_data_selection_definition = self.local_data_selection_definition.to_dict() + created_by_node = self.created_by_node + data_source_auto_match = self.data_source_auto_match min_contributors = self.min_contributors network_id = self.network_id - policy: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.policy, Unset): - policy = self.policy.to_dict() + allow_clear_query = self.allow_clear_query + authorization_status: Union[Unset, str] = UNSET + if not isinstance(self.authorization_status, Unset): + authorization_status = self.authorization_status.value + query_timeout = self.query_timeout + end_to_end_encrypted = self.end_to_end_encrypted + workflow_json = self.workflow_json computation_definition: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.computation_definition, Unset): computation_definition = self.computation_definition.to_dict() - created_by_user = self.created_by_user data_source_id = self.data_source_id + locked = self.locked + non_contributor = self.non_contributor dpia = self.dpia + run_async = self.run_async shared = self.shared - unique_id = self.unique_id - workflow_description = self.workflow_description + topology: Union[Unset, str] = 
UNSET + if not isinstance(self.topology, Unset): + topology = self.topology.value + + created_by_user = self.created_by_user + description = self.description + local = self.local + policy: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.policy, Unset): + policy = self.policy.to_dict() + + query: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.query, Unset): + query = self.query.to_dict() + + workflow_type: Union[Unset, str] = UNSET + if not isinstance(self.workflow_type, Unset): + workflow_type = self.workflow_type.value + + created_with_client: Union[Unset, str] = UNSET + if not isinstance(self.created_with_client, Unset): + created_with_client = self.created_with_client.value + + hide_leaf_participants = self.hide_leaf_participants computations: Union[Unset, List[Dict[str, Any]]] = UNSET if not isinstance(self.computations, Unset): computations = [] @@ -198,68 +210,73 @@ def to_dict(self) -> Dict[str, Any]: status = self.status.value updated_at = self.updated_at + workflow_description = self.workflow_description field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if data_source_auto_match is not UNSET: - field_dict["dataSourceAutoMatch"] = data_source_auto_match - if name is not UNSET: - field_dict["name"] = name - if query is not UNSET: - field_dict["query"] = query - if run_async is not UNSET: - field_dict["runAsync"] = run_async - if topology is not UNSET: - field_dict["topology"] = topology - if allow_clear_query is not UNSET: - field_dict["allowClearQuery"] = allow_clear_query - if workflow_json is not UNSET: - field_dict["workflowJSON"] = workflow_json - if query_timeout is not UNSET: - field_dict["queryTimeout"] = query_timeout - if authorization_status is not UNSET: - field_dict["authorizationStatus"] = authorization_status - if compute_only is not UNSET: - field_dict["computeOnly"] = compute_only - if created_with_client is not UNSET: - field_dict["createdWithClient"] = created_with_client - if description is not UNSET: - field_dict["description"] = description - if local is not UNSET: - field_dict["local"] = local - if created_by_node is not UNSET: - field_dict["createdByNode"] = created_by_node - if workflow_type is not UNSET: - field_dict["workflowType"] = workflow_type + if unique_id is not UNSET: + field_dict["uniqueId"] = unique_id + if unrestricted_access is not UNSET: + field_dict["unrestrictedAccess"] = unrestricted_access if authorized_users is not UNSET: field_dict["authorizedUsers"] = authorized_users - if locked is not UNSET: - field_dict["locked"] = locked + if name is not UNSET: + field_dict["name"] = name if allow_shared_edit is not UNSET: field_dict["allowSharedEdit"] = allow_shared_edit if local_data_selection_definition is not UNSET: field_dict["localDataSelectionDefinition"] = local_data_selection_definition + if created_by_node is not UNSET: + field_dict["createdByNode"] = created_by_node + if data_source_auto_match is not UNSET: + field_dict["dataSourceAutoMatch"] = data_source_auto_match if min_contributors is not UNSET: field_dict["minContributors"] = min_contributors if network_id is not UNSET: field_dict["networkId"] = network_id - if policy is not UNSET: - field_dict["policy"] = policy + if allow_clear_query is not UNSET: + field_dict["allowClearQuery"] = allow_clear_query + if authorization_status is not UNSET: + field_dict["authorizationStatus"] = authorization_status + if query_timeout is not UNSET: + field_dict["queryTimeout"] = query_timeout + if end_to_end_encrypted is 
not UNSET: + field_dict["endToEndEncrypted"] = end_to_end_encrypted + if workflow_json is not UNSET: + field_dict["workflowJSON"] = workflow_json if computation_definition is not UNSET: field_dict["computationDefinition"] = computation_definition - if created_by_user is not UNSET: - field_dict["createdByUser"] = created_by_user if data_source_id is not UNSET: field_dict["dataSourceId"] = data_source_id + if locked is not UNSET: + field_dict["locked"] = locked + if non_contributor is not UNSET: + field_dict["nonContributor"] = non_contributor if dpia is not UNSET: field_dict["dpia"] = dpia + if run_async is not UNSET: + field_dict["runAsync"] = run_async if shared is not UNSET: field_dict["shared"] = shared - if unique_id is not UNSET: - field_dict["uniqueId"] = unique_id - if workflow_description is not UNSET: - field_dict["workflowDescription"] = workflow_description + if topology is not UNSET: + field_dict["topology"] = topology + if created_by_user is not UNSET: + field_dict["createdByUser"] = created_by_user + if description is not UNSET: + field_dict["description"] = description + if local is not UNSET: + field_dict["local"] = local + if policy is not UNSET: + field_dict["policy"] = policy + if query is not UNSET: + field_dict["query"] = query + if workflow_type is not UNSET: + field_dict["workflowType"] = workflow_type + if created_with_client is not UNSET: + field_dict["createdWithClient"] = created_with_client + if hide_leaf_participants is not UNSET: + field_dict["hideLeafParticipants"] = hide_leaf_participants if computations is not UNSET: field_dict["computations"] = computations if created_at is not UNSET: @@ -274,6 +291,8 @@ def to_dict(self) -> Dict[str, Any]: field_dict["status"] = status if updated_at is not UNSET: field_dict["updatedAt"] = updated_at + if workflow_description is not UNSET: + field_dict["workflowDescription"] = workflow_description return field_dict @@ -288,31 +307,32 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.privacy_summary import PrivacySummary d = src_dict.copy() - data_source_auto_match = d.pop("dataSourceAutoMatch", UNSET) + unique_id = d.pop("uniqueId", UNSET) + + unrestricted_access = d.pop("unrestrictedAccess", UNSET) + + authorized_users = cast(List[str], d.pop("authorizedUsers", UNSET)) name = d.pop("name", UNSET) - _query = d.pop("query", UNSET) - query: Union[Unset, DataSourceQuery] - if isinstance(_query, Unset): - query = UNSET + allow_shared_edit = d.pop("allowSharedEdit", UNSET) + + _local_data_selection_definition = d.pop("localDataSelectionDefinition", UNSET) + local_data_selection_definition: Union[Unset, LocalDataSelectionDefinition] + if isinstance(_local_data_selection_definition, Unset): + local_data_selection_definition = UNSET else: - query = DataSourceQuery.from_dict(_query) + local_data_selection_definition = LocalDataSelectionDefinition.from_dict(_local_data_selection_definition) - run_async = d.pop("runAsync", UNSET) + created_by_node = d.pop("createdByNode", UNSET) - _topology = d.pop("topology", UNSET) - topology: Union[Unset, Topology] - if isinstance(_topology, Unset): - topology = UNSET - else: - topology = Topology(_topology) + data_source_auto_match = d.pop("dataSourceAutoMatch", UNSET) - allow_clear_query = d.pop("allowClearQuery", UNSET) + min_contributors = d.pop("minContributors", UNSET) - workflow_json = d.pop("workflowJSON", UNSET) + network_id = d.pop("networkId", UNSET) - query_timeout = d.pop("queryTimeout", UNSET) + allow_clear_query = d.pop("allowClearQuery", UNSET) 
_authorization_status = d.pop("authorizationStatus", UNSET) authorization_status: Union[Unset, AuthorizationStatus] @@ -321,44 +341,43 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: authorization_status = AuthorizationStatus(_authorization_status) - compute_only = d.pop("computeOnly", UNSET) - - _created_with_client = d.pop("createdWithClient", UNSET) - created_with_client: Union[Unset, Client] - if isinstance(_created_with_client, Unset): - created_with_client = UNSET - else: - created_with_client = Client(_created_with_client) - - description = d.pop("description", UNSET) + query_timeout = d.pop("queryTimeout", UNSET) - local = d.pop("local", UNSET) + end_to_end_encrypted = d.pop("endToEndEncrypted", UNSET) - created_by_node = d.pop("createdByNode", UNSET) + workflow_json = d.pop("workflowJSON", UNSET) - _workflow_type = d.pop("workflowType", UNSET) - workflow_type: Union[Unset, ProjectBaseWorkflowType] - if isinstance(_workflow_type, Unset): - workflow_type = UNSET + _computation_definition = d.pop("computationDefinition", UNSET) + computation_definition: Union[Unset, ComputationDefinition] + if isinstance(_computation_definition, Unset): + computation_definition = UNSET else: - workflow_type = ProjectBaseWorkflowType(_workflow_type) + computation_definition = ComputationDefinition.from_dict(_computation_definition) - authorized_users = cast(List[str], d.pop("authorizedUsers", UNSET)) + data_source_id = d.pop("dataSourceId", UNSET) locked = d.pop("locked", UNSET) - allow_shared_edit = d.pop("allowSharedEdit", UNSET) + non_contributor = d.pop("nonContributor", UNSET) - _local_data_selection_definition = d.pop("localDataSelectionDefinition", UNSET) - local_data_selection_definition: Union[Unset, LocalDataSelectionDefinition] - if isinstance(_local_data_selection_definition, Unset): - local_data_selection_definition = UNSET + dpia = d.pop("dpia", UNSET) + + run_async = d.pop("runAsync", UNSET) + + shared = d.pop("shared", UNSET) + + _topology = d.pop("topology", UNSET) + topology: Union[Unset, Topology] + if isinstance(_topology, Unset): + topology = UNSET else: - local_data_selection_definition = LocalDataSelectionDefinition.from_dict(_local_data_selection_definition) + topology = Topology(_topology) - min_contributors = d.pop("minContributors", UNSET) + created_by_user = d.pop("createdByUser", UNSET) - network_id = d.pop("networkId", UNSET) + description = d.pop("description", UNSET) + + local = d.pop("local", UNSET) _policy = d.pop("policy", UNSET) policy: Union[Unset, ComputationPolicy] @@ -367,24 +386,28 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: policy = ComputationPolicy.from_dict(_policy) - _computation_definition = d.pop("computationDefinition", UNSET) - computation_definition: Union[Unset, ComputationDefinition] - if isinstance(_computation_definition, Unset): - computation_definition = UNSET + _query = d.pop("query", UNSET) + query: Union[Unset, DataSourceQuery] + if isinstance(_query, Unset): + query = UNSET else: - computation_definition = ComputationDefinition.from_dict(_computation_definition) - - created_by_user = d.pop("createdByUser", UNSET) - - data_source_id = d.pop("dataSourceId", UNSET) - - dpia = d.pop("dpia", UNSET) + query = DataSourceQuery.from_dict(_query) - shared = d.pop("shared", UNSET) + _workflow_type = d.pop("workflowType", UNSET) + workflow_type: Union[Unset, ProjectBaseWorkflowType] + if isinstance(_workflow_type, Unset): + workflow_type = UNSET + else: + workflow_type = 
ProjectBaseWorkflowType(_workflow_type) - unique_id = d.pop("uniqueId", UNSET) + _created_with_client = d.pop("createdWithClient", UNSET) + created_with_client: Union[Unset, Client] + if isinstance(_created_with_client, Unset): + created_with_client = UNSET + else: + created_with_client = Client(_created_with_client) - workflow_description = d.pop("workflowDescription", UNSET) + hide_leaf_participants = d.pop("hideLeafParticipants", UNSET) computations = [] _computations = d.pop("computations", UNSET) @@ -420,36 +443,40 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: updated_at = d.pop("updatedAt", UNSET) + workflow_description = d.pop("workflowDescription", UNSET) + project = cls( - data_source_auto_match=data_source_auto_match, - name=name, - query=query, - run_async=run_async, - topology=topology, - allow_clear_query=allow_clear_query, - workflow_json=workflow_json, - query_timeout=query_timeout, - authorization_status=authorization_status, - compute_only=compute_only, - created_with_client=created_with_client, - description=description, - local=local, - created_by_node=created_by_node, - workflow_type=workflow_type, + unique_id=unique_id, + unrestricted_access=unrestricted_access, authorized_users=authorized_users, - locked=locked, + name=name, allow_shared_edit=allow_shared_edit, local_data_selection_definition=local_data_selection_definition, + created_by_node=created_by_node, + data_source_auto_match=data_source_auto_match, min_contributors=min_contributors, network_id=network_id, - policy=policy, + allow_clear_query=allow_clear_query, + authorization_status=authorization_status, + query_timeout=query_timeout, + end_to_end_encrypted=end_to_end_encrypted, + workflow_json=workflow_json, computation_definition=computation_definition, - created_by_user=created_by_user, data_source_id=data_source_id, + locked=locked, + non_contributor=non_contributor, dpia=dpia, + run_async=run_async, shared=shared, - unique_id=unique_id, - workflow_description=workflow_description, + topology=topology, + created_by_user=created_by_user, + description=description, + local=local, + policy=policy, + query=query, + workflow_type=workflow_type, + created_with_client=created_with_client, + hide_leaf_participants=hide_leaf_participants, computations=computations, created_at=created_at, error=error, @@ -457,6 +484,7 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: privacy_summary=privacy_summary, status=status, updated_at=updated_at, + workflow_description=workflow_description, ) project.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/project_base.py b/src/tuneinsight/api/sdk/models/project_base.py index 954b23b..5e1fe3b 100644 --- a/src/tuneinsight/api/sdk/models/project_base.py +++ b/src/tuneinsight/api/sdk/models/project_base.py @@ -23,190 +23,209 @@ class ProjectBase: """Common fields of a project (for get, patch and post) Attributes: - data_source_auto_match (Union[Unset, bool]): whether or not to automatically assign the first matching - datasource when the project is shared with other nodes - name (Union[Unset, str]): - query (Union[Unset, DataSourceQuery]): schema used for the query - run_async (Union[Unset, bool]): flag indicating if computation should be run asynchronously - topology (Union[Unset, Topology]): Network Topologies. 'star' or 'tree'. In star topology all nodes are - connected to a central node. In tree topology all nodes are connected and aware of each other. 
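For readers unfamiliar with the Topology enum referenced in the description just above (and again in the re-added attribute list below): 'star' and 'tree' are the two documented values. A hypothetical usage sketch, assuming the enum lives in the generated models package:

from tuneinsight.api.sdk.models.topology import Topology

# Construct the enum from its wire value; .value gives the string sent to the API.
topo = Topology("star")
print(topo.value)  # "star"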
- allow_clear_query (Union[Unset, bool]): [Dangerous, can lead to cross code data share] True if it is allowed for - a client to query the data source all participants of the project and return the clear text result - workflow_json (Union[Unset, str]): JSON representation of the workflow UI in the frontend - query_timeout (Union[Unset, int]): Timeout for the data source queries Default: 30. - authorization_status (Union[Unset, AuthorizationStatus]): Authorization status of the project - compute_only (Union[Unset, bool]): indicates that the current project participant only computes but does not - contribute any data - created_with_client (Union[Unset, Client]): Type of client that communicates with the agent API - description (Union[Unset, None, str]): - local (Union[Unset, None, bool]): True if the project's computation should run only with local data (not - configured the network) - created_by_node (Union[Unset, str]): ID of node where the project was first created - workflow_type (Union[Unset, ProjectBaseWorkflowType]): type of the workflow UI in the frontend + unique_id (Union[Unset, str]): Unique identifier of a project. + unrestricted_access (Union[Unset, None, bool]): when set to true, then all users from the same organization are + authorized to access the project (view / edit depends on the roles) authorized_users (Union[Unset, List[str]]): The IDs of the users who can run the project - locked (Union[Unset, None, bool]): True if the project is read-only (likely because it has already been shared) + name (Union[Unset, str]): allow_shared_edit (Union[Unset, bool]): True if this project can be modified after being shared. Modifications of a shared project will be broadcasted to the network local_data_selection_definition (Union[Unset, LocalDataSelectionDefinition]): datasource selection definition. A selection is a "query" or data selection definition to run on the datasource + created_by_node (Union[Unset, str]): ID of node where the project was first created + data_source_auto_match (Union[Unset, bool]): whether or not to automatically assign the first matching + datasource when the project is shared with other nodes min_contributors (Union[Unset, None, int]): minimum number of participants that contribute with their data required to run computations within this project network_id (Union[Unset, str]): id to uniquely identify the network - policy (Union[Unset, ComputationPolicy]): policy to validate a specific computation + allow_clear_query (Union[Unset, bool]): [Dangerous, can lead to cross code data share] True if it is allowed for + a client to query the data source all participants of the project and return the clear text result + authorization_status (Union[Unset, AuthorizationStatus]): Authorization status of the project + query_timeout (Union[Unset, int]): Timeout for the data source queries Default: 30. + end_to_end_encrypted (Union[Unset, None, bool]): whether results are always end to end encrypted and decrypted + on the client side + workflow_json (Union[Unset, str]): JSON representation of the workflow UI in the frontend computation_definition (Union[Unset, ComputationDefinition]): Generic computation. - created_by_user (Union[Unset, str]): ID of user who created the project data_source_id (Union[Unset, None, str]): Unique identifier of a data source. 
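Aside on the ProjectBase attributes listed above and continuing below: reordering them does not change serialization, because to_dict only emits fields that were explicitly set (everything left UNSET is skipped by the if ... is not UNSET chain further down). A small sketch, assuming ProjectBase is importable from the generated models package:

from tuneinsight.api.sdk.models.project_base import ProjectBase

base = ProjectBase(name="demo", unrestricted_access=True, end_to_end_encrypted=True)
# Unset attributes are omitted; keys use the camelCase names from the API spec.
print(base.to_dict())
# e.g. {'unrestrictedAccess': True, 'name': 'demo', 'endToEndEncrypted': True}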
+ locked (Union[Unset, None, bool]): True if the project is read-only (likely because it has already been shared) + non_contributor (Union[Unset, None, bool]): indicates that the current project participant takes part in the + distributed computations but does not have any input data. + By default this field is set according to the instance's configuration. dpia (Union[Unset, str]): + run_async (Union[Unset, bool]): flag indicating if computation should be run asynchronously shared (Union[Unset, bool]): True if the project has once been shared across the participants - unique_id (Union[Unset, str]): Unique identifier of a project. + topology (Union[Unset, Topology]): Network Topologies. 'star' or 'tree'. In star topology all nodes are + connected to a central node. In tree topology all nodes are connected and aware of each other. + created_by_user (Union[Unset, str]): ID of user who created the project + description (Union[Unset, None, str]): + local (Union[Unset, None, bool]): True if the project's computation should run only with local data (not + configured the network) + policy (Union[Unset, ComputationPolicy]): policy to validate a specific computation + query (Union[Unset, DataSourceQuery]): schema used for the query + workflow_type (Union[Unset, ProjectBaseWorkflowType]): type of the workflow UI in the frontend + created_with_client (Union[Unset, Client]): Type of client that communicates with the agent API + hide_leaf_participants (Union[Unset, None, bool]): whether leaf project participants are not shown to other leaf + participants when the project is in a star topology. """ - data_source_auto_match: Union[Unset, bool] = UNSET - name: Union[Unset, str] = UNSET - query: Union[Unset, "DataSourceQuery"] = UNSET - run_async: Union[Unset, bool] = UNSET - topology: Union[Unset, Topology] = UNSET - allow_clear_query: Union[Unset, bool] = UNSET - workflow_json: Union[Unset, str] = UNSET - query_timeout: Union[Unset, int] = 30 - authorization_status: Union[Unset, AuthorizationStatus] = UNSET - compute_only: Union[Unset, bool] = UNSET - created_with_client: Union[Unset, Client] = UNSET - description: Union[Unset, None, str] = UNSET - local: Union[Unset, None, bool] = UNSET - created_by_node: Union[Unset, str] = UNSET - workflow_type: Union[Unset, ProjectBaseWorkflowType] = UNSET + unique_id: Union[Unset, str] = UNSET + unrestricted_access: Union[Unset, None, bool] = UNSET authorized_users: Union[Unset, List[str]] = UNSET - locked: Union[Unset, None, bool] = UNSET + name: Union[Unset, str] = UNSET allow_shared_edit: Union[Unset, bool] = UNSET local_data_selection_definition: Union[Unset, "LocalDataSelectionDefinition"] = UNSET + created_by_node: Union[Unset, str] = UNSET + data_source_auto_match: Union[Unset, bool] = UNSET min_contributors: Union[Unset, None, int] = UNSET network_id: Union[Unset, str] = UNSET - policy: Union[Unset, "ComputationPolicy"] = UNSET + allow_clear_query: Union[Unset, bool] = UNSET + authorization_status: Union[Unset, AuthorizationStatus] = UNSET + query_timeout: Union[Unset, int] = 30 + end_to_end_encrypted: Union[Unset, None, bool] = UNSET + workflow_json: Union[Unset, str] = UNSET computation_definition: Union[Unset, "ComputationDefinition"] = UNSET - created_by_user: Union[Unset, str] = UNSET data_source_id: Union[Unset, None, str] = UNSET + locked: Union[Unset, None, bool] = UNSET + non_contributor: Union[Unset, None, bool] = UNSET dpia: Union[Unset, str] = UNSET + run_async: Union[Unset, bool] = UNSET shared: Union[Unset, bool] = UNSET - unique_id: 
Union[Unset, str] = UNSET + topology: Union[Unset, Topology] = UNSET + created_by_user: Union[Unset, str] = UNSET + description: Union[Unset, None, str] = UNSET + local: Union[Unset, None, bool] = UNSET + policy: Union[Unset, "ComputationPolicy"] = UNSET + query: Union[Unset, "DataSourceQuery"] = UNSET + workflow_type: Union[Unset, ProjectBaseWorkflowType] = UNSET + created_with_client: Union[Unset, Client] = UNSET + hide_leaf_participants: Union[Unset, None, bool] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - data_source_auto_match = self.data_source_auto_match - name = self.name - query: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.query, Unset): - query = self.query.to_dict() - - run_async = self.run_async - topology: Union[Unset, str] = UNSET - if not isinstance(self.topology, Unset): - topology = self.topology.value - - allow_clear_query = self.allow_clear_query - workflow_json = self.workflow_json - query_timeout = self.query_timeout - authorization_status: Union[Unset, str] = UNSET - if not isinstance(self.authorization_status, Unset): - authorization_status = self.authorization_status.value - - compute_only = self.compute_only - created_with_client: Union[Unset, str] = UNSET - if not isinstance(self.created_with_client, Unset): - created_with_client = self.created_with_client.value - - description = self.description - local = self.local - created_by_node = self.created_by_node - workflow_type: Union[Unset, str] = UNSET - if not isinstance(self.workflow_type, Unset): - workflow_type = self.workflow_type.value - + unique_id = self.unique_id + unrestricted_access = self.unrestricted_access authorized_users: Union[Unset, List[str]] = UNSET if not isinstance(self.authorized_users, Unset): authorized_users = self.authorized_users - locked = self.locked + name = self.name allow_shared_edit = self.allow_shared_edit local_data_selection_definition: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.local_data_selection_definition, Unset): local_data_selection_definition = self.local_data_selection_definition.to_dict() + created_by_node = self.created_by_node + data_source_auto_match = self.data_source_auto_match min_contributors = self.min_contributors network_id = self.network_id - policy: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.policy, Unset): - policy = self.policy.to_dict() + allow_clear_query = self.allow_clear_query + authorization_status: Union[Unset, str] = UNSET + if not isinstance(self.authorization_status, Unset): + authorization_status = self.authorization_status.value + query_timeout = self.query_timeout + end_to_end_encrypted = self.end_to_end_encrypted + workflow_json = self.workflow_json computation_definition: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.computation_definition, Unset): computation_definition = self.computation_definition.to_dict() - created_by_user = self.created_by_user data_source_id = self.data_source_id + locked = self.locked + non_contributor = self.non_contributor dpia = self.dpia + run_async = self.run_async shared = self.shared - unique_id = self.unique_id + topology: Union[Unset, str] = UNSET + if not isinstance(self.topology, Unset): + topology = self.topology.value + + created_by_user = self.created_by_user + description = self.description + local = self.local + policy: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.policy, Unset): + policy = self.policy.to_dict() + + query: Union[Unset, 
Dict[str, Any]] = UNSET + if not isinstance(self.query, Unset): + query = self.query.to_dict() + + workflow_type: Union[Unset, str] = UNSET + if not isinstance(self.workflow_type, Unset): + workflow_type = self.workflow_type.value + + created_with_client: Union[Unset, str] = UNSET + if not isinstance(self.created_with_client, Unset): + created_with_client = self.created_with_client.value + + hide_leaf_participants = self.hide_leaf_participants field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if data_source_auto_match is not UNSET: - field_dict["dataSourceAutoMatch"] = data_source_auto_match - if name is not UNSET: - field_dict["name"] = name - if query is not UNSET: - field_dict["query"] = query - if run_async is not UNSET: - field_dict["runAsync"] = run_async - if topology is not UNSET: - field_dict["topology"] = topology - if allow_clear_query is not UNSET: - field_dict["allowClearQuery"] = allow_clear_query - if workflow_json is not UNSET: - field_dict["workflowJSON"] = workflow_json - if query_timeout is not UNSET: - field_dict["queryTimeout"] = query_timeout - if authorization_status is not UNSET: - field_dict["authorizationStatus"] = authorization_status - if compute_only is not UNSET: - field_dict["computeOnly"] = compute_only - if created_with_client is not UNSET: - field_dict["createdWithClient"] = created_with_client - if description is not UNSET: - field_dict["description"] = description - if local is not UNSET: - field_dict["local"] = local - if created_by_node is not UNSET: - field_dict["createdByNode"] = created_by_node - if workflow_type is not UNSET: - field_dict["workflowType"] = workflow_type + if unique_id is not UNSET: + field_dict["uniqueId"] = unique_id + if unrestricted_access is not UNSET: + field_dict["unrestrictedAccess"] = unrestricted_access if authorized_users is not UNSET: field_dict["authorizedUsers"] = authorized_users - if locked is not UNSET: - field_dict["locked"] = locked + if name is not UNSET: + field_dict["name"] = name if allow_shared_edit is not UNSET: field_dict["allowSharedEdit"] = allow_shared_edit if local_data_selection_definition is not UNSET: field_dict["localDataSelectionDefinition"] = local_data_selection_definition + if created_by_node is not UNSET: + field_dict["createdByNode"] = created_by_node + if data_source_auto_match is not UNSET: + field_dict["dataSourceAutoMatch"] = data_source_auto_match if min_contributors is not UNSET: field_dict["minContributors"] = min_contributors if network_id is not UNSET: field_dict["networkId"] = network_id - if policy is not UNSET: - field_dict["policy"] = policy + if allow_clear_query is not UNSET: + field_dict["allowClearQuery"] = allow_clear_query + if authorization_status is not UNSET: + field_dict["authorizationStatus"] = authorization_status + if query_timeout is not UNSET: + field_dict["queryTimeout"] = query_timeout + if end_to_end_encrypted is not UNSET: + field_dict["endToEndEncrypted"] = end_to_end_encrypted + if workflow_json is not UNSET: + field_dict["workflowJSON"] = workflow_json if computation_definition is not UNSET: field_dict["computationDefinition"] = computation_definition - if created_by_user is not UNSET: - field_dict["createdByUser"] = created_by_user if data_source_id is not UNSET: field_dict["dataSourceId"] = data_source_id + if locked is not UNSET: + field_dict["locked"] = locked + if non_contributor is not UNSET: + field_dict["nonContributor"] = non_contributor if dpia is not UNSET: field_dict["dpia"] = dpia + if run_async 
is not UNSET: + field_dict["runAsync"] = run_async if shared is not UNSET: field_dict["shared"] = shared - if unique_id is not UNSET: - field_dict["uniqueId"] = unique_id + if topology is not UNSET: + field_dict["topology"] = topology + if created_by_user is not UNSET: + field_dict["createdByUser"] = created_by_user + if description is not UNSET: + field_dict["description"] = description + if local is not UNSET: + field_dict["local"] = local + if policy is not UNSET: + field_dict["policy"] = policy + if query is not UNSET: + field_dict["query"] = query + if workflow_type is not UNSET: + field_dict["workflowType"] = workflow_type + if created_with_client is not UNSET: + field_dict["createdWithClient"] = created_with_client + if hide_leaf_participants is not UNSET: + field_dict["hideLeafParticipants"] = hide_leaf_participants return field_dict @@ -218,31 +237,32 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.local_data_selection_definition import LocalDataSelectionDefinition d = src_dict.copy() - data_source_auto_match = d.pop("dataSourceAutoMatch", UNSET) + unique_id = d.pop("uniqueId", UNSET) + + unrestricted_access = d.pop("unrestrictedAccess", UNSET) + + authorized_users = cast(List[str], d.pop("authorizedUsers", UNSET)) name = d.pop("name", UNSET) - _query = d.pop("query", UNSET) - query: Union[Unset, DataSourceQuery] - if isinstance(_query, Unset): - query = UNSET + allow_shared_edit = d.pop("allowSharedEdit", UNSET) + + _local_data_selection_definition = d.pop("localDataSelectionDefinition", UNSET) + local_data_selection_definition: Union[Unset, LocalDataSelectionDefinition] + if isinstance(_local_data_selection_definition, Unset): + local_data_selection_definition = UNSET else: - query = DataSourceQuery.from_dict(_query) + local_data_selection_definition = LocalDataSelectionDefinition.from_dict(_local_data_selection_definition) - run_async = d.pop("runAsync", UNSET) + created_by_node = d.pop("createdByNode", UNSET) - _topology = d.pop("topology", UNSET) - topology: Union[Unset, Topology] - if isinstance(_topology, Unset): - topology = UNSET - else: - topology = Topology(_topology) + data_source_auto_match = d.pop("dataSourceAutoMatch", UNSET) - allow_clear_query = d.pop("allowClearQuery", UNSET) + min_contributors = d.pop("minContributors", UNSET) - workflow_json = d.pop("workflowJSON", UNSET) + network_id = d.pop("networkId", UNSET) - query_timeout = d.pop("queryTimeout", UNSET) + allow_clear_query = d.pop("allowClearQuery", UNSET) _authorization_status = d.pop("authorizationStatus", UNSET) authorization_status: Union[Unset, AuthorizationStatus] @@ -251,44 +271,43 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: authorization_status = AuthorizationStatus(_authorization_status) - compute_only = d.pop("computeOnly", UNSET) - - _created_with_client = d.pop("createdWithClient", UNSET) - created_with_client: Union[Unset, Client] - if isinstance(_created_with_client, Unset): - created_with_client = UNSET - else: - created_with_client = Client(_created_with_client) - - description = d.pop("description", UNSET) + query_timeout = d.pop("queryTimeout", UNSET) - local = d.pop("local", UNSET) + end_to_end_encrypted = d.pop("endToEndEncrypted", UNSET) - created_by_node = d.pop("createdByNode", UNSET) + workflow_json = d.pop("workflowJSON", UNSET) - _workflow_type = d.pop("workflowType", UNSET) - workflow_type: Union[Unset, ProjectBaseWorkflowType] - if isinstance(_workflow_type, Unset): - workflow_type = UNSET + _computation_definition = 
d.pop("computationDefinition", UNSET) + computation_definition: Union[Unset, ComputationDefinition] + if isinstance(_computation_definition, Unset): + computation_definition = UNSET else: - workflow_type = ProjectBaseWorkflowType(_workflow_type) + computation_definition = ComputationDefinition.from_dict(_computation_definition) - authorized_users = cast(List[str], d.pop("authorizedUsers", UNSET)) + data_source_id = d.pop("dataSourceId", UNSET) locked = d.pop("locked", UNSET) - allow_shared_edit = d.pop("allowSharedEdit", UNSET) + non_contributor = d.pop("nonContributor", UNSET) - _local_data_selection_definition = d.pop("localDataSelectionDefinition", UNSET) - local_data_selection_definition: Union[Unset, LocalDataSelectionDefinition] - if isinstance(_local_data_selection_definition, Unset): - local_data_selection_definition = UNSET + dpia = d.pop("dpia", UNSET) + + run_async = d.pop("runAsync", UNSET) + + shared = d.pop("shared", UNSET) + + _topology = d.pop("topology", UNSET) + topology: Union[Unset, Topology] + if isinstance(_topology, Unset): + topology = UNSET else: - local_data_selection_definition = LocalDataSelectionDefinition.from_dict(_local_data_selection_definition) + topology = Topology(_topology) - min_contributors = d.pop("minContributors", UNSET) + created_by_user = d.pop("createdByUser", UNSET) - network_id = d.pop("networkId", UNSET) + description = d.pop("description", UNSET) + + local = d.pop("local", UNSET) _policy = d.pop("policy", UNSET) policy: Union[Unset, ComputationPolicy] @@ -297,52 +316,61 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: policy = ComputationPolicy.from_dict(_policy) - _computation_definition = d.pop("computationDefinition", UNSET) - computation_definition: Union[Unset, ComputationDefinition] - if isinstance(_computation_definition, Unset): - computation_definition = UNSET + _query = d.pop("query", UNSET) + query: Union[Unset, DataSourceQuery] + if isinstance(_query, Unset): + query = UNSET else: - computation_definition = ComputationDefinition.from_dict(_computation_definition) - - created_by_user = d.pop("createdByUser", UNSET) - - data_source_id = d.pop("dataSourceId", UNSET) + query = DataSourceQuery.from_dict(_query) - dpia = d.pop("dpia", UNSET) + _workflow_type = d.pop("workflowType", UNSET) + workflow_type: Union[Unset, ProjectBaseWorkflowType] + if isinstance(_workflow_type, Unset): + workflow_type = UNSET + else: + workflow_type = ProjectBaseWorkflowType(_workflow_type) - shared = d.pop("shared", UNSET) + _created_with_client = d.pop("createdWithClient", UNSET) + created_with_client: Union[Unset, Client] + if isinstance(_created_with_client, Unset): + created_with_client = UNSET + else: + created_with_client = Client(_created_with_client) - unique_id = d.pop("uniqueId", UNSET) + hide_leaf_participants = d.pop("hideLeafParticipants", UNSET) project_base = cls( - data_source_auto_match=data_source_auto_match, - name=name, - query=query, - run_async=run_async, - topology=topology, - allow_clear_query=allow_clear_query, - workflow_json=workflow_json, - query_timeout=query_timeout, - authorization_status=authorization_status, - compute_only=compute_only, - created_with_client=created_with_client, - description=description, - local=local, - created_by_node=created_by_node, - workflow_type=workflow_type, + unique_id=unique_id, + unrestricted_access=unrestricted_access, authorized_users=authorized_users, - locked=locked, + name=name, allow_shared_edit=allow_shared_edit, 
local_data_selection_definition=local_data_selection_definition, + created_by_node=created_by_node, + data_source_auto_match=data_source_auto_match, min_contributors=min_contributors, network_id=network_id, - policy=policy, + allow_clear_query=allow_clear_query, + authorization_status=authorization_status, + query_timeout=query_timeout, + end_to_end_encrypted=end_to_end_encrypted, + workflow_json=workflow_json, computation_definition=computation_definition, - created_by_user=created_by_user, data_source_id=data_source_id, + locked=locked, + non_contributor=non_contributor, dpia=dpia, + run_async=run_async, shared=shared, - unique_id=unique_id, + topology=topology, + created_by_user=created_by_user, + description=description, + local=local, + policy=policy, + query=query, + workflow_type=workflow_type, + created_with_client=created_with_client, + hide_leaf_participants=hide_leaf_participants, ) project_base.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/project_definition.py b/src/tuneinsight/api/sdk/models/project_definition.py index 0a236bc..3e32786 100644 --- a/src/tuneinsight/api/sdk/models/project_definition.py +++ b/src/tuneinsight/api/sdk/models/project_definition.py @@ -22,137 +22,151 @@ class ProjectDefinition: """ Attributes: - data_source_auto_match (Union[Unset, bool]): whether or not to automatically assign the first matching - datasource when the project is shared with other nodes - name (Union[Unset, str]): - query (Union[Unset, DataSourceQuery]): schema used for the query - run_async (Union[Unset, bool]): flag indicating if computation should be run asynchronously - topology (Union[Unset, Topology]): Network Topologies. 'star' or 'tree'. In star topology all nodes are - connected to a central node. In tree topology all nodes are connected and aware of each other. - allow_clear_query (Union[Unset, bool]): [Dangerous, can lead to cross code data share] True if it is allowed for - a client to query the data source all participants of the project and return the clear text result - workflow_json (Union[Unset, str]): JSON representation of the workflow UI in the frontend - query_timeout (Union[Unset, int]): Timeout for the data source queries Default: 30. - authorization_status (Union[Unset, AuthorizationStatus]): Authorization status of the project - compute_only (Union[Unset, bool]): indicates that the current project participant only computes but does not - contribute any data - created_with_client (Union[Unset, Client]): Type of client that communicates with the agent API - description (Union[Unset, None, str]): - local (Union[Unset, None, bool]): True if the project's computation should run only with local data (not - configured the network) - created_by_node (Union[Unset, str]): ID of node where the project was first created - workflow_type (Union[Unset, ProjectBaseWorkflowType]): type of the workflow UI in the frontend + unique_id (Union[Unset, str]): Unique identifier of a project. + unrestricted_access (Union[Unset, None, bool]): when set to true, then all users from the same organization are + authorized to access the project (view / edit depends on the roles) authorized_users (Union[Unset, List[str]]): The IDs of the users who can run the project - locked (Union[Unset, None, bool]): True if the project is read-only (likely because it has already been shared) + name (Union[Unset, str]): allow_shared_edit (Union[Unset, bool]): True if this project can be modified after being shared. 
Modifications of a shared project will be broadcasted to the network local_data_selection_definition (Union[Unset, LocalDataSelectionDefinition]): datasource selection definition. A selection is a "query" or data selection definition to run on the datasource + created_by_node (Union[Unset, str]): ID of node where the project was first created + data_source_auto_match (Union[Unset, bool]): whether or not to automatically assign the first matching + datasource when the project is shared with other nodes min_contributors (Union[Unset, None, int]): minimum number of participants that contribute with their data required to run computations within this project network_id (Union[Unset, str]): id to uniquely identify the network - policy (Union[Unset, ComputationPolicy]): policy to validate a specific computation + allow_clear_query (Union[Unset, bool]): [Dangerous, can lead to cross code data share] True if it is allowed for + a client to query the data source all participants of the project and return the clear text result + authorization_status (Union[Unset, AuthorizationStatus]): Authorization status of the project + query_timeout (Union[Unset, int]): Timeout for the data source queries Default: 30. + end_to_end_encrypted (Union[Unset, None, bool]): whether results are always end to end encrypted and decrypted + on the client side + workflow_json (Union[Unset, str]): JSON representation of the workflow UI in the frontend computation_definition (Union[Unset, ComputationDefinition]): Generic computation. - created_by_user (Union[Unset, str]): ID of user who created the project data_source_id (Union[Unset, None, str]): Unique identifier of a data source. + locked (Union[Unset, None, bool]): True if the project is read-only (likely because it has already been shared) + non_contributor (Union[Unset, None, bool]): indicates that the current project participant takes part in the + distributed computations but does not have any input data. + By default this field is set according to the instance's configuration. dpia (Union[Unset, str]): + run_async (Union[Unset, bool]): flag indicating if computation should be run asynchronously shared (Union[Unset, bool]): True if the project has once been shared across the participants - unique_id (Union[Unset, str]): Unique identifier of a project. + topology (Union[Unset, Topology]): Network Topologies. 'star' or 'tree'. In star topology all nodes are + connected to a central node. In tree topology all nodes are connected and aware of each other. + created_by_user (Union[Unset, str]): ID of user who created the project + description (Union[Unset, None, str]): + local (Union[Unset, None, bool]): True if the project's computation should run only with local data (not + configured the network) + policy (Union[Unset, ComputationPolicy]): policy to validate a specific computation + query (Union[Unset, DataSourceQuery]): schema used for the query + workflow_type (Union[Unset, ProjectBaseWorkflowType]): type of the workflow UI in the frontend + created_with_client (Union[Unset, Client]): Type of client that communicates with the agent API + hide_leaf_participants (Union[Unset, None, bool]): whether leaf project participants are not shown to other leaf + participants when the project is in a star topology. + broadcast (Union[Unset, bool]): Temporary field. Always set to false. 
Only used for server-server communication data_source_type (Union[Unset, str]): Type of the data source to share to other nodes to match with their data source of the same type participants (Union[Unset, None, List[str]]): List of nodes involved in the project's collaboration - broadcast (Union[Unset, bool]): Temporary field. Always set to false. Only used for server-server communication """ - data_source_auto_match: Union[Unset, bool] = UNSET - name: Union[Unset, str] = UNSET - query: Union[Unset, "DataSourceQuery"] = UNSET - run_async: Union[Unset, bool] = UNSET - topology: Union[Unset, Topology] = UNSET - allow_clear_query: Union[Unset, bool] = UNSET - workflow_json: Union[Unset, str] = UNSET - query_timeout: Union[Unset, int] = 30 - authorization_status: Union[Unset, AuthorizationStatus] = UNSET - compute_only: Union[Unset, bool] = UNSET - created_with_client: Union[Unset, Client] = UNSET - description: Union[Unset, None, str] = UNSET - local: Union[Unset, None, bool] = UNSET - created_by_node: Union[Unset, str] = UNSET - workflow_type: Union[Unset, ProjectBaseWorkflowType] = UNSET + unique_id: Union[Unset, str] = UNSET + unrestricted_access: Union[Unset, None, bool] = UNSET authorized_users: Union[Unset, List[str]] = UNSET - locked: Union[Unset, None, bool] = UNSET + name: Union[Unset, str] = UNSET allow_shared_edit: Union[Unset, bool] = UNSET local_data_selection_definition: Union[Unset, "LocalDataSelectionDefinition"] = UNSET + created_by_node: Union[Unset, str] = UNSET + data_source_auto_match: Union[Unset, bool] = UNSET min_contributors: Union[Unset, None, int] = UNSET network_id: Union[Unset, str] = UNSET - policy: Union[Unset, "ComputationPolicy"] = UNSET + allow_clear_query: Union[Unset, bool] = UNSET + authorization_status: Union[Unset, AuthorizationStatus] = UNSET + query_timeout: Union[Unset, int] = 30 + end_to_end_encrypted: Union[Unset, None, bool] = UNSET + workflow_json: Union[Unset, str] = UNSET computation_definition: Union[Unset, "ComputationDefinition"] = UNSET - created_by_user: Union[Unset, str] = UNSET data_source_id: Union[Unset, None, str] = UNSET + locked: Union[Unset, None, bool] = UNSET + non_contributor: Union[Unset, None, bool] = UNSET dpia: Union[Unset, str] = UNSET + run_async: Union[Unset, bool] = UNSET shared: Union[Unset, bool] = UNSET - unique_id: Union[Unset, str] = UNSET + topology: Union[Unset, Topology] = UNSET + created_by_user: Union[Unset, str] = UNSET + description: Union[Unset, None, str] = UNSET + local: Union[Unset, None, bool] = UNSET + policy: Union[Unset, "ComputationPolicy"] = UNSET + query: Union[Unset, "DataSourceQuery"] = UNSET + workflow_type: Union[Unset, ProjectBaseWorkflowType] = UNSET + created_with_client: Union[Unset, Client] = UNSET + hide_leaf_participants: Union[Unset, None, bool] = UNSET + broadcast: Union[Unset, bool] = UNSET data_source_type: Union[Unset, str] = UNSET participants: Union[Unset, None, List[str]] = UNSET - broadcast: Union[Unset, bool] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - data_source_auto_match = self.data_source_auto_match - name = self.name - query: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.query, Unset): - query = self.query.to_dict() - - run_async = self.run_async - topology: Union[Unset, str] = UNSET - if not isinstance(self.topology, Unset): - topology = self.topology.value - - allow_clear_query = self.allow_clear_query - workflow_json = self.workflow_json - query_timeout = self.query_timeout - 
authorization_status: Union[Unset, str] = UNSET - if not isinstance(self.authorization_status, Unset): - authorization_status = self.authorization_status.value - - compute_only = self.compute_only - created_with_client: Union[Unset, str] = UNSET - if not isinstance(self.created_with_client, Unset): - created_with_client = self.created_with_client.value - - description = self.description - local = self.local - created_by_node = self.created_by_node - workflow_type: Union[Unset, str] = UNSET - if not isinstance(self.workflow_type, Unset): - workflow_type = self.workflow_type.value - + unique_id = self.unique_id + unrestricted_access = self.unrestricted_access authorized_users: Union[Unset, List[str]] = UNSET if not isinstance(self.authorized_users, Unset): authorized_users = self.authorized_users - locked = self.locked + name = self.name allow_shared_edit = self.allow_shared_edit local_data_selection_definition: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.local_data_selection_definition, Unset): local_data_selection_definition = self.local_data_selection_definition.to_dict() + created_by_node = self.created_by_node + data_source_auto_match = self.data_source_auto_match min_contributors = self.min_contributors network_id = self.network_id - policy: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.policy, Unset): - policy = self.policy.to_dict() + allow_clear_query = self.allow_clear_query + authorization_status: Union[Unset, str] = UNSET + if not isinstance(self.authorization_status, Unset): + authorization_status = self.authorization_status.value + query_timeout = self.query_timeout + end_to_end_encrypted = self.end_to_end_encrypted + workflow_json = self.workflow_json computation_definition: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.computation_definition, Unset): computation_definition = self.computation_definition.to_dict() - created_by_user = self.created_by_user data_source_id = self.data_source_id + locked = self.locked + non_contributor = self.non_contributor dpia = self.dpia + run_async = self.run_async shared = self.shared - unique_id = self.unique_id + topology: Union[Unset, str] = UNSET + if not isinstance(self.topology, Unset): + topology = self.topology.value + + created_by_user = self.created_by_user + description = self.description + local = self.local + policy: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.policy, Unset): + policy = self.policy.to_dict() + + query: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.query, Unset): + query = self.query.to_dict() + + workflow_type: Union[Unset, str] = UNSET + if not isinstance(self.workflow_type, Unset): + workflow_type = self.workflow_type.value + + created_with_client: Union[Unset, str] = UNSET + if not isinstance(self.created_with_client, Unset): + created_with_client = self.created_with_client.value + + hide_leaf_participants = self.hide_leaf_participants + broadcast = self.broadcast data_source_type = self.data_source_type participants: Union[Unset, None, List[str]] = UNSET if not isinstance(self.participants, Unset): @@ -161,73 +175,77 @@ def to_dict(self) -> Dict[str, Any]: else: participants = self.participants - broadcast = self.broadcast - field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if data_source_auto_match is not UNSET: - field_dict["dataSourceAutoMatch"] = data_source_auto_match - if name is not UNSET: - field_dict["name"] = name - if query is not UNSET: - field_dict["query"] = query - 
if run_async is not UNSET: - field_dict["runAsync"] = run_async - if topology is not UNSET: - field_dict["topology"] = topology - if allow_clear_query is not UNSET: - field_dict["allowClearQuery"] = allow_clear_query - if workflow_json is not UNSET: - field_dict["workflowJSON"] = workflow_json - if query_timeout is not UNSET: - field_dict["queryTimeout"] = query_timeout - if authorization_status is not UNSET: - field_dict["authorizationStatus"] = authorization_status - if compute_only is not UNSET: - field_dict["computeOnly"] = compute_only - if created_with_client is not UNSET: - field_dict["createdWithClient"] = created_with_client - if description is not UNSET: - field_dict["description"] = description - if local is not UNSET: - field_dict["local"] = local - if created_by_node is not UNSET: - field_dict["createdByNode"] = created_by_node - if workflow_type is not UNSET: - field_dict["workflowType"] = workflow_type + if unique_id is not UNSET: + field_dict["uniqueId"] = unique_id + if unrestricted_access is not UNSET: + field_dict["unrestrictedAccess"] = unrestricted_access if authorized_users is not UNSET: field_dict["authorizedUsers"] = authorized_users - if locked is not UNSET: - field_dict["locked"] = locked + if name is not UNSET: + field_dict["name"] = name if allow_shared_edit is not UNSET: field_dict["allowSharedEdit"] = allow_shared_edit if local_data_selection_definition is not UNSET: field_dict["localDataSelectionDefinition"] = local_data_selection_definition + if created_by_node is not UNSET: + field_dict["createdByNode"] = created_by_node + if data_source_auto_match is not UNSET: + field_dict["dataSourceAutoMatch"] = data_source_auto_match if min_contributors is not UNSET: field_dict["minContributors"] = min_contributors if network_id is not UNSET: field_dict["networkId"] = network_id - if policy is not UNSET: - field_dict["policy"] = policy + if allow_clear_query is not UNSET: + field_dict["allowClearQuery"] = allow_clear_query + if authorization_status is not UNSET: + field_dict["authorizationStatus"] = authorization_status + if query_timeout is not UNSET: + field_dict["queryTimeout"] = query_timeout + if end_to_end_encrypted is not UNSET: + field_dict["endToEndEncrypted"] = end_to_end_encrypted + if workflow_json is not UNSET: + field_dict["workflowJSON"] = workflow_json if computation_definition is not UNSET: field_dict["computationDefinition"] = computation_definition - if created_by_user is not UNSET: - field_dict["createdByUser"] = created_by_user if data_source_id is not UNSET: field_dict["dataSourceId"] = data_source_id + if locked is not UNSET: + field_dict["locked"] = locked + if non_contributor is not UNSET: + field_dict["nonContributor"] = non_contributor if dpia is not UNSET: field_dict["dpia"] = dpia + if run_async is not UNSET: + field_dict["runAsync"] = run_async if shared is not UNSET: field_dict["shared"] = shared - if unique_id is not UNSET: - field_dict["uniqueId"] = unique_id + if topology is not UNSET: + field_dict["topology"] = topology + if created_by_user is not UNSET: + field_dict["createdByUser"] = created_by_user + if description is not UNSET: + field_dict["description"] = description + if local is not UNSET: + field_dict["local"] = local + if policy is not UNSET: + field_dict["policy"] = policy + if query is not UNSET: + field_dict["query"] = query + if workflow_type is not UNSET: + field_dict["workflowType"] = workflow_type + if created_with_client is not UNSET: + field_dict["createdWithClient"] = created_with_client + if hide_leaf_participants 
is not UNSET: + field_dict["hideLeafParticipants"] = hide_leaf_participants + if broadcast is not UNSET: + field_dict["broadcast"] = broadcast if data_source_type is not UNSET: field_dict["dataSourceType"] = data_source_type if participants is not UNSET: field_dict["participants"] = participants - if broadcast is not UNSET: - field_dict["broadcast"] = broadcast return field_dict @@ -239,31 +257,32 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.local_data_selection_definition import LocalDataSelectionDefinition d = src_dict.copy() - data_source_auto_match = d.pop("dataSourceAutoMatch", UNSET) + unique_id = d.pop("uniqueId", UNSET) + + unrestricted_access = d.pop("unrestrictedAccess", UNSET) + + authorized_users = cast(List[str], d.pop("authorizedUsers", UNSET)) name = d.pop("name", UNSET) - _query = d.pop("query", UNSET) - query: Union[Unset, DataSourceQuery] - if isinstance(_query, Unset): - query = UNSET + allow_shared_edit = d.pop("allowSharedEdit", UNSET) + + _local_data_selection_definition = d.pop("localDataSelectionDefinition", UNSET) + local_data_selection_definition: Union[Unset, LocalDataSelectionDefinition] + if isinstance(_local_data_selection_definition, Unset): + local_data_selection_definition = UNSET else: - query = DataSourceQuery.from_dict(_query) + local_data_selection_definition = LocalDataSelectionDefinition.from_dict(_local_data_selection_definition) - run_async = d.pop("runAsync", UNSET) + created_by_node = d.pop("createdByNode", UNSET) - _topology = d.pop("topology", UNSET) - topology: Union[Unset, Topology] - if isinstance(_topology, Unset): - topology = UNSET - else: - topology = Topology(_topology) + data_source_auto_match = d.pop("dataSourceAutoMatch", UNSET) - allow_clear_query = d.pop("allowClearQuery", UNSET) + min_contributors = d.pop("minContributors", UNSET) - workflow_json = d.pop("workflowJSON", UNSET) + network_id = d.pop("networkId", UNSET) - query_timeout = d.pop("queryTimeout", UNSET) + allow_clear_query = d.pop("allowClearQuery", UNSET) _authorization_status = d.pop("authorizationStatus", UNSET) authorization_status: Union[Unset, AuthorizationStatus] @@ -272,44 +291,43 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: authorization_status = AuthorizationStatus(_authorization_status) - compute_only = d.pop("computeOnly", UNSET) - - _created_with_client = d.pop("createdWithClient", UNSET) - created_with_client: Union[Unset, Client] - if isinstance(_created_with_client, Unset): - created_with_client = UNSET - else: - created_with_client = Client(_created_with_client) - - description = d.pop("description", UNSET) + query_timeout = d.pop("queryTimeout", UNSET) - local = d.pop("local", UNSET) + end_to_end_encrypted = d.pop("endToEndEncrypted", UNSET) - created_by_node = d.pop("createdByNode", UNSET) + workflow_json = d.pop("workflowJSON", UNSET) - _workflow_type = d.pop("workflowType", UNSET) - workflow_type: Union[Unset, ProjectBaseWorkflowType] - if isinstance(_workflow_type, Unset): - workflow_type = UNSET + _computation_definition = d.pop("computationDefinition", UNSET) + computation_definition: Union[Unset, ComputationDefinition] + if isinstance(_computation_definition, Unset): + computation_definition = UNSET else: - workflow_type = ProjectBaseWorkflowType(_workflow_type) + computation_definition = ComputationDefinition.from_dict(_computation_definition) - authorized_users = cast(List[str], d.pop("authorizedUsers", UNSET)) + data_source_id = d.pop("dataSourceId", UNSET) locked = d.pop("locked", UNSET) 
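Stepping back from this hunk for a moment: ProjectDefinition is the payload used to create or share a project, carrying the same reordered fields plus participants, dataSourceType and broadcast. A hedged construction sketch (the import path and the node identifiers are assumptions, not taken from this diff):

from tuneinsight.api.sdk.models.project_definition import ProjectDefinition
from tuneinsight.api.sdk.models.topology import Topology

definition = ProjectDefinition(
    name="shared-study",
    topology=Topology("star"),
    participants=["node-a", "node-b"],   # hypothetical node identifiers
    min_contributors=2,
)
print(definition.to_dict()["participants"])  # ['node-a', 'node-b']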
- allow_shared_edit = d.pop("allowSharedEdit", UNSET) + non_contributor = d.pop("nonContributor", UNSET) - _local_data_selection_definition = d.pop("localDataSelectionDefinition", UNSET) - local_data_selection_definition: Union[Unset, LocalDataSelectionDefinition] - if isinstance(_local_data_selection_definition, Unset): - local_data_selection_definition = UNSET + dpia = d.pop("dpia", UNSET) + + run_async = d.pop("runAsync", UNSET) + + shared = d.pop("shared", UNSET) + + _topology = d.pop("topology", UNSET) + topology: Union[Unset, Topology] + if isinstance(_topology, Unset): + topology = UNSET else: - local_data_selection_definition = LocalDataSelectionDefinition.from_dict(_local_data_selection_definition) + topology = Topology(_topology) - min_contributors = d.pop("minContributors", UNSET) + created_by_user = d.pop("createdByUser", UNSET) - network_id = d.pop("networkId", UNSET) + description = d.pop("description", UNSET) + + local = d.pop("local", UNSET) _policy = d.pop("policy", UNSET) policy: Union[Unset, ComputationPolicy] @@ -318,61 +336,70 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: policy = ComputationPolicy.from_dict(_policy) - _computation_definition = d.pop("computationDefinition", UNSET) - computation_definition: Union[Unset, ComputationDefinition] - if isinstance(_computation_definition, Unset): - computation_definition = UNSET + _query = d.pop("query", UNSET) + query: Union[Unset, DataSourceQuery] + if isinstance(_query, Unset): + query = UNSET else: - computation_definition = ComputationDefinition.from_dict(_computation_definition) - - created_by_user = d.pop("createdByUser", UNSET) + query = DataSourceQuery.from_dict(_query) - data_source_id = d.pop("dataSourceId", UNSET) + _workflow_type = d.pop("workflowType", UNSET) + workflow_type: Union[Unset, ProjectBaseWorkflowType] + if isinstance(_workflow_type, Unset): + workflow_type = UNSET + else: + workflow_type = ProjectBaseWorkflowType(_workflow_type) - dpia = d.pop("dpia", UNSET) + _created_with_client = d.pop("createdWithClient", UNSET) + created_with_client: Union[Unset, Client] + if isinstance(_created_with_client, Unset): + created_with_client = UNSET + else: + created_with_client = Client(_created_with_client) - shared = d.pop("shared", UNSET) + hide_leaf_participants = d.pop("hideLeafParticipants", UNSET) - unique_id = d.pop("uniqueId", UNSET) + broadcast = d.pop("broadcast", UNSET) data_source_type = d.pop("dataSourceType", UNSET) participants = cast(List[str], d.pop("participants", UNSET)) - broadcast = d.pop("broadcast", UNSET) - project_definition = cls( - data_source_auto_match=data_source_auto_match, - name=name, - query=query, - run_async=run_async, - topology=topology, - allow_clear_query=allow_clear_query, - workflow_json=workflow_json, - query_timeout=query_timeout, - authorization_status=authorization_status, - compute_only=compute_only, - created_with_client=created_with_client, - description=description, - local=local, - created_by_node=created_by_node, - workflow_type=workflow_type, + unique_id=unique_id, + unrestricted_access=unrestricted_access, authorized_users=authorized_users, - locked=locked, + name=name, allow_shared_edit=allow_shared_edit, local_data_selection_definition=local_data_selection_definition, + created_by_node=created_by_node, + data_source_auto_match=data_source_auto_match, min_contributors=min_contributors, network_id=network_id, - policy=policy, + allow_clear_query=allow_clear_query, + authorization_status=authorization_status, + 
query_timeout=query_timeout, + end_to_end_encrypted=end_to_end_encrypted, + workflow_json=workflow_json, computation_definition=computation_definition, - created_by_user=created_by_user, data_source_id=data_source_id, + locked=locked, + non_contributor=non_contributor, dpia=dpia, + run_async=run_async, shared=shared, - unique_id=unique_id, + topology=topology, + created_by_user=created_by_user, + description=description, + local=local, + policy=policy, + query=query, + workflow_type=workflow_type, + created_with_client=created_with_client, + hide_leaf_participants=hide_leaf_participants, + broadcast=broadcast, data_source_type=data_source_type, participants=participants, - broadcast=broadcast, ) project_definition.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/query.py b/src/tuneinsight/api/sdk/models/query.py index 998fd2b..28933eb 100644 --- a/src/tuneinsight/api/sdk/models/query.py +++ b/src/tuneinsight/api/sdk/models/query.py @@ -17,64 +17,64 @@ class Query: """Data source query Attributes: - created_at (Union[Unset, str]): created_by_user (Union[Unset, str]): ID of user who created the project error (Union[Unset, str]): Error message, in case status of the query is error. + results (Union[Unset, QueryResults]): result dataobject IDs + status (Union[Unset, QueryStatus]): + created_at (Union[Unset, str]): id (Union[Unset, str]): project_id (Union[Unset, str]): Unique identifier of a project. - results (Union[Unset, QueryResults]): result dataobject IDs query_string (Union[Unset, str]): String of the query e.g. SQL or JSON - status (Union[Unset, QueryStatus]): updated_at (Union[Unset, str]): """ - created_at: Union[Unset, str] = UNSET created_by_user: Union[Unset, str] = UNSET error: Union[Unset, str] = UNSET + results: Union[Unset, "QueryResults"] = UNSET + status: Union[Unset, QueryStatus] = UNSET + created_at: Union[Unset, str] = UNSET id: Union[Unset, str] = UNSET project_id: Union[Unset, str] = UNSET - results: Union[Unset, "QueryResults"] = UNSET query_string: Union[Unset, str] = UNSET - status: Union[Unset, QueryStatus] = UNSET updated_at: Union[Unset, str] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - created_at = self.created_at created_by_user = self.created_by_user error = self.error - id = self.id - project_id = self.project_id results: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.results, Unset): results = self.results.to_dict() - query_string = self.query_string status: Union[Unset, str] = UNSET if not isinstance(self.status, Unset): status = self.status.value + created_at = self.created_at + id = self.id + project_id = self.project_id + query_string = self.query_string updated_at = self.updated_at field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if created_at is not UNSET: - field_dict["createdAt"] = created_at if created_by_user is not UNSET: field_dict["createdByUser"] = created_by_user if error is not UNSET: field_dict["error"] = error + if results is not UNSET: + field_dict["results"] = results + if status is not UNSET: + field_dict["status"] = status + if created_at is not UNSET: + field_dict["createdAt"] = created_at if id is not UNSET: field_dict["id"] = id if project_id is not UNSET: field_dict["projectId"] = project_id - if results is not UNSET: - field_dict["results"] = results if query_string is not UNSET: field_dict["queryString"] = query_string - if status is not UNSET: - field_dict["status"] = 
status if updated_at is not UNSET: field_dict["updatedAt"] = updated_at @@ -85,16 +85,10 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.query_results import QueryResults d = src_dict.copy() - created_at = d.pop("createdAt", UNSET) - created_by_user = d.pop("createdByUser", UNSET) error = d.pop("error", UNSET) - id = d.pop("id", UNSET) - - project_id = d.pop("projectId", UNSET) - _results = d.pop("results", UNSET) results: Union[Unset, QueryResults] if isinstance(_results, Unset): @@ -102,8 +96,6 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: results = QueryResults.from_dict(_results) - query_string = d.pop("queryString", UNSET) - _status = d.pop("status", UNSET) status: Union[Unset, QueryStatus] if isinstance(_status, Unset): @@ -111,17 +103,25 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: status = QueryStatus(_status) + created_at = d.pop("createdAt", UNSET) + + id = d.pop("id", UNSET) + + project_id = d.pop("projectId", UNSET) + + query_string = d.pop("queryString", UNSET) + updated_at = d.pop("updatedAt", UNSET) query = cls( - created_at=created_at, created_by_user=created_by_user, error=error, + results=results, + status=status, + created_at=created_at, id=id, project_id=project_id, - results=results, query_string=query_string, - status=status, updated_at=updated_at, ) diff --git a/src/tuneinsight/api/sdk/models/realm_role.py b/src/tuneinsight/api/sdk/models/realm_role.py index eab8cd5..6952149 100644 --- a/src/tuneinsight/api/sdk/models/realm_role.py +++ b/src/tuneinsight/api/sdk/models/realm_role.py @@ -11,33 +11,37 @@ class RealmRole: """ Attributes: + client_role (Union[Unset, bool]): + composite (Union[Unset, bool]): container_id (Union[Unset, str]): description (Union[Unset, str]): id (Union[Unset, str]): name (Union[Unset, str]): - client_role (Union[Unset, bool]): - composite (Union[Unset, bool]): """ + client_role: Union[Unset, bool] = UNSET + composite: Union[Unset, bool] = UNSET container_id: Union[Unset, str] = UNSET description: Union[Unset, str] = UNSET id: Union[Unset, str] = UNSET name: Union[Unset, str] = UNSET - client_role: Union[Unset, bool] = UNSET - composite: Union[Unset, bool] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: + client_role = self.client_role + composite = self.composite container_id = self.container_id description = self.description id = self.id name = self.name - client_role = self.client_role - composite = self.composite field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) + if client_role is not UNSET: + field_dict["clientRole"] = client_role + if composite is not UNSET: + field_dict["composite"] = composite if container_id is not UNSET: field_dict["containerId"] = container_id if description is not UNSET: @@ -46,16 +50,16 @@ def to_dict(self) -> Dict[str, Any]: field_dict["id"] = id if name is not UNSET: field_dict["name"] = name - if client_role is not UNSET: - field_dict["clientRole"] = client_role - if composite is not UNSET: - field_dict["composite"] = composite return field_dict @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() + client_role = d.pop("clientRole", UNSET) + + composite = d.pop("composite", UNSET) + container_id = d.pop("containerId", UNSET) description = d.pop("description", UNSET) @@ -64,17 +68,13 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: name = d.pop("name", UNSET) - 
client_role = d.pop("clientRole", UNSET) - - composite = d.pop("composite", UNSET) - realm_role = cls( + client_role=client_role, + composite=composite, container_id=container_id, description=description, id=id, name=name, - client_role=client_role, - composite=composite, ) realm_role.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/relin_key_gen.py b/src/tuneinsight/api/sdk/models/relin_key_gen.py index aa3af03..f8fd70a 100644 --- a/src/tuneinsight/api/sdk/models/relin_key_gen.py +++ b/src/tuneinsight/api/sdk/models/relin_key_gen.py @@ -20,81 +20,88 @@ class RelinKeyGen: """ Attributes: type (ComputationType): Type of the computation. - join_id (Union[Unset, str]): Unique identifier of a data object. + local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured + the network) local_input (Union[Unset, LocalInput]): If a local input is provided, the node initiating the computation will use it instead of querying the datasource. This data is *not* shared to other nodes, only used for the duration of the computation. The local input columns/values must be in the form {: [, , ...], ...} - wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. - input_data_object (Union[Unset, str]): Shared identifier of a data object. + preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters + applied to the input retrieved from the datasource, if applicable cohort_id (Union[Unset, str]): Unique identifier of a data object. + end_to_end_encrypted (Union[Unset, bool]): if the end to end encrypted mode is set to true, + then when release results is set to true and the output + is initially encrypted with a network collective key, then it is key switched to + the initiating user's public key. + local_input_id (Union[Unset, str]): Unique identifier of a data object. + wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. + dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various + disclosure prevention mechanisms + join_id (Union[Unset, str]): Unique identifier of a data object. + timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. + encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. + owner (Union[Unset, str]): The username of the end user who requested the computation. project_id (Union[Unset, str]): Unique identifier of a project. release_results (Union[Unset, bool]): flag to set to true if the computation should directly release the output results. If set, then encrypted results are automatically key switched and decrypted and a Result entity is saved - dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various - disclosure prevention mechanisms - encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. - local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured - the network) - local_input_id (Union[Unset, str]): Unique identifier of a data object. - owner (Union[Unset, str]): The username of the end user who requested the computation. 
- preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters - applied to the input retrieved from the datasource, if applicable - timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. data_source_parameters (Union[Unset, ComputationDataSourceParameters]): Parameters used to query the datasource from each node before the computation + input_data_object (Union[Unset, str]): Shared identifier of a data object. """ type: ComputationType - join_id: Union[Unset, str] = UNSET + local: Union[Unset, bool] = UNSET local_input: Union[Unset, "LocalInput"] = UNSET - wait: Union[Unset, bool] = UNSET - input_data_object: Union[Unset, str] = UNSET + preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET cohort_id: Union[Unset, str] = UNSET - project_id: Union[Unset, str] = UNSET - release_results: Union[Unset, bool] = UNSET + end_to_end_encrypted: Union[Unset, bool] = UNSET + local_input_id: Union[Unset, str] = UNSET + wait: Union[Unset, bool] = UNSET dp_policy: Union[Unset, "DPPolicy"] = UNSET + join_id: Union[Unset, str] = UNSET + timeout: Union[Unset, int] = UNSET encrypted: Union[Unset, bool] = UNSET - local: Union[Unset, bool] = UNSET - local_input_id: Union[Unset, str] = UNSET owner: Union[Unset, str] = UNSET - preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET - timeout: Union[Unset, int] = UNSET + project_id: Union[Unset, str] = UNSET + release_results: Union[Unset, bool] = UNSET data_source_parameters: Union[Unset, "ComputationDataSourceParameters"] = UNSET + input_data_object: Union[Unset, str] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: type = self.type.value - join_id = self.join_id + local = self.local local_input: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.local_input, Unset): local_input = self.local_input.to_dict() - wait = self.wait - input_data_object = self.input_data_object + preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.preprocessing_parameters, Unset): + preprocessing_parameters = self.preprocessing_parameters.to_dict() + cohort_id = self.cohort_id - project_id = self.project_id - release_results = self.release_results + end_to_end_encrypted = self.end_to_end_encrypted + local_input_id = self.local_input_id + wait = self.wait dp_policy: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.dp_policy, Unset): dp_policy = self.dp_policy.to_dict() + join_id = self.join_id + timeout = self.timeout encrypted = self.encrypted - local = self.local - local_input_id = self.local_input_id owner = self.owner - preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.preprocessing_parameters, Unset): - preprocessing_parameters = self.preprocessing_parameters.to_dict() - - timeout = self.timeout + project_id = self.project_id + release_results = self.release_results data_source_parameters: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.data_source_parameters, Unset): data_source_parameters = self.data_source_parameters.to_dict() + input_data_object = self.input_data_object + field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update( @@ -102,36 +109,38 @@ def to_dict(self) -> Dict[str, Any]: "type": type, } ) - if join_id is not UNSET: - field_dict["joinId"] = join_id + if local is not UNSET: + field_dict["local"] = 
local if local_input is not UNSET: field_dict["localInput"] = local_input - if wait is not UNSET: - field_dict["wait"] = wait - if input_data_object is not UNSET: - field_dict["inputDataObject"] = input_data_object + if preprocessing_parameters is not UNSET: + field_dict["preprocessingParameters"] = preprocessing_parameters if cohort_id is not UNSET: field_dict["cohortId"] = cohort_id - if project_id is not UNSET: - field_dict["projectId"] = project_id - if release_results is not UNSET: - field_dict["releaseResults"] = release_results + if end_to_end_encrypted is not UNSET: + field_dict["endToEndEncrypted"] = end_to_end_encrypted + if local_input_id is not UNSET: + field_dict["localInputID"] = local_input_id + if wait is not UNSET: + field_dict["wait"] = wait if dp_policy is not UNSET: field_dict["DPPolicy"] = dp_policy + if join_id is not UNSET: + field_dict["joinId"] = join_id + if timeout is not UNSET: + field_dict["timeout"] = timeout if encrypted is not UNSET: field_dict["encrypted"] = encrypted - if local is not UNSET: - field_dict["local"] = local - if local_input_id is not UNSET: - field_dict["localInputID"] = local_input_id if owner is not UNSET: field_dict["owner"] = owner - if preprocessing_parameters is not UNSET: - field_dict["preprocessingParameters"] = preprocessing_parameters - if timeout is not UNSET: - field_dict["timeout"] = timeout + if project_id is not UNSET: + field_dict["projectId"] = project_id + if release_results is not UNSET: + field_dict["releaseResults"] = release_results if data_source_parameters is not UNSET: field_dict["dataSourceParameters"] = data_source_parameters + if input_data_object is not UNSET: + field_dict["inputDataObject"] = input_data_object return field_dict @@ -145,7 +154,7 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() type = ComputationType(d.pop("type")) - join_id = d.pop("joinId", UNSET) + local = d.pop("local", UNSET) _local_input = d.pop("localInput", UNSET) local_input: Union[Unset, LocalInput] @@ -154,15 +163,20 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: local_input = LocalInput.from_dict(_local_input) - wait = d.pop("wait", UNSET) - - input_data_object = d.pop("inputDataObject", UNSET) + _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) + preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] + if isinstance(_preprocessing_parameters, Unset): + preprocessing_parameters = UNSET + else: + preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) cohort_id = d.pop("cohortId", UNSET) - project_id = d.pop("projectId", UNSET) + end_to_end_encrypted = d.pop("endToEndEncrypted", UNSET) - release_results = d.pop("releaseResults", UNSET) + local_input_id = d.pop("localInputID", UNSET) + + wait = d.pop("wait", UNSET) _dp_policy = d.pop("DPPolicy", UNSET) dp_policy: Union[Unset, DPPolicy] @@ -171,22 +185,17 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: dp_policy = DPPolicy.from_dict(_dp_policy) - encrypted = d.pop("encrypted", UNSET) + join_id = d.pop("joinId", UNSET) - local = d.pop("local", UNSET) + timeout = d.pop("timeout", UNSET) - local_input_id = d.pop("localInputID", UNSET) + encrypted = d.pop("encrypted", UNSET) owner = d.pop("owner", UNSET) - _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) - preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] - if isinstance(_preprocessing_parameters, Unset): - preprocessing_parameters = UNSET - 
else: - preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) + project_id = d.pop("projectId", UNSET) - timeout = d.pop("timeout", UNSET) + release_results = d.pop("releaseResults", UNSET) _data_source_parameters = d.pop("dataSourceParameters", UNSET) data_source_parameters: Union[Unset, ComputationDataSourceParameters] @@ -195,23 +204,26 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: data_source_parameters = ComputationDataSourceParameters.from_dict(_data_source_parameters) + input_data_object = d.pop("inputDataObject", UNSET) + relin_key_gen = cls( type=type, - join_id=join_id, + local=local, local_input=local_input, - wait=wait, - input_data_object=input_data_object, + preprocessing_parameters=preprocessing_parameters, cohort_id=cohort_id, - project_id=project_id, - release_results=release_results, + end_to_end_encrypted=end_to_end_encrypted, + local_input_id=local_input_id, + wait=wait, dp_policy=dp_policy, + join_id=join_id, + timeout=timeout, encrypted=encrypted, - local=local, - local_input_id=local_input_id, owner=owner, - preprocessing_parameters=preprocessing_parameters, - timeout=timeout, + project_id=project_id, + release_results=release_results, data_source_parameters=data_source_parameters, + input_data_object=input_data_object, ) relin_key_gen.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/rename.py b/src/tuneinsight/api/sdk/models/rename.py index 0bf1127..b790357 100644 --- a/src/tuneinsight/api/sdk/models/rename.py +++ b/src/tuneinsight/api/sdk/models/rename.py @@ -19,16 +19,16 @@ class Rename: Attributes: type (PreprocessingOperationType): type of preprocessing operation mapper (RenameMapper): transformations to apply to that axis’ values + axis (Union[Unset, RenameAxis]): axis to target with mapper copy (Union[Unset, bool]): whether to return a copy errors (Union[Unset, bool]): Control raising of exceptions on invalid data for provided dtype - axis (Union[Unset, RenameAxis]): axis to target with mapper """ type: PreprocessingOperationType mapper: "RenameMapper" + axis: Union[Unset, RenameAxis] = UNSET copy: Union[Unset, bool] = UNSET errors: Union[Unset, bool] = UNSET - axis: Union[Unset, RenameAxis] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: @@ -36,12 +36,13 @@ def to_dict(self) -> Dict[str, Any]: mapper = self.mapper.to_dict() - copy = self.copy - errors = self.errors axis: Union[Unset, str] = UNSET if not isinstance(self.axis, Unset): axis = self.axis.value + copy = self.copy + errors = self.errors + field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update( @@ -50,12 +51,12 @@ def to_dict(self) -> Dict[str, Any]: "mapper": mapper, } ) + if axis is not UNSET: + field_dict["axis"] = axis if copy is not UNSET: field_dict["copy"] = copy if errors is not UNSET: field_dict["errors"] = errors - if axis is not UNSET: - field_dict["axis"] = axis return field_dict @@ -68,10 +69,6 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: mapper = RenameMapper.from_dict(d.pop("mapper")) - copy = d.pop("copy", UNSET) - - errors = d.pop("errors", UNSET) - _axis = d.pop("axis", UNSET) axis: Union[Unset, RenameAxis] if isinstance(_axis, Unset): @@ -79,12 +76,16 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: axis = RenameAxis(_axis) + copy = d.pop("copy", UNSET) + + errors = d.pop("errors", UNSET) + rename = cls( type=type, mapper=mapper, + axis=axis, 
copy=copy, errors=errors, - axis=axis, ) rename.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/result.py b/src/tuneinsight/api/sdk/models/result.py index e64fa81..97322e2 100644 --- a/src/tuneinsight/api/sdk/models/result.py +++ b/src/tuneinsight/api/sdk/models/result.py @@ -20,29 +20,29 @@ class Result: tags (Union[Unset, List[str]]): title (Union[Unset, str]): title given to the result is_large (Union[Unset, None, bool]): format to display the result - metadata (Union[Unset, ResultMetadata]): various metadata field along with the result to provide additional - context - owner (Union[Unset, str]): - updated_at (Union[Unset, str]): computation_id (Union[Unset, str]): Identifier of a computation, unique across all computing nodes. computation_type (Union[Unset, ComputationType]): Type of the computation. created_at (Union[Unset, str]): data_object_id (Union[Unset, str]): Unique identifier of a data object. id (Union[Unset, str]): Unique identifier of a result. + metadata (Union[Unset, ResultMetadata]): various metadata field along with the result to provide additional + context + owner (Union[Unset, str]): + updated_at (Union[Unset, str]): """ shared: Union[Unset, None, bool] = UNSET tags: Union[Unset, List[str]] = UNSET title: Union[Unset, str] = UNSET is_large: Union[Unset, None, bool] = UNSET - metadata: Union[Unset, "ResultMetadata"] = UNSET - owner: Union[Unset, str] = UNSET - updated_at: Union[Unset, str] = UNSET computation_id: Union[Unset, str] = UNSET computation_type: Union[Unset, ComputationType] = UNSET created_at: Union[Unset, str] = UNSET data_object_id: Union[Unset, str] = UNSET id: Union[Unset, str] = UNSET + metadata: Union[Unset, "ResultMetadata"] = UNSET + owner: Union[Unset, str] = UNSET + updated_at: Union[Unset, str] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: @@ -53,12 +53,6 @@ def to_dict(self) -> Dict[str, Any]: title = self.title is_large = self.is_large - metadata: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.metadata, Unset): - metadata = self.metadata.to_dict() - - owner = self.owner - updated_at = self.updated_at computation_id = self.computation_id computation_type: Union[Unset, str] = UNSET if not isinstance(self.computation_type, Unset): @@ -67,6 +61,12 @@ def to_dict(self) -> Dict[str, Any]: created_at = self.created_at data_object_id = self.data_object_id id = self.id + metadata: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.metadata, Unset): + metadata = self.metadata.to_dict() + + owner = self.owner + updated_at = self.updated_at field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) @@ -79,12 +79,6 @@ def to_dict(self) -> Dict[str, Any]: field_dict["title"] = title if is_large is not UNSET: field_dict["isLarge"] = is_large - if metadata is not UNSET: - field_dict["metadata"] = metadata - if owner is not UNSET: - field_dict["owner"] = owner - if updated_at is not UNSET: - field_dict["updatedAt"] = updated_at if computation_id is not UNSET: field_dict["computationId"] = computation_id if computation_type is not UNSET: @@ -95,6 +89,12 @@ def to_dict(self) -> Dict[str, Any]: field_dict["dataObjectId"] = data_object_id if id is not UNSET: field_dict["id"] = id + if metadata is not UNSET: + field_dict["metadata"] = metadata + if owner is not UNSET: + field_dict["owner"] = owner + if updated_at is not UNSET: + field_dict["updatedAt"] = updated_at return field_dict @@ -111,17 +111,6 @@ def from_dict(cls: 
Type[T], src_dict: Dict[str, Any]) -> T: is_large = d.pop("isLarge", UNSET) - _metadata = d.pop("metadata", UNSET) - metadata: Union[Unset, ResultMetadata] - if isinstance(_metadata, Unset): - metadata = UNSET - else: - metadata = ResultMetadata.from_dict(_metadata) - - owner = d.pop("owner", UNSET) - - updated_at = d.pop("updatedAt", UNSET) - computation_id = d.pop("computationId", UNSET) _computation_type = d.pop("computationType", UNSET) @@ -137,19 +126,30 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: id = d.pop("id", UNSET) + _metadata = d.pop("metadata", UNSET) + metadata: Union[Unset, ResultMetadata] + if isinstance(_metadata, Unset): + metadata = UNSET + else: + metadata = ResultMetadata.from_dict(_metadata) + + owner = d.pop("owner", UNSET) + + updated_at = d.pop("updatedAt", UNSET) + result = cls( shared=shared, tags=tags, title=title, is_large=is_large, - metadata=metadata, - owner=owner, - updated_at=updated_at, computation_id=computation_id, computation_type=computation_type, created_at=created_at, data_object_id=data_object_id, id=id, + metadata=metadata, + owner=owner, + updated_at=updated_at, ) result.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/result_content.py b/src/tuneinsight/api/sdk/models/result_content.py index ac9e753..d477a64 100644 --- a/src/tuneinsight/api/sdk/models/result_content.py +++ b/src/tuneinsight/api/sdk/models/result_content.py @@ -18,21 +18,17 @@ class ResultContent: """result along with content and computation details Attributes: - computation (Union[Unset, Computation]): Metadata of a computation. content (Union[Unset, Content]): Content that can be retrieved and displayed for the user result (Union[Unset, Result]): + computation (Union[Unset, Computation]): Metadata of a computation. 
""" - computation: Union[Unset, "Computation"] = UNSET content: Union[Unset, "Content"] = UNSET result: Union[Unset, "Result"] = UNSET + computation: Union[Unset, "Computation"] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - computation: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.computation, Unset): - computation = self.computation.to_dict() - content: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.content, Unset): content = self.content.to_dict() @@ -41,15 +37,19 @@ def to_dict(self) -> Dict[str, Any]: if not isinstance(self.result, Unset): result = self.result.to_dict() + computation: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.computation, Unset): + computation = self.computation.to_dict() + field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if computation is not UNSET: - field_dict["computation"] = computation if content is not UNSET: field_dict["content"] = content if result is not UNSET: field_dict["result"] = result + if computation is not UNSET: + field_dict["computation"] = computation return field_dict @@ -60,13 +60,6 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.result import Result d = src_dict.copy() - _computation = d.pop("computation", UNSET) - computation: Union[Unset, Computation] - if isinstance(_computation, Unset): - computation = UNSET - else: - computation = Computation.from_dict(_computation) - _content = d.pop("content", UNSET) content: Union[Unset, Content] if isinstance(_content, Unset): @@ -81,10 +74,17 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: result = Result.from_dict(_result) + _computation = d.pop("computation", UNSET) + computation: Union[Unset, Computation] + if isinstance(_computation, Unset): + computation = UNSET + else: + computation = Computation.from_dict(_computation) + result_content = cls( - computation=computation, content=content, result=result, + computation=computation, ) result_content.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/rot_key_gen.py b/src/tuneinsight/api/sdk/models/rot_key_gen.py index 4d3e1b9..84793f9 100644 --- a/src/tuneinsight/api/sdk/models/rot_key_gen.py +++ b/src/tuneinsight/api/sdk/models/rot_key_gen.py @@ -21,83 +21,89 @@ class RotKeyGen: """ Attributes: type (ComputationType): Type of the computation. - join_id (Union[Unset, str]): Unique identifier of a data object. + local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured + the network) local_input (Union[Unset, LocalInput]): If a local input is provided, the node initiating the computation will use it instead of querying the datasource. This data is *not* shared to other nodes, only used for the duration of the computation. The local input columns/values must be in the form {: [, , ...], ...} - wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. - input_data_object (Union[Unset, str]): Shared identifier of a data object. + preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters + applied to the input retrieved from the datasource, if applicable cohort_id (Union[Unset, str]): Unique identifier of a data object. 
+ end_to_end_encrypted (Union[Unset, bool]): if the end to end encrypted mode is set to true, + then when release results is set to true and the output + is initially encrypted with a network collective key, then it is key switched to + the initiating user's public key. + local_input_id (Union[Unset, str]): Unique identifier of a data object. + wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. + dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various + disclosure prevention mechanisms + join_id (Union[Unset, str]): Unique identifier of a data object. + timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. + encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. + owner (Union[Unset, str]): The username of the end user who requested the computation. project_id (Union[Unset, str]): Unique identifier of a project. release_results (Union[Unset, bool]): flag to set to true if the computation should directly release the output results. If set, then encrypted results are automatically key switched and decrypted and a Result entity is saved - dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various - disclosure prevention mechanisms - encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. - local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured - the network) - local_input_id (Union[Unset, str]): Unique identifier of a data object. - owner (Union[Unset, str]): The username of the end user who requested the computation. - preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters - applied to the input retrieved from the datasource, if applicable - timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. data_source_parameters (Union[Unset, ComputationDataSourceParameters]): Parameters used to query the datasource from each node before the computation + input_data_object (Union[Unset, str]): Shared identifier of a data object. 
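Nested sub-models such as dp_policy, local_input, preprocessing_parameters and data_source_parameters apply the same rule one level down: to_dict calls the child's to_dict only when the attribute is not UNSET, and from_dict rebuilds the child only when the key was present. A toy sketch of that guard follows; it is not the generated RotKeyGen/DPPolicy code, and maxQueries is an invented field used purely for illustration:

```python
from typing import Any, Dict


class Unset:
    """Sentinel type for absent fields."""


UNSET = Unset()


class DPPolicySketch:
    """Toy nested model standing in for DPPolicy (maxQueries is made up)."""

    def __init__(self, max_queries: int):
        self.max_queries = max_queries

    def to_dict(self) -> Dict[str, Any]:
        return {"maxQueries": self.max_queries}

    @classmethod
    def from_dict(cls, d: Dict[str, Any]) -> "DPPolicySketch":
        return cls(max_queries=d["maxQueries"])


class ComputationSketch:
    """Toy parent model with an optional nested policy."""

    def __init__(self, dp_policy=UNSET, timeout=UNSET):
        self.dp_policy = dp_policy
        self.timeout = timeout

    def to_dict(self) -> Dict[str, Any]:
        field_dict: Dict[str, Any] = {}
        # Serialize the nested model only when it was provided.
        if not isinstance(self.dp_policy, Unset):
            field_dict["DPPolicy"] = self.dp_policy.to_dict()
        if not isinstance(self.timeout, Unset):
            field_dict["timeout"] = self.timeout
        return field_dict

    @classmethod
    def from_dict(cls, src: Dict[str, Any]) -> "ComputationSketch":
        d = src.copy()
        _dp = d.pop("DPPolicy", UNSET)
        dp_policy = UNSET if isinstance(_dp, Unset) else DPPolicySketch.from_dict(_dp)
        return cls(dp_policy=dp_policy, timeout=d.pop("timeout", UNSET))


comp = ComputationSketch(dp_policy=DPPolicySketch(max_queries=10))
assert comp.to_dict() == {"DPPolicy": {"maxQueries": 10}}
assert ComputationSketch.from_dict({"timeout": 30}).to_dict() == {"timeout": 30}
```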
rotations (Union[Unset, List['RotKeyGenRotationsItem']]): """ type: ComputationType - join_id: Union[Unset, str] = UNSET + local: Union[Unset, bool] = UNSET local_input: Union[Unset, "LocalInput"] = UNSET - wait: Union[Unset, bool] = UNSET - input_data_object: Union[Unset, str] = UNSET + preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET cohort_id: Union[Unset, str] = UNSET - project_id: Union[Unset, str] = UNSET - release_results: Union[Unset, bool] = UNSET + end_to_end_encrypted: Union[Unset, bool] = UNSET + local_input_id: Union[Unset, str] = UNSET + wait: Union[Unset, bool] = UNSET dp_policy: Union[Unset, "DPPolicy"] = UNSET + join_id: Union[Unset, str] = UNSET + timeout: Union[Unset, int] = UNSET encrypted: Union[Unset, bool] = UNSET - local: Union[Unset, bool] = UNSET - local_input_id: Union[Unset, str] = UNSET owner: Union[Unset, str] = UNSET - preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET - timeout: Union[Unset, int] = UNSET + project_id: Union[Unset, str] = UNSET + release_results: Union[Unset, bool] = UNSET data_source_parameters: Union[Unset, "ComputationDataSourceParameters"] = UNSET + input_data_object: Union[Unset, str] = UNSET rotations: Union[Unset, List["RotKeyGenRotationsItem"]] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: type = self.type.value - join_id = self.join_id + local = self.local local_input: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.local_input, Unset): local_input = self.local_input.to_dict() - wait = self.wait - input_data_object = self.input_data_object + preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.preprocessing_parameters, Unset): + preprocessing_parameters = self.preprocessing_parameters.to_dict() + cohort_id = self.cohort_id - project_id = self.project_id - release_results = self.release_results + end_to_end_encrypted = self.end_to_end_encrypted + local_input_id = self.local_input_id + wait = self.wait dp_policy: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.dp_policy, Unset): dp_policy = self.dp_policy.to_dict() + join_id = self.join_id + timeout = self.timeout encrypted = self.encrypted - local = self.local - local_input_id = self.local_input_id owner = self.owner - preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.preprocessing_parameters, Unset): - preprocessing_parameters = self.preprocessing_parameters.to_dict() - - timeout = self.timeout + project_id = self.project_id + release_results = self.release_results data_source_parameters: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.data_source_parameters, Unset): data_source_parameters = self.data_source_parameters.to_dict() + input_data_object = self.input_data_object rotations: Union[Unset, List[Dict[str, Any]]] = UNSET if not isinstance(self.rotations, Unset): rotations = [] @@ -113,36 +119,38 @@ def to_dict(self) -> Dict[str, Any]: "type": type, } ) - if join_id is not UNSET: - field_dict["joinId"] = join_id + if local is not UNSET: + field_dict["local"] = local if local_input is not UNSET: field_dict["localInput"] = local_input - if wait is not UNSET: - field_dict["wait"] = wait - if input_data_object is not UNSET: - field_dict["inputDataObject"] = input_data_object + if preprocessing_parameters is not UNSET: + field_dict["preprocessingParameters"] = preprocessing_parameters if cohort_id is not UNSET: field_dict["cohortId"] = 
cohort_id - if project_id is not UNSET: - field_dict["projectId"] = project_id - if release_results is not UNSET: - field_dict["releaseResults"] = release_results + if end_to_end_encrypted is not UNSET: + field_dict["endToEndEncrypted"] = end_to_end_encrypted + if local_input_id is not UNSET: + field_dict["localInputID"] = local_input_id + if wait is not UNSET: + field_dict["wait"] = wait if dp_policy is not UNSET: field_dict["DPPolicy"] = dp_policy + if join_id is not UNSET: + field_dict["joinId"] = join_id + if timeout is not UNSET: + field_dict["timeout"] = timeout if encrypted is not UNSET: field_dict["encrypted"] = encrypted - if local is not UNSET: - field_dict["local"] = local - if local_input_id is not UNSET: - field_dict["localInputID"] = local_input_id if owner is not UNSET: field_dict["owner"] = owner - if preprocessing_parameters is not UNSET: - field_dict["preprocessingParameters"] = preprocessing_parameters - if timeout is not UNSET: - field_dict["timeout"] = timeout + if project_id is not UNSET: + field_dict["projectId"] = project_id + if release_results is not UNSET: + field_dict["releaseResults"] = release_results if data_source_parameters is not UNSET: field_dict["dataSourceParameters"] = data_source_parameters + if input_data_object is not UNSET: + field_dict["inputDataObject"] = input_data_object if rotations is not UNSET: field_dict["rotations"] = rotations @@ -159,7 +167,7 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() type = ComputationType(d.pop("type")) - join_id = d.pop("joinId", UNSET) + local = d.pop("local", UNSET) _local_input = d.pop("localInput", UNSET) local_input: Union[Unset, LocalInput] @@ -168,15 +176,20 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: local_input = LocalInput.from_dict(_local_input) - wait = d.pop("wait", UNSET) - - input_data_object = d.pop("inputDataObject", UNSET) + _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) + preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] + if isinstance(_preprocessing_parameters, Unset): + preprocessing_parameters = UNSET + else: + preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) cohort_id = d.pop("cohortId", UNSET) - project_id = d.pop("projectId", UNSET) + end_to_end_encrypted = d.pop("endToEndEncrypted", UNSET) - release_results = d.pop("releaseResults", UNSET) + local_input_id = d.pop("localInputID", UNSET) + + wait = d.pop("wait", UNSET) _dp_policy = d.pop("DPPolicy", UNSET) dp_policy: Union[Unset, DPPolicy] @@ -185,22 +198,17 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: dp_policy = DPPolicy.from_dict(_dp_policy) - encrypted = d.pop("encrypted", UNSET) + join_id = d.pop("joinId", UNSET) - local = d.pop("local", UNSET) + timeout = d.pop("timeout", UNSET) - local_input_id = d.pop("localInputID", UNSET) + encrypted = d.pop("encrypted", UNSET) owner = d.pop("owner", UNSET) - _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) - preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] - if isinstance(_preprocessing_parameters, Unset): - preprocessing_parameters = UNSET - else: - preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) + project_id = d.pop("projectId", UNSET) - timeout = d.pop("timeout", UNSET) + release_results = d.pop("releaseResults", UNSET) _data_source_parameters = d.pop("dataSourceParameters", UNSET) data_source_parameters: Union[Unset, 
ComputationDataSourceParameters] @@ -209,6 +217,8 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: data_source_parameters = ComputationDataSourceParameters.from_dict(_data_source_parameters) + input_data_object = d.pop("inputDataObject", UNSET) + rotations = [] _rotations = d.pop("rotations", UNSET) for rotations_item_data in _rotations or []: @@ -218,21 +228,22 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: rot_key_gen = cls( type=type, - join_id=join_id, + local=local, local_input=local_input, - wait=wait, - input_data_object=input_data_object, + preprocessing_parameters=preprocessing_parameters, cohort_id=cohort_id, - project_id=project_id, - release_results=release_results, + end_to_end_encrypted=end_to_end_encrypted, + local_input_id=local_input_id, + wait=wait, dp_policy=dp_policy, + join_id=join_id, + timeout=timeout, encrypted=encrypted, - local=local, - local_input_id=local_input_id, owner=owner, - preprocessing_parameters=preprocessing_parameters, - timeout=timeout, + project_id=project_id, + release_results=release_results, data_source_parameters=data_source_parameters, + input_data_object=input_data_object, rotations=rotations, ) diff --git a/src/tuneinsight/api/sdk/models/sample_extraction.py b/src/tuneinsight/api/sdk/models/sample_extraction.py index 86b64b7..e8a7b4f 100644 --- a/src/tuneinsight/api/sdk/models/sample_extraction.py +++ b/src/tuneinsight/api/sdk/models/sample_extraction.py @@ -20,87 +20,93 @@ class SampleExtraction: """ Attributes: type (ComputationType): Type of the computation. - join_id (Union[Unset, str]): Unique identifier of a data object. + local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured + the network) local_input (Union[Unset, LocalInput]): If a local input is provided, the node initiating the computation will use it instead of querying the datasource. This data is *not* shared to other nodes, only used for the duration of the computation. The local input columns/values must be in the form {: [, , ...], ...} - wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. - input_data_object (Union[Unset, str]): Shared identifier of a data object. + preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters + applied to the input retrieved from the datasource, if applicable cohort_id (Union[Unset, str]): Unique identifier of a data object. + end_to_end_encrypted (Union[Unset, bool]): if the end to end encrypted mode is set to true, + then when release results is set to true and the output + is initially encrypted with a network collective key, then it is key switched to + the initiating user's public key. + local_input_id (Union[Unset, str]): Unique identifier of a data object. + wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. + dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various + disclosure prevention mechanisms + join_id (Union[Unset, str]): Unique identifier of a data object. + timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. + encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. + owner (Union[Unset, str]): The username of the end user who requested the computation. project_id (Union[Unset, str]): Unique identifier of a project. 
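One more behaviour the reordering leaves untouched: after from_dict has popped every key it knows about, whatever remains in the source dict is kept on additional_properties (the `rot_key_gen.additional_properties = d` line above), and to_dict merges it back in before the known fields, so unmodelled server-side keys survive a round trip. A rough sketch of that pass-through, with the seed / sampleSize names taken from the SampleExtraction docstring and futureField standing in for an unknown key:

```python
from typing import Any, Dict

UNSET = object()  # simple sentinel for this sketch


class SampleExtractionSketch:
    """Toy stand-in illustrating the additional_properties pass-through."""

    def __init__(self, seed=UNSET, sample_size=UNSET):
        self.seed = seed
        self.sample_size = sample_size
        self.additional_properties: Dict[str, Any] = {}

    def to_dict(self) -> Dict[str, Any]:
        field_dict: Dict[str, Any] = {}
        # Unknown keys captured at parse time are re-emitted as-is.
        field_dict.update(self.additional_properties)
        if self.seed is not UNSET:
            field_dict["seed"] = self.seed
        if self.sample_size is not UNSET:
            field_dict["sampleSize"] = self.sample_size
        return field_dict

    @classmethod
    def from_dict(cls, src: Dict[str, Any]) -> "SampleExtractionSketch":
        d = src.copy()
        obj = cls(seed=d.pop("seed", UNSET), sample_size=d.pop("sampleSize", UNSET))
        obj.additional_properties = d  # whatever the server sent that the model does not know
        return obj


payload = {"seed": "abc", "sampleSize": 100, "futureField": True}
obj = SampleExtractionSketch.from_dict(payload)
assert obj.additional_properties == {"futureField": True}
assert obj.to_dict() == payload
```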
release_results (Union[Unset, bool]): flag to set to true if the computation should directly release the output results. If set, then encrypted results are automatically key switched and decrypted and a Result entity is saved - dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various - disclosure prevention mechanisms - encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. - local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured - the network) - local_input_id (Union[Unset, str]): Unique identifier of a data object. - owner (Union[Unset, str]): The username of the end user who requested the computation. - preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters - applied to the input retrieved from the datasource, if applicable - timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. data_source_parameters (Union[Unset, ComputationDataSourceParameters]): Parameters used to query the datasource from each node before the computation - sample_size (Union[Unset, int]): size of the sample as number of rows + input_data_object (Union[Unset, str]): Shared identifier of a data object. seed (Union[Unset, str]): seed to use for the sampling + sample_size (Union[Unset, int]): size of the sample as number of rows """ type: ComputationType - join_id: Union[Unset, str] = UNSET + local: Union[Unset, bool] = UNSET local_input: Union[Unset, "LocalInput"] = UNSET - wait: Union[Unset, bool] = UNSET - input_data_object: Union[Unset, str] = UNSET + preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET cohort_id: Union[Unset, str] = UNSET - project_id: Union[Unset, str] = UNSET - release_results: Union[Unset, bool] = UNSET + end_to_end_encrypted: Union[Unset, bool] = UNSET + local_input_id: Union[Unset, str] = UNSET + wait: Union[Unset, bool] = UNSET dp_policy: Union[Unset, "DPPolicy"] = UNSET + join_id: Union[Unset, str] = UNSET + timeout: Union[Unset, int] = UNSET encrypted: Union[Unset, bool] = UNSET - local: Union[Unset, bool] = UNSET - local_input_id: Union[Unset, str] = UNSET owner: Union[Unset, str] = UNSET - preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET - timeout: Union[Unset, int] = UNSET + project_id: Union[Unset, str] = UNSET + release_results: Union[Unset, bool] = UNSET data_source_parameters: Union[Unset, "ComputationDataSourceParameters"] = UNSET - sample_size: Union[Unset, int] = UNSET + input_data_object: Union[Unset, str] = UNSET seed: Union[Unset, str] = UNSET + sample_size: Union[Unset, int] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: type = self.type.value - join_id = self.join_id + local = self.local local_input: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.local_input, Unset): local_input = self.local_input.to_dict() - wait = self.wait - input_data_object = self.input_data_object + preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.preprocessing_parameters, Unset): + preprocessing_parameters = self.preprocessing_parameters.to_dict() + cohort_id = self.cohort_id - project_id = self.project_id - release_results = self.release_results + end_to_end_encrypted = self.end_to_end_encrypted + local_input_id = self.local_input_id + wait = 
self.wait dp_policy: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.dp_policy, Unset): dp_policy = self.dp_policy.to_dict() + join_id = self.join_id + timeout = self.timeout encrypted = self.encrypted - local = self.local - local_input_id = self.local_input_id owner = self.owner - preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.preprocessing_parameters, Unset): - preprocessing_parameters = self.preprocessing_parameters.to_dict() - - timeout = self.timeout + project_id = self.project_id + release_results = self.release_results data_source_parameters: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.data_source_parameters, Unset): data_source_parameters = self.data_source_parameters.to_dict() - sample_size = self.sample_size + input_data_object = self.input_data_object seed = self.seed + sample_size = self.sample_size field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) @@ -109,40 +115,42 @@ def to_dict(self) -> Dict[str, Any]: "type": type, } ) - if join_id is not UNSET: - field_dict["joinId"] = join_id + if local is not UNSET: + field_dict["local"] = local if local_input is not UNSET: field_dict["localInput"] = local_input - if wait is not UNSET: - field_dict["wait"] = wait - if input_data_object is not UNSET: - field_dict["inputDataObject"] = input_data_object + if preprocessing_parameters is not UNSET: + field_dict["preprocessingParameters"] = preprocessing_parameters if cohort_id is not UNSET: field_dict["cohortId"] = cohort_id - if project_id is not UNSET: - field_dict["projectId"] = project_id - if release_results is not UNSET: - field_dict["releaseResults"] = release_results + if end_to_end_encrypted is not UNSET: + field_dict["endToEndEncrypted"] = end_to_end_encrypted + if local_input_id is not UNSET: + field_dict["localInputID"] = local_input_id + if wait is not UNSET: + field_dict["wait"] = wait if dp_policy is not UNSET: field_dict["DPPolicy"] = dp_policy + if join_id is not UNSET: + field_dict["joinId"] = join_id + if timeout is not UNSET: + field_dict["timeout"] = timeout if encrypted is not UNSET: field_dict["encrypted"] = encrypted - if local is not UNSET: - field_dict["local"] = local - if local_input_id is not UNSET: - field_dict["localInputID"] = local_input_id if owner is not UNSET: field_dict["owner"] = owner - if preprocessing_parameters is not UNSET: - field_dict["preprocessingParameters"] = preprocessing_parameters - if timeout is not UNSET: - field_dict["timeout"] = timeout + if project_id is not UNSET: + field_dict["projectId"] = project_id + if release_results is not UNSET: + field_dict["releaseResults"] = release_results if data_source_parameters is not UNSET: field_dict["dataSourceParameters"] = data_source_parameters - if sample_size is not UNSET: - field_dict["sampleSize"] = sample_size + if input_data_object is not UNSET: + field_dict["inputDataObject"] = input_data_object if seed is not UNSET: field_dict["seed"] = seed + if sample_size is not UNSET: + field_dict["sampleSize"] = sample_size return field_dict @@ -156,7 +164,7 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() type = ComputationType(d.pop("type")) - join_id = d.pop("joinId", UNSET) + local = d.pop("local", UNSET) _local_input = d.pop("localInput", UNSET) local_input: Union[Unset, LocalInput] @@ -165,15 +173,20 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: local_input = LocalInput.from_dict(_local_input) - wait = d.pop("wait", UNSET) - - 
input_data_object = d.pop("inputDataObject", UNSET) + _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) + preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] + if isinstance(_preprocessing_parameters, Unset): + preprocessing_parameters = UNSET + else: + preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) cohort_id = d.pop("cohortId", UNSET) - project_id = d.pop("projectId", UNSET) + end_to_end_encrypted = d.pop("endToEndEncrypted", UNSET) - release_results = d.pop("releaseResults", UNSET) + local_input_id = d.pop("localInputID", UNSET) + + wait = d.pop("wait", UNSET) _dp_policy = d.pop("DPPolicy", UNSET) dp_policy: Union[Unset, DPPolicy] @@ -182,22 +195,17 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: dp_policy = DPPolicy.from_dict(_dp_policy) - encrypted = d.pop("encrypted", UNSET) + join_id = d.pop("joinId", UNSET) - local = d.pop("local", UNSET) + timeout = d.pop("timeout", UNSET) - local_input_id = d.pop("localInputID", UNSET) + encrypted = d.pop("encrypted", UNSET) owner = d.pop("owner", UNSET) - _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) - preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] - if isinstance(_preprocessing_parameters, Unset): - preprocessing_parameters = UNSET - else: - preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) + project_id = d.pop("projectId", UNSET) - timeout = d.pop("timeout", UNSET) + release_results = d.pop("releaseResults", UNSET) _data_source_parameters = d.pop("dataSourceParameters", UNSET) data_source_parameters: Union[Unset, ComputationDataSourceParameters] @@ -206,29 +214,32 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: data_source_parameters = ComputationDataSourceParameters.from_dict(_data_source_parameters) - sample_size = d.pop("sampleSize", UNSET) + input_data_object = d.pop("inputDataObject", UNSET) seed = d.pop("seed", UNSET) + sample_size = d.pop("sampleSize", UNSET) + sample_extraction = cls( type=type, - join_id=join_id, + local=local, local_input=local_input, - wait=wait, - input_data_object=input_data_object, + preprocessing_parameters=preprocessing_parameters, cohort_id=cohort_id, - project_id=project_id, - release_results=release_results, + end_to_end_encrypted=end_to_end_encrypted, + local_input_id=local_input_id, + wait=wait, dp_policy=dp_policy, + join_id=join_id, + timeout=timeout, encrypted=encrypted, - local=local, - local_input_id=local_input_id, owner=owner, - preprocessing_parameters=preprocessing_parameters, - timeout=timeout, + project_id=project_id, + release_results=release_results, data_source_parameters=data_source_parameters, - sample_size=sample_size, + input_data_object=input_data_object, seed=seed, + sample_size=sample_size, ) sample_extraction.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/set_index.py b/src/tuneinsight/api/sdk/models/set_index.py index d967ee2..b091bef 100644 --- a/src/tuneinsight/api/sdk/models/set_index.py +++ b/src/tuneinsight/api/sdk/models/set_index.py @@ -13,26 +13,26 @@ class SetIndex: """ Attributes: type (PreprocessingOperationType): type of preprocessing operation - append (Union[Unset, bool]): Whether to append columns to existing index cols (Union[Unset, List[str]]): column(s) to use as index drop (Union[Unset, bool]): Delete columns to be used as the new index + append (Union[Unset, bool]): Whether to append columns to existing index """ 
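The only way an attribute reorder like this could bite is positional construction; the generated from_dict constructors above pass every field by keyword, so moving append behind cols and drop in SetIndex is behaviour-preserving. A small attrs-based sketch of why keyword construction is order-proof (the type field is omitted for brevity, and these are toy classes, not the generated ones):

```python
import attr


@attr.s(auto_attribs=True)
class SetIndexOld:
    """Pre-patch field order (toy version)."""
    append: bool = False
    cols: list = attr.ib(factory=list)
    drop: bool = False


@attr.s(auto_attribs=True)
class SetIndexNew:
    """Post-patch field order: cols and drop now precede append."""
    cols: list = attr.ib(factory=list)
    drop: bool = False
    append: bool = False


kwargs = {"cols": ["patient_id"], "drop": True, "append": False}
# Keyword construction yields identical objects regardless of declaration order.
assert attr.asdict(SetIndexOld(**kwargs)) == attr.asdict(SetIndexNew(**kwargs))
```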
type: PreprocessingOperationType - append: Union[Unset, bool] = UNSET cols: Union[Unset, List[str]] = UNSET drop: Union[Unset, bool] = UNSET + append: Union[Unset, bool] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: type = self.type.value - append = self.append cols: Union[Unset, List[str]] = UNSET if not isinstance(self.cols, Unset): cols = self.cols drop = self.drop + append = self.append field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) @@ -41,12 +41,12 @@ def to_dict(self) -> Dict[str, Any]: "type": type, } ) - if append is not UNSET: - field_dict["append"] = append if cols is not UNSET: field_dict["cols"] = cols if drop is not UNSET: field_dict["drop"] = drop + if append is not UNSET: + field_dict["append"] = append return field_dict @@ -55,17 +55,17 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() type = PreprocessingOperationType(d.pop("type")) - append = d.pop("append", UNSET) - cols = cast(List[str], d.pop("cols", UNSET)) drop = d.pop("drop", UNSET) + append = d.pop("append", UNSET) + set_index = cls( type=type, - append=append, cols=cols, drop=drop, + append=append, ) set_index.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/set_intersection.py b/src/tuneinsight/api/sdk/models/set_intersection.py index 462ac69..7503300 100644 --- a/src/tuneinsight/api/sdk/models/set_intersection.py +++ b/src/tuneinsight/api/sdk/models/set_intersection.py @@ -22,92 +22,99 @@ class SetIntersection: """ Attributes: type (ComputationType): Type of the computation. - join_id (Union[Unset, str]): Unique identifier of a data object. + local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured + the network) local_input (Union[Unset, LocalInput]): If a local input is provided, the node initiating the computation will use it instead of querying the datasource. This data is *not* shared to other nodes, only used for the duration of the computation. The local input columns/values must be in the form {: [, , ...], ...} - wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. - input_data_object (Union[Unset, str]): Shared identifier of a data object. + preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters + applied to the input retrieved from the datasource, if applicable cohort_id (Union[Unset, str]): Unique identifier of a data object. + end_to_end_encrypted (Union[Unset, bool]): if the end to end encrypted mode is set to true, + then when release results is set to true and the output + is initially encrypted with a network collective key, then it is key switched to + the initiating user's public key. + local_input_id (Union[Unset, str]): Unique identifier of a data object. + wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. + dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various + disclosure prevention mechanisms + join_id (Union[Unset, str]): Unique identifier of a data object. + timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. + encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. + owner (Union[Unset, str]): The username of the end user who requested the computation. 
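Enum-typed fields (result_format here, axis in Rename, the computation type everywhere) get one extra step: the popped value is coerced back into the enum class only when the key was present; otherwise the attribute stays UNSET. A compact sketch of that guard, with placeholder member names since the real values of SetIntersectionOutputFormat are not shown in this hunk:

```python
from enum import Enum
from typing import Any, Dict, Union


class Unset:
    """Sentinel type for absent fields."""


UNSET = Unset()


class OutputFormat(str, Enum):
    """Toy enum standing in for SetIntersectionOutputFormat; values are invented."""
    TABLE = "table"
    MATCHES = "matches"


def parse_result_format(src: Dict[str, Any]) -> Union[Unset, OutputFormat]:
    d = src.copy()
    _result_format = d.pop("resultFormat", UNSET)
    # Same guard the generated from_dict uses: only coerce when the key existed.
    if isinstance(_result_format, Unset):
        return UNSET
    return OutputFormat(_result_format)


assert parse_result_format({}) is UNSET
assert parse_result_format({"resultFormat": "table"}) is OutputFormat.TABLE
```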
project_id (Union[Unset, str]): Unique identifier of a project. release_results (Union[Unset, bool]): flag to set to true if the computation should directly release the output results. If set, then encrypted results are automatically key switched and decrypted and a Result entity is saved - dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various - disclosure prevention mechanisms - encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. - local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured - the network) - local_input_id (Union[Unset, str]): Unique identifier of a data object. - owner (Union[Unset, str]): The username of the end user who requested the computation. - preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters - applied to the input retrieved from the datasource, if applicable - timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. data_source_parameters (Union[Unset, ComputationDataSourceParameters]): Parameters used to query the datasource from each node before the computation + input_data_object (Union[Unset, str]): Shared identifier of a data object. + encrypted_results (Union[Unset, bool]): if true, then the resulting matches are kept encrypted fuzzy_params (Union[Unset, FuzzyMatchingParameters]): hide_matching_origin (Union[Unset, bool]): if true, then the matches are aggregated before being decrypted, hiding the organizations with whom the items matched. matching_columns (Union[Unset, List[str]]): The columns on which the data should be matched result_format (Union[Unset, SetIntersectionOutputFormat]): - encrypted_results (Union[Unset, bool]): if true, then the resulting matches are kept encrypted """ type: ComputationType - join_id: Union[Unset, str] = UNSET + local: Union[Unset, bool] = UNSET local_input: Union[Unset, "LocalInput"] = UNSET - wait: Union[Unset, bool] = UNSET - input_data_object: Union[Unset, str] = UNSET + preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET cohort_id: Union[Unset, str] = UNSET - project_id: Union[Unset, str] = UNSET - release_results: Union[Unset, bool] = UNSET + end_to_end_encrypted: Union[Unset, bool] = UNSET + local_input_id: Union[Unset, str] = UNSET + wait: Union[Unset, bool] = UNSET dp_policy: Union[Unset, "DPPolicy"] = UNSET + join_id: Union[Unset, str] = UNSET + timeout: Union[Unset, int] = UNSET encrypted: Union[Unset, bool] = UNSET - local: Union[Unset, bool] = UNSET - local_input_id: Union[Unset, str] = UNSET owner: Union[Unset, str] = UNSET - preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET - timeout: Union[Unset, int] = UNSET + project_id: Union[Unset, str] = UNSET + release_results: Union[Unset, bool] = UNSET data_source_parameters: Union[Unset, "ComputationDataSourceParameters"] = UNSET + input_data_object: Union[Unset, str] = UNSET + encrypted_results: Union[Unset, bool] = UNSET fuzzy_params: Union[Unset, "FuzzyMatchingParameters"] = UNSET hide_matching_origin: Union[Unset, bool] = UNSET matching_columns: Union[Unset, List[str]] = UNSET result_format: Union[Unset, SetIntersectionOutputFormat] = UNSET - encrypted_results: Union[Unset, bool] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: type = self.type.value - 
join_id = self.join_id + local = self.local local_input: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.local_input, Unset): local_input = self.local_input.to_dict() - wait = self.wait - input_data_object = self.input_data_object + preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.preprocessing_parameters, Unset): + preprocessing_parameters = self.preprocessing_parameters.to_dict() + cohort_id = self.cohort_id - project_id = self.project_id - release_results = self.release_results + end_to_end_encrypted = self.end_to_end_encrypted + local_input_id = self.local_input_id + wait = self.wait dp_policy: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.dp_policy, Unset): dp_policy = self.dp_policy.to_dict() + join_id = self.join_id + timeout = self.timeout encrypted = self.encrypted - local = self.local - local_input_id = self.local_input_id owner = self.owner - preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.preprocessing_parameters, Unset): - preprocessing_parameters = self.preprocessing_parameters.to_dict() - - timeout = self.timeout + project_id = self.project_id + release_results = self.release_results data_source_parameters: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.data_source_parameters, Unset): data_source_parameters = self.data_source_parameters.to_dict() + input_data_object = self.input_data_object + encrypted_results = self.encrypted_results fuzzy_params: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.fuzzy_params, Unset): fuzzy_params = self.fuzzy_params.to_dict() @@ -121,8 +128,6 @@ def to_dict(self) -> Dict[str, Any]: if not isinstance(self.result_format, Unset): result_format = self.result_format.value - encrypted_results = self.encrypted_results - field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update( @@ -130,36 +135,40 @@ def to_dict(self) -> Dict[str, Any]: "type": type, } ) - if join_id is not UNSET: - field_dict["joinId"] = join_id + if local is not UNSET: + field_dict["local"] = local if local_input is not UNSET: field_dict["localInput"] = local_input - if wait is not UNSET: - field_dict["wait"] = wait - if input_data_object is not UNSET: - field_dict["inputDataObject"] = input_data_object + if preprocessing_parameters is not UNSET: + field_dict["preprocessingParameters"] = preprocessing_parameters if cohort_id is not UNSET: field_dict["cohortId"] = cohort_id - if project_id is not UNSET: - field_dict["projectId"] = project_id - if release_results is not UNSET: - field_dict["releaseResults"] = release_results + if end_to_end_encrypted is not UNSET: + field_dict["endToEndEncrypted"] = end_to_end_encrypted + if local_input_id is not UNSET: + field_dict["localInputID"] = local_input_id + if wait is not UNSET: + field_dict["wait"] = wait if dp_policy is not UNSET: field_dict["DPPolicy"] = dp_policy + if join_id is not UNSET: + field_dict["joinId"] = join_id + if timeout is not UNSET: + field_dict["timeout"] = timeout if encrypted is not UNSET: field_dict["encrypted"] = encrypted - if local is not UNSET: - field_dict["local"] = local - if local_input_id is not UNSET: - field_dict["localInputID"] = local_input_id if owner is not UNSET: field_dict["owner"] = owner - if preprocessing_parameters is not UNSET: - field_dict["preprocessingParameters"] = preprocessing_parameters - if timeout is not UNSET: - field_dict["timeout"] = timeout + if project_id is not UNSET: + field_dict["projectId"] = project_id + if 
release_results is not UNSET: + field_dict["releaseResults"] = release_results if data_source_parameters is not UNSET: field_dict["dataSourceParameters"] = data_source_parameters + if input_data_object is not UNSET: + field_dict["inputDataObject"] = input_data_object + if encrypted_results is not UNSET: + field_dict["encryptedResults"] = encrypted_results if fuzzy_params is not UNSET: field_dict["fuzzyParams"] = fuzzy_params if hide_matching_origin is not UNSET: @@ -168,8 +177,6 @@ def to_dict(self) -> Dict[str, Any]: field_dict["matchingColumns"] = matching_columns if result_format is not UNSET: field_dict["resultFormat"] = result_format - if encrypted_results is not UNSET: - field_dict["encryptedResults"] = encrypted_results return field_dict @@ -184,7 +191,7 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() type = ComputationType(d.pop("type")) - join_id = d.pop("joinId", UNSET) + local = d.pop("local", UNSET) _local_input = d.pop("localInput", UNSET) local_input: Union[Unset, LocalInput] @@ -193,15 +200,20 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: local_input = LocalInput.from_dict(_local_input) - wait = d.pop("wait", UNSET) - - input_data_object = d.pop("inputDataObject", UNSET) + _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) + preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] + if isinstance(_preprocessing_parameters, Unset): + preprocessing_parameters = UNSET + else: + preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) cohort_id = d.pop("cohortId", UNSET) - project_id = d.pop("projectId", UNSET) + end_to_end_encrypted = d.pop("endToEndEncrypted", UNSET) - release_results = d.pop("releaseResults", UNSET) + local_input_id = d.pop("localInputID", UNSET) + + wait = d.pop("wait", UNSET) _dp_policy = d.pop("DPPolicy", UNSET) dp_policy: Union[Unset, DPPolicy] @@ -210,22 +222,17 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: dp_policy = DPPolicy.from_dict(_dp_policy) - encrypted = d.pop("encrypted", UNSET) + join_id = d.pop("joinId", UNSET) - local = d.pop("local", UNSET) + timeout = d.pop("timeout", UNSET) - local_input_id = d.pop("localInputID", UNSET) + encrypted = d.pop("encrypted", UNSET) owner = d.pop("owner", UNSET) - _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) - preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] - if isinstance(_preprocessing_parameters, Unset): - preprocessing_parameters = UNSET - else: - preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) + project_id = d.pop("projectId", UNSET) - timeout = d.pop("timeout", UNSET) + release_results = d.pop("releaseResults", UNSET) _data_source_parameters = d.pop("dataSourceParameters", UNSET) data_source_parameters: Union[Unset, ComputationDataSourceParameters] @@ -234,6 +241,10 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: data_source_parameters = ComputationDataSourceParameters.from_dict(_data_source_parameters) + input_data_object = d.pop("inputDataObject", UNSET) + + encrypted_results = d.pop("encryptedResults", UNSET) + _fuzzy_params = d.pop("fuzzyParams", UNSET) fuzzy_params: Union[Unset, FuzzyMatchingParameters] if isinstance(_fuzzy_params, Unset): @@ -252,30 +263,29 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: result_format = SetIntersectionOutputFormat(_result_format) - encrypted_results = 
d.pop("encryptedResults", UNSET) - set_intersection = cls( type=type, - join_id=join_id, + local=local, local_input=local_input, - wait=wait, - input_data_object=input_data_object, + preprocessing_parameters=preprocessing_parameters, cohort_id=cohort_id, - project_id=project_id, - release_results=release_results, + end_to_end_encrypted=end_to_end_encrypted, + local_input_id=local_input_id, + wait=wait, dp_policy=dp_policy, + join_id=join_id, + timeout=timeout, encrypted=encrypted, - local=local, - local_input_id=local_input_id, owner=owner, - preprocessing_parameters=preprocessing_parameters, - timeout=timeout, + project_id=project_id, + release_results=release_results, data_source_parameters=data_source_parameters, + input_data_object=input_data_object, + encrypted_results=encrypted_results, fuzzy_params=fuzzy_params, hide_matching_origin=hide_matching_origin, matching_columns=matching_columns, result_format=result_format, - encrypted_results=encrypted_results, ) set_intersection.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/setup_session.py b/src/tuneinsight/api/sdk/models/setup_session.py index 8fe13aa..5e2b6da 100644 --- a/src/tuneinsight/api/sdk/models/setup_session.py +++ b/src/tuneinsight/api/sdk/models/setup_session.py @@ -21,52 +21,57 @@ class SetupSession: """ Attributes: type (ComputationType): Type of the computation. - join_id (Union[Unset, str]): Unique identifier of a data object. + local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured + the network) local_input (Union[Unset, LocalInput]): If a local input is provided, the node initiating the computation will use it instead of querying the datasource. This data is *not* shared to other nodes, only used for the duration of the computation. The local input columns/values must be in the form {: [, , ...], ...} - wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. - input_data_object (Union[Unset, str]): Shared identifier of a data object. + preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters + applied to the input retrieved from the datasource, if applicable cohort_id (Union[Unset, str]): Unique identifier of a data object. + end_to_end_encrypted (Union[Unset, bool]): if the end to end encrypted mode is set to true, + then when release results is set to true and the output + is initially encrypted with a network collective key, then it is key switched to + the initiating user's public key. + local_input_id (Union[Unset, str]): Unique identifier of a data object. + wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. + dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various + disclosure prevention mechanisms + join_id (Union[Unset, str]): Unique identifier of a data object. + timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. + encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. + owner (Union[Unset, str]): The username of the end user who requested the computation. project_id (Union[Unset, str]): Unique identifier of a project. release_results (Union[Unset, bool]): flag to set to true if the computation should directly release the output results. 
If set, then encrypted results are automatically key switched and decrypted and a Result entity is saved - dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various - disclosure prevention mechanisms - encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. - local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured - the network) - local_input_id (Union[Unset, str]): Unique identifier of a data object. - owner (Union[Unset, str]): The username of the end user who requested the computation. - preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters - applied to the input retrieved from the datasource, if applicable - timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. data_source_parameters (Union[Unset, ComputationDataSourceParameters]): Parameters used to query the datasource from each node before the computation + input_data_object (Union[Unset, str]): Shared identifier of a data object. relinearization_key (Union[Unset, bool]): whether or not to generate the relinearization key target_computation (Union[Unset, ComputationDefinition]): Generic computation. target_scheme_context (Union[Unset, str]): base64 encoded scheme context """ type: ComputationType - join_id: Union[Unset, str] = UNSET + local: Union[Unset, bool] = UNSET local_input: Union[Unset, "LocalInput"] = UNSET - wait: Union[Unset, bool] = UNSET - input_data_object: Union[Unset, str] = UNSET + preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET cohort_id: Union[Unset, str] = UNSET - project_id: Union[Unset, str] = UNSET - release_results: Union[Unset, bool] = UNSET + end_to_end_encrypted: Union[Unset, bool] = UNSET + local_input_id: Union[Unset, str] = UNSET + wait: Union[Unset, bool] = UNSET dp_policy: Union[Unset, "DPPolicy"] = UNSET + join_id: Union[Unset, str] = UNSET + timeout: Union[Unset, int] = UNSET encrypted: Union[Unset, bool] = UNSET - local: Union[Unset, bool] = UNSET - local_input_id: Union[Unset, str] = UNSET owner: Union[Unset, str] = UNSET - preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET - timeout: Union[Unset, int] = UNSET + project_id: Union[Unset, str] = UNSET + release_results: Union[Unset, bool] = UNSET data_source_parameters: Union[Unset, "ComputationDataSourceParameters"] = UNSET + input_data_object: Union[Unset, str] = UNSET relinearization_key: Union[Unset, bool] = UNSET target_computation: Union[Unset, "ComputationDefinition"] = UNSET target_scheme_context: Union[Unset, str] = UNSET @@ -75,33 +80,34 @@ class SetupSession: def to_dict(self) -> Dict[str, Any]: type = self.type.value - join_id = self.join_id + local = self.local local_input: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.local_input, Unset): local_input = self.local_input.to_dict() - wait = self.wait - input_data_object = self.input_data_object + preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.preprocessing_parameters, Unset): + preprocessing_parameters = self.preprocessing_parameters.to_dict() + cohort_id = self.cohort_id - project_id = self.project_id - release_results = self.release_results + end_to_end_encrypted = self.end_to_end_encrypted + local_input_id = self.local_input_id + wait = self.wait dp_policy: Union[Unset, Dict[str, Any]] = UNSET if not 
isinstance(self.dp_policy, Unset): dp_policy = self.dp_policy.to_dict() + join_id = self.join_id + timeout = self.timeout encrypted = self.encrypted - local = self.local - local_input_id = self.local_input_id owner = self.owner - preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.preprocessing_parameters, Unset): - preprocessing_parameters = self.preprocessing_parameters.to_dict() - - timeout = self.timeout + project_id = self.project_id + release_results = self.release_results data_source_parameters: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.data_source_parameters, Unset): data_source_parameters = self.data_source_parameters.to_dict() + input_data_object = self.input_data_object relinearization_key = self.relinearization_key target_computation: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.target_computation, Unset): @@ -116,36 +122,38 @@ def to_dict(self) -> Dict[str, Any]: "type": type, } ) - if join_id is not UNSET: - field_dict["joinId"] = join_id + if local is not UNSET: + field_dict["local"] = local if local_input is not UNSET: field_dict["localInput"] = local_input - if wait is not UNSET: - field_dict["wait"] = wait - if input_data_object is not UNSET: - field_dict["inputDataObject"] = input_data_object + if preprocessing_parameters is not UNSET: + field_dict["preprocessingParameters"] = preprocessing_parameters if cohort_id is not UNSET: field_dict["cohortId"] = cohort_id - if project_id is not UNSET: - field_dict["projectId"] = project_id - if release_results is not UNSET: - field_dict["releaseResults"] = release_results + if end_to_end_encrypted is not UNSET: + field_dict["endToEndEncrypted"] = end_to_end_encrypted + if local_input_id is not UNSET: + field_dict["localInputID"] = local_input_id + if wait is not UNSET: + field_dict["wait"] = wait if dp_policy is not UNSET: field_dict["DPPolicy"] = dp_policy + if join_id is not UNSET: + field_dict["joinId"] = join_id + if timeout is not UNSET: + field_dict["timeout"] = timeout if encrypted is not UNSET: field_dict["encrypted"] = encrypted - if local is not UNSET: - field_dict["local"] = local - if local_input_id is not UNSET: - field_dict["localInputID"] = local_input_id if owner is not UNSET: field_dict["owner"] = owner - if preprocessing_parameters is not UNSET: - field_dict["preprocessingParameters"] = preprocessing_parameters - if timeout is not UNSET: - field_dict["timeout"] = timeout + if project_id is not UNSET: + field_dict["projectId"] = project_id + if release_results is not UNSET: + field_dict["releaseResults"] = release_results if data_source_parameters is not UNSET: field_dict["dataSourceParameters"] = data_source_parameters + if input_data_object is not UNSET: + field_dict["inputDataObject"] = input_data_object if relinearization_key is not UNSET: field_dict["relinearizationKey"] = relinearization_key if target_computation is not UNSET: @@ -166,7 +174,7 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() type = ComputationType(d.pop("type")) - join_id = d.pop("joinId", UNSET) + local = d.pop("local", UNSET) _local_input = d.pop("localInput", UNSET) local_input: Union[Unset, LocalInput] @@ -175,15 +183,20 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: local_input = LocalInput.from_dict(_local_input) - wait = d.pop("wait", UNSET) - - input_data_object = d.pop("inputDataObject", UNSET) + _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) + preprocessing_parameters: Union[Unset, 
ComputationPreprocessingParameters] + if isinstance(_preprocessing_parameters, Unset): + preprocessing_parameters = UNSET + else: + preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) cohort_id = d.pop("cohortId", UNSET) - project_id = d.pop("projectId", UNSET) + end_to_end_encrypted = d.pop("endToEndEncrypted", UNSET) - release_results = d.pop("releaseResults", UNSET) + local_input_id = d.pop("localInputID", UNSET) + + wait = d.pop("wait", UNSET) _dp_policy = d.pop("DPPolicy", UNSET) dp_policy: Union[Unset, DPPolicy] @@ -192,22 +205,17 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: dp_policy = DPPolicy.from_dict(_dp_policy) - encrypted = d.pop("encrypted", UNSET) + join_id = d.pop("joinId", UNSET) - local = d.pop("local", UNSET) + timeout = d.pop("timeout", UNSET) - local_input_id = d.pop("localInputID", UNSET) + encrypted = d.pop("encrypted", UNSET) owner = d.pop("owner", UNSET) - _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) - preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] - if isinstance(_preprocessing_parameters, Unset): - preprocessing_parameters = UNSET - else: - preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) + project_id = d.pop("projectId", UNSET) - timeout = d.pop("timeout", UNSET) + release_results = d.pop("releaseResults", UNSET) _data_source_parameters = d.pop("dataSourceParameters", UNSET) data_source_parameters: Union[Unset, ComputationDataSourceParameters] @@ -216,6 +224,8 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: data_source_parameters = ComputationDataSourceParameters.from_dict(_data_source_parameters) + input_data_object = d.pop("inputDataObject", UNSET) + relinearization_key = d.pop("relinearizationKey", UNSET) _target_computation = d.pop("targetComputation", UNSET) @@ -229,21 +239,22 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: setup_session = cls( type=type, - join_id=join_id, + local=local, local_input=local_input, - wait=wait, - input_data_object=input_data_object, + preprocessing_parameters=preprocessing_parameters, cohort_id=cohort_id, - project_id=project_id, - release_results=release_results, + end_to_end_encrypted=end_to_end_encrypted, + local_input_id=local_input_id, + wait=wait, dp_policy=dp_policy, + join_id=join_id, + timeout=timeout, encrypted=encrypted, - local=local, - local_input_id=local_input_id, owner=owner, - preprocessing_parameters=preprocessing_parameters, - timeout=timeout, + project_id=project_id, + release_results=release_results, data_source_parameters=data_source_parameters, + input_data_object=input_data_object, relinearization_key=relinearization_key, target_computation=target_computation, target_scheme_context=target_scheme_context, diff --git a/src/tuneinsight/api/sdk/models/statistic_definition.py b/src/tuneinsight/api/sdk/models/statistic_definition.py index d20395a..933e7bd 100644 --- a/src/tuneinsight/api/sdk/models/statistic_definition.py +++ b/src/tuneinsight/api/sdk/models/statistic_definition.py @@ -19,20 +19,20 @@ class StatisticDefinition: filter_ (Union[Unset, Filter]): name (Union[Unset, str]): given name of the statistic variable (Union[Unset, str]): target variable in the dataset from the which the statistic is computed + max_bound (Union[Unset, float]): specified maximum bound on the variable for sorting Default: 1.0. 
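# --- Editor's illustrative sketch (not part of the generated diff) ----------
# The SetupSession model above keeps the shared computation fields and adds
# relinearizationKey / targetComputation / targetSchemeContext. A minimal
# request body using the camelCase keys from setup_session.py could look like
# the dict below; the "type" string is a placeholder, since the concrete
# ComputationType values are not shown in this diff, so the body is kept as a
# plain dict instead of a SetupSession instance.
setup_session_body = {
    "type": "<ComputationType value for the setup session>",
    "wait": True,
    "timeout": 600,
    "relinearizationKey": True,
    "targetSchemeContext": "<base64-encoded scheme context>",
}
# ----------------------------------------------------------------------------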
+ min_bound (Union[Unset, float]): specified minimum bound on the variable for sorting quantiles_k_value (Union[Unset, int]): k value used to determine the number of quantiles that are returned quantities (Union[Unset, List[StatisticalQuantity]]): if specified only compute the quantities given in this list if not specified all relevant statistics are computed - max_bound (Union[Unset, float]): specified maximum bound on the variable for sorting Default: 1.0. - min_bound (Union[Unset, float]): specified minimum bound on the variable for sorting """ filter_: Union[Unset, "Filter"] = UNSET name: Union[Unset, str] = UNSET variable: Union[Unset, str] = UNSET - quantiles_k_value: Union[Unset, int] = UNSET - quantities: Union[Unset, List[StatisticalQuantity]] = UNSET max_bound: Union[Unset, float] = 1.0 min_bound: Union[Unset, float] = 0.0 + quantiles_k_value: Union[Unset, int] = UNSET + quantities: Union[Unset, List[StatisticalQuantity]] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: @@ -42,6 +42,8 @@ def to_dict(self) -> Dict[str, Any]: name = self.name variable = self.variable + max_bound = self.max_bound + min_bound = self.min_bound quantiles_k_value = self.quantiles_k_value quantities: Union[Unset, List[str]] = UNSET if not isinstance(self.quantities, Unset): @@ -51,9 +53,6 @@ def to_dict(self) -> Dict[str, Any]: quantities.append(quantities_item) - max_bound = self.max_bound - min_bound = self.min_bound - field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) @@ -63,14 +62,14 @@ def to_dict(self) -> Dict[str, Any]: field_dict["name"] = name if variable is not UNSET: field_dict["variable"] = variable - if quantiles_k_value is not UNSET: - field_dict["quantilesKValue"] = quantiles_k_value - if quantities is not UNSET: - field_dict["quantities"] = quantities if max_bound is not UNSET: field_dict["maxBound"] = max_bound if min_bound is not UNSET: field_dict["minBound"] = min_bound + if quantiles_k_value is not UNSET: + field_dict["quantilesKValue"] = quantiles_k_value + if quantities is not UNSET: + field_dict["quantities"] = quantities return field_dict @@ -90,6 +89,10 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: variable = d.pop("variable", UNSET) + max_bound = d.pop("maxBound", UNSET) + + min_bound = d.pop("minBound", UNSET) + quantiles_k_value = d.pop("quantilesKValue", UNSET) quantities = [] @@ -99,18 +102,14 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: quantities.append(quantities_item) - max_bound = d.pop("maxBound", UNSET) - - min_bound = d.pop("minBound", UNSET) - statistic_definition = cls( filter_=filter_, name=name, variable=variable, - quantiles_k_value=quantiles_k_value, - quantities=quantities, max_bound=max_bound, min_bound=min_bound, + quantiles_k_value=quantiles_k_value, + quantities=quantities, ) statistic_definition.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/statistic_result.py b/src/tuneinsight/api/sdk/models/statistic_result.py index d3c53e9..d21b815 100644 --- a/src/tuneinsight/api/sdk/models/statistic_result.py +++ b/src/tuneinsight/api/sdk/models/statistic_result.py @@ -18,25 +18,25 @@ class StatisticResult: filter_ (Union[Unset, Filter]): name (Union[Unset, str]): given name of the statistic variable (Union[Unset, str]): target variable in the dataset from the which the statistic is computed + min_ (Union[Unset, None, float]): + quantiles (Union[Unset, List[float]]): + variance (Union[Unset, 
None, float]): iqr (Union[Unset, None, float]): max_ (Union[Unset, None, float]): mean (Union[Unset, None, float]): median (Union[Unset, None, float]): - min_ (Union[Unset, None, float]): - quantiles (Union[Unset, List[float]]): - variance (Union[Unset, None, float]): """ filter_: Union[Unset, "Filter"] = UNSET name: Union[Unset, str] = UNSET variable: Union[Unset, str] = UNSET + min_: Union[Unset, None, float] = UNSET + quantiles: Union[Unset, List[float]] = UNSET + variance: Union[Unset, None, float] = UNSET iqr: Union[Unset, None, float] = UNSET max_: Union[Unset, None, float] = UNSET mean: Union[Unset, None, float] = UNSET median: Union[Unset, None, float] = UNSET - min_: Union[Unset, None, float] = UNSET - quantiles: Union[Unset, List[float]] = UNSET - variance: Union[Unset, None, float] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: @@ -46,16 +46,16 @@ def to_dict(self) -> Dict[str, Any]: name = self.name variable = self.variable - iqr = self.iqr - max_ = self.max_ - mean = self.mean - median = self.median min_ = self.min_ quantiles: Union[Unset, List[float]] = UNSET if not isinstance(self.quantiles, Unset): quantiles = self.quantiles variance = self.variance + iqr = self.iqr + max_ = self.max_ + mean = self.mean + median = self.median field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) @@ -66,6 +66,12 @@ def to_dict(self) -> Dict[str, Any]: field_dict["name"] = name if variable is not UNSET: field_dict["variable"] = variable + if min_ is not UNSET: + field_dict["min"] = min_ + if quantiles is not UNSET: + field_dict["quantiles"] = quantiles + if variance is not UNSET: + field_dict["variance"] = variance if iqr is not UNSET: field_dict["IQR"] = iqr if max_ is not UNSET: @@ -74,12 +80,6 @@ def to_dict(self) -> Dict[str, Any]: field_dict["mean"] = mean if median is not UNSET: field_dict["median"] = median - if min_ is not UNSET: - field_dict["min"] = min_ - if quantiles is not UNSET: - field_dict["quantiles"] = quantiles - if variance is not UNSET: - field_dict["variance"] = variance return field_dict @@ -99,6 +99,12 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: variable = d.pop("variable", UNSET) + min_ = d.pop("min", UNSET) + + quantiles = cast(List[float], d.pop("quantiles", UNSET)) + + variance = d.pop("variance", UNSET) + iqr = d.pop("IQR", UNSET) max_ = d.pop("max", UNSET) @@ -107,23 +113,17 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: median = d.pop("median", UNSET) - min_ = d.pop("min", UNSET) - - quantiles = cast(List[float], d.pop("quantiles", UNSET)) - - variance = d.pop("variance", UNSET) - statistic_result = cls( filter_=filter_, name=name, variable=variable, + min_=min_, + quantiles=quantiles, + variance=variance, iqr=iqr, max_=max_, mean=mean, median=median, - min_=min_, - quantiles=quantiles, - variance=variance, ) statistic_result.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/statistical_aggregation.py b/src/tuneinsight/api/sdk/models/statistical_aggregation.py index 888f8e8..8641cc7 100644 --- a/src/tuneinsight/api/sdk/models/statistical_aggregation.py +++ b/src/tuneinsight/api/sdk/models/statistical_aggregation.py @@ -21,88 +21,98 @@ class StatisticalAggregation: """ Attributes: type (ComputationType): Type of the computation. - join_id (Union[Unset, str]): Unique identifier of a data object. 
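# --- Editor's illustrative sketch (not part of the generated diff) ----------
# The field reordering in statistic_definition.py / statistic_result.py does
# not change the wire format: to_dict()/from_dict() still use the same
# camelCase keys (maxBound, minBound, quantilesKValue, IQR, ...). The import
# paths are the generated modules touched above; the values are made up.
from tuneinsight.api.sdk.models.statistic_definition import StatisticDefinition
from tuneinsight.api.sdk.models.statistic_result import StatisticResult

stat_def = StatisticDefinition(name="age_stats", variable="age",
                               min_bound=0.0, max_bound=120.0,
                               quantiles_k_value=4)
payload = stat_def.to_dict()
assert payload["minBound"] == 0.0 and payload["maxBound"] == 120.0
assert payload["quantilesKValue"] == 4

result = StatisticResult.from_dict({
    "name": "age_stats", "variable": "age",
    "mean": 42.5, "min": 18.0, "max": 97.0, "IQR": 21.0,
    "quantiles": [30.0, 42.5, 58.0],
})
print(result.mean, result.iqr, result.quantiles)
# ----------------------------------------------------------------------------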
+ local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured + the network) local_input (Union[Unset, LocalInput]): If a local input is provided, the node initiating the computation will use it instead of querying the datasource. This data is *not* shared to other nodes, only used for the duration of the computation. The local input columns/values must be in the form {: [, , ...], ...} - wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. - input_data_object (Union[Unset, str]): Shared identifier of a data object. + preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters + applied to the input retrieved from the datasource, if applicable cohort_id (Union[Unset, str]): Unique identifier of a data object. + end_to_end_encrypted (Union[Unset, bool]): if the end to end encrypted mode is set to true, + then when release results is set to true and the output + is initially encrypted with a network collective key, then it is key switched to + the initiating user's public key. + local_input_id (Union[Unset, str]): Unique identifier of a data object. + wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. + dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various + disclosure prevention mechanisms + join_id (Union[Unset, str]): Unique identifier of a data object. + timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. + encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. + owner (Union[Unset, str]): The username of the end user who requested the computation. project_id (Union[Unset, str]): Unique identifier of a project. release_results (Union[Unset, bool]): flag to set to true if the computation should directly release the output results. If set, then encrypted results are automatically key switched and decrypted and a Result entity is saved - dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various - disclosure prevention mechanisms - encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. - local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured - the network) - local_input_id (Union[Unset, str]): Unique identifier of a data object. - owner (Union[Unset, str]): The username of the end user who requested the computation. - preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters - applied to the input retrieved from the datasource, if applicable - timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. data_source_parameters (Union[Unset, ComputationDataSourceParameters]): Parameters used to query the datasource from each node before the computation + input_data_object (Union[Unset, str]): Shared identifier of a data object. 
+ aggregation_columns (Union[Unset, List[str]]): list of columns where all data is aggregated binning_operations (Union[Unset, List['BinningOperation']]): list of binning operations to apply before aggregating the results include_dataset_length (Union[Unset, bool]): whether or not to compute the total dataset length - aggregation_columns (Union[Unset, List[str]]): list of columns where all data is aggregated """ type: ComputationType - join_id: Union[Unset, str] = UNSET + local: Union[Unset, bool] = UNSET local_input: Union[Unset, "LocalInput"] = UNSET - wait: Union[Unset, bool] = UNSET - input_data_object: Union[Unset, str] = UNSET + preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET cohort_id: Union[Unset, str] = UNSET - project_id: Union[Unset, str] = UNSET - release_results: Union[Unset, bool] = UNSET + end_to_end_encrypted: Union[Unset, bool] = UNSET + local_input_id: Union[Unset, str] = UNSET + wait: Union[Unset, bool] = UNSET dp_policy: Union[Unset, "DPPolicy"] = UNSET + join_id: Union[Unset, str] = UNSET + timeout: Union[Unset, int] = UNSET encrypted: Union[Unset, bool] = UNSET - local: Union[Unset, bool] = UNSET - local_input_id: Union[Unset, str] = UNSET owner: Union[Unset, str] = UNSET - preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET - timeout: Union[Unset, int] = UNSET + project_id: Union[Unset, str] = UNSET + release_results: Union[Unset, bool] = UNSET data_source_parameters: Union[Unset, "ComputationDataSourceParameters"] = UNSET + input_data_object: Union[Unset, str] = UNSET + aggregation_columns: Union[Unset, List[str]] = UNSET binning_operations: Union[Unset, List["BinningOperation"]] = UNSET include_dataset_length: Union[Unset, bool] = UNSET - aggregation_columns: Union[Unset, List[str]] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: type = self.type.value - join_id = self.join_id + local = self.local local_input: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.local_input, Unset): local_input = self.local_input.to_dict() - wait = self.wait - input_data_object = self.input_data_object + preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.preprocessing_parameters, Unset): + preprocessing_parameters = self.preprocessing_parameters.to_dict() + cohort_id = self.cohort_id - project_id = self.project_id - release_results = self.release_results + end_to_end_encrypted = self.end_to_end_encrypted + local_input_id = self.local_input_id + wait = self.wait dp_policy: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.dp_policy, Unset): dp_policy = self.dp_policy.to_dict() + join_id = self.join_id + timeout = self.timeout encrypted = self.encrypted - local = self.local - local_input_id = self.local_input_id owner = self.owner - preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.preprocessing_parameters, Unset): - preprocessing_parameters = self.preprocessing_parameters.to_dict() - - timeout = self.timeout + project_id = self.project_id + release_results = self.release_results data_source_parameters: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.data_source_parameters, Unset): data_source_parameters = self.data_source_parameters.to_dict() + input_data_object = self.input_data_object + aggregation_columns: Union[Unset, List[str]] = UNSET + if not isinstance(self.aggregation_columns, Unset): + aggregation_columns = self.aggregation_columns 
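# --- Editor's illustrative sketch (not part of the generated diff) ----------
# A StatisticalAggregation request body combines the shared computation fields
# with aggregationColumns / binningOperations / includeDatasetLength, all
# serialized under the camelCase keys listed above. The "type" string is a
# placeholder (ComputationType values are not shown in this diff), so this is
# kept as a plain dict rather than a StatisticalAggregation instance.
statistical_aggregation_body = {
    "type": "<ComputationType value for statistical aggregation>",
    "aggregationColumns": ["height", "weight"],
    "includeDatasetLength": True,
    "wait": True,
    "timeout": 300,
    "endToEndEncrypted": True,
    "releaseResults": True,
}
# ----------------------------------------------------------------------------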
+ binning_operations: Union[Unset, List[Dict[str, Any]]] = UNSET if not isinstance(self.binning_operations, Unset): binning_operations = [] @@ -112,9 +122,6 @@ def to_dict(self) -> Dict[str, Any]: binning_operations.append(binning_operations_item) include_dataset_length = self.include_dataset_length - aggregation_columns: Union[Unset, List[str]] = UNSET - if not isinstance(self.aggregation_columns, Unset): - aggregation_columns = self.aggregation_columns field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) @@ -123,42 +130,44 @@ def to_dict(self) -> Dict[str, Any]: "type": type, } ) - if join_id is not UNSET: - field_dict["joinId"] = join_id + if local is not UNSET: + field_dict["local"] = local if local_input is not UNSET: field_dict["localInput"] = local_input - if wait is not UNSET: - field_dict["wait"] = wait - if input_data_object is not UNSET: - field_dict["inputDataObject"] = input_data_object + if preprocessing_parameters is not UNSET: + field_dict["preprocessingParameters"] = preprocessing_parameters if cohort_id is not UNSET: field_dict["cohortId"] = cohort_id - if project_id is not UNSET: - field_dict["projectId"] = project_id - if release_results is not UNSET: - field_dict["releaseResults"] = release_results + if end_to_end_encrypted is not UNSET: + field_dict["endToEndEncrypted"] = end_to_end_encrypted + if local_input_id is not UNSET: + field_dict["localInputID"] = local_input_id + if wait is not UNSET: + field_dict["wait"] = wait if dp_policy is not UNSET: field_dict["DPPolicy"] = dp_policy + if join_id is not UNSET: + field_dict["joinId"] = join_id + if timeout is not UNSET: + field_dict["timeout"] = timeout if encrypted is not UNSET: field_dict["encrypted"] = encrypted - if local is not UNSET: - field_dict["local"] = local - if local_input_id is not UNSET: - field_dict["localInputID"] = local_input_id if owner is not UNSET: field_dict["owner"] = owner - if preprocessing_parameters is not UNSET: - field_dict["preprocessingParameters"] = preprocessing_parameters - if timeout is not UNSET: - field_dict["timeout"] = timeout + if project_id is not UNSET: + field_dict["projectId"] = project_id + if release_results is not UNSET: + field_dict["releaseResults"] = release_results if data_source_parameters is not UNSET: field_dict["dataSourceParameters"] = data_source_parameters + if input_data_object is not UNSET: + field_dict["inputDataObject"] = input_data_object + if aggregation_columns is not UNSET: + field_dict["aggregationColumns"] = aggregation_columns if binning_operations is not UNSET: field_dict["binningOperations"] = binning_operations if include_dataset_length is not UNSET: field_dict["includeDatasetLength"] = include_dataset_length - if aggregation_columns is not UNSET: - field_dict["aggregationColumns"] = aggregation_columns return field_dict @@ -173,7 +182,7 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() type = ComputationType(d.pop("type")) - join_id = d.pop("joinId", UNSET) + local = d.pop("local", UNSET) _local_input = d.pop("localInput", UNSET) local_input: Union[Unset, LocalInput] @@ -182,15 +191,20 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: local_input = LocalInput.from_dict(_local_input) - wait = d.pop("wait", UNSET) - - input_data_object = d.pop("inputDataObject", UNSET) + _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) + preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] + if isinstance(_preprocessing_parameters, Unset): + 
preprocessing_parameters = UNSET + else: + preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) cohort_id = d.pop("cohortId", UNSET) - project_id = d.pop("projectId", UNSET) + end_to_end_encrypted = d.pop("endToEndEncrypted", UNSET) - release_results = d.pop("releaseResults", UNSET) + local_input_id = d.pop("localInputID", UNSET) + + wait = d.pop("wait", UNSET) _dp_policy = d.pop("DPPolicy", UNSET) dp_policy: Union[Unset, DPPolicy] @@ -199,22 +213,17 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: dp_policy = DPPolicy.from_dict(_dp_policy) - encrypted = d.pop("encrypted", UNSET) + join_id = d.pop("joinId", UNSET) - local = d.pop("local", UNSET) + timeout = d.pop("timeout", UNSET) - local_input_id = d.pop("localInputID", UNSET) + encrypted = d.pop("encrypted", UNSET) owner = d.pop("owner", UNSET) - _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) - preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] - if isinstance(_preprocessing_parameters, Unset): - preprocessing_parameters = UNSET - else: - preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) + project_id = d.pop("projectId", UNSET) - timeout = d.pop("timeout", UNSET) + release_results = d.pop("releaseResults", UNSET) _data_source_parameters = d.pop("dataSourceParameters", UNSET) data_source_parameters: Union[Unset, ComputationDataSourceParameters] @@ -223,6 +232,10 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: data_source_parameters = ComputationDataSourceParameters.from_dict(_data_source_parameters) + input_data_object = d.pop("inputDataObject", UNSET) + + aggregation_columns = cast(List[str], d.pop("aggregationColumns", UNSET)) + binning_operations = [] _binning_operations = d.pop("binningOperations", UNSET) for binning_operations_item_data in _binning_operations or []: @@ -232,28 +245,27 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: include_dataset_length = d.pop("includeDatasetLength", UNSET) - aggregation_columns = cast(List[str], d.pop("aggregationColumns", UNSET)) - statistical_aggregation = cls( type=type, - join_id=join_id, + local=local, local_input=local_input, - wait=wait, - input_data_object=input_data_object, + preprocessing_parameters=preprocessing_parameters, cohort_id=cohort_id, - project_id=project_id, - release_results=release_results, + end_to_end_encrypted=end_to_end_encrypted, + local_input_id=local_input_id, + wait=wait, dp_policy=dp_policy, + join_id=join_id, + timeout=timeout, encrypted=encrypted, - local=local, - local_input_id=local_input_id, owner=owner, - preprocessing_parameters=preprocessing_parameters, - timeout=timeout, + project_id=project_id, + release_results=release_results, data_source_parameters=data_source_parameters, + input_data_object=input_data_object, + aggregation_columns=aggregation_columns, binning_operations=binning_operations, include_dataset_length=include_dataset_length, - aggregation_columns=aggregation_columns, ) statistical_aggregation.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/storage_definition.py b/src/tuneinsight/api/sdk/models/storage_definition.py index 9cc2552..d9cf825 100644 --- a/src/tuneinsight/api/sdk/models/storage_definition.py +++ b/src/tuneinsight/api/sdk/models/storage_definition.py @@ -17,25 +17,24 @@ class StorageDefinition: """specification of the storage operation Attributes: - current_key (Union[Unset, str]): currently used b64-formatted 
encryption key, needs to be specified when running - 'decrypt' or 'rotate' encrypt_unencrypted (Union[Unset, bool]): when performing a rotation, if true, then unencrypted values get encrypted new_key (Union[Unset, str]): new b64-formatted key to use on the storage, needs to be specified when running 'encrypt' or 'rotate' operation (Union[Unset, StorageOperation]): operation to perform on the storage backup_definition (Union[Unset, BackupDefinition]): backup parameters + current_key (Union[Unset, str]): currently used b64-formatted encryption key, needs to be specified when running + 'decrypt' or 'rotate' """ - current_key: Union[Unset, str] = UNSET encrypt_unencrypted: Union[Unset, bool] = UNSET new_key: Union[Unset, str] = UNSET operation: Union[Unset, StorageOperation] = UNSET backup_definition: Union[Unset, "BackupDefinition"] = UNSET + current_key: Union[Unset, str] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - current_key = self.current_key encrypt_unencrypted = self.encrypt_unencrypted new_key = self.new_key operation: Union[Unset, str] = UNSET @@ -46,11 +45,11 @@ def to_dict(self) -> Dict[str, Any]: if not isinstance(self.backup_definition, Unset): backup_definition = self.backup_definition.to_dict() + current_key = self.current_key + field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if current_key is not UNSET: - field_dict["currentKey"] = current_key if encrypt_unencrypted is not UNSET: field_dict["encryptUnencrypted"] = encrypt_unencrypted if new_key is not UNSET: @@ -59,6 +58,8 @@ def to_dict(self) -> Dict[str, Any]: field_dict["operation"] = operation if backup_definition is not UNSET: field_dict["backupDefinition"] = backup_definition + if current_key is not UNSET: + field_dict["currentKey"] = current_key return field_dict @@ -67,8 +68,6 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.backup_definition import BackupDefinition d = src_dict.copy() - current_key = d.pop("currentKey", UNSET) - encrypt_unencrypted = d.pop("encryptUnencrypted", UNSET) new_key = d.pop("newKey", UNSET) @@ -87,12 +86,14 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: backup_definition = BackupDefinition.from_dict(_backup_definition) + current_key = d.pop("currentKey", UNSET) + storage_definition = cls( - current_key=current_key, encrypt_unencrypted=encrypt_unencrypted, new_key=new_key, operation=operation, backup_definition=backup_definition, + current_key=current_key, ) storage_definition.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/survival.py b/src/tuneinsight/api/sdk/models/survival.py index fd43d82..e0f2dce 100644 --- a/src/tuneinsight/api/sdk/models/survival.py +++ b/src/tuneinsight/api/sdk/models/survival.py @@ -17,6 +17,9 @@ class Survival: """ Attributes: type (PreprocessingOperationType): type of preprocessing operation + num_frames (Union[Unset, int]): the number of time frames to take into account starting from the start of the + survival + start_event (Union[Unset, str]): the event column that must contain the timestamps of the start of the trial duration_col (Union[Unset, str]): the name of the column that stores the duration for each sample, the values stored must be integers Default: 'duration'. end_event (Union[Unset, str]): the column that must contain the timestamps of the end event (can be empty if no @@ -25,24 +28,23 @@ class Survival: 'event'. 
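# --- Editor's illustrative sketch (not part of the generated diff) ----------
# Key rotation request built from the StorageDefinition fields in
# storage_definition.py above: currentKey carries the key in use, newKey the
# replacement. The key strings below are placeholders, and the StorageOperation
# enum values are not shown in this diff, so `operation` is left unset.
from tuneinsight.api.sdk.models.storage_definition import StorageDefinition

rotation = StorageDefinition(current_key="<current b64 key>",
                             new_key="<new b64 key>",
                             encrypt_unencrypted=True)
print(rotation.to_dict())
# -> {'encryptUnencrypted': True, 'newKey': '<new b64 key>', 'currentKey': '<current b64 key>'}
# ----------------------------------------------------------------------------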
event_val (Union[Unset, str]): the event value indicating a survival event (i.e. death) interval (Union[Unset, Duration]): definition of a date-independent time interval - num_frames (Union[Unset, int]): the number of time frames to take into account starting from the start of the - survival - start_event (Union[Unset, str]): the event column that must contain the timestamps of the start of the trial """ type: PreprocessingOperationType + num_frames: Union[Unset, int] = UNSET + start_event: Union[Unset, str] = UNSET duration_col: Union[Unset, str] = "duration" end_event: Union[Unset, str] = UNSET event_col: Union[Unset, str] = "event" event_val: Union[Unset, str] = UNSET interval: Union[Unset, "Duration"] = UNSET - num_frames: Union[Unset, int] = UNSET - start_event: Union[Unset, str] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: type = self.type.value + num_frames = self.num_frames + start_event = self.start_event duration_col = self.duration_col end_event = self.end_event event_col = self.event_col @@ -51,9 +53,6 @@ def to_dict(self) -> Dict[str, Any]: if not isinstance(self.interval, Unset): interval = self.interval.to_dict() - num_frames = self.num_frames - start_event = self.start_event - field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update( @@ -61,6 +60,10 @@ def to_dict(self) -> Dict[str, Any]: "type": type, } ) + if num_frames is not UNSET: + field_dict["numFrames"] = num_frames + if start_event is not UNSET: + field_dict["startEvent"] = start_event if duration_col is not UNSET: field_dict["durationCol"] = duration_col if end_event is not UNSET: @@ -71,10 +74,6 @@ def to_dict(self) -> Dict[str, Any]: field_dict["eventVal"] = event_val if interval is not UNSET: field_dict["interval"] = interval - if num_frames is not UNSET: - field_dict["numFrames"] = num_frames - if start_event is not UNSET: - field_dict["startEvent"] = start_event return field_dict @@ -85,6 +84,10 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() type = PreprocessingOperationType(d.pop("type")) + num_frames = d.pop("numFrames", UNSET) + + start_event = d.pop("startEvent", UNSET) + duration_col = d.pop("durationCol", UNSET) end_event = d.pop("endEvent", UNSET) @@ -100,19 +103,15 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: interval = Duration.from_dict(_interval) - num_frames = d.pop("numFrames", UNSET) - - start_event = d.pop("startEvent", UNSET) - survival = cls( type=type, + num_frames=num_frames, + start_event=start_event, duration_col=duration_col, end_event=end_event, event_col=event_col, event_val=event_val, interval=interval, - num_frames=num_frames, - start_event=start_event, ) survival.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/survival_aggregation.py b/src/tuneinsight/api/sdk/models/survival_aggregation.py index 88df867..9b75f96 100644 --- a/src/tuneinsight/api/sdk/models/survival_aggregation.py +++ b/src/tuneinsight/api/sdk/models/survival_aggregation.py @@ -23,31 +23,37 @@ class SurvivalAggregation: """ Attributes: type (ComputationType): Type of the computation. - join_id (Union[Unset, str]): Unique identifier of a data object. + local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured + the network) local_input (Union[Unset, LocalInput]): If a local input is provided, the node initiating the computation will use it instead of querying the datasource. 
This data is *not* shared to other nodes, only used for the duration of the computation. The local input columns/values must be in the form {: [, , ...], ...} - wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. - input_data_object (Union[Unset, str]): Shared identifier of a data object. + preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters + applied to the input retrieved from the datasource, if applicable cohort_id (Union[Unset, str]): Unique identifier of a data object. + end_to_end_encrypted (Union[Unset, bool]): if the end to end encrypted mode is set to true, + then when release results is set to true and the output + is initially encrypted with a network collective key, then it is key switched to + the initiating user's public key. + local_input_id (Union[Unset, str]): Unique identifier of a data object. + wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. + dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various + disclosure prevention mechanisms + join_id (Union[Unset, str]): Unique identifier of a data object. + timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. + encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. + owner (Union[Unset, str]): The username of the end user who requested the computation. project_id (Union[Unset, str]): Unique identifier of a project. release_results (Union[Unset, bool]): flag to set to true if the computation should directly release the output results. If set, then encrypted results are automatically key switched and decrypted and a Result entity is saved - dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various - disclosure prevention mechanisms - encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. - local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured - the network) - local_input_id (Union[Unset, str]): Unique identifier of a data object. - owner (Union[Unset, str]): The username of the end user who requested the computation. - preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters - applied to the input retrieved from the datasource, if applicable - timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. data_source_parameters (Union[Unset, ComputationDataSourceParameters]): Parameters used to query the datasource from each node before the computation + input_data_object (Union[Unset, str]): Shared identifier of a data object. 
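# --- Editor's illustrative sketch (not part of the generated diff) ----------
# The survival-specific fields of SurvivalAggregation (survivalParameters,
# subgroups, matching options) are described just below. A subgroup entry is a
# name plus an optional Filter (see survival_aggregation_subgroups_item.py
# further down), and the survival parameters reuse the keys from survival.py
# above. The "type" string is a placeholder (PreprocessingOperationType values
# are not shown in this diff), and the Filter model is not part of this diff,
# so the filter is left unset.
from tuneinsight.api.sdk.models.survival_aggregation_subgroups_item import (
    SurvivalAggregationSubgroupsItem,
)

subgroup = SurvivalAggregationSubgroupsItem(name="female-over-65")
print(subgroup.to_dict())  # -> {'name': 'female-over-65'}

survival_parameters = {
    "type": "<PreprocessingOperationType value for survival>",
    "startEvent": "diagnosis_date",
    "endEvent": "death_date",
    "durationCol": "duration",
    "eventCol": "event",
    "eventVal": "death",
    "numFrames": 12,
}
# ----------------------------------------------------------------------------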
+ matching_organization (Union[Unset, str]): when secure matching is enabled, the organization with whom to match + records with secure_matching (Union[Unset, bool]): if true then a cohort is created by matching records with a specified organization subgroups (Union[Unset, List['SurvivalAggregationSubgroupsItem']]): list of filters to create survival subgroups @@ -55,64 +61,65 @@ class SurvivalAggregation: encrypted_matching (Union[Unset, bool]): if true, then the resulting matches are kept encrypted before aggregating the survival data (slower) matching_columns (Union[Unset, List['MatchingColumn']]): The columns on which the data should be matched - matching_organization (Union[Unset, str]): when secure matching is enabled, the organization with whom to match - records with """ type: ComputationType - join_id: Union[Unset, str] = UNSET + local: Union[Unset, bool] = UNSET local_input: Union[Unset, "LocalInput"] = UNSET - wait: Union[Unset, bool] = UNSET - input_data_object: Union[Unset, str] = UNSET + preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET cohort_id: Union[Unset, str] = UNSET - project_id: Union[Unset, str] = UNSET - release_results: Union[Unset, bool] = UNSET + end_to_end_encrypted: Union[Unset, bool] = UNSET + local_input_id: Union[Unset, str] = UNSET + wait: Union[Unset, bool] = UNSET dp_policy: Union[Unset, "DPPolicy"] = UNSET + join_id: Union[Unset, str] = UNSET + timeout: Union[Unset, int] = UNSET encrypted: Union[Unset, bool] = UNSET - local: Union[Unset, bool] = UNSET - local_input_id: Union[Unset, str] = UNSET owner: Union[Unset, str] = UNSET - preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET - timeout: Union[Unset, int] = UNSET + project_id: Union[Unset, str] = UNSET + release_results: Union[Unset, bool] = UNSET data_source_parameters: Union[Unset, "ComputationDataSourceParameters"] = UNSET + input_data_object: Union[Unset, str] = UNSET + matching_organization: Union[Unset, str] = UNSET secure_matching: Union[Unset, bool] = UNSET subgroups: Union[Unset, List["SurvivalAggregationSubgroupsItem"]] = UNSET survival_parameters: Union[Unset, "Survival"] = UNSET encrypted_matching: Union[Unset, bool] = UNSET matching_columns: Union[Unset, List["MatchingColumn"]] = UNSET - matching_organization: Union[Unset, str] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: type = self.type.value - join_id = self.join_id + local = self.local local_input: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.local_input, Unset): local_input = self.local_input.to_dict() - wait = self.wait - input_data_object = self.input_data_object + preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.preprocessing_parameters, Unset): + preprocessing_parameters = self.preprocessing_parameters.to_dict() + cohort_id = self.cohort_id - project_id = self.project_id - release_results = self.release_results + end_to_end_encrypted = self.end_to_end_encrypted + local_input_id = self.local_input_id + wait = self.wait dp_policy: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.dp_policy, Unset): dp_policy = self.dp_policy.to_dict() + join_id = self.join_id + timeout = self.timeout encrypted = self.encrypted - local = self.local - local_input_id = self.local_input_id owner = self.owner - preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.preprocessing_parameters, Unset): - 
preprocessing_parameters = self.preprocessing_parameters.to_dict() - - timeout = self.timeout + project_id = self.project_id + release_results = self.release_results data_source_parameters: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.data_source_parameters, Unset): data_source_parameters = self.data_source_parameters.to_dict() + input_data_object = self.input_data_object + matching_organization = self.matching_organization secure_matching = self.secure_matching subgroups: Union[Unset, List[Dict[str, Any]]] = UNSET if not isinstance(self.subgroups, Unset): @@ -135,8 +142,6 @@ def to_dict(self) -> Dict[str, Any]: matching_columns.append(matching_columns_item) - matching_organization = self.matching_organization - field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update( @@ -144,36 +149,40 @@ def to_dict(self) -> Dict[str, Any]: "type": type, } ) - if join_id is not UNSET: - field_dict["joinId"] = join_id + if local is not UNSET: + field_dict["local"] = local if local_input is not UNSET: field_dict["localInput"] = local_input - if wait is not UNSET: - field_dict["wait"] = wait - if input_data_object is not UNSET: - field_dict["inputDataObject"] = input_data_object + if preprocessing_parameters is not UNSET: + field_dict["preprocessingParameters"] = preprocessing_parameters if cohort_id is not UNSET: field_dict["cohortId"] = cohort_id - if project_id is not UNSET: - field_dict["projectId"] = project_id - if release_results is not UNSET: - field_dict["releaseResults"] = release_results + if end_to_end_encrypted is not UNSET: + field_dict["endToEndEncrypted"] = end_to_end_encrypted + if local_input_id is not UNSET: + field_dict["localInputID"] = local_input_id + if wait is not UNSET: + field_dict["wait"] = wait if dp_policy is not UNSET: field_dict["DPPolicy"] = dp_policy + if join_id is not UNSET: + field_dict["joinId"] = join_id + if timeout is not UNSET: + field_dict["timeout"] = timeout if encrypted is not UNSET: field_dict["encrypted"] = encrypted - if local is not UNSET: - field_dict["local"] = local - if local_input_id is not UNSET: - field_dict["localInputID"] = local_input_id if owner is not UNSET: field_dict["owner"] = owner - if preprocessing_parameters is not UNSET: - field_dict["preprocessingParameters"] = preprocessing_parameters - if timeout is not UNSET: - field_dict["timeout"] = timeout + if project_id is not UNSET: + field_dict["projectId"] = project_id + if release_results is not UNSET: + field_dict["releaseResults"] = release_results if data_source_parameters is not UNSET: field_dict["dataSourceParameters"] = data_source_parameters + if input_data_object is not UNSET: + field_dict["inputDataObject"] = input_data_object + if matching_organization is not UNSET: + field_dict["matchingOrganization"] = matching_organization if secure_matching is not UNSET: field_dict["secureMatching"] = secure_matching if subgroups is not UNSET: @@ -184,8 +193,6 @@ def to_dict(self) -> Dict[str, Any]: field_dict["encryptedMatching"] = encrypted_matching if matching_columns is not UNSET: field_dict["matchingColumns"] = matching_columns - if matching_organization is not UNSET: - field_dict["matchingOrganization"] = matching_organization return field_dict @@ -202,7 +209,7 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() type = ComputationType(d.pop("type")) - join_id = d.pop("joinId", UNSET) + local = d.pop("local", UNSET) _local_input = d.pop("localInput", UNSET) local_input: Union[Unset, LocalInput] @@ -211,15 
+218,20 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: local_input = LocalInput.from_dict(_local_input) - wait = d.pop("wait", UNSET) - - input_data_object = d.pop("inputDataObject", UNSET) + _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) + preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] + if isinstance(_preprocessing_parameters, Unset): + preprocessing_parameters = UNSET + else: + preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) cohort_id = d.pop("cohortId", UNSET) - project_id = d.pop("projectId", UNSET) + end_to_end_encrypted = d.pop("endToEndEncrypted", UNSET) - release_results = d.pop("releaseResults", UNSET) + local_input_id = d.pop("localInputID", UNSET) + + wait = d.pop("wait", UNSET) _dp_policy = d.pop("DPPolicy", UNSET) dp_policy: Union[Unset, DPPolicy] @@ -228,22 +240,17 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: dp_policy = DPPolicy.from_dict(_dp_policy) - encrypted = d.pop("encrypted", UNSET) + join_id = d.pop("joinId", UNSET) - local = d.pop("local", UNSET) + timeout = d.pop("timeout", UNSET) - local_input_id = d.pop("localInputID", UNSET) + encrypted = d.pop("encrypted", UNSET) owner = d.pop("owner", UNSET) - _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) - preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] - if isinstance(_preprocessing_parameters, Unset): - preprocessing_parameters = UNSET - else: - preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) + project_id = d.pop("projectId", UNSET) - timeout = d.pop("timeout", UNSET) + release_results = d.pop("releaseResults", UNSET) _data_source_parameters = d.pop("dataSourceParameters", UNSET) data_source_parameters: Union[Unset, ComputationDataSourceParameters] @@ -252,6 +259,10 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: data_source_parameters = ComputationDataSourceParameters.from_dict(_data_source_parameters) + input_data_object = d.pop("inputDataObject", UNSET) + + matching_organization = d.pop("matchingOrganization", UNSET) + secure_matching = d.pop("secureMatching", UNSET) subgroups = [] @@ -277,31 +288,30 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: matching_columns.append(matching_columns_item) - matching_organization = d.pop("matchingOrganization", UNSET) - survival_aggregation = cls( type=type, - join_id=join_id, + local=local, local_input=local_input, - wait=wait, - input_data_object=input_data_object, + preprocessing_parameters=preprocessing_parameters, cohort_id=cohort_id, - project_id=project_id, - release_results=release_results, + end_to_end_encrypted=end_to_end_encrypted, + local_input_id=local_input_id, + wait=wait, dp_policy=dp_policy, + join_id=join_id, + timeout=timeout, encrypted=encrypted, - local=local, - local_input_id=local_input_id, owner=owner, - preprocessing_parameters=preprocessing_parameters, - timeout=timeout, + project_id=project_id, + release_results=release_results, data_source_parameters=data_source_parameters, + input_data_object=input_data_object, + matching_organization=matching_organization, secure_matching=secure_matching, subgroups=subgroups, survival_parameters=survival_parameters, encrypted_matching=encrypted_matching, matching_columns=matching_columns, - matching_organization=matching_organization, ) survival_aggregation.additional_properties = d diff --git 
a/src/tuneinsight/api/sdk/models/survival_aggregation_subgroups_item.py b/src/tuneinsight/api/sdk/models/survival_aggregation_subgroups_item.py index 22a183c..fdce83d 100644 --- a/src/tuneinsight/api/sdk/models/survival_aggregation_subgroups_item.py +++ b/src/tuneinsight/api/sdk/models/survival_aggregation_subgroups_item.py @@ -15,28 +15,27 @@ class SurvivalAggregationSubgroupsItem: """ Attributes: - filter_ (Union[Unset, Filter]): name (Union[Unset, str]): + filter_ (Union[Unset, Filter]): """ - filter_: Union[Unset, "Filter"] = UNSET name: Union[Unset, str] = UNSET + filter_: Union[Unset, "Filter"] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: + name = self.name filter_: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.filter_, Unset): filter_ = self.filter_.to_dict() - name = self.name - field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if filter_ is not UNSET: - field_dict["filter"] = filter_ if name is not UNSET: field_dict["name"] = name + if filter_ is not UNSET: + field_dict["filter"] = filter_ return field_dict @@ -45,6 +44,8 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.filter_ import Filter d = src_dict.copy() + name = d.pop("name", UNSET) + _filter_ = d.pop("filter", UNSET) filter_: Union[Unset, Filter] if isinstance(_filter_, Unset): @@ -52,11 +53,9 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: filter_ = Filter.from_dict(_filter_) - name = d.pop("name", UNSET) - survival_aggregation_subgroups_item = cls( - filter_=filter_, name=name, + filter_=filter_, ) survival_aggregation_subgroups_item.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/threshold.py b/src/tuneinsight/api/sdk/models/threshold.py index 7416440..a416efa 100644 --- a/src/tuneinsight/api/sdk/models/threshold.py +++ b/src/tuneinsight/api/sdk/models/threshold.py @@ -13,40 +13,43 @@ class Threshold: """represents a threshold, which can be made relative of the dataset size Attributes: - type (Union[Unset, ThresholdType]): fixed_value (Union[Unset, int]): value of the fixed threshold relative_factor (Union[Unset, float]): when the threshold is relative to the dataset size, factor of this dataset size + type (Union[Unset, ThresholdType]): """ - type: Union[Unset, ThresholdType] = UNSET fixed_value: Union[Unset, int] = UNSET relative_factor: Union[Unset, float] = UNSET + type: Union[Unset, ThresholdType] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: + fixed_value = self.fixed_value + relative_factor = self.relative_factor type: Union[Unset, str] = UNSET if not isinstance(self.type, Unset): type = self.type.value - fixed_value = self.fixed_value - relative_factor = self.relative_factor - field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if type is not UNSET: - field_dict["type"] = type if fixed_value is not UNSET: field_dict["fixedValue"] = fixed_value if relative_factor is not UNSET: field_dict["relativeFactor"] = relative_factor + if type is not UNSET: + field_dict["type"] = type return field_dict @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() + fixed_value = d.pop("fixedValue", UNSET) + + relative_factor = d.pop("relativeFactor", UNSET) + _type = d.pop("type", UNSET) type: Union[Unset, ThresholdType] if isinstance(_type, Unset): @@ -54,14 
+57,10 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: type = ThresholdType(_type) - fixed_value = d.pop("fixedValue", UNSET) - - relative_factor = d.pop("relativeFactor", UNSET) - threshold = cls( - type=type, fixed_value=fixed_value, relative_factor=relative_factor, + type=type, ) threshold.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/user_definition.py b/src/tuneinsight/api/sdk/models/user_definition.py index cb6963e..1ac750a 100644 --- a/src/tuneinsight/api/sdk/models/user_definition.py +++ b/src/tuneinsight/api/sdk/models/user_definition.py @@ -18,71 +18,75 @@ class UserDefinition: """ Attributes: - attributes (Union[Unset, UserDefinitionAttributes]): - totp (Union[Unset, bool]): - access (Union[Unset, UserDefinitionAccess]): - realm_roles (Union[Unset, List[str]]): + created_timestamp (Union[Unset, int]): required_actions (Union[Unset, List[str]]): - client_roles (Union[Unset, UserDefinitionClientRoles]): service_account_client_id (Union[Unset, str]): - username (Union[Unset, str]): - id (Union[Unset, str]): - disableable_credential_types (Union[Unset, List['UserDefinitionDisableableCredentialTypesItem']]): - email (Union[Unset, str]): + client_roles (Union[Unset, UserDefinitionClientRoles]): email_verified (Union[Unset, bool]): enabled (Union[Unset, bool]): federation_link (Union[Unset, str]): - first_name (Union[Unset, str]): groups (Union[Unset, List[str]]): - created_timestamp (Union[Unset, int]): last_name (Union[Unset, str]): + realm_roles (Union[Unset, List[str]]): + totp (Union[Unset, bool]): + attributes (Union[Unset, UserDefinitionAttributes]): + username (Union[Unset, str]): + disableable_credential_types (Union[Unset, List['UserDefinitionDisableableCredentialTypesItem']]): + id (Union[Unset, str]): + access (Union[Unset, UserDefinitionAccess]): + first_name (Union[Unset, str]): + email (Union[Unset, str]): """ - attributes: Union[Unset, "UserDefinitionAttributes"] = UNSET - totp: Union[Unset, bool] = UNSET - access: Union[Unset, "UserDefinitionAccess"] = UNSET - realm_roles: Union[Unset, List[str]] = UNSET + created_timestamp: Union[Unset, int] = UNSET required_actions: Union[Unset, List[str]] = UNSET - client_roles: Union[Unset, "UserDefinitionClientRoles"] = UNSET service_account_client_id: Union[Unset, str] = UNSET - username: Union[Unset, str] = UNSET - id: Union[Unset, str] = UNSET - disableable_credential_types: Union[Unset, List["UserDefinitionDisableableCredentialTypesItem"]] = UNSET - email: Union[Unset, str] = UNSET + client_roles: Union[Unset, "UserDefinitionClientRoles"] = UNSET email_verified: Union[Unset, bool] = UNSET enabled: Union[Unset, bool] = UNSET federation_link: Union[Unset, str] = UNSET - first_name: Union[Unset, str] = UNSET groups: Union[Unset, List[str]] = UNSET - created_timestamp: Union[Unset, int] = UNSET last_name: Union[Unset, str] = UNSET + realm_roles: Union[Unset, List[str]] = UNSET + totp: Union[Unset, bool] = UNSET + attributes: Union[Unset, "UserDefinitionAttributes"] = UNSET + username: Union[Unset, str] = UNSET + disableable_credential_types: Union[Unset, List["UserDefinitionDisableableCredentialTypesItem"]] = UNSET + id: Union[Unset, str] = UNSET + access: Union[Unset, "UserDefinitionAccess"] = UNSET + first_name: Union[Unset, str] = UNSET + email: Union[Unset, str] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - attributes: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.attributes, Unset): - 
attributes = self.attributes.to_dict() - - totp = self.totp - access: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.access, Unset): - access = self.access.to_dict() - - realm_roles: Union[Unset, List[str]] = UNSET - if not isinstance(self.realm_roles, Unset): - realm_roles = self.realm_roles - + created_timestamp = self.created_timestamp required_actions: Union[Unset, List[str]] = UNSET if not isinstance(self.required_actions, Unset): required_actions = self.required_actions + service_account_client_id = self.service_account_client_id client_roles: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.client_roles, Unset): client_roles = self.client_roles.to_dict() - service_account_client_id = self.service_account_client_id + email_verified = self.email_verified + enabled = self.enabled + federation_link = self.federation_link + groups: Union[Unset, List[str]] = UNSET + if not isinstance(self.groups, Unset): + groups = self.groups + + last_name = self.last_name + realm_roles: Union[Unset, List[str]] = UNSET + if not isinstance(self.realm_roles, Unset): + realm_roles = self.realm_roles + + totp = self.totp + attributes: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.attributes, Unset): + attributes = self.attributes.to_dict() + username = self.username - id = self.id disableable_credential_types: Union[Unset, List[Dict[str, Any]]] = UNSET if not isinstance(self.disableable_credential_types, Unset): disableable_credential_types = [] @@ -91,57 +95,53 @@ def to_dict(self) -> Dict[str, Any]: disableable_credential_types.append(disableable_credential_types_item) - email = self.email - email_verified = self.email_verified - enabled = self.enabled - federation_link = self.federation_link - first_name = self.first_name - groups: Union[Unset, List[str]] = UNSET - if not isinstance(self.groups, Unset): - groups = self.groups + id = self.id + access: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.access, Unset): + access = self.access.to_dict() - created_timestamp = self.created_timestamp - last_name = self.last_name + first_name = self.first_name + email = self.email field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if attributes is not UNSET: - field_dict["attributes"] = attributes - if totp is not UNSET: - field_dict["totp"] = totp - if access is not UNSET: - field_dict["access"] = access - if realm_roles is not UNSET: - field_dict["realmRoles"] = realm_roles + if created_timestamp is not UNSET: + field_dict["createdTimestamp"] = created_timestamp if required_actions is not UNSET: field_dict["requiredActions"] = required_actions - if client_roles is not UNSET: - field_dict["clientRoles"] = client_roles if service_account_client_id is not UNSET: field_dict["serviceAccountClientID"] = service_account_client_id - if username is not UNSET: - field_dict["username"] = username - if id is not UNSET: - field_dict["id"] = id - if disableable_credential_types is not UNSET: - field_dict["disableableCredentialTypes"] = disableable_credential_types - if email is not UNSET: - field_dict["email"] = email + if client_roles is not UNSET: + field_dict["clientRoles"] = client_roles if email_verified is not UNSET: field_dict["emailVerified"] = email_verified if enabled is not UNSET: field_dict["enabled"] = enabled if federation_link is not UNSET: field_dict["federationLink"] = federation_link - if first_name is not UNSET: - field_dict["firstName"] = first_name if groups is not UNSET: field_dict["groups"] = groups - 
if created_timestamp is not UNSET: - field_dict["createdTimestamp"] = created_timestamp if last_name is not UNSET: field_dict["lastName"] = last_name + if realm_roles is not UNSET: + field_dict["realmRoles"] = realm_roles + if totp is not UNSET: + field_dict["totp"] = totp + if attributes is not UNSET: + field_dict["attributes"] = attributes + if username is not UNSET: + field_dict["username"] = username + if disableable_credential_types is not UNSET: + field_dict["disableableCredentialTypes"] = disableable_credential_types + if id is not UNSET: + field_dict["id"] = id + if access is not UNSET: + field_dict["access"] = access + if first_name is not UNSET: + field_dict["firstName"] = first_name + if email is not UNSET: + field_dict["email"] = email return field_dict @@ -155,26 +155,12 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: ) d = src_dict.copy() - _attributes = d.pop("attributes", UNSET) - attributes: Union[Unset, UserDefinitionAttributes] - if isinstance(_attributes, Unset): - attributes = UNSET - else: - attributes = UserDefinitionAttributes.from_dict(_attributes) - - totp = d.pop("totp", UNSET) - - _access = d.pop("access", UNSET) - access: Union[Unset, UserDefinitionAccess] - if isinstance(_access, Unset): - access = UNSET - else: - access = UserDefinitionAccess.from_dict(_access) - - realm_roles = cast(List[str], d.pop("realmRoles", UNSET)) + created_timestamp = d.pop("createdTimestamp", UNSET) required_actions = cast(List[str], d.pop("requiredActions", UNSET)) + service_account_client_id = d.pop("serviceAccountClientID", UNSET) + _client_roles = d.pop("clientRoles", UNSET) client_roles: Union[Unset, UserDefinitionClientRoles] if isinstance(_client_roles, Unset): @@ -182,11 +168,28 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: client_roles = UserDefinitionClientRoles.from_dict(_client_roles) - service_account_client_id = d.pop("serviceAccountClientID", UNSET) + email_verified = d.pop("emailVerified", UNSET) - username = d.pop("username", UNSET) + enabled = d.pop("enabled", UNSET) - id = d.pop("id", UNSET) + federation_link = d.pop("federationLink", UNSET) + + groups = cast(List[str], d.pop("groups", UNSET)) + + last_name = d.pop("lastName", UNSET) + + realm_roles = cast(List[str], d.pop("realmRoles", UNSET)) + + totp = d.pop("totp", UNSET) + + _attributes = d.pop("attributes", UNSET) + attributes: Union[Unset, UserDefinitionAttributes] + if isinstance(_attributes, Unset): + attributes = UNSET + else: + attributes = UserDefinitionAttributes.from_dict(_attributes) + + username = d.pop("username", UNSET) disableable_credential_types = [] _disableable_credential_types = d.pop("disableableCredentialTypes", UNSET) @@ -197,41 +200,38 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: disableable_credential_types.append(disableable_credential_types_item) - email = d.pop("email", UNSET) - - email_verified = d.pop("emailVerified", UNSET) - - enabled = d.pop("enabled", UNSET) + id = d.pop("id", UNSET) - federation_link = d.pop("federationLink", UNSET) + _access = d.pop("access", UNSET) + access: Union[Unset, UserDefinitionAccess] + if isinstance(_access, Unset): + access = UNSET + else: + access = UserDefinitionAccess.from_dict(_access) first_name = d.pop("firstName", UNSET) - groups = cast(List[str], d.pop("groups", UNSET)) - - created_timestamp = d.pop("createdTimestamp", UNSET) - - last_name = d.pop("lastName", UNSET) + email = d.pop("email", UNSET) user_definition = cls( - attributes=attributes, - totp=totp, - access=access, - 
realm_roles=realm_roles, + created_timestamp=created_timestamp, required_actions=required_actions, - client_roles=client_roles, service_account_client_id=service_account_client_id, - username=username, - id=id, - disableable_credential_types=disableable_credential_types, - email=email, + client_roles=client_roles, email_verified=email_verified, enabled=enabled, federation_link=federation_link, - first_name=first_name, groups=groups, - created_timestamp=created_timestamp, last_name=last_name, + realm_roles=realm_roles, + totp=totp, + attributes=attributes, + username=username, + disableable_credential_types=disableable_credential_types, + id=id, + access=access, + first_name=first_name, + email=email, ) user_definition.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/user_list_query.py b/src/tuneinsight/api/sdk/models/user_list_query.py index b02d86c..6ab94f2 100644 --- a/src/tuneinsight/api/sdk/models/user_list_query.py +++ b/src/tuneinsight/api/sdk/models/user_list_query.py @@ -11,134 +11,134 @@ class UserListQuery: """ Attributes: - brief_representation (Union[Unset, bool]): - enabled (Union[Unset, bool]): - search (Union[Unset, str]): - username (Union[Unset, str]): - email (Union[Unset, str]): - email_verified (Union[Unset, bool]): idp_alias (Union[Unset, str]): - idp_user_id (Union[Unset, str]): - q (Union[Unset, str]): - first_name (Union[Unset, str]): last_name (Union[Unset, str]): - exact (Union[Unset, bool]): + search (Union[Unset, str]): + email_verified (Union[Unset, bool]): first (Union[Unset, int]): + first_name (Union[Unset, str]): + brief_representation (Union[Unset, bool]): max_ (Union[Unset, int]): + exact (Union[Unset, bool]): + idp_user_id (Union[Unset, str]): + q (Union[Unset, str]): + email (Union[Unset, str]): + enabled (Union[Unset, bool]): + username (Union[Unset, str]): """ - brief_representation: Union[Unset, bool] = UNSET - enabled: Union[Unset, bool] = UNSET - search: Union[Unset, str] = UNSET - username: Union[Unset, str] = UNSET - email: Union[Unset, str] = UNSET - email_verified: Union[Unset, bool] = UNSET idp_alias: Union[Unset, str] = UNSET - idp_user_id: Union[Unset, str] = UNSET - q: Union[Unset, str] = UNSET - first_name: Union[Unset, str] = UNSET last_name: Union[Unset, str] = UNSET - exact: Union[Unset, bool] = UNSET + search: Union[Unset, str] = UNSET + email_verified: Union[Unset, bool] = UNSET first: Union[Unset, int] = UNSET + first_name: Union[Unset, str] = UNSET + brief_representation: Union[Unset, bool] = UNSET max_: Union[Unset, int] = UNSET + exact: Union[Unset, bool] = UNSET + idp_user_id: Union[Unset, str] = UNSET + q: Union[Unset, str] = UNSET + email: Union[Unset, str] = UNSET + enabled: Union[Unset, bool] = UNSET + username: Union[Unset, str] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - brief_representation = self.brief_representation - enabled = self.enabled - search = self.search - username = self.username - email = self.email - email_verified = self.email_verified idp_alias = self.idp_alias - idp_user_id = self.idp_user_id - q = self.q - first_name = self.first_name last_name = self.last_name - exact = self.exact + search = self.search + email_verified = self.email_verified first = self.first + first_name = self.first_name + brief_representation = self.brief_representation max_ = self.max_ + exact = self.exact + idp_user_id = self.idp_user_id + q = self.q + email = self.email + enabled = self.enabled + username = self.username field_dict: 
Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if brief_representation is not UNSET: - field_dict["briefRepresentation"] = brief_representation - if enabled is not UNSET: - field_dict["enabled"] = enabled - if search is not UNSET: - field_dict["search"] = search - if username is not UNSET: - field_dict["username"] = username - if email is not UNSET: - field_dict["email"] = email - if email_verified is not UNSET: - field_dict["emailVerified"] = email_verified if idp_alias is not UNSET: field_dict["idpAlias"] = idp_alias - if idp_user_id is not UNSET: - field_dict["idpUserId"] = idp_user_id - if q is not UNSET: - field_dict["q"] = q - if first_name is not UNSET: - field_dict["firstName"] = first_name if last_name is not UNSET: field_dict["lastName"] = last_name - if exact is not UNSET: - field_dict["exact"] = exact + if search is not UNSET: + field_dict["search"] = search + if email_verified is not UNSET: + field_dict["emailVerified"] = email_verified if first is not UNSET: field_dict["first"] = first + if first_name is not UNSET: + field_dict["firstName"] = first_name + if brief_representation is not UNSET: + field_dict["briefRepresentation"] = brief_representation if max_ is not UNSET: field_dict["max"] = max_ + if exact is not UNSET: + field_dict["exact"] = exact + if idp_user_id is not UNSET: + field_dict["idpUserId"] = idp_user_id + if q is not UNSET: + field_dict["q"] = q + if email is not UNSET: + field_dict["email"] = email + if enabled is not UNSET: + field_dict["enabled"] = enabled + if username is not UNSET: + field_dict["username"] = username return field_dict @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() - brief_representation = d.pop("briefRepresentation", UNSET) + idp_alias = d.pop("idpAlias", UNSET) - enabled = d.pop("enabled", UNSET) + last_name = d.pop("lastName", UNSET) search = d.pop("search", UNSET) - username = d.pop("username", UNSET) + email_verified = d.pop("emailVerified", UNSET) - email = d.pop("email", UNSET) + first = d.pop("first", UNSET) - email_verified = d.pop("emailVerified", UNSET) + first_name = d.pop("firstName", UNSET) - idp_alias = d.pop("idpAlias", UNSET) + brief_representation = d.pop("briefRepresentation", UNSET) - idp_user_id = d.pop("idpUserId", UNSET) + max_ = d.pop("max", UNSET) - q = d.pop("q", UNSET) + exact = d.pop("exact", UNSET) - first_name = d.pop("firstName", UNSET) + idp_user_id = d.pop("idpUserId", UNSET) - last_name = d.pop("lastName", UNSET) + q = d.pop("q", UNSET) - exact = d.pop("exact", UNSET) + email = d.pop("email", UNSET) - first = d.pop("first", UNSET) + enabled = d.pop("enabled", UNSET) - max_ = d.pop("max", UNSET) + username = d.pop("username", UNSET) user_list_query = cls( - brief_representation=brief_representation, - enabled=enabled, - search=search, - username=username, - email=email, - email_verified=email_verified, idp_alias=idp_alias, - idp_user_id=idp_user_id, - q=q, - first_name=first_name, last_name=last_name, - exact=exact, + search=search, + email_verified=email_verified, first=first, + first_name=first_name, + brief_representation=brief_representation, max_=max_, + exact=exact, + idp_user_id=idp_user_id, + q=q, + email=email, + enabled=enabled, + username=username, ) user_list_query.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/v_binned_aggregation.py b/src/tuneinsight/api/sdk/models/v_binned_aggregation.py index 9a59ac1..5f9eb86 100644 --- a/src/tuneinsight/api/sdk/models/v_binned_aggregation.py +++ 
b/src/tuneinsight/api/sdk/models/v_binned_aggregation.py @@ -21,91 +21,95 @@ class VBinnedAggregation: """ Attributes: type (ComputationType): Type of the computation. - join_id (Union[Unset, str]): Unique identifier of a data object. + local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured + the network) local_input (Union[Unset, LocalInput]): If a local input is provided, the node initiating the computation will use it instead of querying the datasource. This data is *not* shared to other nodes, only used for the duration of the computation. The local input columns/values must be in the form {: [, , ...], ...} - wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. - input_data_object (Union[Unset, str]): Shared identifier of a data object. + preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters + applied to the input retrieved from the datasource, if applicable cohort_id (Union[Unset, str]): Unique identifier of a data object. + end_to_end_encrypted (Union[Unset, bool]): if the end to end encrypted mode is set to true, + then when release results is set to true and the output + is initially encrypted with a network collective key, then it is key switched to + the initiating user's public key. + local_input_id (Union[Unset, str]): Unique identifier of a data object. + wait (Union[Unset, bool]): Whether to wait synchronously for the computation result. + dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various + disclosure prevention mechanisms + join_id (Union[Unset, str]): Unique identifier of a data object. + timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. + encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. + owner (Union[Unset, str]): The username of the end user who requested the computation. project_id (Union[Unset, str]): Unique identifier of a project. release_results (Union[Unset, bool]): flag to set to true if the computation should directly release the output results. If set, then encrypted results are automatically key switched and decrypted and a Result entity is saved - dp_policy (Union[Unset, DPPolicy]): represents the disclosure prevention policy that enables toggling various - disclosure prevention mechanisms - encrypted (Union[Unset, bool]): True if computation result should be encrypted with the collective public key. - local (Union[Unset, bool]): True if the project's computation should run only with local data (not configured - the network) - local_input_id (Union[Unset, str]): Unique identifier of a data object. - owner (Union[Unset, str]): The username of the end user who requested the computation. - preprocessing_parameters (Union[Unset, ComputationPreprocessingParameters]): dataframe pre-processing parameters - applied to the input retrieved from the datasource, if applicable - timeout (Union[Unset, int]): The maximum amount of time in seconds the computation is allowed to run. data_source_parameters (Union[Unset, ComputationDataSourceParameters]): Parameters used to query the datasource from each node before the computation - aggregation_column (Union[Unset, str]): the column on which to aggregate - binning_column (Union[Unset, str]): the column on which to bin the data + input_data_object (Union[Unset, str]): Shared identifier of a data object. 
binning_parameters (Union[Unset, BinningParameters]): parameters used to bin data identifiable_columns (Union[Unset, List[str]]): + aggregation_column (Union[Unset, str]): the column on which to aggregate + binning_column (Union[Unset, str]): the column on which to bin the data """ type: ComputationType - join_id: Union[Unset, str] = UNSET + local: Union[Unset, bool] = UNSET local_input: Union[Unset, "LocalInput"] = UNSET - wait: Union[Unset, bool] = UNSET - input_data_object: Union[Unset, str] = UNSET + preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET cohort_id: Union[Unset, str] = UNSET - project_id: Union[Unset, str] = UNSET - release_results: Union[Unset, bool] = UNSET + end_to_end_encrypted: Union[Unset, bool] = UNSET + local_input_id: Union[Unset, str] = UNSET + wait: Union[Unset, bool] = UNSET dp_policy: Union[Unset, "DPPolicy"] = UNSET + join_id: Union[Unset, str] = UNSET + timeout: Union[Unset, int] = UNSET encrypted: Union[Unset, bool] = UNSET - local: Union[Unset, bool] = UNSET - local_input_id: Union[Unset, str] = UNSET owner: Union[Unset, str] = UNSET - preprocessing_parameters: Union[Unset, "ComputationPreprocessingParameters"] = UNSET - timeout: Union[Unset, int] = UNSET + project_id: Union[Unset, str] = UNSET + release_results: Union[Unset, bool] = UNSET data_source_parameters: Union[Unset, "ComputationDataSourceParameters"] = UNSET - aggregation_column: Union[Unset, str] = UNSET - binning_column: Union[Unset, str] = UNSET + input_data_object: Union[Unset, str] = UNSET binning_parameters: Union[Unset, "BinningParameters"] = UNSET identifiable_columns: Union[Unset, List[str]] = UNSET + aggregation_column: Union[Unset, str] = UNSET + binning_column: Union[Unset, str] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: type = self.type.value - join_id = self.join_id + local = self.local local_input: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.local_input, Unset): local_input = self.local_input.to_dict() - wait = self.wait - input_data_object = self.input_data_object + preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.preprocessing_parameters, Unset): + preprocessing_parameters = self.preprocessing_parameters.to_dict() + cohort_id = self.cohort_id - project_id = self.project_id - release_results = self.release_results + end_to_end_encrypted = self.end_to_end_encrypted + local_input_id = self.local_input_id + wait = self.wait dp_policy: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.dp_policy, Unset): dp_policy = self.dp_policy.to_dict() + join_id = self.join_id + timeout = self.timeout encrypted = self.encrypted - local = self.local - local_input_id = self.local_input_id owner = self.owner - preprocessing_parameters: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.preprocessing_parameters, Unset): - preprocessing_parameters = self.preprocessing_parameters.to_dict() - - timeout = self.timeout + project_id = self.project_id + release_results = self.release_results data_source_parameters: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.data_source_parameters, Unset): data_source_parameters = self.data_source_parameters.to_dict() - aggregation_column = self.aggregation_column - binning_column = self.binning_column + input_data_object = self.input_data_object binning_parameters: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.binning_parameters, Unset): binning_parameters = 
self.binning_parameters.to_dict() @@ -114,6 +118,9 @@ def to_dict(self) -> Dict[str, Any]: if not isinstance(self.identifiable_columns, Unset): identifiable_columns = self.identifiable_columns + aggregation_column = self.aggregation_column + binning_column = self.binning_column + field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update( @@ -121,44 +128,46 @@ def to_dict(self) -> Dict[str, Any]: "type": type, } ) - if join_id is not UNSET: - field_dict["joinId"] = join_id + if local is not UNSET: + field_dict["local"] = local if local_input is not UNSET: field_dict["localInput"] = local_input - if wait is not UNSET: - field_dict["wait"] = wait - if input_data_object is not UNSET: - field_dict["inputDataObject"] = input_data_object + if preprocessing_parameters is not UNSET: + field_dict["preprocessingParameters"] = preprocessing_parameters if cohort_id is not UNSET: field_dict["cohortId"] = cohort_id - if project_id is not UNSET: - field_dict["projectId"] = project_id - if release_results is not UNSET: - field_dict["releaseResults"] = release_results + if end_to_end_encrypted is not UNSET: + field_dict["endToEndEncrypted"] = end_to_end_encrypted + if local_input_id is not UNSET: + field_dict["localInputID"] = local_input_id + if wait is not UNSET: + field_dict["wait"] = wait if dp_policy is not UNSET: field_dict["DPPolicy"] = dp_policy + if join_id is not UNSET: + field_dict["joinId"] = join_id + if timeout is not UNSET: + field_dict["timeout"] = timeout if encrypted is not UNSET: field_dict["encrypted"] = encrypted - if local is not UNSET: - field_dict["local"] = local - if local_input_id is not UNSET: - field_dict["localInputID"] = local_input_id if owner is not UNSET: field_dict["owner"] = owner - if preprocessing_parameters is not UNSET: - field_dict["preprocessingParameters"] = preprocessing_parameters - if timeout is not UNSET: - field_dict["timeout"] = timeout + if project_id is not UNSET: + field_dict["projectId"] = project_id + if release_results is not UNSET: + field_dict["releaseResults"] = release_results if data_source_parameters is not UNSET: field_dict["dataSourceParameters"] = data_source_parameters - if aggregation_column is not UNSET: - field_dict["aggregationColumn"] = aggregation_column - if binning_column is not UNSET: - field_dict["binningColumn"] = binning_column + if input_data_object is not UNSET: + field_dict["inputDataObject"] = input_data_object if binning_parameters is not UNSET: field_dict["binningParameters"] = binning_parameters if identifiable_columns is not UNSET: field_dict["identifiableColumns"] = identifiable_columns + if aggregation_column is not UNSET: + field_dict["aggregationColumn"] = aggregation_column + if binning_column is not UNSET: + field_dict["binningColumn"] = binning_column return field_dict @@ -173,7 +182,7 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() type = ComputationType(d.pop("type")) - join_id = d.pop("joinId", UNSET) + local = d.pop("local", UNSET) _local_input = d.pop("localInput", UNSET) local_input: Union[Unset, LocalInput] @@ -182,15 +191,20 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: local_input = LocalInput.from_dict(_local_input) - wait = d.pop("wait", UNSET) - - input_data_object = d.pop("inputDataObject", UNSET) + _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) + preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] + if isinstance(_preprocessing_parameters, Unset): + 
preprocessing_parameters = UNSET + else: + preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) cohort_id = d.pop("cohortId", UNSET) - project_id = d.pop("projectId", UNSET) + end_to_end_encrypted = d.pop("endToEndEncrypted", UNSET) - release_results = d.pop("releaseResults", UNSET) + local_input_id = d.pop("localInputID", UNSET) + + wait = d.pop("wait", UNSET) _dp_policy = d.pop("DPPolicy", UNSET) dp_policy: Union[Unset, DPPolicy] @@ -199,22 +213,17 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: dp_policy = DPPolicy.from_dict(_dp_policy) - encrypted = d.pop("encrypted", UNSET) + join_id = d.pop("joinId", UNSET) - local = d.pop("local", UNSET) + timeout = d.pop("timeout", UNSET) - local_input_id = d.pop("localInputID", UNSET) + encrypted = d.pop("encrypted", UNSET) owner = d.pop("owner", UNSET) - _preprocessing_parameters = d.pop("preprocessingParameters", UNSET) - preprocessing_parameters: Union[Unset, ComputationPreprocessingParameters] - if isinstance(_preprocessing_parameters, Unset): - preprocessing_parameters = UNSET - else: - preprocessing_parameters = ComputationPreprocessingParameters.from_dict(_preprocessing_parameters) + project_id = d.pop("projectId", UNSET) - timeout = d.pop("timeout", UNSET) + release_results = d.pop("releaseResults", UNSET) _data_source_parameters = d.pop("dataSourceParameters", UNSET) data_source_parameters: Union[Unset, ComputationDataSourceParameters] @@ -223,9 +232,7 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: data_source_parameters = ComputationDataSourceParameters.from_dict(_data_source_parameters) - aggregation_column = d.pop("aggregationColumn", UNSET) - - binning_column = d.pop("binningColumn", UNSET) + input_data_object = d.pop("inputDataObject", UNSET) _binning_parameters = d.pop("binningParameters", UNSET) binning_parameters: Union[Unset, BinningParameters] @@ -236,27 +243,32 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: identifiable_columns = cast(List[str], d.pop("identifiableColumns", UNSET)) + aggregation_column = d.pop("aggregationColumn", UNSET) + + binning_column = d.pop("binningColumn", UNSET) + v_binned_aggregation = cls( type=type, - join_id=join_id, + local=local, local_input=local_input, - wait=wait, - input_data_object=input_data_object, + preprocessing_parameters=preprocessing_parameters, cohort_id=cohort_id, - project_id=project_id, - release_results=release_results, + end_to_end_encrypted=end_to_end_encrypted, + local_input_id=local_input_id, + wait=wait, dp_policy=dp_policy, + join_id=join_id, + timeout=timeout, encrypted=encrypted, - local=local, - local_input_id=local_input_id, owner=owner, - preprocessing_parameters=preprocessing_parameters, - timeout=timeout, + project_id=project_id, + release_results=release_results, data_source_parameters=data_source_parameters, - aggregation_column=aggregation_column, - binning_column=binning_column, + input_data_object=input_data_object, binning_parameters=binning_parameters, identifiable_columns=identifiable_columns, + aggregation_column=aggregation_column, + binning_column=binning_column, ) v_binned_aggregation.additional_properties = d diff --git a/src/tuneinsight/api/sdk/models/workflow_item.py b/src/tuneinsight/api/sdk/models/workflow_item.py index 4c85498..7cd2a66 100644 --- a/src/tuneinsight/api/sdk/models/workflow_item.py +++ b/src/tuneinsight/api/sdk/models/workflow_item.py @@ -16,36 +16,32 @@ class WorkflowItem: """ Attributes: - target_handle (Union[Unset, str]): not 
used - UI specific - type (Union[Unset, str]): id (Union[Unset, str]): progress (Union[Unset, int]): + type (Union[Unset, str]): + data (Union[Unset, WorkflowItemData]): + position (Union[Unset, WorkflowItemPosition]): source (Union[Unset, str]): not used - UI specific source_handle (Union[Unset, str]): not used - UI specific target (Union[Unset, str]): not used - UI specific - data (Union[Unset, WorkflowItemData]): - position (Union[Unset, WorkflowItemPosition]): + target_handle (Union[Unset, str]): not used - UI specific """ - target_handle: Union[Unset, str] = UNSET - type: Union[Unset, str] = UNSET id: Union[Unset, str] = UNSET progress: Union[Unset, int] = UNSET + type: Union[Unset, str] = UNSET + data: Union[Unset, "WorkflowItemData"] = UNSET + position: Union[Unset, "WorkflowItemPosition"] = UNSET source: Union[Unset, str] = UNSET source_handle: Union[Unset, str] = UNSET target: Union[Unset, str] = UNSET - data: Union[Unset, "WorkflowItemData"] = UNSET - position: Union[Unset, "WorkflowItemPosition"] = UNSET + target_handle: Union[Unset, str] = UNSET additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - target_handle = self.target_handle - type = self.type id = self.id progress = self.progress - source = self.source - source_handle = self.source_handle - target = self.target + type = self.type data: Union[Unset, Dict[str, Any]] = UNSET if not isinstance(self.data, Unset): data = self.data.to_dict() @@ -54,27 +50,32 @@ def to_dict(self) -> Dict[str, Any]: if not isinstance(self.position, Unset): position = self.position.to_dict() + source = self.source + source_handle = self.source_handle + target = self.target + target_handle = self.target_handle + field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) - if target_handle is not UNSET: - field_dict["targetHandle"] = target_handle - if type is not UNSET: - field_dict["type"] = type if id is not UNSET: field_dict["id"] = id if progress is not UNSET: field_dict["progress"] = progress + if type is not UNSET: + field_dict["type"] = type + if data is not UNSET: + field_dict["data"] = data + if position is not UNSET: + field_dict["position"] = position if source is not UNSET: field_dict["source"] = source if source_handle is not UNSET: field_dict["sourceHandle"] = source_handle if target is not UNSET: field_dict["target"] = target - if data is not UNSET: - field_dict["data"] = data - if position is not UNSET: - field_dict["position"] = position + if target_handle is not UNSET: + field_dict["targetHandle"] = target_handle return field_dict @@ -84,19 +85,11 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.workflow_item_position import WorkflowItemPosition d = src_dict.copy() - target_handle = d.pop("targetHandle", UNSET) - - type = d.pop("type", UNSET) - id = d.pop("id", UNSET) progress = d.pop("progress", UNSET) - source = d.pop("source", UNSET) - - source_handle = d.pop("sourceHandle", UNSET) - - target = d.pop("target", UNSET) + type = d.pop("type", UNSET) _data = d.pop("data", UNSET) data: Union[Unset, WorkflowItemData] @@ -112,16 +105,24 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: else: position = WorkflowItemPosition.from_dict(_position) + source = d.pop("source", UNSET) + + source_handle = d.pop("sourceHandle", UNSET) + + target = d.pop("target", UNSET) + + target_handle = d.pop("targetHandle", UNSET) + workflow_item = cls( - target_handle=target_handle, - type=type, id=id, 
progress=progress, + type=type, + data=data, + position=position, source=source, source_handle=source_handle, target=target, - data=data, - position=position, + target_handle=target_handle, ) workflow_item.additional_properties = d diff --git a/src/tuneinsight/client/auth.py b/src/tuneinsight/client/auth.py index 6aac892..0563894 100644 --- a/src/tuneinsight/client/auth.py +++ b/src/tuneinsight/client/auth.py @@ -1,4 +1,6 @@ from typing import Dict + + from time import time from ast import literal_eval from attr import define @@ -16,35 +18,56 @@ class KeycloakClient(client.AuthenticatedClient): oidc_config: config.OIDCConfiguration username: str password: str - device_code: str = '' + device_code: str = "" tokens: dict = {} kc_open_id: KeycloakOpenID = None token_timeout: float = 0 refresh_token_timeout: float = 0 refresh_delay_seconds: float = 10 + verify_ssl: bool = True + proxies: dict = {"http": "", "https": ""} def __attrs_post_init__(self): + self.kc_open_id = KeycloakOpenID( + server_url=self.oidc_config.oidc_url, + client_id=self.oidc_config.oidc_client_id, + client_secret_key=self.oidc_config.oidc_client_secret, + realm_name=self.oidc_config.oidc_realm, + ) - self.kc_open_id = KeycloakOpenID(server_url=self.oidc_config.oidc_url, - client_id=self.oidc_config.oidc_client_id, - client_secret_key=self.oidc_config.oidc_client_secret, - realm_name=self.oidc_config.oidc_realm) + self.kc_open_id = KeycloakOpenID( + server_url=self.oidc_config.oidc_url, + client_id=self.oidc_config.oidc_client_id, + client_secret_key=self.oidc_config.oidc_client_secret, + realm_name=self.oidc_config.oidc_realm, + verify=self.verify_ssl, + proxies=self.proxies, + ) - def update_tokens(self,tokens): + def update_tokens(self, tokens): self.tokens = tokens - self.token_timeout = time() + float(self.tokens['expires_in']) - self.refresh_delay_seconds - self.refresh_token_timeout = time() + float(self.tokens['refresh_expires_in']) - self.refresh_delay_seconds + self.token_timeout = ( + time() + float(self.tokens["expires_in"]) - self.refresh_delay_seconds + ) + self.refresh_token_timeout = ( + time() + + float(self.tokens["refresh_expires_in"]) + - self.refresh_delay_seconds + ) def get_token(self) -> dict: - # If a oidc_client_secret is provided, use client credentials flow for service accounts if self.oidc_config.oidc_client_secret != "": self.tokens = self.kc_open_id.token( - self.username, grant_type="client_credentials") + self.username, grant_type="client_credentials" + ) # If a device_code is provided, use the device authorization grant flow - elif self.device_code !='': + elif self.device_code != "": self.tokens = self.kc_open_id.token( - self.username, grant_type="urn:ietf:params:oauth:grant-type:device_code", device_code=self.device_code) + self.username, + grant_type="urn:ietf:params:oauth:grant-type:device_code", + device_code=self.device_code, + ) else: # Otherwise, use password flow for user accounts self.tokens = self.kc_open_id.token(self.username, self.password) @@ -58,12 +81,17 @@ def get_device_code(self) -> dict: "client_id": self.kc_open_id.client_id, } - url = self.oidc_config.oidc_url + 'realms/' + self.oidc_config.oidc_realm + '/protocol/openid-connect/auth/device' + url = ( + self.oidc_config.oidc_url + + "realms/" + + self.oidc_config.oidc_realm + + "/protocol/openid-connect/auth/device" + ) resp = self.kc_open_id.connection.raw_post(url, payload) if resp.status_code in [200, 201, 204]: decoded_resp = literal_eval(resp.content.decode()) - self.device_code = decoded_resp['device_code'] + 
self.device_code = decoded_resp["device_code"] # return literal_eval(resp.content.decode()) return raise_error_from_response(resp, KeycloakError) @@ -76,4 +104,7 @@ def get_headers(self) -> Dict[str, str]: self.get_token() else: self.refresh_token() - return {"Authorization": f"Bearer {self.tokens['access_token']}", **self.headers} + return { + "Authorization": f"Bearer {self.tokens['access_token']}", + **self.headers, + } diff --git a/src/tuneinsight/client/computations.py b/src/tuneinsight/client/computations.py index cbed549..2586c48 100644 --- a/src/tuneinsight/client/computations.py +++ b/src/tuneinsight/client/computations.py @@ -1,10 +1,15 @@ from typing import Any, List, Callable, TypeVar +import pandas as pd from IPython.display import display, Markdown from tuneinsight.api.sdk import models from tuneinsight.api.sdk import Client from tuneinsight.api.sdk.types import UNSET -from tuneinsight.api.sdk.types import Response -from tuneinsight.api.sdk.api.api_computations import compute, get_computation, documentation +from tuneinsight.api.sdk.types import Response +from tuneinsight.api.sdk.api.api_computations import ( + compute, + get_computation, + documentation, +) from tuneinsight.api.sdk.api.api_dataobject import get_data_object from tuneinsight.computations.queries import QueryBuilder from tuneinsight.computations.preprocessing import PreprocessingBuilder @@ -13,11 +18,14 @@ from tuneinsight.computations.errors import raise_computation_error import tuneinsight.utils.time_tools as time -ComputationLauncher = TypeVar("ComputationLauncher",bound=Callable[[models.ComputationDefinition,bool],models.Computation]) +ComputationLauncher = TypeVar( + "ComputationLauncher", + bound=Callable[[models.ComputationDefinition, bool], models.Computation], +) -# @attr.s(auto_attribs=True) -class ComputationRunner(): +# @attr.s(auto_attribs=True) +class ComputationRunner: client: Client project_id: str preprocessing: PreprocessingBuilder = None @@ -26,9 +34,9 @@ class ComputationRunner(): polling_initial_interval: int max_sleep_time: int recorded_computations: List[models.Computation] + local_input: models.LocalInput - - def __init__(self, project_id:str = "", client:Client = UNSET): + def __init__(self, project_id: str = "", client: Client = UNSET): self.client = client self.project_id = project_id self.preprocessing = PreprocessingBuilder() @@ -37,6 +45,7 @@ def __init__(self, project_id:str = "", client:Client = UNSET): self.polling_initial_interval = 100 * time.millisecond self.max_sleep_time = 30 * time.second self.recorded_computations = [] + self.local_input = None @staticmethod def field_is_set(field: Any) -> bool: @@ -44,9 +53,12 @@ def field_is_set(field: Any) -> bool: return False return True - - def update_computation_input(self,comp: models.ComputationDefinition): - if comp.type in [models.ComputationType.COLLECTIVEKEYSWITCH, models.ComputationType.ENCRYPTEDPREDICTION, models.ComputationType.PRIVATESEARCH]: + def update_computation_input(self, comp: models.ComputationDefinition): + if comp.type in [ + models.ComputationType.COLLECTIVEKEYSWITCH, + models.ComputationType.ENCRYPTEDPREDICTION, + models.ComputationType.PRIVATESEARCH, + ]: return if self.datasource.query_set: comp.data_source_parameters = self.datasource.get_parameters() @@ -54,38 +66,46 @@ def update_computation_input(self,comp: models.ComputationDefinition): if not self.field_is_set(comp.input_data_object): comp.data_source_parameters = models.ComputationDataSourceParameters() - - def update_computation_fields(self,comp: 
models.ComputationDefinition): + def update_computation_fields(self, comp: models.ComputationDefinition): comp.wait = False if not self.field_is_set(comp.project_id): comp.project_id = self.project_id comp.timeout = int(self.max_timeout / time.second) self.update_computation_input(comp=comp) + if self.local_input is not None: + comp.local_input = self.local_input def display_documentation(self, comp: models.ComputationDefinition): - ''' + """ display_documentation displays the documentation given a computation definition this first overrides the fields accordingly before calling POST documentation Args: comp (models.ComputationDefinition): the computation definition - ''' + """ self.post_preprocessing(comp) self.update_computation_fields(comp) - response : Response[models.DocumentationResponse200] = documentation.sync_detailed(client=self.client,json_body=comp) + response: Response[models.DocumentationResponse200] = ( + documentation.sync_detailed(client=self.client, json_body=comp) + ) validate_response(response) display(Markdown(response.parsed.description)) @staticmethod def is_done(comp: models.Computation) -> bool: - waiting = comp.status not in (models.ComputationStatus.ERROR, models.ComputationStatus.SUCCESS) + waiting = comp.status not in ( + models.ComputationStatus.ERROR, + models.ComputationStatus.SUCCESS, + ) return not waiting - def refresh(self,comp: models.Computation) -> models.Computation: - response: Response[models.Computation] = get_computation.sync_detailed(client=self.client,computation_id=comp.id) + def refresh(self, comp: models.Computation) -> models.Computation: + response: Response[models.Computation] = get_computation.sync_detailed( + client=self.client, computation_id=comp.id + ) validate_response(response) return response.parsed - def poll_computation(self,comp: models.Computation) -> List[DataObject]: + def poll_computation(self, comp: models.Computation) -> List[DataObject]: # define initial sleeping time and start time timeStart = time.now() sleep_time = self.polling_initial_interval @@ -101,7 +121,9 @@ def poll_computation(self,comp: models.Computation) -> List[DataObject]: sleep_time = int(sleep_time * 1.05) # Raise an exception if there is an error - if (current_comp.status == models.ComputationStatus.ERROR) or (len(comp.errors) > 0): + if (current_comp.status == models.ComputationStatus.ERROR) or ( + len(comp.errors) > 0 + ): raise_computation_error(current_comp.errors) if len(current_comp.results) < 1: @@ -110,23 +132,32 @@ def poll_computation(self,comp: models.Computation) -> List[DataObject]: # Update recorded computation self.recorded_computations.append(current_comp) # Get Result models - results : List[DataObject] = [] + results: List[DataObject] = [] for doID in current_comp.results: - response: Response[models.DataObject] = get_data_object.sync_detailed(client=self.client,data_object_id=doID) + response: Response[models.DataObject] = get_data_object.sync_detailed( + client=self.client, data_object_id=doID + ) validate_response(response) - results.append(DataObject(model=response.parsed,client=self.client)) + results.append(DataObject(model=response.parsed, client=self.client)) return results - def key_switch(self,dataObject: DataObject) -> DataObject: - ksDef = models.CollectiveKeySwitch(type=models.ComputationType.COLLECTIVEKEYSWITCH,cipher_vector=dataObject.get_id(),local=False) - computation = self.launch_computation(comp=ksDef,local=False) + def key_switch(self, dataObject: DataObject) -> DataObject: + ksDef = models.CollectiveKeySwitch( + 
type=models.ComputationType.COLLECTIVEKEYSWITCH, + cipher_vector=dataObject.get_id(), + local=False, + ) + computation = self.launch_computation(comp=ksDef, local=False) return self.poll_computation(computation)[0] - - def launch_computation(self,comp:models.ComputationDefinition,local:bool=False) -> models.Computation: + def launch_computation( + self, comp: models.ComputationDefinition, local: bool = False + ) -> models.Computation: comp.local = local self.update_computation_fields(comp=comp) - response : Response[models.Computation] = compute.sync_detailed(client=self.client,json_body=comp) + response: Response[models.Computation] = compute.sync_detailed( + client=self.client, json_body=comp + ) validate_response(response) return response.parsed @@ -137,7 +168,14 @@ def post_preprocessing(self, comp: models.ComputationDefinition): comp.preprocessing_parameters = models.ComputationPreprocessingParameters() comp.preprocessing_parameters = self.preprocessing.get_params() - def run_computation(self,comp: models.ComputationDefinition, local: bool=False, keyswitch: bool=True, decrypt: bool=True,release: bool = False) -> List[DataObject]: + def run_computation( + self, + comp: models.ComputationDefinition, + local: bool = False, + keyswitch: bool = True, + decrypt: bool = True, + release: bool = False, + ) -> List[DataObject]: """ Runs a computation using the given ComputationDefinition object. @@ -160,12 +198,28 @@ def run_computation(self,comp: models.ComputationDefinition, local: bool=False, decrypt = False keyswitch = False - computation = self.launch_computation(comp,local=local) + computation = self.launch_computation(comp, local=local) results = self.poll_computation(comp=computation) if keyswitch: - for i,dataobject in enumerate(results): + for i, dataobject in enumerate(results): results[i] = self.key_switch(dataobject) if decrypt: results[i] = results[i].decrypt() return results + + def set_local_input(self, df: pd.DataFrame): + """ + set_local_input sets the local user-provided plaintext input to the computation (only shared with the requesting instance) + + Args: + df (pd.DataFrame): the dataframe to use as a local input + """ + cols = df.columns + local_input = models.LocalInput() + for col in cols: + col_list = df[col].to_list() + for i, v in enumerate(col_list): + col_list[i] = str(v) + local_input.additional_properties[str(col)] = col_list + self.local_input = local_input diff --git a/src/tuneinsight/client/config.py b/src/tuneinsight/client/config.py index b494b33..c89510e 100644 --- a/src/tuneinsight/client/config.py +++ b/src/tuneinsight/client/config.py @@ -6,7 +6,7 @@ def to_dict(obj): - if not hasattr(obj,"__dict__"): + if not hasattr(obj, "__dict__"): return obj result = {} for key, val in obj.__dict__.items(): @@ -21,21 +21,37 @@ def to_dict(obj): result[key] = element return result + class OIDCConfiguration: oidc_client_id: str oidc_client_secret: str oidc_url: str oidc_realm: str - def __init__(self, oidc_client_id: str, oidc_client_secret: str, oidc_url: str, oidc_realm: str): + def __init__( + self, + oidc_client_id: str, + oidc_client_secret: str, + oidc_url: str, + oidc_realm: str, + ): self.oidc_client_id = "python-sdk" if oidc_client_id is None else oidc_client_id - self.oidc_client_secret = "" if oidc_client_secret is None else oidc_client_secret - self.oidc_url = "https://auth.tuneinsight.com/auth/" if oidc_url is None else oidc_url + self.oidc_client_secret = ( + "" if oidc_client_secret is None else oidc_client_secret + ) + self.oidc_url = ( + 
"https://auth.tuneinsight.com/auth/" if oidc_url is None else oidc_url + ) self.oidc_realm = "ti-realm" if oidc_realm is None else oidc_realm @staticmethod def from_json(json_dct): - return OIDCConfiguration(json_dct.get('oidc_client_id'), json_dct.get('oidc_client_secret'), json_dct.get('oidc_url'), json_dct.get('oidc_realm')) + return OIDCConfiguration( + json_dct.get("oidc_client_id"), + json_dct.get("oidc_client_secret"), + json_dct.get("oidc_url"), + json_dct.get("oidc_realm"), + ) class Security: @@ -45,46 +61,93 @@ class Security: verify_ssl: bool oidc_config: OIDCConfiguration - def __init__(self, oidc_config: OIDCConfiguration, static_token: str, username: str, password: str, verify_ssl: bool): + def __init__( + self, + oidc_config: OIDCConfiguration, + static_token: str, + username: str, + password: str, + verify_ssl: bool, + ): self.verify_ssl = True if verify_ssl is None else verify_ssl - self.static_token = '' if static_token is None else static_token - self.username = '' if username is None else username - self.password = '' if password is None else password + self.static_token = "" if static_token is None else static_token + self.username = "" if username is None else username + self.password = "" if password is None else password self.oidc_config = oidc_config @staticmethod def from_json(json_dct): - oidc_config = OIDCConfiguration.from_json(json_dct.get('oidc_config')) - return Security(oidc_config, json_dct.get('static_token'), json_dct.get('username'), json_dct.get('password'), json_dct.get('verify_ssl')) + oidc_config = OIDCConfiguration.from_json(json_dct.get("oidc_config")) + return Security( + oidc_config, + json_dct.get("static_token"), + json_dct.get("username"), + json_dct.get("password"), + json_dct.get("verify_ssl"), + ) class Client: url: str security: Security - - def __init__(self, url, security): + http_proxy: str + https_proxy: str + + def __init__(self, url, security, http_proxy: str = None, https_proxy: str = None): + """ + Initialize a client. + + Args: + url (str): The URL of the Tune Insight API + security (str): The security settings of the configuration. + http_proxy (str, optional): The HTTP proxy to be used. Defaults to None. + https_proxy (str, optional): The HTTPS proxy to be used. Defaults to None. + """ self.url = url self.security = security + self.http_proxy = http_proxy + self.https_proxy = https_proxy - def save(self,filepath: str): - with open(filepath,"w",encoding='utf-8') as f: + def save(self, filepath: str): + with open(filepath, "w", encoding="utf-8") as f: res = to_dict(self) - yaml.safe_dump(res,f) + yaml.safe_dump(res, f) @staticmethod def from_json(json_dct): - security = Security.from_json(json_dct.get('security')) - return Client(json_dct.get('url'), security) + """ + Create a Client object from a JSON dictionary. + + Args: + json_dct (dict): The JSON dictionary containing the client configuration. + + Returns: + Client: The created Client object. 
+ """ + security = Security.from_json(json_dct.get("security")) + client = Client(json_dct.get("url"), security) + client.http_proxy = ( + json_dct.get("http_proxy") + if json_dct.get("http_proxy") is not None + else client.http_proxy + ) + client.https_proxy = ( + json_dct.get("https_proxy") + if json_dct.get("https_proxy") is not None + else client.https_proxy + ) + return client + def LoadClient(filepath: str) -> Client: - with open(filepath,encoding='utf-8') as f: + with open(filepath, encoding="utf-8") as f: dm = yaml.safe_load(f) dumped = json.dumps(dm) client = Client.from_json(json.loads(dumped)) return client -def LoadEnvClient(envpath: str = None) -> Client: +def LoadEnvClient(envpath: str = None) -> Client: if envpath is not None: # Verify that the file exists if not os.path.exists(envpath): @@ -95,24 +158,42 @@ def LoadEnvClient(envpath: str = None) -> Client: raise Exception("No environment variable found") # Verify that the environment variables are set - if os.getenv('NODE_URL') is None: + if os.getenv("NODE_URL") is None: raise Exception("Missing environments: NODE_URL is not set") - if os.getenv('TI_USERNAME') is None and os.getenv('TI_PASSWORD') is None and os.getenv('TI_STATIC_TOKEN') is None: - raise Exception("Missing environments: need to set either TI_USERNAME and TI_PASSWORD or TI_STATIC_TOKEN") + if ( + os.getenv("TI_USERNAME") is None + and os.getenv("TI_PASSWORD") is None + and os.getenv("TI_STATIC_TOKEN") is None + ): + raise Exception( + "Missing environments: need to set either TI_USERNAME and TI_PASSWORD or TI_STATIC_TOKEN" + ) oidc_config = OIDCConfiguration( - oidc_url=os.getenv('OIDC_URL'), - oidc_realm=os.getenv('OIDC_REALM'), - oidc_client_id=os.getenv('OIDC_CLIENT_ID'), - oidc_client_secret=os.getenv('OIDC_CLIENT_SECRET'), + oidc_url=os.getenv("OIDC_URL"), + oidc_realm=os.getenv("OIDC_REALM"), + oidc_client_id=os.getenv("OIDC_CLIENT_ID"), + oidc_client_secret=os.getenv("OIDC_CLIENT_SECRET"), ) security_config = Security( - username=os.getenv('TI_USERNAME'), - password=os.getenv('TI_PASSWORD'), - static_token=os.getenv('TI_STATIC_TOKEN'), - verify_ssl=literal_eval(os.getenv('TI_VERIFY_SSL')), - oidc_config=oidc_config + username=os.getenv("TI_USERNAME"), + password=os.getenv("TI_PASSWORD"), + static_token=os.getenv("TI_STATIC_TOKEN"), + verify_ssl=literal_eval(os.getenv("TI_VERIFY_SSL")), + oidc_config=oidc_config, + ) + client = Client(url=os.getenv("NODE_URL"), security=security_config) + + client.http_proxy = ( + os.getenv("HTTP_PROXY") + if os.getenv("HTTP_PROXY") is not None + else client.http_proxy ) - client = Client(url=os.getenv('NODE_URL'), security=security_config) + client.https_proxy = ( + os.getenv("HTTPS_PROXY") + if os.getenv("HTTPS_PROXY") is not None + else client.https_proxy + ) + return client diff --git a/src/tuneinsight/client/datagen.py b/src/tuneinsight/client/datagen.py new file mode 100644 index 0000000..b86a10c --- /dev/null +++ b/src/tuneinsight/client/datagen.py @@ -0,0 +1,378 @@ +"""Module to generate mock datasets in the Tune Insight instance.""" + +import json + +from enum import Enum + +import pandas as pd + +from tuneinsight.utils.code import get_code +from tuneinsight.api.sdk.types import Response +from tuneinsight.api.sdk import Client + +from tuneinsight.api.sdk.api.api_datagen import post_mock_dataset +from tuneinsight.api.sdk.models.post_mock_dataset_method import PostMockDatasetMethod + +from tuneinsight.client.validation import validate_response +from tuneinsight.client.datasource import DataSource + + +class 
MockGenerator: + """Generic class for a Mock generator. Use method-specific subclasses instead.""" + + def __init__(self, method: PostMockDatasetMethod): + self.method = method + self.datasource = None + + def get_config(self) -> dict: + """Get the optional configuration for this generator, to be sent in request body as JSON.""" + return {} + + def generate( + self, client: Client, num_rows: int, table_name: str = None, seed: str = None + ) -> DataSource: + """ + Generate a mock dataset. + + Args: + client (Client): Diapason client instance to connect to the server. + num_rows (int): number of records to generate. + table_name (str, optional): name of the database table to generate. + seed (str, optional): seed of the pseudo-random number generator. + + Raises: + httpx.TimeoutException: If the request takes longer than Client.timeout. + InvalidResponseError: If the request to the server fails. + + """ + config: str = json.dumps(self.get_config()) + response: Response = post_mock_dataset.sync_detailed( + client=client, + json_body=config, + method=self.method, + name=table_name or f"mock_{self.method}", + numrows=num_rows, + seed=seed, + ) + validate_response(response=response) + # The response contains the description of the datasource created by the call. + self.datasource = DataSource(model=response.parsed, client=client) + return self.datasource + + @property + def df(self) -> pd.DataFrame: + """Download the last dataframe generated by this generator.""" + if self.datasource is None: + raise RuntimeError("No Dataframe found. Run the generator first.") + return self.datasource.get_dataframe( + query=f"SELECT * FROM {self.datasource.model.name}" + ) + + +class AlertsGenerator(MockGenerator): + """Mock Alerts generation. + + This produces mock alerts from a network monitoring system. This has the attributes: + - src_ip: source IP address (str). + - dst_ip: destination IP address (str). + - protocol: one of TCP, UDP or ICMP (str). + - fingerprint: a SHA hash (str). + - type: the attack type (str). Also 1-hot encoded. + - severity: one of low, medium, high or critical (str). Also 1-hot encoded. + + """ + + def __init__(self): + MockGenerator.__init__(self, PostMockDatasetMethod.ALERTS) + + +class PatientsGenerator(MockGenerator): + """Mock Patients generation. + + This produces mock simple patients records that can be used for survival analysis. + The data has the following attributes: + - patient_no: a pseudorandom identifier (str). + - age: patient age (int). + - sex: one of "female" or "male" (str). + - height: patient height in cm (float). + - weight: patient weight in kg (float). + - observation: a disease/symptom, or empty string (str). + - treatments: treatment received by the patient, or empty string (str). + - diagnosis: timestamp at which an observation was recorded (timestamp). + - death: timestamp at which the patient died or NaT if not dead (timestamp). + + """ + + def __init__(self): + MockGenerator.__init__(self, PostMockDatasetMethod.PATIENTS) + + +class NeurologyObservationsGenerator(MockGenerator): + """Mock Neurology Observations Generation + + This produces mock neurological observations for patients. It has the attributes: + - patient_id: a pseudorandom identifier (str). + - disease_type: a neurological disease (str, 5 possible values). + - diagnosis_dt: time of diagnosis (timestamp). + - birthdate: birth date of the patient (timestamp). + - pain_score: float between 0 and 11, or empty string (str). + - mri_anomaly_detected: one of "NaN", "yes" or "no" (str). 
+ - surgery_required: one of "yes", "no", "" (str). + + """ + + def __init__(self): + MockGenerator.__init__(self, PostMockDatasetMethod.NEUROLOGY_OBSERVATIONS) + + +class PricesGenerator(MockGenerator): + """Mock Prices Generation + + This produces a dataset of mock prices for commodities at a given time. + - commodity (str): what is being sold. + - delivery_start, delivery_end (date): period of delivery for this commodity. + - currency (str): currency in which the price is expressed. + - currency_multiplier (float): conversion rate to USD for the currency. + - unit (str): unit in which the quantity is expressed. + - unit_multiplier (float): conversion from the unit to a standard unit. + - contributor (str): who is selling. + - price (float): price per unit at which the commodity is sold. + + """ + + def __init__(self): + MockGenerator.__init__(self, PostMockDatasetMethod.PRICES) + + +class SKUGenerator(MockGenerator): + """Mock SKU Generation + + This produces mock stock keeping units (grocery products) with the following attributes: + - sku_number: pseudorandom numerical value that is generated from the name of the product + - product_name: name of the product. + - product_type: type of product (spaghetti,chocolate etc...) + - category: higher level category for the product (meat,drinks,dairy etc...) + - manufacturer: name of the manufacturer. + - supplier: name of the supplier. + - price: price of individual product. + - quantity: quantity produced for the current timestamp. + - timestamp: the current timestamp. + + """ + + def __init__(self): + MockGenerator.__init__(self, PostMockDatasetMethod.SKUS) + + +class PersonsGenerator(MockGenerator): + """Mock Persons Generation + + This produces a mock dataset representing individuals with the following attributes: + - name: full name of the person. + - age: age of the person. + - country: country in which the person lives. + + """ + + def __init__(self): + MockGenerator.__init__(self, PostMockDatasetMethod.PERSONS) + + +class CustomFunctionGenerator(MockGenerator): + """Custom Function Generation + + This generator enables users to specify an arbitrary Python function to generate + mock records. The signature of the function must be () -> pd.DataFrame. The + DataFrame it outputs contains one or more records related to one "user". This + function is called until enough records are produced (if too many records are + produced, the last output is downsampled such that the mock dataset has exactly + the required size). + + The custom function should not use `import` statements, but can use the following + libraries without importing them: datetime, dateutil, math, random, itertools, + np (numpy), pd (pandas). + + The code of the custom function should be randomized and not seeded, as the + generator takes care of seeding. + + Note: you can and should use this class as a decorator over the custom function. + + """ + + def __init__(self, f): + MockGenerator.__init__(self, PostMockDatasetMethod.CUSTOM_FUNCTION) + self.f = f + + def get_config(self): + return {"function": get_code(self.f)} + + def __call__(self): + """Call the inner function of this generator (for decoration purposes).""" + return self.f() + + +class GenericGenerator(MockGenerator): + """Mock generator for arbitrary data + + This generator can be configured to produce mock tabular data of any format, and + to add arbitrary distributions and correlations for attributes. + + This generator requires a data format describing the attributes. 
This can be either + loaded from a JSON file, or built by manually adding columns through the methods of the attributes helper. + + Additionally, "measurements" (the simulated results of queries) can be provided + to describe the distribution of the mock data. + + """ + + def __init__(self, data_format=None): + MockGenerator.__init__(self, PostMockDatasetMethod.GENERIC) + self.data_format = data_format if data_format is not None else [] + # Validation of the data format and addition of attributes is done through the + # self.attributes object of the generator. + self.attrs = self.attributes = _AttributeParser(self.data_format) + + @classmethod + def from_file(cls, data_format_file): + """Load a Generic generator from a data format described in a JSON file.""" + with open(data_format_file, "r", encoding="utf-8") as ff: + data_format = json.load(ff) + return GenericGenerator(data_format) + + def save(self, json_filename): + """Save the data format of this generator to a JSON file.""" + with open(json_filename, "w", encoding="utf-8") as ff: + json.dump(self.data_format, ff) + + def get_config(self): + return {"data-format": self.data_format} + + +class _AttributeParser: + """Attribute parser for the generic mock generator. + + _AttributeParser objects ensure that the arguments sent to the generic generator are + valid, and allow users to add new attributes through a safe and easy interface. + + """ + + class TYPE(str, Enum): + """Attribute types that are recognized by the generic generator.""" + + CONTINUOUS = "continuous" + INTEGER = "integer" + CATEGORICAL = "categorical" + + # Maps the name of the attribute to a pair of (required, optional) parameters. + FORMAT = { + TYPE.CONTINUOUS: ( + {"min_value", "max_value"}, + { + "num_bins", + }, + ), + TYPE.INTEGER: ( + {"min_value", "max_value"}, + { + "num_bins", + }, + ), + TYPE.CATEGORICAL: ({"possible_values"}, set()), + } + + def __init__(self, data_format): + # Check that the data format is valid. + self.validate(data_format) + # The *object* itself (a list). + self.data_format = data_format + + def validate(self, list_of_attributes: list[dict]): + """Assert that a data format is valid. Raises error if not.""" + for attr in list_of_attributes: + assert "name" in attr, "Missing attribute name." + assert "type" in attr, "Missing attribute type." + c = _AttributeParser.FORMAT.get(attr["type"]) + assert c is not None, f"Unknown type {attr['type']}." + required, optional = c + for req in required: + assert req in attr, f"Missing {req} attribute for type {attr['type']}." + allowed = required.union(optional.union({"name", "type"})) + for v in attr: + assert ( + v in allowed + ), f"Attribute {v} not allowed for type {attr['type']}." + + def _add_attribute(self, name: str, atype: TYPE, **attributes): + attr_values = {"name": name, "type": atype, **attributes} + self.validate([attr_values]) + # Remove optional arguments from the payload. + # This is because providing a None (null) value will overwrite defaults. + for optional_arg in _AttributeParser.FORMAT[atype][1]: + if optional_arg in attributes and attributes[optional_arg] is None: + del attr_values[optional_arg] + self.data_format.append(attr_values) + + # The following methods are intended as a public interface to this class. + + def add_continuous( + self, name: str, min_value: float, max_value: float, num_bins: int = None + ): + """ + Add a continuous-valued attribute to the data description. + + Args: + name: the name of this attribute (column) in the database. + min_value: the minimum value that this attribute can take.
+ max_value: the maximum value that this attribute can take. + num_bins (optional, default 10): number of bins to discretize this attribute into. + + The range of this attribute (min and max) is required for the mock generator, which + discretizes possible values in histogram bins. + + """ + self._add_attribute( + name, + _AttributeParser.TYPE.CONTINUOUS, + min_value=min_value, + max_value=max_value, + num_bins=num_bins, + ) + + def add_integer( + self, name: str, min_value: int, max_value: int, num_bins: int = None + ): + """ + Add an integer-valued attribute to the data description. + + Args: + name: the name of this attribute (column) in the database. + min_value: the minimum value that this attribute can take. + max_value: the maximum value that this attribute can take. + num_bins (optional, default 10): number of bins to group possible values. + + Like continuous attributes, the possible values of an integer attribute are grouped + in consecutive bins. This is to avoid representing distributions with a very large + number of parameters. This only occurs if num_bins < max_value - min_value. + + """ + self._add_attribute( + name, + _AttributeParser.TYPE.INTEGER, + min_value=min_value, + max_value=max_value, + num_bins=num_bins, + ) + + def add_categorical(self, name: str, possible_values: list[str]): + """ + Add a categorical-valued attribute to the data description. + + Args: + name: the name of this attribute (column) in the database. + possible_values: the (exhaustive!) list of all possible values that this + attribute can take. + + """ + self._add_attribute( + name, _AttributeParser.TYPE.CATEGORICAL, possible_values=possible_values + ) diff --git a/src/tuneinsight/client/dataobject.py b/src/tuneinsight/client/dataobject.py index e94736f..129fea3 100644 --- a/src/tuneinsight/client/dataobject.py +++ b/src/tuneinsight/client/dataobject.py @@ -1,6 +1,6 @@ from __future__ import annotations import io -from typing import Dict,Callable +from typing import Dict, Callable import attr import pandas as pd @@ -16,7 +16,6 @@ from tuneinsight.client.validation import validate_response - def float_matrix_to_dataframe(fm: models.FloatMatrix) -> pd.DataFrame: """ float_matrix_to_dataframe converts FloatMatrix content to a dataframe @@ -27,7 +26,7 @@ def float_matrix_to_dataframe(fm: models.FloatMatrix) -> pd.DataFrame: Returns: pd.DataFrame: the output dataframe """ - return pd.DataFrame(data=fm.data,columns=fm.columns) + return pd.DataFrame(data=fm.data, columns=fm.columns) def string_matrix_to_dataframe(t: models.StringMatrix) -> pd.DataFrame: @@ -40,16 +39,16 @@ def string_matrix_to_dataframe(t: models.StringMatrix) -> pd.DataFrame: Returns: pd.DataFrame: the output dataframe """ - return pd.DataFrame(data=t.data,columns=t.columns) - + return pd.DataFrame(data=t.data, columns=t.columns) # Maps content type to their appropriate dataframe converter -content_to_dataframe: Dict[type,Callable[[models.Content],pd.DataFrame]] = { +content_to_dataframe: Dict[type, Callable[[models.Content], pd.DataFrame]] = { models.FloatMatrix: float_matrix_to_dataframe, models.StringMatrix: string_matrix_to_dataframe, } + @attr.s(auto_attribs=True) class DataObject: """ @@ -64,9 +63,16 @@ class DataObject: model: models.DataObject client: Client - @classmethod - def create(cls,client: Client,do_type: models.DataObjectType,session_id: str = "",encrypted: bool = False,key_info: models.KeyInfo = None,data:bytes=None): + def create( + cls, + client: Client, + do_type: models.DataObjectType, + session_id: str = "", +
encrypted: bool = False, + key_info: models.KeyInfo = None, + data: bytes = None, + ): body = models.PostDataObjectJsonBody() body.method = models.DataObjectCreationMethod.CREATE body.encrypted = encrypted @@ -74,10 +80,12 @@ def create(cls,client: Client,do_type: models.DataObjectType,session_id: str = " body.session_id = session_id if key_info is not None: body.key_info = key_info - response: Response[models.DataObject] = post_data_object.sync_detailed(client=client,json_body=body) + response: Response[models.DataObject] = post_data_object.sync_detailed( + client=client, json_body=body + ) validate_response(response) - data_object = cls(model=response.parsed,client=client) + data_object = cls(model=response.parsed, client=client) if data is not None: data_object.load_data_from_bytes(data) return data_object @@ -91,7 +99,6 @@ def get_id(self) -> str: """ return self.model.unique_id - def get_content(self) -> models.Content: """ get_content returns the content of the dataobject @@ -99,7 +106,9 @@ def get_content(self) -> models.Content: Returns: models.Content the content which can be of multiple of types """ - response: Response[models.Content] = get_data_object_data.sync_detailed(client=self.client,data_object_id=self.get_id()) + response: Response[models.Content] = get_data_object_data.sync_detailed( + client=self.client, data_object_id=self.get_id() + ) validate_response(response) return response.parsed @@ -132,12 +141,10 @@ def get_string_matrix(self) -> models.StringMatrix: sm: models.StringMatrix = self.get_content() return sm - def get_stats(self) -> models.Statistics: stats: models.Statistics = self.get_content() return stats - def get_dataframe(self) -> pd.DataFrame: """ get_dataframe returns the content of the dataobject as a dataframe @@ -155,15 +162,15 @@ def get_dataframe(self) -> pd.DataFrame: converter = content_to_dataframe[content_type] return converter(content) - def delete(self): """ delete requests a deletion of the dataobject """ - response: Response[models.Any]= delete_data_object.sync_detailed(client=self.client,data_object_id=self.get_id()) # pylint: disable=no-member + response: Response["models.Any"] = delete_data_object.sync_detailed( + client=self.client, data_object_id=self.get_id() + ) # pylint: disable=no-member validate_response(response) - def decrypt(self) -> DataObject: """ decrypt requests a decryption of the dataobject yielding a new decrypted dataobject @@ -171,20 +178,31 @@ def decrypt(self) -> DataObject: Returns: DataObject: the decrypted dataobject """ - method = models.DataObjectCreationMethod(models.DataObjectCreationMethod.DECRYPT) - definition = models.PostDataObjectJsonBody(method=method,data_object_id=self.get_id()) - doResp: Response[models.DataObject] = post_data_object.sync_detailed(client=self.client,json_body=definition) + method = models.DataObjectCreationMethod( + models.DataObjectCreationMethod.DECRYPT + ) + definition = models.PostDataObjectJsonBody( + method=method, data_object_id=self.get_id() + ) + doResp: Response[models.DataObject] = post_data_object.sync_detailed( + client=self.client, json_body=definition + ) validate_response(doResp) - return DataObject(model=doResp.parsed,client=self.client) - - - def load_data_from_bytes(self,data: bytes): - definition = models.PutDataObjectDataMultipartData(File(payload=io.BytesIO(initial_bytes=data),file_name="test")) - doResp: Response[models.DataObject] = put_data_object_data.sync_detailed(data_object_id=self.get_id(),client=self.client,multipart_data=definition) + return 
DataObject(model=doResp.parsed, client=self.client) + + def load_data_from_bytes(self, data: bytes): + definition = models.PutDataObjectDataMultipartData( + File(payload=io.BytesIO(initial_bytes=data), file_name="test") + ) + doResp: Response[models.DataObject] = put_data_object_data.sync_detailed( + data_object_id=self.get_id(), client=self.client, multipart_data=definition + ) validate_response(doResp) self.model = doResp.parsed - def get_raw_data(self) -> bytes : - resp: Response[File] = get_data_object_raw_data.sync_detailed(data_object_id=self.get_id(),client=self.client) + def get_raw_data(self) -> bytes: + resp: Response[File] = get_data_object_raw_data.sync_detailed( + data_object_id=self.get_id(), client=self.client + ) validate_response(resp) return resp.content diff --git a/src/tuneinsight/client/datasource.py b/src/tuneinsight/client/datasource.py index ad69b77..85e20aa 100644 --- a/src/tuneinsight/client/datasource.py +++ b/src/tuneinsight/client/datasource.py @@ -14,6 +14,7 @@ from tuneinsight.client.validation import validate_response from tuneinsight.client.dataobject import DataObject + @attr.define class DataSource: """ @@ -24,7 +25,7 @@ class DataSource: client: Client @classmethod - def from_definition(cls,client: Client,definition: models.DataSourceDefinition): + def from_definition(cls, client: Client, definition: models.DataSourceDefinition): """ from_definition creates a new datasource on the backend given the data source definition @@ -32,13 +33,14 @@ def from_definition(cls,client: Client,definition: models.DataSourceDefinition): client (Client): the client to use to interact with the datasource definition (models.DataSourceDefinition): the definition of the datasource """ - response: Response[models.DataSource] = post_data_source.sync_detailed(client=client,json_body=definition) + response: Response[models.DataSource] = post_data_source.sync_detailed( + client=client, json_body=definition + ) validate_response(response) - return cls(model=response.parsed,client=client) - + return cls(model=response.parsed, client=client) @classmethod - def local(cls,client: Client, name: str, clear_if_exists: bool = False): + def local(cls, client: Client, name: str, clear_if_exists: bool = False): """ local creates a new local datasource without any data @@ -54,11 +56,16 @@ def local(cls,client: Client, name: str, clear_if_exists: bool = False): definition.config = ds_conf definition.type = "local" - - return cls.from_definition(client,definition=definition) + return cls.from_definition(client, definition=definition) @classmethod - def postgres(cls,client: Client,config: models.DatabaseConnectionInfo, name: str, clear_if_exists: bool = False): + def postgres( + cls, + client: Client, + config: models.DatabaseConnectionInfo, + name: str, + clear_if_exists: bool = False, + ): """ postgres creates a new postgres database datasource @@ -72,16 +79,32 @@ def postgres(cls,client: Client,config: models.DatabaseConnectionInfo, name: str definition.clear_if_exists = clear_if_exists definition.type = "database" ds_config_type = models.DataSourceConfigType.DATABASEDATASOURCECONFIG - credentials = models.Credentials(username=config.user,password=config.password,id="db-creds") - local_creds = models.LocalCredentialsProvider(type=models.CredentialsProviderType.LOCALCREDENTIALSPROVIDER,credentials=[credentials]) - ds_config = models.DatabaseDataSourceConfig(type=ds_config_type,connection_info=config) + credentials = models.Credentials( + username=config.user, password=config.password, 
id="db-creds" + ) + local_creds = models.LocalCredentialsProvider( + type=models.CredentialsProviderType.LOCALCREDENTIALSPROVIDER, + credentials=[credentials], + ) + ds_config = models.DatabaseDataSourceConfig( + type=ds_config_type, connection_info=config + ) definition.credentials_provider = local_creds definition.config = ds_config - return cls.from_definition(client,definition=definition) + return cls.from_definition(client, definition=definition) @classmethod - def from_api(cls,client: Client, api_type: models.APIConnectionInfoType, api_url: str, api_token: str, name: str, clear_if_exists: bool = False, cert: str = ""): + def from_api( + cls, + client: Client, + api_type: models.APIConnectionInfoType, + api_url: str, + api_token: str, + name: str, + clear_if_exists: bool = False, + cert: str = "", + ): """ from_api creates a new api datasource @@ -97,26 +120,31 @@ def from_api(cls,client: Client, api_type: models.APIConnectionInfoType, api_url definition.clear_if_exists = clear_if_exists definition.type = "api" - ds_config = models.ApiDataSourceConfig(type=models.DataSourceConfigType.APIDATASOURCECONFIG) - ds_config.connection_info = models.APIConnectionInfo(api_token=api_token, api_url=api_url, type=api_type, cert=cert) + ds_config = models.ApiDataSourceConfig( + type=models.DataSourceConfigType.APIDATASOURCECONFIG + ) + ds_config.connection_info = models.APIConnectionInfo( + api_token=api_token, api_url=api_url, type=api_type, cert=cert + ) definition.config = ds_config - return cls.from_definition(client,definition=definition) - - + return cls.from_definition(client, definition=definition) @classmethod - def from_dataframe(cls,client: Client,dataframe: pd.DataFrame, name: str, clear_if_exists: bool = False): + def from_dataframe( + cls, + client: Client, + dataframe: pd.DataFrame, + name: str, + clear_if_exists: bool = False, + ): ds = cls.local(client, name, clear_if_exists) ds.load_dataframe(df=dataframe) return ds - - def __str__(self): model = self.model - return f'id: {model.unique_id}, name: {model.name}, type: {model.type}, createdAt: {model.created_at}' - + return f"id: {model.unique_id}, name: {model.name}, type: {model.type}, createdAt: {model.created_at}" def get_id(self) -> str: """ @@ -127,8 +155,9 @@ def get_id(self) -> str: """ return self.model.unique_id - - def adapt(self,do_type: models.DataObjectType,query: Any = "",json_path: str = "") -> DataObject: + def adapt( + self, do_type: models.DataObjectType, query: Any = "", json_path: str = "" + ) -> DataObject: """ adapt adapts the data source into a dataobject @@ -140,53 +169,68 @@ def adapt(self,do_type: models.DataObjectType,query: Any = "",json_path: str = " DataObject: _description_ """ method = models.DataObjectCreationMethod.DATASOURCE - definition = models.PostDataObjectJsonBody(method=method,data_source_id=self.get_id(),type=do_type,query=query,json_path=json_path) - response: Response[models.DataObject] = post_data_object.sync_detailed(client=self.client,json_body=definition) + definition = models.PostDataObjectJsonBody( + method=method, + data_source_id=self.get_id(), + type=do_type, + query=query, + json_path=json_path, + ) + response: Response[models.DataObject] = post_data_object.sync_detailed( + client=self.client, json_body=definition + ) validate_response(response) - return DataObject(model=response.parsed,client=self.client) + return DataObject(model=response.parsed, client=self.client) - def load_csv_data(self,path: str): + def load_csv_data(self, path: str): """ loadData loads csv data stored in 
the file "path" to the datasources Args: path (_type_): path to the csv file """ - with open(path,mode='+rb') as f: - fileType = File(payload=f,file_name="test") - mpd = models.PutDataSourceDataMultipartData(data_source_request_data=fileType) - response: Response[models.DataSource] = put_data_source_data.sync_detailed(client=self.client,data_source_id=self.model.unique_id,multipart_data=mpd) + with open(path, mode="+rb") as f: + fileType = File(payload=f, file_name="test") + mpd = models.PutDataSourceDataMultipartData( + data_source_request_data=fileType + ) + response: Response[models.DataSource] = put_data_source_data.sync_detailed( + client=self.client, + data_source_id=self.model.unique_id, + multipart_data=mpd, + ) f.close() validate_response(response) - - def load_dataframe(self,df: pd.DataFrame): + def load_dataframe(self, df: pd.DataFrame): f = StringIO(initial_value="") - df.to_csv(f,index=False) - mpd = models.PutDataSourceDataMultipartData(data_source_request_data_raw=f.getvalue()) - response: Response[models.DataSource] = put_data_source_data.sync_detailed(client=self.client,data_source_id=self.model.unique_id,multipart_data=mpd) + df.to_csv(f, index=False) + mpd = models.PutDataSourceDataMultipartData( + data_source_request_data_raw=f.getvalue() + ) + response: Response[models.DataSource] = put_data_source_data.sync_detailed( + client=self.client, data_source_id=self.model.unique_id, multipart_data=mpd + ) validate_response(response) - - def get_dataframe(self,query: Any = "",json_path: str = "") -> pd.DataFrame: - do = self.adapt(do_type=models.DataObjectType.TABLE,query=query,json_path=json_path) + def get_dataframe(self, query: Any = "", json_path: str = "") -> pd.DataFrame: + do = self.adapt( + do_type=models.DataObjectType.TABLE, query=query, json_path=json_path + ) df = do.get_dataframe() do.delete() return df - def delete(self): """ delete deletes the datasource """ - response: Response[Any]= delete_data_source.sync_detailed(client=self.client,data_source_id=self.model.unique_id) + response: Response[Any] = delete_data_source.sync_detailed( + client=self.client, data_source_id=self.model.unique_id + ) validate_response(response) - - - - def default_datasource_definition() -> models.DataSourceDefinition: """ default_datasource_definition returns a default-valued DataSourceDefinition @@ -194,13 +238,36 @@ def default_datasource_definition() -> models.DataSourceDefinition: Returns: models.DataSourceDefinition: the definition with default values """ - return models.DataSourceDefinition(consent_type=models.DataSourceConsentType.UNKNOWN) - - - -def new_postgres_config(host: str,port: str,name: str,user: str,password: str) -> models.DatabaseConnectionInfo: - return models.DatabaseConnectionInfo(type=models.DatabaseType.POSTGRES,host=host,port=port,database=name,user=user,password=password) - - -def new_mariadb_config(host: str="mariadb",port: str="3306",name:str = "geco_0",user: str="geco",password:str = "geco")-> models.DatabaseConnectionInfo: - return models.DatabaseConnectionInfo(type=models.DatabaseType.MYSQL,host=host,port=port,database=name,user=user,password=password) + return models.DataSourceDefinition( + consent_type=models.DataSourceConsentType.UNKNOWN + ) + + +def new_postgres_config( + host: str, port: str, name: str, user: str, password: str +) -> models.DatabaseConnectionInfo: + return models.DatabaseConnectionInfo( + type=models.DatabaseType.POSTGRES, + host=host, + port=port, + database=name, + user=user, + password=password, + ) + + +def new_mariadb_config( + 
host: str = "mariadb", + port: str = "3306", + name: str = "geco_0", + user: str = "geco", + password: str = "geco", +) -> models.DatabaseConnectionInfo: + return models.DatabaseConnectionInfo( + type=models.DatabaseType.MYSQL, + host=host, + port=port, + database=name, + user=user, + password=password, + ) diff --git a/src/tuneinsight/client/diapason.py b/src/tuneinsight/client/diapason.py index 74d8d94..bbd84cd 100644 --- a/src/tuneinsight/client/diapason.py +++ b/src/tuneinsight/client/diapason.py @@ -1,7 +1,8 @@ from typing import List import warnings +import webbrowser -from keycloak.exceptions import KeycloakConnectionError +from keycloak.exceptions import KeycloakConnectionError, KeycloakGetError import attr import pandas as pd @@ -10,7 +11,11 @@ from tuneinsight.api.sdk.api.api_project import post_project from tuneinsight.api.sdk.api.api_project import get_project from tuneinsight.api.sdk.api.api_project import get_project_list -from tuneinsight.api.sdk.api.api_datasource import get_data_source_list, get_data_source, delete_data_source +from tuneinsight.api.sdk.api.api_datasource import ( + get_data_source_list, + get_data_source, + delete_data_source, +) from tuneinsight.api.sdk.api.api_dataobject import get_data_object from tuneinsight.api.sdk import models from tuneinsight.client.dataobject import DataObject @@ -33,19 +38,33 @@ class Diapason: Args: client (client.AuthenticatedClient): underlying client used to perform the requests """ + conf: config.Client client: api_client.Client = None - maas = None # expected type ModelManager not included to avoid cryptolib dependency + maas: "ModelManager" = ( + None # expected type ModelManager not included to avoid cryptolib dependency + ) def __attrs_post_init__(self): if self.conf.security.static_token != "": - self.client = api_client.AuthenticatedClient(base_url=self.conf.url,token=self.conf.security.static_token,verify_ssl=self.conf.security.verify_ssl) + self.client = api_client.AuthenticatedClient( + base_url=self.conf.url, + token=self.conf.security.static_token, + verify_ssl=self.conf.security.verify_ssl, + ) else: - self.client = auth.KeycloakClient(base_url=self.conf.url,token="", - oidc_config=self.conf.security.oidc_config, - username=self.conf.security.username, - password=self.conf.security.password, - verify_ssl=self.conf.security.verify_ssl) + self.client = auth.KeycloakClient( + base_url=self.conf.url, + token="", + oidc_config=self.conf.security.oidc_config, + username=self.conf.security.username, + password=self.conf.security.password, + verify_ssl=self.conf.security.verify_ssl, + proxies={ + "http": self.conf.http_proxy, + "https": self.conf.https_proxy, + }, + ) @classmethod def from_config_path(cls, path: str, url: str = None): @@ -61,10 +80,10 @@ def from_config_path(cls, path: str, url: str = None): conf = config.LoadClient(path) if url is not None: conf.url = url - return cls(conf = conf) + return cls(conf=conf) @classmethod - def from_env(cls,path: str = None): + def from_env(cls, path: str = None): """ from_env creates a client from the environment variables or a "dotenv" file @@ -72,12 +91,62 @@ def from_env(cls,path: str = None): path (str): path to the dotenv file. 
If None, it uses environment variables """ conf = config.LoadEnvClient(path) - return cls(conf = conf) + return cls(conf=conf) + + @classmethod + def from_config( + cls, + api_url: str, + oidc_client_id: str, + oidc_realm: str = "ti-realm", + oidc_url: str = "https://auth.tuneinsight.com/auth/", + http_proxy: str = "", + https_proxy: str = "", + verify_ssl: bool = True, + ): + """ + from_config creates a client from the specified attributes. - def login(self): + This is meant as a convenient way to define a client when default settings apply. + Only the url endpoint and OIDC client ID need to be specified. Please use client.login() + after creating this object to authenticate this client to Keycloak. + + Args: + api_url (str): the URL of the API endpoint. + oidc_client_id (str): the OIDC client ID of this user. + oidc_realm (str): the OIDC realm (default ti-realm). + oidc_url (str): where to find the OIDC auth server (default is the Tune Insight auth endpoint). + http_proxy (str): the HTTP proxy to use (default is none). + https_proxy (str): the HTTPS proxy to use (default is none). + verify_ssl (bool): whether to verify SSL certificates (default is True). + + """ + conf = { + "security": { + "oidc_config": { + "oidc_client_id": oidc_client_id, + "oidc_realm": oidc_realm, + "oidc_url": oidc_url, + }, + "verify_ssl": verify_ssl, + }, + "url": api_url, + "http_proxy": http_proxy, + "https_proxy": https_proxy, + } + conf = config.Client.from_json(conf) + return cls(conf) + + def login(self, open_page=True, blocking=True): """ login provides users with a link to log in from a browser + Args: + open_page (bool, True): whether to use the browser to open the login link. + blocking (bool, True): whether to wait until the user has logged in. + + Returns: + login_url (str): the URL to use to log in, or None if blocking is True. Raises: AttributeError: if the client is not a keycloak client """ @@ -85,8 +154,13 @@ def login(self): raise AttributeError("client is not a KeycloakClient") device_resp = self.client.get_device_code() - login_url = device_resp['verification_uri_complete'] + login_url = device_resp["verification_uri_complete"] print("Follow this link to login: " + login_url) + if open_page: + webbrowser.open(login_url) + if blocking: + self.wait_ready(sleep_seconds=1) + return None return login_url def get_client(self): @@ -97,7 +171,9 @@ def get_client(self): def add_models(self, model_manager): self.maas = model_manager - def new_datasource(self, dataframe: pd.DataFrame, name: str, clear_if_exists: bool = False) -> DataSource: + def new_datasource( + self, dataframe: pd.DataFrame, name: str, clear_if_exists: bool = False + ) -> DataSource: """ new_datasource creates a new datasource from a dataframe. It uploads the dataframe to the created datasource. 
@@ -109,9 +185,19 @@ def new_datasource(self, dataframe: pd.DataFrame, name: str, clear_if_exists: bo Returns: DataSource: the newly created datasource """ - return DataSource.from_dataframe(self.get_client(),dataframe,name,clear_if_exists) - - def new_api_datasource(self, api_type: models.APIConnectionInfoType, api_url: str, name: str, api_token: str = "", clear_if_exists: bool = False, cert: str = "") -> DataSource: + return DataSource.from_dataframe( + self.get_client(), dataframe, name, clear_if_exists + ) + + def new_api_datasource( + self, + api_type: models.APIConnectionInfoType, + api_url: str, + name: str, + api_token: str = "", + clear_if_exists: bool = False, + cert: str = "", + ) -> DataSource: """ new_api_datasource creates a new API datasource. @@ -124,9 +210,13 @@ def new_api_datasource(self, api_type: models.APIConnectionInfoType, api_url: st Returns: DataSource: the newly created datasource """ - return DataSource.from_api(self.get_client(),api_type,api_url,api_token,name,clear_if_exists,cert) + return DataSource.from_api( + self.get_client(), api_type, api_url, api_token, name, clear_if_exists, cert + ) - def new_csv_datasource(self,csv: str, name: str, clear_if_exists: bool = False) -> DataSource: + def new_csv_datasource( + self, csv: str, name: str, clear_if_exists: bool = False + ) -> DataSource: """ new_csv_datasource creates a new datasource and upload the given csv file to it @@ -139,11 +229,18 @@ def new_csv_datasource(self,csv: str, name: str, clear_if_exists: bool = False) Returns: DataSource: the newly created datasource """ - ds = DataSource.local(client= self.get_client(),name=name,clear_if_exists=clear_if_exists) + ds = DataSource.local( + client=self.get_client(), name=name, clear_if_exists=clear_if_exists + ) ds.load_csv_data(path=csv) return ds - def new_database(self,pg_config: models.DatabaseConnectionInfo, name: str, clear_if_exists: bool = False) -> DataSource: + def new_database( + self, + pg_config: models.DatabaseConnectionInfo, + name: str, + clear_if_exists: bool = False, + ) -> DataSource: """ new_database creates a new Postgres datasource @@ -155,10 +252,22 @@ def new_database(self,pg_config: models.DatabaseConnectionInfo, name: str, clear Returns: DataSource: the newly created datasource """ - return DataSource.postgres(client=self.get_client(),config=pg_config,name=name,clear_if_exists=clear_if_exists) - - def new_project(self, name: str, clear_if_exists: bool = False, - topology: models.Topology = UNSET, authorized_users: list = None) -> Project: + return DataSource.postgres( + client=self.get_client(), + config=pg_config, + name=name, + clear_if_exists=clear_if_exists, + ) + + def new_project( + self, + name: str, + clear_if_exists: bool = False, + topology: models.Topology = UNSET, + authorized_users: list = None, + participants: list = None, + non_contributor: bool = False, + ) -> Project: """new_project creates a new project Args: @@ -169,6 +278,8 @@ def new_project(self, name: str, clear_if_exists: bool = False, topology (Union[Unset, Topology]): Network Topologies. 'star' or 'tree'. In star topology all nodes are connected to a central node. In tree topology all nodes are connected and aware of each other. authorized_users (Union[Unset, List[str]]): The IDs of the users who can run the project + participants (Union[Unset, List[str]]): The IDs of the users who participate in the project. + non_contributor (bool, default False): indicates that this participant participates in the computations but does not contribute any data. 
Raises: Exception: in case the project already exists and clear_if_exists is False. @@ -181,23 +292,39 @@ if authorized_users is None: authorized_users = [] + if participants is None: + participants = [] + if name in [p.get_name() for p in self.get_projects()]: if clear_if_exists: - warnings.warn("""A project with the same name was removed on this node, but has not have been deleted on other nodes. \ + warnings.warn( + """A project with the same name was removed on this node, but may not have been deleted on other nodes. \ This can cause an error when attempting to share the project, because of conflicting names. \ -To avoid this, delete the project on other nodes, or create a differently-named project instead.""") +To avoid this, delete the project on other nodes, or create a differently-named project instead.""" + ) self.clear_project(name=name) else: raise ValueError(f"project {name} already exists") - proj_def = models.ProjectDefinition(name=name,local=True,allow_shared_edit=True, - topology=topology,created_with_client=models.Client.DIAPASON_PY, - authorized_users=authorized_users) - proj_response: Response[models.Project] = post_project.sync_detailed(client=self.client,json_body=proj_def) + proj_def = models.ProjectDefinition( + name=name, + local=True, + allow_shared_edit=True, + topology=topology, + created_with_client=models.Client.DIAPASON_PY, + authorized_users=authorized_users, + participants=participants, + non_contributor=non_contributor, + ) + # authorization_status = models.AuthorizationStatus.UNAUTHORIZED) + proj_response: Response[models.Project] = post_project.sync_detailed( + client=self.client, json_body=proj_def + ) validate_response(proj_response) - return Project(model=proj_response.parsed,client=self.client) + p = Project(model=proj_response.parsed, client=self.client) + return p - def get_project(self, project_id: str= "",name: str = "") -> Project: + def get_project(self, project_id: str = "", name: str = "") -> Project: """get_project returns a project by id or name Args: @@ -209,9 +336,11 @@ """ if project_id == "": return self.get_project_by_name(name=name) - proj_response: Response[models.Project] = get_project.sync_detailed(client=self.client,project_id=project_id) + proj_response: Response[models.Project] = get_project.sync_detailed( + client=self.client, project_id=project_id + ) validate_response(proj_response) - return Project(model=proj_response.parsed,client=self.client) + return Project(model=proj_response.parsed, client=self.client) def get_projects(self) -> List[Project]: """ @@ -220,26 +349,51 @@ Returns: List[Project]: list of projects """ - response: Response[List[models.Project]] = get_project_list.sync_detailed(client=self.client) + response: Response[List[models.Project]] = get_project_list.sync_detailed( + client=self.client + ) validate_response(response) projects = [] for project in response.parsed: - projects.append(Project(model=project,client=self.client)) + projects.append(Project(model=project, client=self.client)) return projects - def get_project_by_name(self, name:str) -> Project: - response: Response[List[models.Project]] = get_project_list.sync_detailed(client=self.client,name=name) + def get_project_by_name(self, name: str) -> Project: + """get_project_by_name returns a project by name + + Args: + name (str): name of the project + + Raises: + LookupError: if
the project is not found + + Returns: + Project: the project + """ + response: Response[List[models.Project]] = get_project_list.sync_detailed( + client=self.client, name=name + ) validate_response(response) if len(response.parsed): - return Project(model=response.parsed[0],client=self.client) + return Project(model=response.parsed[0], client=self.client) raise LookupError("project not found") - def get_datasources(self, name: str="") -> List[DataSource]: - response: Response[List[models.DataSource]] = get_data_source_list.sync_detailed(client=self.client, name=name) + def get_datasources(self, name: str = "") -> List[DataSource]: + """get_datasources returns all the datasources of the instance + + Args: + name (str, optional): name of the datasource. If provided, it will be used to filter the datasources. Defaults to "". + + Returns: + List[DataSource]: _description_ + """ + response: Response[List[models.DataSource]] = ( + get_data_source_list.sync_detailed(client=self.client, name=name) + ) validate_response(response) datasources = [] for datasource in response.parsed: - datasources.append(DataSource(model=datasource,client=self.client)) + datasources.append(DataSource(model=datasource, client=self.client)) return datasources def delete_datasource(self, ds: DataSource) -> List[DataSource]: @@ -251,10 +405,12 @@ def delete_datasource(self, ds: DataSource) -> List[DataSource]: Returns: List[DataSource]: updated list of datasources """ - response = delete_data_source.sync_detailed(client=self.client, data_source_id=ds.get_id()) + response = delete_data_source.sync_detailed( + client=self.client, data_source_id=ds.get_id() + ) validate_response(response) - def get_datasource(self, ds_id: str= "", name: str = "") -> DataSource: + def get_datasource(self, ds_id: str = "", name: str = "") -> DataSource: """get_datasource returns a datasource by id or name Args: @@ -266,11 +422,13 @@ def get_datasource(self, ds_id: str= "", name: str = "") -> DataSource: """ if ds_id == "": return self.get_datasources(name=name)[0] - ds_response: Response[models.DataSource] = get_data_source.sync_detailed(client=self.client,data_source_id=ds_id) + ds_response: Response[models.DataSource] = get_data_source.sync_detailed( + client=self.client, data_source_id=ds_id + ) validate_response(ds_response) - return DataSource(model=ds_response.parsed,client=self.client) + return DataSource(model=ds_response.parsed, client=self.client) - def clear_project(self, project_id: str = "", name: str = "") : + def clear_project(self, project_id: str = "", name: str = ""): p = self.get_project(project_id=project_id, name=name) p.delete() @@ -283,12 +441,14 @@ def get_dataobject(self, do_id: str) -> DataObject: Returns: DataObject: the dataobject """ - do_response: Response[models.DataObject] = get_data_object.sync_detailed(client=self.client,data_object_id=do_id) + do_response: Response[models.DataObject] = get_data_object.sync_detailed( + client=self.client, data_object_id=do_id + ) validate_response(do_response) - return DataObject(model=do_response.parsed,client=self.client) + return DataObject(model=do_response.parsed, client=self.client) - def wait_ready(self,repeat: int = 50,sleep_seconds: int = 5): - ''' + def wait_ready(self, repeat: int = 50, sleep_seconds: int = 5): + """ wait_ready polls the API until it answers by using the get_projects() endpoint Args: @@ -297,14 +457,21 @@ def wait_ready(self,repeat: int = 50,sleep_seconds: int = 5): Raises: TimeoutError: if the API has not answered - ''' + """ num_tries = repeat 
sleep_time = sleep_seconds * time.second + last_ex = None for _ in range(num_tries): try: self.get_projects() return - except (ConnectionError,KeycloakConnectionError,InvalidResponseError): + except ( + ConnectionError, + KeycloakConnectionError, + KeycloakGetError, + InvalidResponseError, + ) as ex: time.sleep(sleep_time) + last_ex = ex continue - raise TimeoutError() + raise last_ex diff --git a/src/tuneinsight/client/local_data_selection.py b/src/tuneinsight/client/local_data_selection.py index bf2ded3..f995aff 100644 --- a/src/tuneinsight/client/local_data_selection.py +++ b/src/tuneinsight/client/local_data_selection.py @@ -1,4 +1,3 @@ - from typing import Callable from tuneinsight.api.sdk.models import LocalDataSelection as SelectionModel from tuneinsight.api.sdk.models import LocalDataSelectionDefinition as DefinitionModel @@ -6,49 +5,48 @@ from tuneinsight.computations.preprocessing import PreprocessingBuilder - class LocalDataSelection: - ''' + """ Represents a data selection it comprises of both data source and preprocessing parameters - ''' - + """ - update_func: Callable[[DefinitionModel],SelectionModel] + update_func: Callable[[DefinitionModel], SelectionModel] preprocessing: PreprocessingBuilder = None datasource: QueryBuilder = None description: str = "" name: str = "" - - def __init__(self, - update: Callable[[DefinitionModel],SelectionModel], - name: str = "", - description: str = ""): + def __init__( + self, + update: Callable[[DefinitionModel], SelectionModel], + name: str = "", + description: str = "", + ): self.preprocessing = PreprocessingBuilder() self.datasource = QueryBuilder() self.update_func = update self.name = name self.description = description - def _get_definition(self) -> DefinitionModel: - ''' + """ _get_definition returns the definition schema of the selection Returns: DefinitionModel: the schema definition - ''' - definition = DefinitionModel(data_selection = self.datasource.get_parameters(), - preprocessing= self.preprocessing.get_params()) + """ + definition = DefinitionModel( + data_selection=self.datasource.get_parameters(), + preprocessing=self.preprocessing.get_params(), + ) definition.name = self.name definition.description = self.description return definition - def save(self): - ''' + """ save saves the selection to the backend - ''' + """ definition = self._get_definition() self.update_func(definition) diff --git a/src/tuneinsight/client/models.py b/src/tuneinsight/client/models.py index 761bc74..805c917 100644 --- a/src/tuneinsight/client/models.py +++ b/src/tuneinsight/client/models.py @@ -1,81 +1,102 @@ -from typing import Any,List +from typing import Any, List from enum import Enum import numpy as np from tuneinsight.api.sdk import Client from tuneinsight.api.sdk.models import Model as APIModel -from tuneinsight.api.sdk.models import RegressionType,ModelDefinition,PredictionParams,ApproximationParams,EncryptedPrediction,ComputationType -from tuneinsight.api.sdk.models import SessionDefinition,Session,DataObjectType,KeyInfo -from tuneinsight.api.sdk.api.api_ml import get_model_list,get_model,post_model,delete_model +from tuneinsight.api.sdk.models import ( + RegressionType, + ModelDefinition, + PredictionParams, + ApproximationParams, + EncryptedPrediction, + ComputationType, +) +from tuneinsight.api.sdk.models import ( + SessionDefinition, + Session, + DataObjectType, + KeyInfo, +) +from tuneinsight.api.sdk.api.api_ml import ( + get_model_list, + get_model, + post_model, + delete_model, +) from tuneinsight.api.sdk.api.api_sessions import 
post_session from tuneinsight.client.validation import validate_response -from tuneinsight.cryptolib.cryptolib import new_hefloat_operator_from_b64_scheme_context,get_relin_key_bytes,encrypt_prediction_dataset,decrypt_prediction +from tuneinsight.cryptolib.cryptolib import ( + new_hefloat_operator_from_b64_scheme_context, + get_relin_key_bytes, + encrypt_prediction_dataset, + decrypt_prediction, +) from tuneinsight.client.dataobject import DataObject from tuneinsight.api.sdk.types import Response -from tuneinsight.utils.io import data_to_bytes,data_from_bytes +from tuneinsight.utils.io import data_to_bytes, data_from_bytes from tuneinsight.client.computations import ComputationRunner + class Type(Enum): - ''' + """ Type enumerates the different machine learning model types - ''' + """ + LINEAR = RegressionType.LINEAR LOGISTIC = RegressionType.LOGISTIC POISSON = RegressionType.POISSON class Model: - ''' + """ Represents a model stored on the agent - ''' + """ client: Client model: APIModel - - - def __init__(self,client: Client,model: APIModel): - ''' + def __init__(self, client: Client, model: APIModel): + """ __init__ initializes the model class given the client and API model Args: client (Client): the client used to communicate with the corresponding agent model (APIModel): the API model - ''' + """ self.client = client self.model = model - - - def __str__(self) ->str: - ''' + def __str__(self) -> str: + """ __str__ returns a string representation of the mode Returns: str: the model's string representation - ''' - return f'Model(name={self.model.name}, type={self.model.type}, training algorithm={self.model.training_algorithm})' - + """ + return f"Model(name={self.model.name}, type={self.model.type}, training algorithm={self.model.training_algorithm})" def delete(self): - ''' + """ delete deletes the model from the agent (can only be done by the model owner) - ''' - resp = delete_model.sync_detailed(client=self.client,model_id=self.model.model_id) + """ + resp = delete_model.sync_detailed( + client=self.client, model_id=self.model.model_id + ) validate_response(resp) - def refresh(self): - ''' + """ refresh refreshes this local model class with the data stored on the remote agent - ''' - resp: Response[APIModel] = get_model.sync_detailed(client=self.client,model_id=self.model.model_id) + """ + resp: Response[APIModel] = get_model.sync_detailed( + client=self.client, model_id=self.model.model_id + ) validate_response(resp) self.model = resp.parsed - - def compute_prediction(self,data: Any) -> np.ndarray: - ''' + def compute_prediction(self, data: Any) -> np.ndarray: + """ compute_prediction computes an encrypted prediction on the model given the dataset the dataset is first encrypted locally with ephemeral keys and then sent to the agent owning the model to compute the prediction homomorphically, the encrypted result is then @@ -86,7 +107,7 @@ def compute_prediction(self,data: Any) -> np.ndarray: Returns: np.ndarray: the decrypted predicted values - ''' + """ self.refresh() # Create a new session on the agent s_id = self._new_session() @@ -95,7 +116,7 @@ def compute_prediction(self,data: Any) -> np.ndarray: cs_id = self._upload_eval_keys(s_id) # encrypt and upload dataset - ct = self._encrypt_dataset(cs_id,data) + ct = self._encrypt_dataset(cs_id, data) input_id = self._upload_dataset(s_id, ct) # run the prediction computation @@ -105,69 +126,84 @@ def compute_prediction(self,data: Any) -> np.ndarray: return result_df - # Helpers for the prediction def _new_session(self) -> str: # Create a 
Session sess_def = SessionDefinition(params=self.model.model_params.cryptolib_params) - sess_resp: Response[Session] = post_session.sync_detailed(client=self.client,json_body=sess_def) + sess_resp: Response[Session] = post_session.sync_detailed( + client=self.client, json_body=sess_def + ) validate_response(sess_resp) s_id = sess_resp.parsed.id return s_id - - def _upload_eval_keys(self,s_id:str) -> bytes: + def _upload_eval_keys(self, s_id: str) -> bytes: # Load the parameters into a cryptosystem - cs_id = new_hefloat_operator_from_b64_scheme_context(str(self.model.model_params.cryptolib_params)) + cs_id = new_hefloat_operator_from_b64_scheme_context( + str(self.model.model_params.cryptolib_params) + ) # Generate and upload relinearization key rlk_bytes = get_relin_key_bytes(cs_id) key_info = KeyInfo(collective=False) do_type = DataObjectType.RLWE_RELINEARIZATION_KEY - DataObject.create(client=self.client, do_type=do_type,session_id=s_id,encrypted=False,key_info=key_info,data=rlk_bytes) + DataObject.create( + client=self.client, + do_type=do_type, + session_id=s_id, + encrypted=False, + key_info=key_info, + data=rlk_bytes, + ) return cs_id - def _encrypt_dataset(self,cs_id: bytes,data: Any) -> bytes: - csv_bytes = data_to_bytes(data,remove_header=True) - return encrypt_prediction_dataset(cs_id, csv_bytes, self.model.model_params.prediction_params, False) + def _encrypt_dataset(self, cs_id: bytes, data: Any) -> bytes: + csv_bytes = data_to_bytes(data, remove_header=True) + return encrypt_prediction_dataset( + cs_id, csv_bytes, self.model.model_params.prediction_params, False + ) - def _upload_dataset(self,s_id:str,ct: bytes) -> str: + def _upload_dataset(self, s_id: str, ct: bytes) -> str: do_type = DataObjectType.ENCRYPTED_PREDICTION_DATASET - data_object = DataObject.create(client=self.client,do_type=do_type,session_id=s_id,encrypted=True,data=ct) + data_object = DataObject.create( + client=self.client, + do_type=do_type, + session_id=s_id, + encrypted=True, + data=ct, + ) return data_object.model.unique_id - - def _run_prediction(self,input_id:str) -> bytes: - comp_runner = ComputationRunner(project_id="",client=self.client) + def _run_prediction(self, input_id: str) -> bytes: + comp_runner = ComputationRunner(project_id="", client=self.client) definition = EncryptedPrediction(type=ComputationType.ENCRYPTEDPREDICTION) definition.data = input_id definition.model = self.model.data_object.unique_id - results = comp_runner.run_computation(definition,local=True,keyswitch=False,decrypt=False) + results = comp_runner.run_computation( + definition, local=True, keyswitch=False, decrypt=False + ) return results[0].get_raw_data() @staticmethod - def _decrypt_prediction(cs_id: bytes,ct:bytes) -> np.ndarray: + def _decrypt_prediction(cs_id: bytes, ct: bytes) -> np.ndarray: result = decrypt_prediction(cs_id, ct) - return np.array(data_from_bytes(result,no_header=True)) - + return np.array(data_from_bytes(result, no_header=True)) class ModelManager: - ''' + """ Exposes useful methods for interacting with Tune Insight's Machine Learning Models API - ''' - + """ client: Client - - def __init__(self,client: Client): - ''' + def __init__(self, client: Client): + """ __init__ initializes itself given a valid API client Args: client (Client): the API client - ''' + """ self.client = client @staticmethod @@ -176,26 +212,28 @@ def _validate_weights(data: List[List[Any]]) -> List[List[float]]: new_data = [[float(element) for element in row] for row in data] return new_data except Exception as exception: - 
raise AttributeError("could not convert weights to valid float values") from exception - + raise AttributeError( + "could not convert weights to valid float values" + ) from exception def get_models(self) -> List[Model]: - ''' + """ get_models Returns the list of models stored on the agent Returns: List[Model]: the list of models - ''' - response: Response[List[APIModel]] = get_model_list.sync_detailed(client=self.client) + """ + response: Response[List[APIModel]] = get_model_list.sync_detailed( + client=self.client + ) validate_response(response) result = [] for model in response.parsed: - result.append(Model(client=self.client,model=model)) + result.append(Model(client=self.client, model=model)) return result - - def get_model(self,model_id: str= "",name: str = "") -> Model: - ''' + def get_model(self, model_id: str = "", name: str = "") -> Model: + """ get_model returns the model schema given either its model id or the common name, returning the first match found if the two arguments are non-empty. @@ -209,7 +247,7 @@ def get_model(self,model_id: str= "",name: str = "") -> Model: Returns: Model: _description_ - ''' + """ if model_id == "" and name == "": raise AttributeError("model id or name must be specified") models = self.get_models() @@ -222,8 +260,14 @@ def get_model(self,model_id: str= "",name: str = "") -> Model: return m raise NameError(f"model with id:{model_id} or name:{name} was not found") - def new_model(self,name: str,regression_type: Type,data: List[List[Any]],delete_if_exists: bool = False) -> Model: - ''' + def new_model( + self, + name: str, + regression_type: Type, + data: List[List[Any]], + delete_if_exists: bool = False, + ) -> Model: + """ new_model Uploads a new model to the agent Args: @@ -237,7 +281,7 @@ def new_model(self,name: str,regression_type: Type,data: List[List[Any]],delete_ Returns: Model: _description_ - ''' + """ models = self.get_models() for m in models: if name == m.model.name: @@ -249,7 +293,11 @@ def new_model(self,name: str,regression_type: Type,data: List[List[Any]],delete_ pred_params.regression_type = regression_type pred_params.approximation_params = ApproximationParams() weights = self._validate_weights(data) - model_definition = ModelDefinition(name=name,prediction_params=pred_params,weights=weights) - response: Response[APIModel] = post_model.sync_detailed(client=self.client,json_body=model_definition) + model_definition = ModelDefinition( + name=name, prediction_params=pred_params, weights=weights + ) + response: Response[APIModel] = post_model.sync_detailed( + client=self.client, json_body=model_definition + ) validate_response(response) return Model(self.client, response.parsed) diff --git a/src/tuneinsight/client/project.py b/src/tuneinsight/client/project.py index 7ebd6a7..41db926 100644 --- a/src/tuneinsight/client/project.py +++ b/src/tuneinsight/client/project.py @@ -1,8 +1,9 @@ from typing import List +from dateutil.parser import isoparse import attr import pandas as pd from IPython.display import display, HTML, Markdown -from tuneinsight.api.sdk.types import UNSET,Unset +from tuneinsight.api.sdk.types import UNSET, Unset from tuneinsight.api.sdk.types import Response from tuneinsight.api.sdk import models from tuneinsight.api.sdk.api.api_project import patch_project, post_project_computation @@ -10,14 +11,20 @@ from tuneinsight.api.sdk.api.api_project import delete_project from tuneinsight.api.sdk.api.api_datasource import get_data_source from tuneinsight.computations.enc_aggregation import EncryptedAggregation +from 
tuneinsight.computations.encrypted_mean import EncryptedMean from tuneinsight.computations.gwas import GWAS +from tuneinsight.computations.intersection import SetIntersection from tuneinsight.computations.survival_aggregation import SurvivalAggregation -from tuneinsight.computations.regression import LinearRegression, LogisticRegression, PoissonRegression +from tuneinsight.computations.regression import ( + LinearRegression, + LogisticRegression, + PoissonRegression, +) from tuneinsight.computations.cohort import Cohort from tuneinsight.computations.secure_join import SecureJoin from tuneinsight.computations.hybrid_fl import HybridFL from tuneinsight.computations.stats import DatasetStatistics -from tuneinsight.computations.policy import Policy,display_policy +from tuneinsight.computations.policy import Policy, display_policy from tuneinsight.computations.types import Type from tuneinsight.computations.dataset_schema import DatasetSchema from tuneinsight.client.validation import validate_response @@ -26,15 +33,17 @@ from tuneinsight.client.datasource import DataSource from tuneinsight.client.dataobject import DataObject from tuneinsight.client.local_data_selection import LocalDataSelection +from tuneinsight.api.sdk.api.api_computations import documentation @attr.s(auto_attribs=True) class Project: """ - Represents a project from the backend Agent + Represents a project saved in the Tune Insight instance """ - model: models.Project # The underlying model - client: UNSET # the client used to access the api + + model: models.Project # The underlying model + client: UNSET # the client used to access the api def get_id(self) -> str: """ @@ -65,18 +74,20 @@ def __str__(self): res += f" (organization: {org.name})" res += "\n" - if p.input_metadata != UNSET\ - and UNSET not in (p.input_metadata,p.input_metadata.tables)\ - and len(p.input_metadata.tables): + if ( + p.input_metadata != UNSET + and UNSET not in (p.input_metadata, p.input_metadata.tables) + and len(p.input_metadata.tables) + ): res += "\tinput tables :\n" tables: List[models.DataSourceTable] = p.input_metadata.tables for table in tables: - res += f'\t\ttable name: {table.name}\n' - res += '\t\tcolumns:\n' + res += f"\t\ttable name: {table.name}\n" + res += "\t\tcolumns:\n" cols: List[models.DataSourceColumn] = table.columns for col in cols: - res += f'\t\t\tname: {col.name}, type: {col.type} type group: {col.type_group}\n' - res += '\n' + res += f"\t\t\tname: {col.name}, type: {col.type} type group: {col.type_group}\n" + res += "\n" res += "\n" if self.model.computations != UNSET and len(self.model.computations) > 0: res += "computations: \n" @@ -94,6 +105,15 @@ def get_name(self) -> str: """ return self.model.name + def get_description(self) -> str: + """ + get_description returns the description of the project + + Returns: + str: the description of the project + """ + return self.model.description + def get_topology(self) -> str: """ get_topology returns the topology of the project @@ -104,6 +124,9 @@ def get_topology(self) -> str: return str(self.model.topology) def display_datasources(self): + """ + display_datasources lists the datasources linked to the project and the structure of their schemas. 
+ """ self.refresh() participants = self.model.participants for p in participants: @@ -112,70 +135,99 @@ def display_datasources(self): for t in tables: # print(f"Table name: {t.name}") display(Markdown("#### Table name: " + "`" + str(t.name) + "`")) - data={'Column': [], 'Type': []} + data = {"Column": [], "Type": []} num_cols = len(t.columns) for i in range(num_cols): - data['Column'].append(t.columns[i].name) - data['Type'].append(t.columns[i].type) - df = pd.DataFrame(data['Type'], index=data['Column']).T + data["Column"].append(t.columns[i].name) + data["Type"].append(t.columns[i].type) + df = pd.DataFrame(data["Type"], index=data["Column"]).T display(HTML(df.to_html(index=False))) print("\n") - - def set_input_schema(self,schema: DatasetSchema): - ''' + def set_input_schema(self, schema: DatasetSchema): + """ set_input_schema sets an expected schema to enforce on the inputs. Args: schema (DatasetSchema): the schema definition - ''' + """ lds = self.local_data_selection() lds.preprocessing.schema = schema lds.save() def delete(self): - resp: Response[str] = delete_project.sync_detailed(client=self.client,project_id=self.get_id()) + """ + deletes the project from the backend + """ + resp: Response[str] = delete_project.sync_detailed( + client=self.client, project_id=self.get_id() + ) validate_response(response=resp) - - def refresh(self): """ refresh refreshes the project's model with its backend state """ - resp: Response[models.Project] = get_project.sync_detailed(client=self.client,project_id=self.get_id()) + resp: Response[models.Project] = get_project.sync_detailed( + client=self.client, project_id=self.get_id() + ) validate_response(response=resp) self.model = resp.parsed - - def query_datasource(self,query: str) -> pd.DataFrame: + def query_datasource(self, query: str) -> pd.DataFrame: ds = self.get_input_datasource() return ds.get_dataframe(query=query) - - def patch(self,proj_def: models.ProjectDefinition): + def patch(self, proj_def: models.ProjectDefinition): """ patch perform a patch operation on the project Args: proj_def (models.ProjectDefinition): the definition to patch with """ - resp: Response[models.Project] = patch_project.sync_detailed(client=self.client,project_id=self.get_id(),json_body=proj_def) + resp: Response[models.Project] = patch_project.sync_detailed( + client=self.client, project_id=self.get_id(), json_body=proj_def + ) validate_response(response=resp) self.model = resp.parsed def authorize(self): - self.patch(proj_def=models.ProjectDefinition(authorization_status=models.AuthorizationStatus.AUTHORIZED)) - + """ + authorize authorizes the project. + This means that other participants will be able to run collective computations with it. + """ + self.patch( + proj_def=models.ProjectDefinition( + authorization_status=models.AuthorizationStatus.AUTHORIZED + ) + ) def unauthorize(self): - self.patch(proj_def=models.ProjectDefinition(authorization_status=models.AuthorizationStatus.UNAUTHORIZED)) + """ + unauthorize sets the project to unauthorized. + This means that other participants will not be able to run collective computations with it. 
+ """ + self.patch( + proj_def=models.ProjectDefinition( + authorization_status=models.AuthorizationStatus.UNAUTHORIZED + ) + ) + + def get_authorization_status(self) -> models.AuthorizationStatus: + """ + get_authorization_status returns the project's authorization status + + Returns: + models.AuthorizationStatus: the project's authorization status + """ + self.refresh() + return self.model.authorization_status def share(self): """ share shares the project with the network """ - proj_def = models.ProjectDefinition(shared=True,local=False) + proj_def = models.ProjectDefinition(shared=True, local=False) self.patch(proj_def=proj_def) def unshare(self): @@ -185,14 +237,28 @@ def unshare(self): proj_def = models.ProjectDefinition(shared=False) self.patch(proj_def=proj_def) - def set_computation(self,definition: models.ComputationDefinition): + def set_computation(self, definition: models.ComputationDefinition): """ set_computation Sets the project's current computation definition Args: definition (models.ComputationDefinition): the definition to apply """ - self.patch(proj_def=models.ProjectDefinition(computation_definition=definition,broadcast=True)) + self.patch( + proj_def=models.ProjectDefinition( + computation_definition=definition, broadcast=True + ) + ) + + def get_computation(self) -> models.ComputationDefinition: + """ + get_computation returns the project's current computation definition + + Returns: + models.ComputationDefinition: the project's current computation definition + """ + self.refresh() + return self.model.computation_definition def set_input_datasource_id(self, datasourceId: str): """ @@ -204,7 +270,7 @@ def set_input_datasource_id(self, datasourceId: str): proj_def = models.ProjectDefinition(data_source_id=datasourceId) self.patch(proj_def=proj_def) - def set_input_datasource(self,ds: DataSource): + def set_input_datasource(self, ds: DataSource): """ set_input_datasource sets the project's input datasource @@ -214,7 +280,6 @@ def set_input_datasource(self,ds: DataSource): proj_def = models.ProjectDefinition(data_source_id=ds.get_id()) self.patch(proj_def=proj_def) - def get_input_datasource(self) -> DataSource: """ get_input_datasource returns the datasource linked to the project @@ -228,9 +293,11 @@ def get_input_datasource(self) -> DataSource: self.refresh() if self.model.data_source_id == "": raise Exception("no data source set to project") - resp: Response[models.DataSource] = get_data_source.sync_detailed(data_source_id=self.model.data_source_id,client=self.client) + resp: Response[models.DataSource] = get_data_source.sync_detailed( + data_source_id=self.model.data_source_id, client=self.client + ) validate_response(resp) - return DataSource(model=resp.parsed,client=self.client) + return DataSource(model=resp.parsed, client=self.client) def get_runner(self) -> ComputationRunner: """ @@ -239,7 +306,7 @@ def get_runner(self) -> ComputationRunner: Returns: ComputationRunner: the computation runner set with the project id """ - return ComputationRunner(client=self.client,project_id=self.get_id()) + return ComputationRunner(client=self.client, project_id=self.get_id()) def get_participants(self) -> List[str]: """ @@ -250,8 +317,22 @@ def get_participants(self) -> List[str]: """ return [p.node.name for p in self.model.participants] + def get_authorized_users(self) -> List[str]: + """ + get_authorized_users returns the email addresses of the authorized users - def run_computation(self,comp: models.ComputationDefinition,local: bool=False,keyswitch: bool=True,decrypt: 
bool=True) -> List[DataObject]: + Returns: + List[str]: a list of the email addresses of the authorized users + """ + return self.model.authorized_users + + def run_computation( + self, + comp: models.ComputationDefinition, + local: bool = False, + keyswitch: bool = True, + decrypt: bool = True, + ) -> List[DataObject]: """ run_computation runs the given computation definition and returns the list of resulting dataobjects @@ -266,7 +347,9 @@ def run_computation(self,comp: models.ComputationDefinition,local: bool=False,ke """ runner = self.get_runner() comp.data_source_parameters = models.ComputationDataSourceParameters() - return runner.run_computation(comp=comp,local=local,keyswitch=keyswitch,decrypt=decrypt) + return runner.run_computation( + comp=comp, local=local, keyswitch=keyswitch, decrypt=decrypt + ) def run_project(self) -> models.Project: """Run the computation defined on the project @@ -274,15 +357,12 @@ def run_project(self) -> models.Project: Returns: models.Project: Project Computation Created """ - response : Response[models.Project] = post_project_computation.sync_detailed( - project_id=self.get_id(), - client=self.client, - json_body=None + response: Response[models.Project] = post_project_computation.sync_detailed( + project_id=self.get_id(), client=self.client, json_body=None ) validate_response(response) return response.parsed - def new_aggregation(self) -> Aggregation: """ new_aggregation returns a new Aggregation Computation which can be computed by running the project @@ -290,8 +370,7 @@ def new_aggregation(self) -> Aggregation: Returns: StatisticalAggregation: The aggregation computation """ - return Aggregation(client=self.client,project_id=self.get_id()) - + return Aggregation(client=self.client, project_id=self.get_id()) def new_enc_aggregation(self) -> EncryptedAggregation: """ @@ -300,8 +379,14 @@ def new_enc_aggregation(self) -> EncryptedAggregation: Returns: Aggregation: The aggregation computation """ - return EncryptedAggregation(client=self.client,project_id=self.get_id()) + return EncryptedAggregation(client=self.client, project_id=self.get_id()) + def new_enc_mean(self) -> EncryptedMean: + """ + new_enc_mean returns a new EncryptedMean computation runner. 
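# A minimal sketch of the new EncryptedMean workflow, assuming `project` is a shared
# Project; the variable and grouping column names are hypothetical.
mean = project.new_enc_mean()
mean.variables = ["weight"]        # hypothetical numeric column(s) to average
mean.grouping_keys = ["site"]      # hypothetical grouping column
means_df = mean.compute_average(local=False)   # pandas DataFrame with the outlier-filtered means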
+ + """ + return EncryptedMean(client=self.client, project_id=self.get_id()) def new_cohort(self) -> Cohort: """ @@ -310,7 +395,7 @@ def new_cohort(self) -> Cohort: Returns: Cohort: The cohort """ - return Cohort(client=self.client,project_id=self.get_id()) + return Cohort(client=self.client, project_id=self.get_id()) def new_gwas(self) -> GWAS: """ @@ -319,23 +404,36 @@ def new_gwas(self) -> GWAS: Returns: GWAS: The GWAS computation """ - return GWAS(client=self.client,project_id=self.get_id()) + return GWAS(client=self.client, project_id=self.get_id()) - def new_linear_regression(self, continuous_labels:bool = False) -> LinearRegression: + def new_linear_regression( + self, continuous_labels: bool = False + ) -> LinearRegression: """ new_linear_regression returns a new LinearRegression which can be computed by running the project Returns: LinearRegression: The linear regression computation """ - return LinearRegression(client=self.client,project_id=self.get_id(), continuous_labels=continuous_labels) + return LinearRegression( + client=self.client, + project_id=self.get_id(), + continuous_labels=continuous_labels, + ) - def new_logistic_regression(self, approximation_params:models.approximation_params.ApproximationParams=UNSET) -> LogisticRegression: + def new_logistic_regression( + self, + approximation_params: models.approximation_params.ApproximationParams = UNSET, + ) -> LogisticRegression: """ new_logistic_regression returns a new LogisticRegression which can be computed by running the project Returns: LogisticRegression: The logistic regression computation """ - return LogisticRegression(client=self.client,project_id=self.get_id(), approximation_params=approximation_params) + return LogisticRegression( + client=self.client, + project_id=self.get_id(), + approximation_params=approximation_params, + ) def new_poisson_regression(self) -> PoissonRegression: """ @@ -343,82 +441,89 @@ def new_poisson_regression(self) -> PoissonRegression: Returns: PoissonRegressor: The poisson regression computation """ - return PoissonRegression(client=self.client,project_id=self.get_id()) - + return PoissonRegression(client=self.client, project_id=self.get_id()) def new_survival_aggregation(self) -> SurvivalAggregation: """ - new_survival_aggregation returns a new SurvivalAggregation which can be computed by running the project - Returns: - SurvivalAggregation: The survival aggregation computation - """ - return SurvivalAggregation(client=self.client,project_id=self.get_id()) - + new_survival_aggregation returns a new SurvivalAggregation which can be computed by running the project + Returns: + SurvivalAggregation: The survival aggregation computation + """ + return SurvivalAggregation(client=self.client, project_id=self.get_id()) def new_secure_join(self) -> SecureJoin: - ''' + """ new_secure_join returns a new SecureJoin which can be computed by running the project Returns: SecureJoin: the secure join computation instance - ''' - return SecureJoin(client=self.client,project_id=self.get_id()) + """ + return SecureJoin(client=self.client, project_id=self.get_id()) def new_hybrid_fl(self) -> HybridFL: - ''' + """ new_hybrid_fl returns a new HybridFL which can be computed by running the project Returns: HybridFL: the hybrid federated learning computation instance - ''' + """ return HybridFL(client=self.client, project_id=self.get_id()) def new_statistics(self) -> DatasetStatistics: - ''' + """ new_statistics returns a new DatasetStatistics instance which can run statistics on the project Returns: 
DatasetStatistics: the dataset statistics computation instance - ''' - return DatasetStatistics(client=self.client,project_id=self.get_id()) + """ + return DatasetStatistics(client=self.client, project_id=self.get_id()) + def new_intersection(self) -> SetIntersection: + """ + new_intersection creates a new set intersection computation instance + + Returns: + SetIntersection: the set intersection computation instance + """ + return SetIntersection(client=self.client, project_id=self.get_id()) - def set_policy(self,policy: Policy): - ''' + def set_policy(self, policy: Policy): + """ set_policy sets the policy to the project Args: policy (Policy): the policy to add to the project - ''' + """ proj_def = models.ProjectDefinition(policy=policy) self.patch(proj_def=proj_def) - def display_policy(self,detailed:bool = False,show_queries: bool = False): - ''' + def display_policy(self, detailed: bool = False, show_queries: bool = False): + """ display_policy displays the policies associated to the project Args: detailed (bool, optional): shows additional policy details if set to true such as the json of the policy. Defaults to False. show_queries (bool, optional): shows the set of authorized SQL queries. Defaults to False. - ''' + """ policy = self.model.policy - display(Markdown(f'# {self.model.name} Policy')) + display(Markdown(f"# {self.model.name} Policy")) if policy is Unset: print("project has no policy") - if isinstance(policy,Unset): + if isinstance(policy, Unset): return - display_policy(policy,detailed=detailed,show_queries=show_queries) - + display_policy(policy, detailed=detailed, show_queries=show_queries) def local_data_selection(self) -> LocalDataSelection: - ''' + """ local_data_selection returns the local data selection settings for the project Returns: LocalDataSelection: the data selection settings that can be updated by the user - ''' + """ - def update_func(definition: models.LocalDataSelectionDefinition) -> models.LocalDataSelection: + def update_func( + definition: models.LocalDataSelectionDefinition, + ) -> models.LocalDataSelection: proj_def = models.ProjectDefinition() proj_def.local_data_selection_definition = definition self.patch(proj_def) @@ -429,15 +534,83 @@ def update_func(definition: models.LocalDataSelectionDefinition) -> models.Local self.refresh() return lds - def display_workflow(self): - ''' + """ display_workflow displays the workflow description for the project - ''' + """ self.refresh() display(Markdown(self.model.workflow_description)) + def set_computation_type(self, comp_type: Type): + """set_computation_type sets the computation type of the project's computation definition - def set_computation_type(self,comp_type: Type): + Args: + comp_type (Type): _description_ + """ definition = models.ComputationDefinition(type=comp_type) self.set_computation(definition) + + def set_contribution_status(self, contributes: bool): + """ + set_contribution_status sets the local contributing status of the instance. + If 'contributes' is set to false, then the instance will not contribute any data when + running computation. + + Args: + contributes (bool): whether the instance contributes data. + """ + proj_def = models.ProjectDefinition() + proj_def.non_contributor = not contributes + self.patch(proj_def=proj_def) + + def get_latest_measurements(self): + """ + get_latest_measurements returns a dictionary that contains the benchmarking measurement of the last computation + that was run in the project. 
The dictionary contains benchmarking information about the processing time and + memory allocation at each phase of the computation. + + Returns: + dict: a dictionary containing the measurements. + """ + self.refresh() + res = {} + latest_comp = None + comps: List[models.Computation] = self.model.computations + for comp in comps: + if comp.definition.type != models.ComputationType.COLLECTIVEKEYSWITCH: + latest_comp = comp + break + if latest_comp is None: + return res + measurements: List[models.Measurement] = latest_comp.measurements + for measure in measurements: + start_datetime = isoparse(measure.start[:-1]) + end_datetime = isoparse(measure.end[:-1]) + time_diff_seconds = (end_datetime - start_datetime).total_seconds() + measurement = { + "Processing time (seconds)": round(time_diff_seconds, 3), + "Memory allocated (bytes)": measure.allocated, + } + res[measure.name] = measurement + start_datetime = isoparse(latest_comp.started_at[:-1]) + end_datetime = isoparse(latest_comp.ended_at[:-1]) + time_diff_seconds = (end_datetime - start_datetime).total_seconds() + res["Total time"] = round(time_diff_seconds, 3) + return res + + def display_previous_workflow(self): + """ + display_previous_workflow displays (compatible with jupyter notebooks) a markdown workflow summary of the last computation + that was run in the project. + """ + self.refresh() + if len(self.model.computations) == 0: + display(Markdown("No computation have been run with this project")) + return + comp = self.model.computations[0].definition + response: Response[models.DocumentationResponse200] = ( + documentation.sync_detailed(client=self.client, json_body=comp) + ) + validate_response(response) + md_doc = response.parsed.description.replace("\n", "\n\n") + display(Markdown(md_doc)) diff --git a/src/tuneinsight/client/session.py b/src/tuneinsight/client/session.py index 429ec8a..48a64ed 100644 --- a/src/tuneinsight/client/session.py +++ b/src/tuneinsight/client/session.py @@ -1,17 +1,26 @@ import pandas as pd from tuneinsight.api.sdk import Client -from tuneinsight.api.sdk.models import SessionDefinition,DataObjectType,KeyInfo,PrivateSearchDatabase +from tuneinsight.api.sdk.models import ( + SessionDefinition, + DataObjectType, + KeyInfo, + PrivateSearchDatabase, +) from tuneinsight.api.sdk.models import Session as APISession from tuneinsight.client.validation import validate_response from tuneinsight.api.sdk.api.api_sessions import post_session from tuneinsight.api.sdk.types import Response -from tuneinsight.cryptolib.cryptolib import new_hefloat_operator_from_b64_scheme_context,get_relin_key_bytes,PIRContext +from tuneinsight.cryptolib.cryptolib import ( + new_hefloat_operator_from_b64_scheme_context, + get_relin_key_bytes, + PIRContext, +) from tuneinsight.client.dataobject import DataObject from tuneinsight.utils.io import data_from_bytes -class Session: - session_id : str +class Session: + session_id: str client: Client cryptolib_params: str @@ -27,7 +36,6 @@ def __init__(self, client: Client, cryptolib_params: str): self.cryptolib_params = cryptolib_params self.session_id = self._new_session() - def _new_session(self) -> str: """ Creates a new session @@ -36,7 +44,9 @@ def _new_session(self) -> str: str: id of the new session """ sess_def = SessionDefinition(params=self.cryptolib_params) - sess_resp: Response[APISession] = post_session.sync_detailed(client=self.client,json_body=sess_def) + sess_resp: Response[APISession] = post_session.sync_detailed( + client=self.client, json_body=sess_def + ) 
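# A sketch of consuming the benchmarking dictionary returned by get_latest_measurements;
# the keys mirror the strings built in that method, and `project` is assumed to exist.
stats = project.get_latest_measurements()
for name, value in stats.items():
    if name == "Total time":
        print(f"total runtime: {value} s")
    else:
        print(name, value["Processing time (seconds)"], value["Memory allocated (bytes)"])
project.display_previous_workflow()   # markdown summary of the last computation (notebook display)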
validate_response(sess_resp) s_id = sess_resp.parsed.id return s_id @@ -53,7 +63,14 @@ def upload_eval_keys(self) -> bytes: rlk_bytes = get_relin_key_bytes(cs_id) key_info = KeyInfo(collective=False) do_type = DataObjectType.RLWE_RELINEARIZATION_KEY - DataObject.create(client=self.client, do_type=do_type,session_id=self.session_id,encrypted=False,key_info=key_info,data=rlk_bytes) + DataObject.create( + client=self.client, + do_type=do_type, + session_id=self.session_id, + encrypted=False, + key_info=key_info, + data=rlk_bytes, + ) return cs_id def upload_data(self, data: bytes, do_type: DataObjectType) -> str: @@ -66,33 +83,40 @@ def upload_data(self, data: bytes, do_type: DataObjectType) -> str: Returns: str: id of the uploaded data object """ - data_object = DataObject.create(client=self.client,do_type=do_type,session_id=self.session_id,encrypted=True,data=data) + data_object = DataObject.create( + client=self.client, + do_type=do_type, + session_id=self.session_id, + encrypted=True, + data=data, + ) return data_object.model.unique_id class PIRSession(Session): - """Session for private information retrieval protocol - - """ + """Session for private information retrieval protocol""" ctx: PIRContext - def __init__(self, client, pir_db: PrivateSearchDatabase): - super().__init__(client,pir_db.cryptosystem_params) - self.ctx = PIRContext(pir_db.database_params,pir_db.database_index) - + super().__init__(client, pir_db.cryptosystem_params) + self.ctx = PIRContext(pir_db.database_params, pir_db.database_index) def upload_eval_keys(self): - """Upload evaluation keys to PIR session - """ + """Upload evaluation keys to PIR session""" evk = self.ctx.get_eva_key() key_info = KeyInfo(collective=False) do_type = DataObjectType.RLWE_MEM_EVALUATION_KEY_SET - DataObject.create(client=self.client, do_type=do_type,session_id=self.session_id,encrypted=False,key_info=key_info,data=evk) - - - def encrypt_query(self,query:str) -> str: + DataObject.create( + client=self.client, + do_type=do_type, + session_id=self.session_id, + encrypted=False, + key_info=key_info, + data=evk, + ) + + def encrypt_query(self, query: str) -> str: """Encrypt a private search query Args: @@ -104,8 +128,7 @@ def encrypt_query(self,query:str) -> str: query_data = self.ctx.encrypt_query(query) return self.upload_data(query_data, DataObjectType.ENCRYPTED_PIR_SEARCH) - - def decrypt_response(self,response:bytes) -> pd.DataFrame: + def decrypt_response(self, response: bytes) -> pd.DataFrame: """Decrypt a private search response Args: diff --git a/src/tuneinsight/client/validation.py b/src/tuneinsight/client/validation.py index b0834fc..2e1c2c3 100644 --- a/src/tuneinsight/client/validation.py +++ b/src/tuneinsight/client/validation.py @@ -1,4 +1,3 @@ - from tuneinsight.api.sdk.types import Response from tuneinsight.utils.errors import hidden_traceback_scope @@ -21,21 +20,22 @@ def validate_response(response: Response): class AuthorizationError(Exception): - ''' + """ AuthorizationError is the exception used when the response status code is 403 Args: Exception: the base exception class - ''' + """ - def __init__(self,response: Response): + def __init__(self, response: Response): pattern = '"message":"' content = str(response.content) ind = content.find(pattern) - content = content[ind + len(pattern):] - msg = content.split('"}',maxsplit=1)[0] + content = content[ind + len(pattern) :] + msg = content.split('"}', maxsplit=1)[0] super().__init__(msg) + class InvalidResponseError(Exception): """ InvalidResponseError represents an exception 
when the response status code is erroneous @@ -44,10 +44,13 @@ class InvalidResponseError(Exception): Exception: the base exception class """ - def __init__(self,response: Response): - message = f'Got Invalid Response with status code {response.status_code} and message {response.content}' - if b'when parsing token' in response.content or b'unsuccessful token validation' in response.content: + def __init__(self, response: Response): + message = f"Got Invalid Response with status code {response.status_code} and message {response.content}" + if ( + b"when parsing token" in response.content + or b"unsuccessful token validation" in response.content + ): message += "\n\nInvalid or expired token used. To obtain a valid token log in with your credentials at sdk.tuneinsight.com and insert the token in the static_token field of the sdk-config.yml file." - elif b'permission denied by the authorization provider' in response.content: + elif b"permission denied by the authorization provider" in response.content: message += "\n\nCheck credentials or token validity. To obtain a valid token log in with your credentials at sdk.tuneinsight.com and insert the token in the static_token field of the sdk-config.yml file." super().__init__(message) diff --git a/src/tuneinsight/computations/cohort.py b/src/tuneinsight/computations/cohort.py index eb98b44..4a083eb 100644 --- a/src/tuneinsight/computations/cohort.py +++ b/src/tuneinsight/computations/cohort.py @@ -10,12 +10,16 @@ class Cohort(ComputationRunner): - cohort_id: str = "" join_id: str = "" - def create_from_matching(self, matching_columns: List[str], result_format: models.SetIntersectionOutputFormat, local_input: models.LocalInput = None) -> List[DataObject]: - """ Create a cohort from matching columns + def create_from_matching( + self, + matching_columns: List[str], + result_format: models.SetIntersectionOutputFormat, + local_input: models.LocalInput = None, + ) -> List[DataObject]: + """Create a cohort from matching columns Args: matching_columns (List[str]): a list of column names to match on @@ -33,13 +37,15 @@ def create_from_matching(self, matching_columns: List[str], result_format: model if local_input: model.local_input = local_input - dataobjects = super().run_computation(comp=model,local=False,release=True) + dataobjects = super().run_computation(comp=model, local=False, release=True) self.cohort_id = dataobjects[0].get_id() return dataobjects @staticmethod - def get_psi_ratio(all_parties: List[str], dataobjects: List[DataObject]) -> pd.DataFrame: - """ Add a column to the PSI result indicating for each record the percentage of participants at which the record was observed + def get_psi_ratio( + all_parties: List[str], dataobjects: List[DataObject] + ) -> pd.DataFrame: + """Add a column to the PSI result indicating for each record the percentage of participants at which the record was observed Args: all_parties (List[str]): list of the names of the parties involved in the private set intersection @@ -51,18 +57,18 @@ def get_psi_ratio(all_parties: List[str], dataobjects: List[DataObject]) -> pd.D df = dataobjects[0].get_dataframe() num_parties = len(all_parties) percentages = [] - results = df.to_dict(orient='records') + results = df.to_dict(orient="records") for row in results: match = 0 for org in all_parties: - if org in row and row[org] == 'true': + if org in row and row[org] == "true": match += 1 - percentages.append(match/num_parties*100) - df['psi_ratio'] = pd.Series(percentages) + percentages.append(match / num_parties * 100) + 
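# A hedged sketch of the cohort / private set intersection helpers, assuming `project`
# is a shared Project; the matching column is hypothetical and `result_format` must be
# a member of models.SetIntersectionOutputFormat chosen by the user.
cohort = project.new_cohort()
dataobjects = cohort.create_from_matching(
    matching_columns=["patient_id"],   # hypothetical identifier column
    result_format=result_format,
)
ratios = cohort.get_psi_ratio(all_parties=project.get_participants(), dataobjects=dataobjects)
sizes = cohort.get_size(nodes=project.get_participants())   # per-participant dataset sizes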
df["psi_ratio"] = pd.Series(percentages) return df def create_from_join(self, target_columns: List[str], join_columns: List[str]): - """ Create a cohort from vertically partitioned data + """Create a cohort from vertically partitioned data Args: target_columns (List[str]): column names of target columns @@ -73,13 +79,14 @@ def create_from_join(self, target_columns: List[str], join_columns: List[str]): model.join_columns = join_columns model.project_id = self.project_id - dataobjects = super().run_computation(comp=model,local=False,keyswitch=False,decrypt=False) + dataobjects = super().run_computation( + comp=model, local=False, keyswitch=False, decrypt=False + ) self.join_id = dataobjects[0].get_id() - - def get_size(self,nodes: List[str]) -> pd.DataFrame: - """ Get the size of each participants dataset in the cohort + def get_size(self, nodes: List[str]) -> pd.DataFrame: + """Get the size of each participants dataset in the cohort Args: nodes (List[str]): list of participant names to get the dataset size of @@ -89,15 +96,15 @@ def get_size(self,nodes: List[str]) -> pd.DataFrame: """ agg = self.new_aggregation() for n in nodes: - agg.preprocessing.counts(output_column_name=n,nodes=[n]) - agg.preprocessing.select(columns=nodes,create_if_missing=True,dummy_value="0",nodes=[n]) - agg.preprocessing.select(columns=nodes,create_if_missing=True,dummy_value="0") + agg.preprocessing.counts(output_column_name=n, nodes=[n]) + agg.preprocessing.select( + columns=nodes, create_if_missing=True, dummy_value="0", nodes=[n] + ) + agg.preprocessing.select(columns=nodes, create_if_missing=True, dummy_value="0") return agg.get_aggregation() - - def new_aggregation(self) -> EncryptedAggregation: - """ Create an aggregation computation on the cohort + """Create an aggregation computation on the cohort Raises: Exception: if the cohort hasn't been created beforehand @@ -108,13 +115,15 @@ def new_aggregation(self) -> EncryptedAggregation: if self.cohort_id == "" and self.join_id == "": raise Exception("cohort must be created before running an aggregation") - aggregation = EncryptedAggregation(client=self.client, project_id=self.project_id) + aggregation = EncryptedAggregation( + client=self.client, project_id=self.project_id + ) aggregation.cohort_id = self.cohort_id aggregation.join_id = self.join_id return aggregation def new_gwas(self) -> GWAS: - """ Create an GWAS computation on the cohort + """Create an GWAS computation on the cohort Raises: Exception: if the cohort hasn't been created beforehand @@ -132,7 +141,7 @@ def new_gwas(self) -> GWAS: @staticmethod def plot_psi(x, y, title, x_label, y_label): - """ Plot the PSI results as a bar graph + """Plot the PSI results as a bar graph Args: x (_type_): x values diff --git a/src/tuneinsight/computations/dataset_schema.py b/src/tuneinsight/computations/dataset_schema.py index 86c08aa..e616a4c 100644 --- a/src/tuneinsight/computations/dataset_schema.py +++ b/src/tuneinsight/computations/dataset_schema.py @@ -1,37 +1,38 @@ -from typing import Dict,List,Any +from typing import Dict, List, Any from tuneinsight.api.sdk import models class DatasetSchema: - ''' + """ DatasetSchema represents a user-defined dataset schema that inputs must comply to - ''' + """ model: models.DatasetSchema - ''' + """ API model for the schema - ''' - cols: Dict[str,models.ColumnSchema] - ''' + """ + cols: Dict[str, models.ColumnSchema] + """ Dictionary from column names to column schema - ''' - - + """ def __init__(self): self.cols = {} self.model = 
models.DatasetSchema(columns=models.DatasetSchemaColumns()) self.model.columns.additional_properties = self.cols - - def drop_invalid(self,drop: bool = True): + def drop_invalid(self, drop: bool = True): self.model.drop_invalid_rows = drop - def add_column(self,name:str,dtype: str = None, - coerce: bool = False, - nullable: bool = False, - required: bool = True) -> models.ColumnSchema: - ''' + def add_column( + self, + name: str, + dtype: str = None, + coerce: bool = False, + nullable: bool = False, + required: bool = True, + ) -> models.ColumnSchema: + """ add_column creates a new column and adds it to the dataset schema Args: @@ -43,16 +44,16 @@ def add_column(self,name:str,dtype: str = None, Returns: ColumnSchema: the newly created column schema model - ''' - col = models.ColumnSchema(nullable=nullable,coerce=coerce,required=required) + """ + col = models.ColumnSchema(nullable=nullable, coerce=coerce, required=required) if dtype is not None: col.dtype = dtype col.checks = models.ColumnSchemaChecks() self.cols[name] = col return col - def get_column(self,name:str) -> models.ColumnSchema: - ''' + def get_column(self, name: str) -> models.ColumnSchema: + """ get_column returns the corresponding column schema Args: @@ -60,13 +61,13 @@ def get_column(self,name:str) -> models.ColumnSchema: Returns: models.ColumnSchema: the corresponding column schema - ''' + """ if name not in self.cols: return self.add_column(name=name) return self.cols[name] - def lt(self,name:str,val: Any): - ''' + def lt(self, name: str, val: Any): + """ lt requires values from the column to be less than 'val' Args: @@ -74,13 +75,13 @@ def lt(self,name:str,val: Any): val (Any): the upper bound value Returns: self (DatasetSchema): the updated schema - ''' + """ col = self.get_column(name) col.checks.lt = val return self - def le(self,name:str,val: Any): - ''' + def le(self, name: str, val: Any): + """ le requires values from the column to be less than or equal to 'val' Args: @@ -88,13 +89,13 @@ def le(self,name:str,val: Any): val (Any): the upper bound value Returns: self (DatasetSchema): the updated schema - ''' + """ col = self.get_column(name) col.checks.le = val return self - def eq(self,name:str,val: Any): - ''' + def eq(self, name: str, val: Any): + """ eq requires values from the column to be equal to 'val' Args: @@ -102,13 +103,13 @@ def eq(self,name:str,val: Any): val (Any): the value to compare with Returns: self (DatasetSchema): the updated schema - ''' + """ col = self.get_column(name) col.checks.eq = val return self - def ge(self,name:str,val: Any): - ''' + def ge(self, name: str, val: Any): + """ ge requires values from the column to be greater or equal to 'val' Args: @@ -116,13 +117,13 @@ def ge(self,name:str,val: Any): val (Any): the lower bound value Returns: self (DatasetSchema): the updated schema - ''' + """ col = self.get_column(name) col.checks.ge = val return self - def gt(self,name:str,val: Any): - ''' + def gt(self, name: str, val: Any): + """ gt requires values from the column to be greater than 'val' Args: @@ -130,13 +131,20 @@ def gt(self,name:str,val: Any): val (Any): the lower bound value Returns: self (DatasetSchema): the updated schema - ''' + """ col = self.get_column(name) col.checks.gt = val return self - def in_range(self,name:str,min_value: float,max_value: float,include_min: bool = True,include_max: bool = True): - ''' + def in_range( + self, + name: str, + min_value: float, + max_value: float, + include_min: bool = True, + include_max: bool = True, + ): + """ in_range requires values 
from the column to be in a specified range Args: @@ -147,13 +155,18 @@ def in_range(self,name:str,min_value: float,max_value: float,include_min: bool = include_max (bool, optional): whether the maximum value is included in the range. Defaults to True. Returns: self (DatasetSchema): the updated schema - ''' + """ col = self.get_column(name) - col.checks.in_range = models.ColumnSchemaChecksInRange(max_value=max_value,min_value=min_value,include_min=include_min,include_max=include_max) + col.checks.in_range = models.ColumnSchemaChecksInRange( + max_value=max_value, + min_value=min_value, + include_min=include_min, + include_max=include_max, + ) return self - def str_startswith(self,name:str,val:str): - ''' + def str_startswith(self, name: str, val: str): + """ str_startswith requires that all values from the column start with a specific substring Args: @@ -161,14 +174,13 @@ def str_startswith(self,name:str,val:str): val (str): the substring Returns: self (DatasetSchema): the updated schema - ''' + """ col = self.get_column(name) col.checks.str_startswith = val return self - - def isin(self,name:str,vals: List[Any]): - ''' + def isin(self, name: str, vals: List[Any]): + """ isin requires that all values from the column are from a specified set of values Args: @@ -176,13 +188,13 @@ def isin(self,name:str,vals: List[Any]): vals (List[Any]): the specified set of values Returns: self (DatasetSchema): the updated schema - ''' + """ col = self.get_column(name) col.checks.isin = vals return self - def notin(self,name:str,vals: List[Any]): - ''' + def notin(self, name: str, vals: List[Any]): + """ notin requires that all values from the column are excluded from specified set of values Args: @@ -190,13 +202,13 @@ def notin(self,name:str,vals: List[Any]): vals (List[Any]): the set of values to exclude Returns: self (DatasetSchema): the updated schema - ''' + """ col = self.get_column(name) col.checks.isin = vals return self - def required(self,name:str,required: bool): - ''' + def required(self, name: str, required: bool): + """ required sets a column as required or optional Args: @@ -204,13 +216,13 @@ def required(self,name:str,required: bool): required (bool): whether the column is required Returns: self (DatasetSchema): the updated schema - ''' + """ col = self.get_column(name) col.required = required return self - def dtype(self,name:str, dtype: str): - ''' + def dtype(self, name: str, dtype: str): + """ dtype sets the required data type of the column Args: @@ -218,13 +230,13 @@ def dtype(self,name:str, dtype: str): dtype (str): the required data type Returns: self (DatasetSchema): the updated schema - ''' + """ col = self.get_column(name) col.dtype = dtype return self - def nullable(self,name:str, nullable: bool): - ''' + def nullable(self, name: str, nullable: bool): + """ dtype sets the nullable status of the column Args: @@ -232,13 +244,13 @@ def nullable(self,name:str, nullable: bool): nullable (bool): whether the column is nullable Returns: self (DatasetSchema): the updated schema - ''' + """ col = self.get_column(name) col.nullable = nullable return self - def coerce(self,name:str, coerce: bool): - ''' + def coerce(self, name: str, coerce: bool): + """ dtype sets the coerce value of the column Args: @@ -246,7 +258,7 @@ def coerce(self,name:str, coerce: bool): coerce (bool): whether the validator should coerce invalid types Returns: self (DatasetSchema): the updated schema - ''' + """ col = self.get_column(name) col.coerce = coerce return self diff --git 
a/src/tuneinsight/computations/enc_aggregation.py b/src/tuneinsight/computations/enc_aggregation.py index ae80c6a..f992a98 100644 --- a/src/tuneinsight/computations/enc_aggregation.py +++ b/src/tuneinsight/computations/enc_aggregation.py @@ -6,8 +6,10 @@ from tuneinsight.utils.plots import style_plot -def revert_quantiles(quantiles: List[float],n: int,min_v: float,max_v: float) -> List[float]: - ''' +def revert_quantiles( + quantiles: List[float], n: int, min_v: float, max_v: float +) -> List[float]: + """ revert_quantiles reverts the averaged/normalized quantiles returned by the aggregation to their original value Args: @@ -18,29 +20,30 @@ def revert_quantiles(quantiles: List[float],n: int,min_v: float,max_v: float) -> Returns: List[float]: the reverted quantiles list - ''' + """ res = [] for q in quantiles: res.append(((q / n) * (max_v - min_v) + min_v)) return res + class EncryptedAggregation(ComputationRunner): - ''' + """ EncryptedAggregation Represents the encrypted aggregation computation Args: ComputationRunner: Inherits all methods available from the computation runner parent class - ''' - + """ cohort_id: str = "" join_id: str = "" float_precision: int = 2 selected_cols: List[str] = None - def get_model(self) -> models.EncryptedAggregation: - model = models.EncryptedAggregation(type=models.ComputationType.ENCRYPTEDAGGREGATION) + model = models.EncryptedAggregation( + type=models.ComputationType.ENCRYPTEDAGGREGATION + ) model.project_id = self.project_id model.cohort_id = self.cohort_id model.join_id = self.join_id @@ -48,9 +51,8 @@ def get_model(self) -> models.EncryptedAggregation: model.aggregate_columns = self.selected_cols return model - def get_aggregation(self, local: bool = False) -> pd.DataFrame: - ''' + """ get_aggregation computes the encrypted aggregation computation and returns the decrypted results as a dataframe Args: @@ -58,23 +60,24 @@ def get_aggregation(self, local: bool = False) -> pd.DataFrame: Returns: pd.DataFrame: the decrypted results as a dataframe - ''' + """ model = self.get_model() - dataobjects = super().run_computation(comp=model,local=local,release=True) + dataobjects = super().run_computation(comp=model, local=local, release=True) result = dataobjects[0].get_float_matrix() totals = result.data[0] rounded_totals = [round(v, self.float_precision) for v in totals] if len(result.columns) == len(rounded_totals): - data = {'Column': result.columns, 'Total': rounded_totals} + data = {"Column": result.columns, "Total": rounded_totals} else: data = rounded_totals return pd.DataFrame(data) - - def get_averaged_quantiles(self,column:str,min_v:float =0,max_v: float = 200,local: bool = False) -> pd.DataFrame: - ''' + def get_averaged_quantiles( + self, column: str, min_v: float = 0, max_v: float = 200, local: bool = False + ) -> pd.DataFrame: + """ get_averaged_quantiles computes the averaged quantiles over all participants computed by aggregating (N * normalize_min_max(q_i,min,max)) across all participants, where N is the number of local data points and q_i is the i'th quantile. 
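# A sketch of the encrypted aggregation helpers, assuming `project` is a shared Project
# and "bmi" is a hypothetical numeric column. get_averaged_quantiles undoes the
# normalization with q = (q_norm / n) * (max_v - min_v) + min_v, as in revert_quantiles.
agg = project.new_enc_aggregation()
agg.selected_cols = ["bmi"]
totals = agg.get_aggregation()                                  # decrypted per-column totals
quantiles = agg.get_averaged_quantiles(column="bmi", min_v=10, max_v=60)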
@@ -86,22 +89,27 @@ def get_averaged_quantiles(self,column:str,min_v:float =0,max_v: float = 200,loc Returns: pd.DataFrame: a dataframe with one row recording all quantiles and the total number of data points - ''' - self.preprocessing.quantiles(column,min_v,max_v) + """ + self.preprocessing.quantiles(column, min_v, max_v) df = self.get_aggregation(local=local) quantiles = list(df.Total)[1:] n = list(df.Total)[0] new_row = [n] - new_row.extend(revert_quantiles(quantiles,n,min_v,max_v)) - cols = ['n'] - cols.extend([f'q{i}' for i in range(len(quantiles))]) + new_row.extend(revert_quantiles(quantiles, n, min_v, max_v)) + cols = ["n"] + cols.extend([f"q{i}" for i in range(len(quantiles))]) rounded_values = [round(v, self.float_precision) for v in new_row] - return pd.DataFrame(data=[rounded_values],columns=cols) - + return pd.DataFrame(data=[rounded_values], columns=cols) @staticmethod - def plot_aggregation(result:pd.DataFrame, title:str, x_label:str, y_label:str, size:tuple=(8,4)): - ''' + def plot_aggregation( + result: pd.DataFrame, + title: str, + x_label: str, + y_label: str, + size: tuple = (8, 4), + ): + """ plot_aggregation plots the results of the aggregation as a histogram Args: @@ -110,7 +118,7 @@ def plot_aggregation(result:pd.DataFrame, title:str, x_label:str, y_label:str, s x_label (str): the x label to give to the plot y_label (str): the y label to give to the plot size (tuple, optional): the sizing of the plot. Defaults to (8,4). - ''' + """ plt.style.use("bmh") fig, ax = plt.subplots() @@ -123,10 +131,8 @@ def plot_aggregation(result:pd.DataFrame, title:str, x_label:str, y_label:str, s plt.show() - - def display_workflow(self): - ''' + """ display_workflow displays the workflow of the encrypted aggregation - ''' + """ return super().display_documentation(self.get_model()) diff --git a/src/tuneinsight/computations/encrypted_mean.py b/src/tuneinsight/computations/encrypted_mean.py new file mode 100644 index 0000000..c29edf7 --- /dev/null +++ b/src/tuneinsight/computations/encrypted_mean.py @@ -0,0 +1,54 @@ +from typing import Union, List +import pandas as pd +from tuneinsight.api.sdk import models +from tuneinsight.client.computations import ComputationRunner +from tuneinsight.api.sdk.types import UNSET, Unset + + +class EncryptedMean(ComputationRunner): + """ + Computes the mean and standard deviation of a list of numbers, removes outliers, then returns the mean without outliers. + + """ + + participant: Union[Unset, str] = UNSET + variables: Union[Unset, List[str]] = UNSET + grouping_keys: Union[Unset, List[str]] = UNSET + min_participants: Union[Unset, int] = 5 + outlier_threshold: Union[Unset, float] = 2 + + def __init__(self, project_id: str = "", client=UNSET): + super().__init__(project_id=project_id, client=client) + + def get_model(self) -> models.EncryptedMean: + """ + get_model initializes the computation definition given the parameters of this class + + Returns: + models.EncryptedMean: the API model for the computation definition + """ + model = models.EncryptedMean(type=models.ComputationType.ENCRYPTEDMEAN) + model.project_id = self.project_id + model.participant = self.participant + model.grouping_keys = self.grouping_keys + model.min_participants = self.min_participants + model.outlier_threshold = self.outlier_threshold + model.variables = self.variables + return model + + def compute_average(self, local: bool = False) -> pd.DataFrame: + """ + compute_average runs the secure average computation. 
+ + Args: + local (bool, optional): defines whether the computation is run locally or collectively. Defaults to False. + + Returns: + pd.DataFrame: the resulting dataset that contains all of the averages + """ + model = self.get_model() + return ( + super() + .run_computation(comp=model, local=local, release=True)[0] + .get_dataframe() + ) diff --git a/src/tuneinsight/computations/errors.py b/src/tuneinsight/computations/errors.py index 039c2b5..235b772 100644 --- a/src/tuneinsight/computations/errors.py +++ b/src/tuneinsight/computations/errors.py @@ -3,62 +3,69 @@ from tuneinsight.api.sdk.models import ComputationError from tuneinsight.api.sdk.models import ComputationErrorType as ErrorType + class DisclosurePreventionError(Exception): - ''' + """ DisclosurePreventionError Exception class for the disclosure prevention error - ''' + """ def __init__(self, comp_error: ComputationError): - self.message = format_computation_error("computation aborted",comp_error) + self.message = format_computation_error("computation aborted", comp_error) super().__init__(self.message) class PreprocessingError(Exception): - ''' + """ PreprocessingError is used to represent error that happen during preprocessing - ''' + """ + def __init__(self, comp_error: ComputationError): - self.message = format_computation_error("error while preprocessing",comp_error) + self.message = format_computation_error("error while preprocessing", comp_error) super().__init__(self.message) class QueryError(Exception): - def __init__(self, comp_error: ComputationError): - self.message = format_computation_error("error while querying data",comp_error) + self.message = format_computation_error("error while querying data", comp_error) super().__init__(self.message) class InternalError(Exception): - ''' + """ InternalError is used represent unexpected errors that happened internally in the computation - ''' + """ + def __init__(self, comp_error: ComputationError): - self.message = format_computation_error("error happened internally",comp_error) + self.message = format_computation_error("error happened internally", comp_error) super().__init__(self.message) + class ValidationError(Exception): - ''' + """ ValidationError is used represent unexpected errors that happened while validating the data or user-defined parameters - ''' + """ + def __init__(self, comp_error: ComputationError): - self.message = format_computation_error("error while validating inputs",comp_error) + self.message = format_computation_error( + "error while validating inputs", comp_error + ) super().__init__(self.message) error_types = { - ErrorType.DISCLOSUREPREVENTION : DisclosurePreventionError, + ErrorType.DISCLOSUREPREVENTION: DisclosurePreventionError, ErrorType.INTERNAL: InternalError, ErrorType.PREPROCESSING: PreprocessingError, ErrorType.QUERY: QueryError, ErrorType.VALIDATION: ValidationError, } -''' +""" Mapping from computation error type to the appropriate exception -''' +""" + -def format_computation_error(prefix: str,error: ComputationError) -> str: - ''' +def format_computation_error(prefix: str, error: ComputationError) -> str: + """ format_computation_error formats the computation error to a string that is displayed to the user Args: @@ -67,12 +74,12 @@ def format_computation_error(prefix: str,error: ComputationError) -> str: Returns: str: the formatted error - ''' - return f'{prefix}: {error.message}' + """ + return f"{prefix}: {error.message}" def raise_computation_error(errors: List[ComputationError]): - ''' + """ raise_computation_error raises the 
appropriate given the list of errors from the computation and suppresses any traceback Args: @@ -81,12 +88,12 @@ def raise_computation_error(errors: List[ComputationError]): Raises: exc: the appropriate computation error when a common pattern is detected ComputationError: the default computation error when no common pattern is detected - ''' + """ with hidden_traceback_scope(): if len(errors) > 0: # Only take the first error err = errors[0] if err.type in error_types: exc = error_types[err.type] - raise exc(comp_error = err) + raise exc(comp_error=err) raise InternalError(comp_error=err) diff --git a/src/tuneinsight/computations/gwas.py b/src/tuneinsight/computations/gwas.py index 21e0b52..8f7d4c7 100644 --- a/src/tuneinsight/computations/gwas.py +++ b/src/tuneinsight/computations/gwas.py @@ -9,18 +9,25 @@ class GWAS(ComputationRunner): - """ Computation for a Genome-Wide Association Study (GWAS). + """Computation for a Genome-Wide Association Study (GWAS). Args: ComputationRunner (ComputationRunner): parent class for running computation through the REST API. """ - cohort_id: str = UNSET join_id: str = UNSET - def linear_regression(self, target_label:str = UNSET, variants_organization:str = UNSET, matching_params:models.MatchingParams = UNSET, covariates:List[str] = UNSET, locus_range:models.LocusRange = UNSET, local: bool = False) -> pd.DataFrame: - """ Run a linear regression for the GWAS. + def linear_regression( + self, + target_label: str = UNSET, + variants_organization: str = UNSET, + matching_params: models.MatchingParams = UNSET, + covariates: List[str] = UNSET, + locus_range: models.LocusRange = UNSET, + local: bool = False, + ) -> pd.DataFrame: + """Run a linear regression for the GWAS. Args: target_label (str, optional): name of the column containing the phenotypical trait to study. Defaults to UNSET. @@ -45,12 +52,14 @@ def linear_regression(self, target_label:str = UNSET, variants_organization:str model.timeout = 500 # self.max_timeout = 5 * self.max_timeout - dataobjects = super().run_computation(comp=model,local=local,keyswitch= not local,decrypt=True) + dataobjects = super().run_computation( + comp=model, local=local, keyswitch=not local, decrypt=True + ) result = dataobjects[0].get_float_matrix() p_values = result.data[0] if len(result.columns) == len(p_values): - data = {'locus': result.columns, 'p_value': p_values} + data = {"locus": result.columns, "p_value": p_values} else: data = p_values @@ -58,38 +67,63 @@ def linear_regression(self, target_label:str = UNSET, variants_organization:str @staticmethod def plot_manhattan(p_values: pd.DataFrame): - """ Display the GWAS result as a manhattan plot. + """Display the GWAS result as a manhattan plot. Args: p_values (pd.DataFrame): DataFrame containing p-values. 
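# A hedged sketch of the GWAS workflow, assuming `project` is a shared Project; the
# phenotype label, organization name and covariates are hypothetical placeholders.
gwas = project.new_gwas()
p_values = gwas.linear_regression(
    target_label="phenotype",           # hypothetical phenotype column
    variants_organization="clinic-a",   # hypothetical participant holding the variants
    covariates=["age", "sex"],          # hypothetical covariate columns
)
gwas.plot_manhattan(p_values)           # expects 'locus' and 'p_value' columns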
""" # Transform data for plot - p_values = p_values[['locus', 'p_value']] - p_values['chromosome'] = p_values[['locus']].applymap(lambda x: x.split(':')[0]) - p_values['minuslog10pvalue'] = -np.log10(p_values.p_value) - p_values.chromosome = p_values.chromosome.astype('category') - p_values['ind'] = range(len(p_values)) - p_grouped = p_values.groupby(('chromosome')) - + p_values = p_values[["locus", "p_value"]] + p_values["chromosome"] = p_values[["locus"]].applymap(lambda x: x.split(":")[0]) + p_values["minuslog10pvalue"] = -np.log10(p_values.p_value) + p_values.chromosome = p_values.chromosome.astype("category") + p_values["ind"] = range(len(p_values)) + p_grouped = p_values.groupby(("chromosome")) plt.style.use("bmh") fig, ax = plt.subplots() - - colors = ['#348ABD', '#A60628', '#7A68A6', '#467821', '#D55E00', '#CC79A7', '#56B4E9', '#009E73','#F0E442'] + colors = [ + "#348ABD", + "#A60628", + "#7A68A6", + "#467821", + "#D55E00", + "#CC79A7", + "#56B4E9", + "#009E73", + "#F0E442", + ] x_labels = [] x_labels_pos = [] for num, (name, group) in enumerate(p_grouped): - group.plot(kind='scatter', x='ind', y='minuslog10pvalue',color=colors[num % len(colors)], ax=ax) + group.plot( + kind="scatter", + x="ind", + y="minuslog10pvalue", + color=colors[num % len(colors)], + ax=ax, + ) x_labels.append(name) - x_labels_pos.append((group['ind'].iloc[-1] - (group['ind'].iloc[-1] - group['ind'].iloc[0])/2)) + x_labels_pos.append( + ( + group["ind"].iloc[-1] + - (group["ind"].iloc[-1] - group["ind"].iloc[0]) / 2 + ) + ) ax.set_xticks(x_labels_pos) ax.set_xticklabels(x_labels) ax.set_xlim([0, len(p_values)]) - style_plot(axis=ax, fig=fig, title="Manhattan Plot for GWAS", x_label='Chromosome', y_label="P-value (-log10 scale)") + style_plot( + axis=ax, + fig=fig, + title="Manhattan Plot for GWAS", + x_label="Chromosome", + y_label="P-value (-log10 scale)", + ) plt.show() diff --git a/src/tuneinsight/computations/hybrid_fl.py b/src/tuneinsight/computations/hybrid_fl.py index c3e4d2b..f9b714a 100644 --- a/src/tuneinsight/computations/hybrid_fl.py +++ b/src/tuneinsight/computations/hybrid_fl.py @@ -6,18 +6,19 @@ from matplotlib.ticker import MaxNLocator from tuneinsight.api.sdk.api.api_dataobject import get_data_object from tuneinsight.api.sdk import models -from tuneinsight.api.sdk.types import Response +from tuneinsight.api.sdk.types import Response from tuneinsight.client.validation import validate_response from tuneinsight.client.dataobject import DataObject from tuneinsight.client.computations import ComputationRunner + class HybridFL(ComputationRunner): def create_from_params( - self, - task_id: str, - learning_params: models.HybridFLLearningParams, - task_def: Optional[Dict[str, Union[str, int, float]]] = None, - ): + self, + task_id: str, + learning_params: models.HybridFLLearningParams, + task_def: Optional[Dict[str, Union[str, int, float]]] = None, + ): model = models.HybridFL(type=models.ComputationType.HYBRIDFL) model.task_id = task_id model.learning_params = learning_params @@ -28,10 +29,7 @@ def create_from_params( model.project_id = self.project_id dataobjects = super().run_computation( - comp = model, - local = False, - keyswitch = False, - decrypt = False + comp=model, local=False, keyswitch=False, decrypt=False ) return dataobjects @@ -41,36 +39,52 @@ def get_client_result(self, client) -> List[DataObject]: computation = client.get_project(self.project_id).model.computations[0] for result in computation.results: - response: Response[models.DataObject] = get_data_object.sync_detailed(client = 
client.client, data_object_id = result) + response: Response[models.DataObject] = get_data_object.sync_detailed( + client=client.client, data_object_id=result + ) validate_response(response) - results.append(DataObject(model=response.parsed, client = client.client)) + results.append(DataObject(model=response.parsed, client=client.client)) return results @staticmethod - def get_results(history, local_only = False): + def get_results(history, local_only=False): history = deepcopy(history) format_history(history) - train_metrics = history.metrics['train'] if local_only else history.init_metrics['train'] - test_metrics = history.metrics['val'] if local_only else history.init_metrics['val'] + train_metrics = ( + history.metrics["train"] if local_only else history.init_metrics["train"] + ) + test_metrics = ( + history.metrics["val"] if local_only else history.init_metrics["val"] + ) for key, value in train_metrics.items(): - train_metrics[key] = round(value[-1][-1] or -1, 4) if local_only else round(value[-1] or -1, 4) + train_metrics[key] = ( + round(value[-1][-1] or -1, 4) + if local_only + else round(value[-1] or -1, 4) + ) for key, value in test_metrics.items(): - test_metrics[key] = round(value[-1][-1] or -1, 4) if local_only else round(value[-1] or -1, 4) + test_metrics[key] = ( + round(value[-1][-1] or -1, 4) + if local_only + else round(value[-1] or -1, 4) + ) return train_metrics, test_metrics - def display_results(self, history, local_only = False, metrics_to_display = ('acc',)): + def display_results(self, history, local_only=False, metrics_to_display=("acc",)): history = deepcopy(history) format_history(history) - self.plot_timeline(history, local_only = local_only, metrics_to_display = metrics_to_display) + self.plot_timeline( + history, local_only=local_only, metrics_to_display=metrics_to_display + ) @staticmethod - def plot_timeline(history, local_only, metrics_to_display = ('acc',)): + def plot_timeline(history, local_only, metrics_to_display=("acc",)): n_plots = len(metrics_to_display) - _, ax = plt.subplots(n_plots, 1, figsize=(20, 4*n_plots)) + _, ax = plt.subplots(n_plots, 1, figsize=(20, 4 * n_plots)) ax = [ax] if n_plots == 1 else ax last_idx = -1 if local_only else None @@ -79,92 +93,161 @@ def plot_timeline(history, local_only, metrics_to_display = ('acc',)): start_timestamps = [x for x in history.start_timestamps if x is not None] end_timestamps = [x for x in history.end_timestamps if x is not None] - init_train_metrics = history.init_metrics['train'] - train_metrics = history.metrics['train'] - init_test_metrics = history.init_metrics['val'] - test_metrics = history.metrics['val'] - - agg_durations = [datetime.fromtimestamp(end/1000.0) - datetime.fromtimestamp(starts[0]/1000.0) for (starts, end) in zip(end_timestamps, init_timestamps[1:])] - local_epochs_durations = [datetime.fromtimestamp(ends[-1]/1000.0) - datetime.fromtimestamp(start/1000.0) for (start, ends) in zip(init_timestamps, end_timestamps)] + init_train_metrics = history.init_metrics["train"] + train_metrics = history.metrics["train"] + init_test_metrics = history.init_metrics["val"] + test_metrics = history.metrics["val"] + + agg_durations = [ + datetime.fromtimestamp(end / 1000.0) + - datetime.fromtimestamp(starts[0] / 1000.0) + for (starts, end) in zip(end_timestamps, init_timestamps[1:]) + ] + local_epochs_durations = [ + datetime.fromtimestamp(ends[-1] / 1000.0) + - datetime.fromtimestamp(start / 1000.0) + for (start, ends) in zip(init_timestamps, end_timestamps) + ] aggregation_width = 1 epochs = 
[list(range(len(x) + 1)) for x in start_timestamps] - flat_epochs = [x+i*(len(sublist)-1 + aggregation_width) for i, sublist in enumerate(epochs) for x in sublist] - flat_epochs = flat_epochs + [flat_epochs[-1]+aggregation_width] # adds last aggregation + flat_epochs = [ + x + i * (len(sublist) - 1 + aggregation_width) + for i, sublist in enumerate(epochs) + for x in sublist + ] + flat_epochs = flat_epochs + [ + flat_epochs[-1] + aggregation_width + ] # adds last aggregation x_points = flat_epochs merged_train_metrics = {} merged_test_metrics = {} for key in metrics_to_display: - merged_train_metrics[key] = [x for init, sub in zip(init_train_metrics[key], train_metrics[key]) for x in [init] + sub] + [init_train_metrics[key][-1]] - if key == 'acc': - merged_train_metrics[key] = [100*x for x in merged_train_metrics[key]] + merged_train_metrics[key] = [ + x + for init, sub in zip(init_train_metrics[key], train_metrics[key]) + for x in [init] + sub + ] + [init_train_metrics[key][-1]] + if key == "acc": + merged_train_metrics[key] = [100 * x for x in merged_train_metrics[key]] for key in metrics_to_display: - merged_test_metrics[key] = [x for init, sub in zip(init_test_metrics[key], test_metrics[key]) for x in [init] + sub] + [init_test_metrics[key][-1]] - if key == 'acc': - merged_test_metrics[key] = [100*x for x in merged_test_metrics[key]] + merged_test_metrics[key] = [ + x + for init, sub in zip(init_test_metrics[key], test_metrics[key]) + for x in [init] + sub + ] + [init_test_metrics[key][-1]] + if key == "acc": + merged_test_metrics[key] = [100 * x for x in merged_test_metrics[key]] limits = {} for key in metrics_to_display: - if key == 'loss': - max_val = max([x or 0 for x in merged_train_metrics[key][:last_idx] + merged_test_metrics[key][:last_idx]]) - margin = abs(max_val)*1/10 - limits[key] = (-margin, max_val+margin) - elif key == 'acc': + if key == "loss": + max_val = max( + x or 0 + for x in merged_train_metrics[key][:last_idx] + + merged_test_metrics[key][:last_idx] + ) + margin = abs(max_val) * 1 / 10 + limits[key] = (-margin, max_val + margin) + elif key == "acc": limits[key] = (-10, 110) - elif key in ['f1', 'auroc']: + elif key in ["f1", "auroc"]: limits[key] = (-0.1, 1.1) else: - min_val = min(merged_train_metrics[key][:last_idx] + merged_test_metrics[key][:last_idx]) - max_val = max(merged_train_metrics[key][:last_idx] + merged_test_metrics[key][:last_idx]) - margin = max(abs(max_val), abs(min_val))*1/10 - limits[key] = (min_val-margin, max_val+margin) + min_val = min( + merged_train_metrics[key][:last_idx] + + merged_test_metrics[key][:last_idx] + ) + max_val = max( + merged_train_metrics[key][:last_idx] + + merged_test_metrics[key][:last_idx] + ) + margin = max(abs(max_val), abs(min_val)) * 1 / 10 + limits[key] = (min_val - margin, max_val + margin) for i, metric in enumerate(metrics_to_display): - plot_axis(ax[i], x_points[:last_idx], merged_train_metrics[metric][:last_idx], merged_test_metrics[metric][:last_idx], limits[metric], epochs, aggregation_width, format_metric_name(metric), local_only) + plot_axis( + ax[i], + x_points[:last_idx], + merged_train_metrics[metric][:last_idx], + merged_test_metrics[metric][:last_idx], + limits[metric], + epochs, + aggregation_width, + format_metric_name(metric), + local_only, + ) # Display time information below graph - add_text(epochs, aggregation_width, agg_durations, local_epochs_durations, local_only) + add_text( + epochs, aggregation_width, agg_durations, local_epochs_durations, local_only + ) -def add_text(epochs, 
aggregation_width, agg_durations, local_epochs_durations, local_only): + +def add_text( + epochs, aggregation_width, agg_durations, local_epochs_durations, local_only +): text_margin = -0.2 for i, epoch in enumerate(epochs): - n_epochs = len(epoch)-1 - epoch_start = i*(n_epochs+aggregation_width) - epoch_end = i*(n_epochs+aggregation_width) + n_epochs - epochs_list = ', '.join(list(map(str, range(i*n_epochs+1, i*n_epochs+n_epochs+1)))) - text = f'Epochs {epochs_list}\n{local_epochs_durations[i].seconds} sec.' - plt.text((epoch_start+epoch_end)/2+0.2, text_margin, text, rotation=90, fontsize=14, ha='right', va='top') + n_epochs = len(epoch) - 1 + epoch_start = i * (n_epochs + aggregation_width) + epoch_end = i * (n_epochs + aggregation_width) + n_epochs + epochs_list = ", ".join( + list(map(str, range(i * n_epochs + 1, i * n_epochs + n_epochs + 1))) + ) + text = f"Epochs {epochs_list}\n{local_epochs_durations[i].seconds} sec." + plt.text( + (epoch_start + epoch_end) / 2 + 0.2, + text_margin, + text, + rotation=90, + fontsize=14, + ha="right", + va="top", + ) if len(agg_durations) > i and not local_only: - agg_start = len(epoch)-1+i*(len(epoch)-1+aggregation_width) + agg_start = len(epoch) - 1 + i * (len(epoch) - 1 + aggregation_width) agg_seconds = agg_durations[i].seconds - text = f'Aggregation: {agg_seconds} sec.' + text = f"Aggregation: {agg_seconds} sec." if agg_seconds < 2: agg_millis = agg_durations[i].total_seconds() - text += f' ({agg_millis})' - plt.text(agg_start+aggregation_width*0.5, text_margin, text, rotation=90, fontsize=14, ha='right', va='top') - -def plot_axis(axis, x, y_train, y_test, ylims, epochs, aggregation_width, label, local_only): + text += f" ({agg_millis})" + plt.text( + agg_start + aggregation_width * 0.5, + text_margin, + text, + rotation=90, + fontsize=14, + ha="right", + va="top", + ) + + +def plot_axis( + axis, x, y_train, y_test, ylims, epochs, aggregation_width, label, local_only +): last_idx = -1 if local_only else None axis.set_ylim(ylims[0], ylims[1]) axis.yaxis.tick_right() - axis.plot(x, y_train, marker='.', label='Train') - axis.plot(x, y_test, marker='.', label='Validation') - axis.grid(axis='y', linewidth=0.5) + axis.plot(x, y_train, marker=".", label="Train") + axis.plot(x, y_test, marker=".", label="Validation") + axis.grid(axis="y", linewidth=0.5) axis.xaxis.set_major_locator(MaxNLocator(integer=True)) axis.set_ylabel(label) for i, epoch in enumerate(epochs[:last_idx]): - agg_start = len(epoch)-1+i*(len(epoch)-1+aggregation_width) - agg_end = (i+1)*(len(epoch)-1+aggregation_width) - axis.axvspan(agg_start, agg_end, alpha=0.2, color='grey') + agg_start = len(epoch) - 1 + i * (len(epoch) - 1 + aggregation_width) + agg_end = (i + 1) * (len(epoch) - 1 + aggregation_width) + axis.axvspan(agg_start, agg_end, alpha=0.2, color="grey") axis.get_xaxis().set_ticks([]) axis.legend() + def format_history(history): metrics = {} for json_metrics in history.metrics: @@ -192,13 +275,14 @@ def format_history(history): init_metrics[split][key] = [metrics_array] history.init_metrics = init_metrics + def format_metric_name(metric_name: str) -> str: - if metric_name == 'loss': - return 'Loss' - if metric_name == 'acc': - return 'Accuracy (%)' - if metric_name == 'f1': - return 'F1-Score' - if metric_name == 'auroc': - return 'AUROC' + if metric_name == "loss": + return "Loss" + if metric_name == "acc": + return "Accuracy (%)" + if metric_name == "f1": + return "F1-Score" + if metric_name == "auroc": + return "AUROC" return metric_name diff --git 
a/src/tuneinsight/computations/intersection.py b/src/tuneinsight/computations/intersection.py new file mode 100644 index 0000000..282943a --- /dev/null +++ b/src/tuneinsight/computations/intersection.py @@ -0,0 +1,69 @@ +from typing import List, Any +import pandas as pd +from tuneinsight.api.sdk import models +from tuneinsight.client.computations import ComputationRunner + + +class SetIntersection(ComputationRunner): + """ + SetIntersection represents the private set intersection computation. + + Args: + ComputationRunner: Inherits all methods available from the computation runner parent class + """ + + def find_matches( + self, + col: str = None, + value: Any = None, + cols: List[str] = None, + values: List[Any] = None, + data: pd.DataFrame = None, + fuzzy: bool = False, + fuzzy_cols: List[str] = None, + ) -> pd.DataFrame: + """ + find_matches returns a dataframe that contains the result of the private set intersection across all nodes using the provided + column list as the matching columns. If an input is provided here then the remote instance will use it as its own input instead of + querying the database. + + Args: + col (str, optional): the single column to match items on. Defaults to None. + value (Any, optional): the single value to use as a local input. Defaults to None. + cols (List[str], optional): the list of joint columns to match items on. Defaults to None. + values (List[Any], optional): the list of values to use as local inputs. Defaults to None. + data (pd.DataFrame, optional): the dataframe to use as a local input. Defaults to None. + fuzzy (bool, optional): whether or not fuzzy matching is used. Defaults to False. + fuzzy_cols (List[str], optional): the list of columns that should be fuzzy matched. Defaults to None. + + Raises: + ValueError: in case invalid arguments are passed. + + Returns: + pd.DataFrame: the list of items that matched with other instances. + """ + + model = models.SetIntersection(type=models.ComputationType.SETINTERSECTION) + if col is not None: + cols = [col] + if cols is None: + raise ValueError("columns must be provided") + model.matching_columns = [str(col) for col in cols] + + if value is not None: + values = [value] + if values is not None: + data = pd.DataFrame(data=values, columns=cols) + + if data is not None: + self.set_local_input(data) + + if fuzzy: + if fuzzy_cols is None: + fuzzy_cols = cols + model.fuzzy_params = models.FuzzyMatchingParameters( + phonetic_columns=model.matching_columns + ) + + results = self.run_computation(model, local=False, release=True) + return results[0].get_dataframe() diff --git a/src/tuneinsight/computations/policy.py b/src/tuneinsight/computations/policy.py index 947dc60..af9ff7a 100644 --- a/src/tuneinsight/computations/policy.py +++ b/src/tuneinsight/computations/policy.py @@ -6,23 +6,21 @@ from tuneinsight.api.sdk.types import Unset - class Policy(models.ComputationPolicy): - ''' + """ Policy related to a computation.
Defines a set of constraints and Disclosure Prevention Mechanisms. - ''' - + """ def __init__(self): - ''' + """ __init__ initializes the policy with empty authorization lists and a default DP policy. - ''' + """ super().__init__() self.authorized_preprocessing_operations = [] self.authorized_data_source_queries = [] @@ -30,55 +28,68 @@ def __init__(self): self.flexible_parameters = [] self.dp_policy = models.DPPolicy() - - def add_authorized_preprocessing(self,operations: List[Operation]): - ''' + def add_authorized_preprocessing(self, operations: List[Operation]): + """ add_authorized_preprocessing appends the given operations to the set of authorized preprocessing operation types Args: operations (List[Operation]): the preprocessing operations to authorize - ''' + """ auth_operations = set(self.authorized_preprocessing_operations) for op in operations: - auth_operations.add(models.PreprocessingOperationType(op.to_preproc_operation_type())) + auth_operations.add( + models.PreprocessingOperationType(op.to_preproc_operation_type()) + ) self.authorized_preprocessing_operations = list(auth_operations) - - def add_authorized_query(self,query:str): - ''' + def add_authorized_query(self, query: str): + """ add_authorized_query adds the datasource query to the set of authorized queries Args: query (str): the data source query string - ''' + """ queries = set(self.authorized_data_source_queries) queries.add(query) self.authorized_data_source_queries = list(queries) @staticmethod - def __new_threshold(relative:bool=False,fixed_value:int=10,relative_factor:float=0.2) -> models.Threshold: + def __new_threshold( + relative: bool = False, fixed_value: int = 10, relative_factor: float = 0.2 + ) -> models.Threshold: if relative: threshold_type = models.ThresholdType.RELATIVE else: threshold_type = models.ThresholdType.FIXED - return models.Threshold(fixed_value=fixed_value,relative_factor=relative_factor,type=threshold_type) - - - def set_max_columns(self,relative:bool=False,fixed_value:int=5,relative_factor:float=0.2): - ''' + return models.Threshold( + fixed_value=fixed_value, + relative_factor=relative_factor, + type=threshold_type, + ) + + def set_max_columns( + self, relative: bool = False, fixed_value: int = 5, relative_factor: float = 0.2 + ): + """ set_max_columns Sets the maximum number of columns that can be input to the computation Args: relative (bool, optional): whether or not the maximum value is relative to the dataset size. Defaults to False. fixed_value (int, optional): absolute maximum value when not relative. Defaults to 5. relative_factor (float, optional): factor of the dataset size when relative. Defaults to 0.2.
- ''' - self.dp_policy.max_column_count = self.__new_threshold(relative,fixed_value,relative_factor) - - - def set_min_frequencies(self,relative:bool=False,fixed_value:int=10,relative_factor:float=0.2): - ''' + """ + self.dp_policy.max_column_count = self.__new_threshold( + relative, fixed_value, relative_factor + ) + + def set_min_frequencies( + self, + relative: bool = False, + fixed_value: int = 10, + relative_factor: float = 0.2, + ): + """ set_min_frequencies Sets the minimum number of frequencies required when the input dataset is used for counting The backend computation will verify that, for each numerical column, the sum of values is @@ -86,22 +97,36 @@ def set_min_frequencies(self,relative:bool=False,fixed_value:int=10,relative_fac relative (bool, optional): whether the threshold is relative or not. Defaults to False. fixed_value (int, optional): fixed value for the threshold. Defaults to 10. relative_factor (float, optional): the factor of the dataset size when the threshold is relative. Defaults to 0.2. - ''' - self.dp_policy.min_frequencies = self.__new_threshold(relative,fixed_value,relative_factor) - - def set_max_factor(self,relative:bool=False,fixed_value:int=10,relative_factor:float=0.2): - ''' + """ + self.dp_policy.min_frequencies = self.__new_threshold( + relative, fixed_value, relative_factor + ) + + def set_max_factor( + self, + relative: bool = False, + fixed_value: int = 10, + relative_factor: float = 0.2, + ): + """ set_max_factor Sets the maximum number of factors any column can take as input Args: relative (bool, optional): whether the threshold is relative or not. Defaults to False. fixed_value (int, optional): fixed value for the threshold. Defaults to 10. relative_factor (float, optional): the factor of the dataset size when the threshold is relative. Defaults to 0.1. - ''' - self.dp_policy.max_factors = self.__new_threshold(relative,fixed_value,relative_factor) - - - def set_output_noise(self,eps: float=1,sensitivity:float=1,discrete:bool = False,delta: float=0.0001): - ''' + """ + self.dp_policy.max_factors = self.__new_threshold( + relative, fixed_value, relative_factor + ) + + def set_output_noise( + self, + eps: float = 1, + sensitivity: float = 1, + discrete: bool = False, + delta: float = 0.0001, + ): + """ set_output_noise sets the noise parameters. when set, every computation output gets encrypted noise added to it. If the noise is non discrete, then the laplacian mechanism is used, otherwise the gaussian mechanism is used @@ -110,95 +135,137 @@ def set_output_noise(self,eps: float=1,sensitivity:float=1,discrete:bool = False sensitivity (float, optional): the sensitivity parameter should be equal to the maximum difference expected between two neighboring datasets. Defaults to 1. 
discrete (bool, optional): whether or not the noise should be discretized delta (float,optional): the delta value when the noise is discrete - ''' - self.dp_policy.noise_parameters = models.NoiseParameters(epsilon=eps,sensitivity=sensitivity,discrete=discrete,delta=delta) - + """ + self.dp_policy.noise_parameters = models.NoiseParameters( + epsilon=eps, sensitivity=sensitivity, discrete=discrete, delta=delta + ) displayed_labels = { - 'validateParameters': 'template validation', - 'fixedParameters': 'fixed', - 'flexibleParameters': 'flexible', - 'restrictDataSourceQueries': 'validate queries', - 'authorizedDataSourceQueries': 'authorized queries', - 'restrictPreprocessingOperations': 'validate operations', - 'authorizedPreprocessingOperations': 'authorized operations', - 'consideredVariables': 'authorized variables', - 'minDatasetSize': 'min rows', - 'minFrequencies': 'min frequencies', - 'maxColumnCount': 'max columns', - 'noiseParameters': 'noise', + "validateParameters": "template validation", + "fixedParameters": "fixed", + "flexibleParameters": "flexible", + "restrictDataSourceQueries": "validate queries", + "authorizedDataSourceQueries": "authorized queries", + "restrictPreprocessingOperations": "validate operations", + "authorizedPreprocessingOperations": "authorized operations", + "consideredVariables": "authorized variables", + "minDatasetSize": "min rows", + "minFrequencies": "min frequencies", + "maxColumnCount": "max columns", + "noiseParameters": "noise", } -def display_policy(p: models.ComputationPolicy,detailed: bool = False,show_queries: bool = False): - display(Markdown('### Workflow Restrictions')) +def display_policy( + p: models.ComputationPolicy, detailed: bool = False, show_queries: bool = False +): + display(Markdown("### Workflow Restrictions")) if not p.restrict_data_source_queries and not p.restrict_preprocessing_operations: display(Markdown("No workflow restrictions are set")) if p.restrict_preprocessing_operations: - display(Markdown(f"Set of restricted preprocessing operations: `{[op.value for op in p.authorized_preprocessing_operations]}`")) + display( + Markdown( + f"Set of restricted preprocessing operations: `{[op.value for op in p.authorized_preprocessing_operations]}`" + ) + ) if p.restrict_data_source_queries: - display(Markdown("The policy only allows for limited queries on the data source")) + display( + Markdown("The policy only allows for limited queries on the data source") + ) if show_queries: - display(Markdown('#### Authorized Queries')) + display(Markdown("#### Authorized Queries")) for q in p.authorized_data_source_queries: - display(Markdown(f'```sql\n{q}```\n')) - if not isinstance(p.dp_policy,Unset): + display(Markdown(f"```sql\n{q}```\n")) + if not isinstance(p.dp_policy, Unset): dp = p.dp_policy display_dp_policy(dp) if detailed: - display(Markdown('### Detailed Policy (JSON)')) + display(Markdown("### Detailed Policy (JSON)")) dict_values = p.to_dict() - dict_values.pop('authorizedDataSourceQueries') + dict_values.pop("authorizedDataSourceQueries") json_formatted_str = json.dumps(dict_values, indent=2) display(Markdown("```\n" + json_formatted_str + "\n ```")) - -def display_threshold(text:str,t: models.Threshold): +def display_threshold(text: str, t: models.Threshold): if t.type == models.ThresholdType.FIXED: - display(Markdown(f'{text}: ${t.fixed_value}$')) + display(Markdown(f"{text}: ${t.fixed_value}$")) else: - display(Markdown(f'{text}: ${t.relative_factor} N$ (where $N$ is the local dataset size) ')) + display( + Markdown( + 
f"{text}: ${t.relative_factor} N$ (where $N$ is the local dataset size) " + ) + ) -def display_dp_policy(dp : models.DPPolicy): - display(Markdown('### Disclosure Prevention Mechanisms')) +def display_dp_policy(dp: models.DPPolicy): + display(Markdown("### Disclosure Prevention Mechanisms")) params_set = False - if isinstance(dp.authorized_variables,list) and len(dp.authorized_variables) > 0: + if isinstance(dp.authorized_variables, list) and len(dp.authorized_variables) > 0: params_set = True - display(Markdown(f'Set of authorized variables: `{dp.authorized_variables}` _(any non-matching variable is automatically dropped)_')) - if not isinstance(dp.min_dataset_size,Unset): + display( + Markdown( + f"Set of authorized variables: `{dp.authorized_variables}` _(any non-matching variable is automatically dropped)_" + ) + ) + if not isinstance(dp.min_dataset_size, Unset): params_set = True - display(Markdown(f'minimum local dataset size: ${dp.min_dataset_size}$')) - if not isinstance(dp.min_global_dataset_size,Unset): + display(Markdown(f"minimum local dataset size: ${dp.min_dataset_size}$")) + if not isinstance(dp.min_global_dataset_size, Unset): params_set = True - display(Markdown(f'minimum global dataset size: ${dp.min_global_dataset_size}$')) - if not isinstance(dp.min_frequencies,Unset): + display( + Markdown(f"minimum global dataset size: ${dp.min_global_dataset_size}$") + ) + if not isinstance(dp.min_frequencies, Unset): params_set = True - display(Markdown('#### Minimum Frequencies')) - display_threshold("Threshold",dp.min_frequencies) - display(Markdown('This ensures that any Frequency $F$ is such that $F = 0$ or $F = N$ or $T <= F <= N - T$, where $N$ is the local dataset size and $T$ is the value of the threshold')) - if not isinstance(dp.max_column_count,Unset): + display(Markdown("#### Minimum Frequencies")) + display_threshold("Threshold", dp.min_frequencies) + display( + Markdown( + "This ensures that any Frequency $F$ is such that $F = 0$ or $F = N$ or $T <= F <= N - T$, where $N$ is the local dataset size and $T$ is the value of the threshold" + ) + ) + if not isinstance(dp.max_column_count, Unset): params_set = True - display(Markdown('#### Maximum Number of Columns')) - display_threshold("Threshold",dp.max_column_count) - display(Markdown('this limits the number of columns which can be created during the preprocessing, avoiding that a subsequent aggregation leaks individual information.')) - if not isinstance(dp.max_factors,Unset): + display(Markdown("#### Maximum Number of Columns")) + display_threshold("Threshold", dp.max_column_count) + display( + Markdown( + "this limits the number of columns which can be created during the preprocessing, avoiding that a subsequent aggregation leaks individual information." 
+ ) + ) + if not isinstance(dp.max_factors, Unset): params_set = True - display(Markdown('#### Maximum Number of Factors')) - display_threshold("Threshold",dp.max_factors) - display(Markdown('Verifies for each categorical variable that the number of factors does not exceed the threshold')) - if not isinstance(dp.noise_parameters,Unset): + display(Markdown("#### Maximum Number of Factors")) + display_threshold("Threshold", dp.max_factors) + display( + Markdown( + "Verifies for each categorical variable that the number of factors does not exceed the threshold" + ) + ) + if not isinstance(dp.noise_parameters, Unset): params_set = True np = dp.noise_parameters noise_type = "Laplacian Mechanism" if np.discrete: noise_type = "Discretized Gaussian Mechanism" - display(Markdown(f'#### Output Noise ({noise_type})')) - display(Markdown(f'epsilon: ${np.epsilon}$ (privacy budget, ideally should be set to values $< 1$ to ensure enough privacy)')) - display(Markdown(f'sensitivity: ${np.sensitivity}$ (maximum difference of result when computing over two neighboring datasets)')) + display(Markdown(f"#### Output Noise ({noise_type})")) + display( + Markdown( + f"epsilon: ${np.epsilon}$ (privacy budget, ideally should be set to values $< 1$ to ensure enough privacy)" + ) + ) + display( + Markdown( + f"sensitivity: ${np.sensitivity}$ (maximum difference of result when computing over two neighboring datasets)" + ) + ) if np.discrete: - display(Markdown(f'delta: ${np.delta}$ (secondary noise parameters should be set to ~ $1 / N$ where $N$ is an expected dataset size)')) + display( + Markdown( + f"delta: ${np.delta}$ (secondary noise parameters should be set to ~ $1 / N$ where $N$ is an expected dataset size)" + ) + ) if not params_set: display(Markdown("No disclosure prevention mechanism is set")) diff --git a/src/tuneinsight/computations/preprocessing.py b/src/tuneinsight/computations/preprocessing.py index 43d55cc..0418d13 100644 --- a/src/tuneinsight/computations/preprocessing.py +++ b/src/tuneinsight/computations/preprocessing.py @@ -1,10 +1,9 @@ -import inspect -import textwrap from enum import Enum -from typing import Dict,List,Callable +from typing import Dict, List, Callable from warnings import warn import pandas as pd +from tuneinsight.utils.code import get_code from tuneinsight.api.sdk.types import UNSET from tuneinsight.api.sdk import models from tuneinsight.computations.survival import SurvivalParameters @@ -12,7 +11,6 @@ from tuneinsight.api.sdk.models import PreprocessingOperationType as op_type - class Operation(Enum): ONE_HOT = op_type.ONEHOTENCODING PHONETIC = op_type.PHONETICENCODING @@ -42,18 +40,18 @@ class PreprocessingBuilder: def __init__(self): self.chain = [] self.compound_chain = {} - self.output_selection = models.Select(type=models.PreprocessingOperationType.SELECT,cols=[]) + self.output_selection = models.Select( + type=models.PreprocessingOperationType.SELECT, cols=[] + ) self.output_selection_set = False self.schema = None - def new_schema(self) -> DatasetSchema: self.schema = DatasetSchema() return self.schema - def new_chain(self, chain: List[models.PreprocessingOperation]): - """ Sets the global pre-processing chain + """Sets the global pre-processing chain Args: chain (List[models.PreprocessingOperation]): list of pre-processing operations constituting the chain @@ -65,7 +63,7 @@ def new_chain(self, chain: List[models.PreprocessingOperation]): return self def new_compound_chain(self, chain: Dict[str, models.PreprocessingChain]): - """ Sets the compound pre-processing chain + 
"""Sets the compound pre-processing chain Args: chain (Dict[str, models.PreprocessingChain]): dictionary mapping each node to its individual preprocessing chain @@ -76,8 +74,14 @@ def new_compound_chain(self, chain: Dict[str, models.PreprocessingChain]): self.compound_chain = chain return self - def one_hot_encoding(self, target_column: str, prefix: str, specified_types: List[str], nodes: List[str] = None): - """ Add a one hot encoding operation to the preprocessing chain. This operation encodes a target column into one hot encoding and extends the table with the resulting columns. + def one_hot_encoding( + self, + target_column: str, + prefix: str, + specified_types: List[str], + nodes: List[str] = None, + ): + """Add a one hot encoding operation to the preprocessing chain. This operation encodes a target column into one hot encoding and extends the table with the resulting columns. Args: @@ -89,11 +93,25 @@ def one_hot_encoding(self, target_column: str, prefix: str, specified_types: Lis Returns: self (PreprocessingBuilder): the updated PreprocessingBuilder """ - self.append_to_chain(models.OneHotEncoding(type=models.PreprocessingOperationType.ONEHOTENCODING, input_col = target_column, prefix = prefix, specified_types = specified_types), nodes) + self.append_to_chain( + models.OneHotEncoding( + type=models.PreprocessingOperationType.ONEHOTENCODING, + input_col=target_column, + prefix=prefix, + specified_types=specified_types, + ), + nodes, + ) return self - def select(self, columns: List[str], create_if_missing:bool = False, dummy_value:str = "", nodes: List[str] = None): - """ Add a select operation to the preprocessing chain. The operation selects specified columns from data. + def select( + self, + columns: List[str], + create_if_missing: bool = False, + dummy_value: str = "", + nodes: List[str] = None, + ): + """Add a select operation to the preprocessing chain. The operation selects specified columns from data. Args: columns (List[str]): list of column names to be selected @@ -104,27 +122,42 @@ def select(self, columns: List[str], create_if_missing:bool = False, dummy_value Returns: self (PreprocessingBuilder): the updated PreprocessingBuilder """ - self.append_to_chain(models.Select(type=models.PreprocessingOperationType.SELECT, cols=columns, create_if_missing=create_if_missing, dummy_value=dummy_value), nodes) + self.append_to_chain( + models.Select( + type=models.PreprocessingOperationType.SELECT, + cols=columns, + create_if_missing=create_if_missing, + dummy_value=dummy_value, + ), + nodes, + ) return self - - def set_columns(self,columns: List[str],create_if_missing:bool = False, dummy_value:str = ""): - ''' - set_columns set columns sets the selected columns after all other preprocessing blocks are applied + def set_columns( + self, columns: List[str], create_if_missing: bool = False, dummy_value: str = "" + ): + """ + set_columns sets the selected columns after all other preprocessing blocks are applied Args: columns (List[str]): list of column names to be selected create_if_missing (bool, optional): whether to create the columns if they do not exist. Defaults to False. dummy_value (str, optional): what to fill the created columns with. Defaults to "". 
- ''' + """ self.output_selection.cols = columns self.output_selection.create_if_missing = create_if_missing self.output_selection.dummy_value = dummy_value self.output_selection_set = True - - def filter(self, target_column: str, comparator:models.ComparisonType, value:str, numerical:bool = False, nodes: List[str] = None): - """ Add a filter operation to the preprocessing chain. The operation filters rows from the data under a given condition. + def filter( + self, + target_column: str, + comparator: models.ComparisonType, + value: str, + numerical: bool = False, + nodes: List[str] = None, + ): + """Add a filter operation to the preprocessing chain. The operation filters rows from the data under a given condition. Args: target_column (str): name of column to filter on @@ -136,11 +169,20 @@ def filter(self, target_column: str, comparator:models.ComparisonType, value:str Returns: self (PreprocessingBuilder): the updated PreprocessingBuilder """ - self.append_to_chain(models.Filter(type=models.PreprocessingOperationType.FILTER, col_name=target_column, comparator=comparator, value=value, numerical=numerical), nodes) + self.append_to_chain( + models.Filter( + type=models.PreprocessingOperationType.FILTER, + col_name=target_column, + comparator=comparator, + value=value, + numerical=numerical, + ), + nodes, + ) return self def counts(self, output_column_name: str, nodes: List[str] = None): - """ Add a counts operation to the preprocessing chain. The operation concatenates a new column filled with ones. + """Add a counts operation to the preprocessing chain. The operation concatenates a new column filled with ones. Args: output_column_name (str): name of the column to store the counts. If not specified, the name 'count' will be used. @@ -149,11 +191,17 @@ def counts(self, output_column_name: str, nodes: List[str] = None): Returns: self (PreprocessingBuilder): the updated PreprocessingBuilder """ - self.append_to_chain(models.Counts(type=models.PreprocessingOperationType.COUNTS, output_col=output_column_name), nodes) + self.append_to_chain( + models.Counts( + type=models.PreprocessingOperationType.COUNTS, + output_col=output_column_name, + ), + nodes, + ) return self def transpose(self, copy: bool = False, nodes: List[str] = None): - """ Add a transpose operation to the preprocessing chain. The operation transposes the index and columns of the data. + """Add a transpose operation to the preprocessing chain. The operation transposes the index and columns of the data. Args: @@ -163,11 +211,22 @@ def transpose(self, copy: bool = False, nodes: List[str] = None): Returns: self (PreprocessingBuilder): the updated PreprocessingBuilder """ - self.append_to_chain(models.Transpose(type=models.PreprocessingOperationType.TRANSPOSE, copy=copy), nodes) + self.append_to_chain( + models.Transpose( + type=models.PreprocessingOperationType.TRANSPOSE, copy=copy + ), + nodes, + ) return self - def set_index(self, columns: List[str], drop: bool = True, append: bool = False, nodes: List[str] = None): - """ Add a set index operation to the preprocessing chain. The operation sets the DataFrame index using existing columns. + def set_index( + self, + columns: List[str], + drop: bool = True, + append: bool = False, + nodes: List[str] = None, + ): + """Add a set index operation to the preprocessing chain. The operation sets the DataFrame index using existing columns. 
Args: columns (List[str]): list of column names to set as index @@ -178,11 +237,21 @@ def set_index(self, columns: List[str], drop: bool = True, append: bool = False, Returns: self (PreprocessingBuilder): the updated PreprocessingBuilder """ - self.append_to_chain(models.SetIndex(type=models.PreprocessingOperationType.SETINDEX, cols=columns, drop=drop, append=append), nodes) + self.append_to_chain( + models.SetIndex( + type=models.PreprocessingOperationType.SETINDEX, + cols=columns, + drop=drop, + append=append, + ), + nodes, + ) return self - def reset_index(self, drop: bool = False, level: List[str] = None, nodes: List[str] = None): - """ Add a reset index operation to the preprocessing chain. The operation resets the DataFrame index (or a level of it). + def reset_index( + self, drop: bool = False, level: List[str] = None, nodes: List[str] = None + ): + """Add a reset index operation to the preprocessing chain. The operation resets the DataFrame index (or a level of it). Args: drop (bool, optional): Whether to insert index into dataframe columns. This resets the index to the default integer index. Defaults to False. @@ -192,11 +261,25 @@ def reset_index(self, drop: bool = False, level: List[str] = None, nodes: List[s Returns: self (PreprocessingBuilder): the updated PreprocessingBuilder """ - self.append_to_chain(models.ResetIndex(type=models.PreprocessingOperationType.RESETINDEX, drop=drop, level=level), nodes) + self.append_to_chain( + models.ResetIndex( + type=models.PreprocessingOperationType.RESETINDEX, + drop=drop, + level=level, + ), + nodes, + ) return self - def rename(self, mapper: dict, axis: models.RenameAxis = models.RenameAxis.COLUMNS, copy: bool = True, errors: bool = True, nodes: List[str] = None): - """ Add a rename operation to the preprocessing chain. The operation alters axis labels. + def rename( + self, + mapper: dict, + axis: models.RenameAxis = models.RenameAxis.COLUMNS, + copy: bool = True, + errors: bool = True, + nodes: List[str] = None, + ): + """Add a rename operation to the preprocessing chain. The operation alters axis labels. Args: mapper (dict): Dict of name transformations to apply. @@ -209,11 +292,26 @@ def rename(self, mapper: dict, axis: models.RenameAxis = models.RenameAxis.COLUM self (PreprocessingBuilder): the updated PreprocessingBuilder """ mapper = models.RenameMapper.from_dict(mapper) - self.append_to_chain(models.Rename(type=models.PreprocessingOperationType.RENAME, mapper=mapper, axis=axis, copy=copy, errors=errors), nodes) + self.append_to_chain( + models.Rename( + type=models.PreprocessingOperationType.RENAME, + mapper=mapper, + axis=axis, + copy=copy, + errors=errors, + ), + nodes, + ) return self - def astype(self, type_map: dict, copy: bool = True, errors: bool = True, nodes: List[str] = None): - """ Add an as type operation to the preprocessing chain. The operation casts column types. + def astype( + self, + type_map: dict, + copy: bool = True, + errors: bool = True, + nodes: List[str] = None, + ): + """Add an as type operation to the preprocessing chain. The operation casts column types. Args: type_map (dict): Dict which maps column names to the data types they should be cast to. 
@@ -225,11 +323,25 @@ def astype(self, type_map: dict, copy: bool = True, errors: bool = True, nodes: self (PreprocessingBuilder): the updated PreprocessingBuilder """ type_map = models.AsTypeTypeMap.from_dict(type_map) - self.append_to_chain(models.AsType(type=models.PreprocessingOperationType.ASTYPE, type_map=type_map, copy=copy, errors=errors), nodes) + self.append_to_chain( + models.AsType( + type=models.PreprocessingOperationType.ASTYPE, + type_map=type_map, + copy=copy, + errors=errors, + ), + nodes, + ) return self - def extract(self, field:str, columns:List[str], names: List[str] = None, nodes: List[str] = None): - """ Add an extract operation to the preprocessing chain. The operation extracts field values from dict-like columns. + def extract( + self, + field: str, + columns: List[str], + names: List[str] = None, + nodes: List[str] = None, + ): + """Add an extract operation to the preprocessing chain. The operation extracts field values from dict-like columns. Args: @@ -241,29 +353,55 @@ def extract(self, field:str, columns:List[str], names: List[str] = None, nodes: Returns: self (PreprocessingBuilder): the updated PreprocessingBuilder """ - assert isinstance(columns,list) - self.append_to_chain(models.ExtractDictField(type=models.PreprocessingOperationType.EXTRACTDICTFIELD, field=field, cols=columns, names=names), nodes) + assert isinstance(columns, list) + self.append_to_chain( + models.ExtractDictField( + type=models.PreprocessingOperationType.EXTRACTDICTFIELD, + field=field, + cols=columns, + names=names, + ), + nodes, + ) return self - def apply_regex(self, regex:str, columns:List[str], names: List[str] = None, regex_type: models.ApplyRegExRegexType = models.ApplyRegExRegexType.MATCH, nodes: List[str] = None): - """ Add an apply regex operation to the preprocessing chain. The operation applies a regular expression to columns. + def apply_regex( + self, + regex: str, + columns: List[str], + names: List[str] = None, + regex_type: models.ApplyRegExRegexType = models.ApplyRegExRegexType.MATCH, + nodes: List[str] = None, + ): + """Add an apply regex operation to the preprocessing chain. The operation applies a regular expression to columns. Args: regex (str): regular expression to apply. columns (List[str]): list of column names to apply the regular expression to. regex_type (models.ApplyRegExRegexType, optional): defines what we want to retrieve from the regex (see ApplyRegExRegexType). Defaults to models.ApplyRegExRegexType.MATCH. - names (List[str]): names of resulting columns. + names (List[str]): names of resulting columns. If None, this operation is in place. nodes (List[str], optional): list of the name of nodes to apply the operation on, applied to all if None. Defaults to None. 
Returns: self (PreprocessingBuilder): the updated PreprocessingBuilder """ - assert isinstance(columns,list) - self.append_to_chain(models.ApplyRegEx(type=models.PreprocessingOperationType.APPLYREGEX, regex=regex, cols=columns, regex_type=regex_type, names=names), nodes) + assert isinstance(columns, list) + self.append_to_chain( + models.ApplyRegEx( + type=models.PreprocessingOperationType.APPLYREGEX, + regex=regex, + cols=columns, + regex_type=regex_type, + names=names, + ), + nodes, + ) return self - def quantiles(self,input_:str,min_v: float,max_v: float,nodes: List[str] = None): - ''' + def quantiles( + self, input_: str, min_v: float, max_v: float, nodes: List[str] = None + ): + """ quantiles computes the local quantiles (min,q1,median,q3,max) of the input column, and then applies the following transformation to each quantile: - normalization using the given `min_v` and `max_v` values. @@ -282,13 +420,30 @@ def quantiles(self,input_:str,min_v: float,max_v: float,nodes: List[str] = None) Returns: self (PreprocessingBuilder): the updated PreprocessingBuilder - ''' - self.append_to_chain(models.Quantiles(type=models.PreprocessingOperationType.QUANTILES,input_=input_,min_=min_v,max_=max_v),nodes=nodes) + """ + self.append_to_chain( + models.Quantiles( + type=models.PreprocessingOperationType.QUANTILES, + input_=input_, + min_=min_v, + max_=max_v, + ), + nodes=nodes, + ) return self - def time_diff(self,start: str,end:str,output: str,unit: models.TimeUnit = models.TimeUnit.MONTHS,unit_value=1,filter_na: bool = True, nodes: List[str] = None): - ''' - time_diff computes the time difference between two datetime columns (must be parsable dates/times + def time_diff( + self, + start: str, + end: str, + output: str, + unit: models.TimeUnit = models.TimeUnit.MONTHS, + unit_value=1, + filter_na: bool = True, + nodes: List[str] = None, + ): + """ + time_diff computes the time difference between two datetime columns (must be parsable dates/times) Args: start (str): the column indicating the start of the measurement @@ -301,28 +456,47 @@ def time_diff(self,start: str,end:str,output: str,unit: models.TimeUnit = models Returns: self (PreprocessingBuilder): the updated PreprocessingBuilder - ''' - duration = models.Duration(unit=unit,value=unit_value) - self.append_to_chain(models.TimeDiff(type=models.PreprocessingOperationType.TIMEDIFF,start=start,end=end,output=output,interval=duration,filter_na=filter_na),nodes) + """ + duration = models.Duration(unit=unit, value=unit_value) + self.append_to_chain( + models.TimeDiff( + type=models.PreprocessingOperationType.TIMEDIFF, + start=start, + end=end, + output=output, + interval=duration, + filter_na=filter_na, + ), + nodes, + ) return self - def dropna(self,subset: List[str] = None,nodes: List[str] = None): - ''' + def dropna(self, subset: List[str] = None, nodes: List[str] = None): + """ dropna Drops all rows that contain NaN values by calling the standard pandas function; it also converts strings with the value 'NaN' to actual NaN values Args: nodes (List[str], optional): the list of nodes to apply this preprocessing operation to. Defaults to None.
Returns: self (PreprocessingBuilder): the updated PreprocessingBuilder - ''' + """ if subset is None: subset = UNSET - self.append_to_chain(models.Dropna(type=models.PreprocessingOperationType.DROPNA,subset=subset),nodes) + self.append_to_chain( + models.Dropna(type=models.PreprocessingOperationType.DROPNA, subset=subset), + nodes, + ) return self - - def apply_mapping(self,input_: str,output: str,mapping: Dict[str,str],default: str = "",nodes: List[str] = None): - ''' + def apply_mapping( + self, + input_: str, + output: str, + mapping: Dict[str, str], + default: str = "", + nodes: List[str] = None, + ): + """ apply_mapping creates a new column based on another column, given a mapping defined by the user Args: @@ -334,17 +508,33 @@ def apply_mapping(self,input_: str,output: str,mapping: Dict[str,str],default: s Returns: self (PreprocessingBuilder): the updated PreprocessingBuilder - ''' + """ sm = models.StringMapping.from_dict(mapping) # Convert all keys and values to strings - for key,value in sm.additional_properties.items(): + for key, value in sm.additional_properties.items(): sm.additional_properties[key] = str(value) - self.append_to_chain(models.ApplyMapping(type=models.PreprocessingOperationType.APPLYMAPPING,input_=input_,output=output,mapping=sm,default=str(default)),nodes=nodes) + self.append_to_chain( + models.ApplyMapping( + type=models.PreprocessingOperationType.APPLYMAPPING, + input_=input_, + output=output, + mapping=sm, + default=str(default), + ), + nodes=nodes, + ) return self - def cut(self,input_:str,output:str,cuts: List[float],labels: List[str],nodes: List[str] = None): - ''' + def cut( + self, + input_: str, + output: str, + cuts: List[float], + labels: List[str] = None, + nodes: List[str] = None, + ): + """ cut transforms a numeric variable into categories according to a list of cuts and labels defined by the user Args: @@ -356,21 +546,38 @@ def cut(self,input_:str,output:str,cuts: List[float],labels: List[str],nodes: Li Returns: self (PreprocessingBuilder): the updated PreprocessingBuilder - ''' + """ if len(labels) != len(cuts) - 1: - raise ValueError(f"wrong number of labels, expected {len(cuts) - 1}, got {len(labels)}") + raise ValueError( + f"wrong number of labels, expected {len(cuts) - 1}, got {len(labels)}" + ) # Make sure the values passed are in appropriate format - for i,_ in enumerate(cuts): + for i, _ in enumerate(cuts): cuts[i] = float(cuts[i]) - for i,_ in enumerate(labels): + for i, _ in enumerate(labels): labels[i] = str(labels[i]) - self.append_to_chain(models.Cut(type=models.PreprocessingOperationType.CUT,input_=input_,output=output,cuts=cuts,labels=labels),nodes=nodes) + self.append_to_chain( + models.Cut( + type=models.PreprocessingOperationType.CUT, + input_=input_, + output=output, + cuts=cuts, + labels=labels, + ), + nodes=nodes, + ) return self - - def deviation_squares(self,input_:str,output_:str,mean: float,count:int = 0,nodes: List[str] = None): - ''' + def deviation_squares( + self, + input_: str, + output_: str, + mean: float, + count: int = 0, + nodes: List[str] = None, + ): + """ deviation_squares creates a new column where each value is equal to (df[input] - mean)^2- / (count - 1) if count is < 1 then the denominator is equal to 1 (computes the squared deviation) should be used when computing the variance of a variable once the global mean and count are known @@ -381,14 +588,29 @@ def deviation_squares(self,input_:str,output_:str,mean: float,count:int = 0,node mean (float): the previously computed global mean count (int, optional): 
the previously computed global count. Defaults to 0. nodes (List[str], optional): the nodes to assign the preprocessing operation to. Defaults to None. - ''' + """ mean = float(mean) count = int(count) - self.append_to_chain(models.DeviationSquares(models.PreprocessingOperationType.DEVIATIONSQUARES,count=count,input_=input_,output=output_,mean=mean),nodes=nodes) - - - def add_columns(self,input_cols: List[str],output:str,sep: str="",numerical: bool = False,nodes: List[str] = None): - ''' + self.append_to_chain( + models.DeviationSquares( + models.PreprocessingOperationType.DEVIATIONSQUARES, + count=count, + input_=input_, + output=output_, + mean=mean, + ), + nodes=nodes, + ) + + def add_columns( + self, + input_cols: List[str], + output: str, + sep: str = "", + numerical: bool = False, + nodes: List[str] = None, + ): + """ add_columns adds the specified columns together, if the columns are not numerical a separator can be additionally specified Args: @@ -397,13 +619,26 @@ def add_columns(self,input_cols: List[str],output:str,sep: str="",numerical: boo sep (str, optional): separator when the columns are strings. Defaults to "". numerical (bool, optional): whether or not to add numerically. Defaults to False. nodes (List[str], optional): the nodes for which the preprocessing applies to. Defaults to None. - ''' - self.append_to_chain(models.AddColumns(type=models.PreprocessingOperationType.ADDCOLUMNS,input_columns=input_cols,output=output,sep=sep,numerical=numerical),nodes) - - - - def custom(self,function: Callable[[pd.DataFrame], pd.DataFrame],name: str = "",description: str = "",nodes:List[str] = None): - ''' + """ + self.append_to_chain( + models.AddColumns( + type=models.PreprocessingOperationType.ADDCOLUMNS, + input_columns=input_cols, + output=output, + sep=sep, + numerical=numerical, + ), + nodes, + ) + + def custom( + self, + function: Callable[[pd.DataFrame], pd.DataFrame], + name: str = "", + description: str = "", + nodes: List[str] = None, + ): + """ custom adds a custom python preprocessing block to the chain. **WARNING**: This preprocessing operation can be dangerous as it enables the users to run code on other organization's machine. Workflows containing such preprocessing blocks should always be carefully reviewed by the responsible data controllers from each @@ -417,12 +652,25 @@ def custom(self,function: Callable[[pd.DataFrame], pd.DataFrame],name: str = "", Returns: self (PreprocessingBuilder): the updated PreprocessingBuilder - ''' - self.append_to_chain(models.Custom(type=models.PreprocessingOperationType.CUSTOM,name=name,description=description,function=textwrap.dedent(inspect.getsource(function))),nodes) + """ + self.append_to_chain( + models.Custom( + type=models.PreprocessingOperationType.CUSTOM, + name=name, + description=description, + function=get_code(function), + ), + nodes, + ) return self - def gwas_preprocessing(self, genomic_nodes: List[str], clinical_nodes: List[str], sample_cols: List[str]): - """ Add the necessary preprocessing operations for a Genome-Wide Association Study. + def gwas_preprocessing( + self, + genomic_nodes: List[str], + clinical_nodes: List[str], + sample_cols: List[str], + ): + """Add the necessary preprocessing operations for a Genome-Wide Association Study. Args: genomic_nodes (List[str]): list of names of the nodes containing genomic data. 
@@ -432,17 +680,25 @@ def gwas_preprocessing(self, genomic_nodes: List[str], clinical_nodes: List[str] Returns: self (PreprocessingBuilder): the updated PreprocessingBuilder """ - self.set_index(columns=['LOCUS'], nodes=genomic_nodes) + self.set_index(columns=["LOCUS"], nodes=genomic_nodes) self.select(columns=sample_cols, nodes=genomic_nodes) - self.extract(field='GT_type', columns=sample_cols, nodes=genomic_nodes) + self.extract(field="GT_type", columns=sample_cols, nodes=genomic_nodes) self.transpose(nodes=genomic_nodes) self.reset_index(nodes=genomic_nodes) - self.rename(mapper={'index': 'ID'}, axis=models.RenameAxis.COLUMNS, nodes=genomic_nodes) - self.rename(mapper={'Sample': 'ID'}, axis=models.RenameAxis.COLUMNS, nodes=clinical_nodes) + self.rename( + mapper={"index": "ID"}, axis=models.RenameAxis.COLUMNS, nodes=genomic_nodes + ) + self.rename( + mapper={"Sample": "ID"}, + axis=models.RenameAxis.COLUMNS, + nodes=clinical_nodes, + ) return self - def create_survival_columns(self, params: SurvivalParameters, nodes: List[str] = None): - """ Add the necessary preprocessing operations for a survival analysis. + def create_survival_columns( + self, params: SurvivalParameters, nodes: List[str] = None + ): + """Add the necessary preprocessing operations for a survival analysis. Args: params (SurvivalParameters): parameters of the survival analysis. @@ -451,11 +707,13 @@ def create_survival_columns(self, params: SurvivalParameters, nodes: List[str] = Returns: self (PreprocessingBuilder): the updated PreprocessingBuilder """ - self.append_to_chain(params.get_preprocessing_op(),nodes) + self.append_to_chain(params.get_preprocessing_op(), nodes) return self - def append_to_chain(self, op: models.PreprocessingOperation, nodes: List[str] = None): - """ Append a preprocessing operation to the global or compound chain. + def append_to_chain( + self, op: models.PreprocessingOperation, nodes: List[str] = None + ): + """Append a preprocessing operation to the global or compound chain. Args: op (models.PreprocessingOperation): the preprocessing operation to append @@ -464,7 +722,7 @@ def append_to_chain(self, op: models.PreprocessingOperation, nodes: List[str] = if nodes is None: self.chain.append(op) else: - assert isinstance(nodes,list) + assert isinstance(nodes, list) for node in nodes: if node not in self.compound_chain.keys(): self.compound_chain.update({node: models.PreprocessingChain([op])}) @@ -478,11 +736,21 @@ def check_validity(self): Check the validity of the preprocessing chains. """ if self.check_chain(self.chain) is True: - warn("Preprocessing chain contains one hot encoding without a subsequent select. This could lead to an error if nodes have different categorical values. \n Chain: " + str(self.chain), stacklevel=2) + warn( + "Preprocessing chain contains one hot encoding without a subsequent select. This could lead to an error if nodes have different categorical values. \n Chain: " + + str(self.chain), + stacklevel=2, + ) for node, node_chain in self.compound_chain.items(): if self.check_chain(node_chain.chain) is True: - warn("Preprocessing chain for node " + node + " contains one hot encoding without a subsequent select. This could lead to an error if nodes have different categorical values. \n Chain: " + str(node_chain), stacklevel=2) + warn( + "Preprocessing chain for node " + + node + + " contains one hot encoding without a subsequent select. This could lead to an error if nodes have different categorical values. 
\n Chain: " + + str(node_chain), + stacklevel=2, + ) @staticmethod def check_chain(chain: models.PreprocessingChain) -> bool: @@ -498,14 +766,15 @@ def check_chain(chain: models.PreprocessingChain) -> bool: return one_hot_without_select - def get_params(self) -> models.ComputationPreprocessingParameters: res = models.ComputationPreprocessingParameters() self.check_validity() if self.chain != []: res.global_preprocessing = models.PreprocessingChain(self.chain) if self.compound_chain != {}: - compound_params = models.ComputationPreprocessingParametersCompoundPreprocessing() + compound_params = ( + models.ComputationPreprocessingParametersCompoundPreprocessing() + ) compound_params.additional_properties = self.compound_chain res.compound_preprocessing = compound_params if self.output_selection_set: diff --git a/src/tuneinsight/computations/private_search.py b/src/tuneinsight/computations/private_search.py index 9b865a0..6776235 100644 --- a/src/tuneinsight/computations/private_search.py +++ b/src/tuneinsight/computations/private_search.py @@ -3,7 +3,7 @@ from matplotlib.ticker import MaxNLocator from tuneinsight.api.sdk import models -from tuneinsight.api.sdk.types import Response +from tuneinsight.api.sdk.types import Response from tuneinsight.api.sdk.api.api_private_search import get_private_search_database from tuneinsight.client.session import PIRSession from tuneinsight.client.project import Project @@ -13,15 +13,13 @@ class PrivateSearch(ComputationRunner): - """ Private Search computation - - """ + """Private Search computation""" pir_db: models.PrivateSearchDatabase - pir_dataset_id: str = '' + pir_dataset_id: str = "" session: PIRSession - def __init__(self,project: Project, pir_dataset: str): + def __init__(self, project: Project, pir_dataset: str): """ __init__ initializes the private search computation and the corresponding session @@ -37,7 +35,6 @@ def __init__(self,project: Project, pir_dataset: str): self.session = PIRSession(self.client, self.pir_db) self.session.upload_eval_keys() - def query(self, query: str) -> pd.DataFrame: """Perform a private search query @@ -53,12 +50,17 @@ def query(self, query: str) -> pd.DataFrame: pir.pir_search_object_id = self.session.encrypt_query(query) except ValueError: return pd.DataFrame() - dataobjects = super().run_computation(comp=pir,keyswitch=False,decrypt=False) + dataobjects = super().run_computation(comp=pir, keyswitch=False, decrypt=False) result = dataobjects[0].get_raw_data() return self.session.decrypt_response(result) @staticmethod - def filter_result(result: pd.DataFrame, start: str = None, end: str = None, granularity: str = None) -> pd.DataFrame: + def filter_result( + result: pd.DataFrame, + start: str = None, + end: str = None, + granularity: str = None, + ) -> pd.DataFrame: """Filter the query result on dates Args: @@ -77,12 +79,19 @@ def filter_result(result: pd.DataFrame, start: str = None, end: str = None, gran filtered_result = filtered_result.loc[:end] if granularity is not None: filtered_result = filtered_result.reset_index() - filtered_result['Date'] = pd.to_datetime(filtered_result['index']) - filtered_result = filtered_result.resample(granularity, on='Date').sum() + filtered_result["Date"] = pd.to_datetime(filtered_result["index"]) + filtered_result = filtered_result.resample(granularity, on="Date").sum() return filtered_result.transpose() @staticmethod - def plot_result(result: pd.DataFrame, title:str,x_label:str, y_label:str, size:tuple=(8,4),timestamps: bool = False): + def plot_result( + result: pd.DataFrame, 
+ title: str, + x_label: str, + y_label: str, + size: tuple = (8, 4), + timestamps: bool = False, + ): """Plot the private search result Args: @@ -109,7 +118,6 @@ def plot_result(result: pd.DataFrame, title:str,x_label:str, y_label:str, size:t plt.show() - def get_pir_db(self) -> models.PrivateSearchDatabase: """Retrieve the private search database given the client and database id @@ -117,6 +125,10 @@ def get_pir_db(self) -> models.PrivateSearchDatabase: models.PrivateSearchDatabase: the private search database """ self.client.timeout = 30 - response: Response[models.PrivateSearchDatabase] = get_private_search_database.sync_detailed(client=self.client,database_id=self.pir_dataset_id) + response: Response[models.PrivateSearchDatabase] = ( + get_private_search_database.sync_detailed( + client=self.client, database_id=self.pir_dataset_id + ) + ) validate_response(response) return response.parsed diff --git a/src/tuneinsight/computations/queries.py b/src/tuneinsight/computations/queries.py index 7a0e606..78a9968 100644 --- a/src/tuneinsight/computations/queries.py +++ b/src/tuneinsight/computations/queries.py @@ -1,12 +1,14 @@ -from typing import Dict,List +from typing import Dict, List from tuneinsight.api.sdk import models -from tuneinsight.api.sdk.models.computation_data_source_parameters import ComputationDataSourceParameters +from tuneinsight.api.sdk.models.computation_data_source_parameters import ( + ComputationDataSourceParameters, +) from tuneinsight.api.sdk.models.data_source_query import DataSourceQuery -class QueryBuilder: +class QueryBuilder: global_query: DataSourceQuery - compound_query: Dict[str,DataSourceQuery] + compound_query: Dict[str, DataSourceQuery] query_set: bool datasource_id: str @@ -16,7 +18,7 @@ def __init__(self): self.query_set = False self.datasource_id = "" - def _set_query(self,query_type: str,query: str,nodes: List[str] = None): + def _set_query(self, query_type: str, query: str, nodes: List[str] = None): if nodes is None: setattr(self.global_query, query_type, query) else: @@ -26,33 +28,33 @@ def _set_query(self,query_type: str,query: str,nodes: List[str] = None): setattr(self.compound_query[node], query_type, query) self.query_set = True - def _set_query_from_dict(self,query_type: str,query_dict: Dict[str, str]): + def _set_query_from_dict(self, query_type: str, query_dict: Dict[str, str]): for node in query_dict: query = query_dict[node] self._set_query(query_type, query, [node]) - def set_database_query(self,query: str,nodes: List[str] = None): + def set_database_query(self, query: str, nodes: List[str] = None): self._set_query("database_query", query, nodes) - def set_database_query_from_dict(self,query_dict: Dict[str, str]): + def set_database_query_from_dict(self, query_dict: Dict[str, str]): self._set_query_from_dict("database_query", query_dict) - def set_api_request_body(self,request_body: str,nodes: List[str] = None): + def set_api_request_body(self, request_body: str, nodes: List[str] = None): self._set_query("api_request_body", request_body, nodes) - def set_api_request_body_from_dict(self,query_dict: Dict[str, str]): + def set_api_request_body_from_dict(self, query_dict: Dict[str, str]): self._set_query_from_dict("api_request_body", query_dict) - def set_api_path_query(self,query: str,nodes: List[str] = None): + def set_api_path_query(self, query: str, nodes: List[str] = None): self._set_query("api_path_query", query, nodes) - def set_api_path_query_from_dict(self,query_dict: Dict[str, str]): + def set_api_path_query_from_dict(self, query_dict: 
Dict[str, str]): self._set_query_from_dict("api_path_query", query_dict) - def set_api_json_path(self,json_path: str,nodes: List[str] = None): + def set_api_json_path(self, json_path: str, nodes: List[str] = None): self._set_query("api_json_path", json_path, nodes) - def set_api_json_path_from_dict(self,query_dict: Dict[str, str]): + def set_api_json_path_from_dict(self, query_dict: Dict[str, str]): self._set_query_from_dict("api_json_path", query_dict) def get_parameters(self) -> models.ComputationDataSourceParameters: @@ -65,11 +67,11 @@ def get_parameters(self) -> models.ComputationDataSourceParameters: params.compound_disabled = True return params - def set_parameters(self,params :models.ComputationDataSourceParameters): + def set_parameters(self, params: models.ComputationDataSourceParameters): self.query_set = False - if isinstance(params.data_source_query,DataSourceQuery): + if isinstance(params.data_source_query, DataSourceQuery): self.global_query = params.data_source_query self.query_set = True - if isinstance(params.compound_query,models.DataSourceCompoundQuery): + if isinstance(params.compound_query, models.DataSourceCompoundQuery): self.compound_query = params.compound_query.additional_properties self.query_set = True diff --git a/src/tuneinsight/computations/regression.py b/src/tuneinsight/computations/regression.py index 4b72db1..2de48c5 100644 --- a/src/tuneinsight/computations/regression.py +++ b/src/tuneinsight/computations/regression.py @@ -1,13 +1,15 @@ import time import itertools import uuid -from typing import List,Dict +from typing import List, Dict from typing_extensions import Self import pandas as pd from tuneinsight.api.sdk import models from tuneinsight.api.sdk.models.computation_type import ComputationType from tuneinsight.api.sdk import Client -from tuneinsight.api.sdk.models.encrypted_regression_params import EncryptedRegressionParams +from tuneinsight.api.sdk.models.encrypted_regression_params import ( + EncryptedRegressionParams, +) from tuneinsight.api.sdk.types import UNSET from tuneinsight.client.computations import ComputationRunner from tuneinsight.client.datasource import DataSource @@ -15,22 +17,28 @@ class Regression(ComputationRunner): - """ Regression computation - - """ + """Regression computation""" feature_columns: List[str] label_columns: List[str] model: models.EncryptedRegression - predict_model : models.EncryptedPrediction + predict_model: models.EncryptedPrediction encrypted_model_dataobject_id: str type: models.RegressionType - - def __init__(self, reg_type: models.RegressionType, client:Client = UNSET, project_id:str=""): + def __init__( + self, + reg_type: models.RegressionType, + client: Client = UNSET, + project_id: str = "", + ): self.type = reg_type - self.model = models.EncryptedRegression(type=models.ComputationType.ENCRYPTEDREGRESSION) - self.predict_model = models.EncryptedPrediction(type=ComputationType.ENCRYPTEDPREDICTION) + self.model = models.EncryptedRegression( + type=models.ComputationType.ENCRYPTEDREGRESSION + ) + self.predict_model = models.EncryptedPrediction( + type=ComputationType.ENCRYPTEDPREDICTION + ) self.model.params = EncryptedRegressionParams(type=reg_type, seed=0) super().__init__(client=client, project_id=project_id) self.max_timeout = self.max_timeout * 10 @@ -39,9 +47,17 @@ def __init__(self, reg_type: models.RegressionType, client:Client = UNSET, proje def copy(self): return type(self)(self.type, self.client, self.project_id) - - def fit(self, X: List[str], y: List[str], learning_rate=0.02, 
network_iteration_count=1, seed=0, elastic=0.85, momentum=0.92) -> Self: - """ Fit the regression model + def fit( + self, + X: List[str], + y: List[str], + learning_rate=0.02, + network_iteration_count=1, + seed=0, + elastic=0.85, + momentum=0.92, + ) -> Self: + """Fit the regression model Args: X (List[str]): Column names of the features @@ -63,15 +79,16 @@ def fit(self, X: List[str], y: List[str], learning_rate=0.02, network_iteration_ self.model.params.elastic_rate = elastic self.model.params.momentum = momentum - dataobjects = super().run_computation(comp=self.model, local = False, keyswitch=False, decrypt=False) + dataobjects = super().run_computation( + comp=self.model, local=False, keyswitch=False, decrypt=False + ) self.encrypted_model_dataobject_id = dataobjects[0].get_id() return self - def predict(self, X: pd.DataFrame) -> models.FloatMatrix: - """ Predict using the model + """Predict using the model Args: X (pd.DataFrame): Test data @@ -83,7 +100,9 @@ def predict(self, X: pd.DataFrame) -> models.FloatMatrix: self.predict_model.model = self.encrypted_model_dataobject_id ds_uid = uuid.uuid4() - ds = DataSource.from_dataframe(client=self.client,dataframe=X, name="predict_data_"+str(ds_uid)) + ds = DataSource.from_dataframe( + client=self.client, dataframe=X, name="predict_data_" + str(ds_uid) + ) do = ds.adapt(models.DataObjectType.TABLE) ds.delete() self.predict_model.data = do.get_id() @@ -92,11 +111,21 @@ def predict(self, X: pd.DataFrame) -> models.FloatMatrix: self.predict_model.only_root_prediction = True # run predict comp - dataobjects = super().run_computation(comp=self.predict_model, local=False, release=True) + dataobjects = super().run_computation( + comp=self.predict_model, local=False, release=True + ) return dataobjects[0].get_float_matrix() - def grid_search(self, feature_cols: List[str], label_cols: List[str], test_X: pd.DataFrame, test_Y:pd.DataFrame, param_dict:dict = None, log:bool = False) -> dict: - """ Performs a grid search on parameters to fine-tune the model + def grid_search( + self, + feature_cols: List[str], + label_cols: List[str], + test_X: pd.DataFrame, + test_Y: pd.DataFrame, + param_dict: dict = None, + log: bool = False, + ) -> dict: + """Performs a grid search on parameters to fine-tune the model Args: feature_cols (List[str]): Column names of the features @@ -110,29 +139,49 @@ def grid_search(self, feature_cols: List[str], label_cols: List[str], test_X: pd dict: dictionary of the combination of parameters with the highest R2 score """ if param_dict is None: - param_dict = {'learning_rate': [0.004, 0.005], 'network_iteration_count':[1, 2], 'elastic':[0.85, 0.98]} + param_dict = { + "learning_rate": [0.004, 0.005], + "network_iteration_count": [1, 2], + "elastic": [0.85, 0.98], + } param_grid = self.generate_param_grid(param_dict) - default_params = {'learning_rate': 0.0045, 'elastic':0.85, 'seed':0, 'momentum':0.92, 'network_iteration_count':1} - - + default_params = { + "learning_rate": 0.0045, + "elastic": 0.85, + "seed": 0, + "momentum": 0.92, + "network_iteration_count": 1, + } start_time = time.time() - best_r2 = float('-inf') - rmse_of_best = float('inf') + best_r2 = float("-inf") + rmse_of_best = float("inf") best_params = default_params for params in param_grid: all_params = dict(default_params | params) if log: print("Parameters:") - for param,val in all_params.items(): + for param, val in all_params.items(): print(" " + param + ": " + str(val)) regression = self.copy() - model = regression.fit(feature_cols,label_cols, 
learning_rate=all_params['learning_rate'], elastic=all_params['elastic'], seed=all_params['seed'], momentum=all_params['momentum'], network_iteration_count=all_params['network_iteration_count']) + model = regression.fit( + feature_cols, + label_cols, + learning_rate=all_params["learning_rate"], + elastic=all_params["elastic"], + seed=all_params["seed"], + momentum=all_params["momentum"], + network_iteration_count=all_params["network_iteration_count"], + ) results = model.predict(X=test_X) - predictions = [pred for prediction_list in results.predictions for pred in prediction_list] + predictions = [ + pred + for prediction_list in results.predictions + for pred in prediction_list + ] r2_tmp = r2_score(test_Y, predictions) rmse_tmp = rmse(test_Y, predictions) @@ -148,7 +197,7 @@ def grid_search(self, feature_cols: List[str], label_cols: List[str], test_X: pd start_time = time.time() print("Best hyper-parameters:") - for param,val in best_params.items(): + for param, val in best_params.items(): print(" " + param + ": " + str(val)) print("R2 Score: " + str(best_r2)) print("RMSE: " + str(rmse_of_best)) @@ -156,8 +205,8 @@ def grid_search(self, feature_cols: List[str], label_cols: List[str], test_X: pd return best_params @staticmethod - def generate_param_grid(param_dict:Dict) -> List[dict]: - """ Generates a grid of parameter combinations + def generate_param_grid(param_dict: Dict) -> List[dict]: + """Generates a grid of parameter combinations Args: param_dict (dict): dictionary of different parameter values to include in the parameter grid @@ -167,39 +216,52 @@ def generate_param_grid(param_dict:Dict) -> List[dict]: """ keys = param_dict.keys() values = (param_dict[key] for key in keys) - return [dict(zip(keys, combination)) for combination in itertools.product(*values)] + return [ + dict(zip(keys, combination)) for combination in itertools.product(*values) + ] class LinearRegression(Regression): - """ Linear Regression - """ + """Linear Regression""" + type: models.RegressionType = models.RegressionType.LINEAR continuous_labels: bool - def __init__(self, continuous_labels: bool, client:Client = UNSET, project_id:str=""): + def __init__( + self, continuous_labels: bool, client: Client = UNSET, project_id: str = "" + ): super().__init__(reg_type=self.type, client=client, project_id=project_id) self.continuous_labels = continuous_labels - self.model.params.linear = models.EncryptedRegressionParamsLinear(continuous_labels=continuous_labels) + self.model.params.linear = models.EncryptedRegressionParamsLinear( + continuous_labels=continuous_labels + ) def copy(self): return type(self)(self.continuous_labels, self.client, self.project_id) + class LogisticRegression(Regression): - """ Logistic Regression - """ + """Logistic Regression""" + type: models.RegressionType = models.RegressionType.LOGISTIC approximation_params: models.approximation_params.ApproximationParams - def __init__(self, approximation_params:models.approximation_params.ApproximationParams, client:Client = UNSET, project_id:str=""): + def __init__( + self, + approximation_params: models.approximation_params.ApproximationParams, + client: Client = UNSET, + project_id: str = "", + ): super().__init__(reg_type=self.type, client=client, project_id=project_id) self.model.params.approximation_params = approximation_params + class PoissonRegression(Regression): - """Poisson Regression - """ + """Poisson Regression""" + type: models.RegressionType = models.RegressionType.POISSON - def __init__(self, client:Client = UNSET, project_id:str=""): 
+ def __init__(self, client: Client = UNSET, project_id: str = ""): super().__init__(reg_type=self.type, client=client, project_id=project_id) diff --git a/src/tuneinsight/computations/secure_join.py b/src/tuneinsight/computations/secure_join.py index 5e724cc..84d966a 100644 --- a/src/tuneinsight/computations/secure_join.py +++ b/src/tuneinsight/computations/secure_join.py @@ -13,8 +13,10 @@ class SampleExtraction(ComputationRunner): join_id: str = "" - def get_sample(self,sample_size: int = 1,seed: str = "default-seed") -> pd.DataFrame: - """ Extract a sample from the data. + def get_sample( + self, sample_size: int = 1, seed: str = "default-seed" + ) -> pd.DataFrame: + """Extract a sample from the data. Args: sample_size (int, optional): size of the sample as number of rows. Defaults to 1. @@ -32,13 +34,15 @@ def get_sample(self,sample_size: int = 1,seed: str = "default-seed") -> pd.DataF comp.sample_size = sample_size comp.seed = seed comp.join_id = self.join_id - results = super().run_computation(comp=comp,local=False,keyswitch=True,decrypt=True) + results = super().run_computation( + comp=comp, local=False, keyswitch=True, decrypt=True + ) df = results[0].get_dataframe() return df class SecureJoin(ComputationRunner): - """ Perform a distributed join. + """Perform a distributed join. Args: ComputationRunner (ComputationRunner): parent class for running computation through the REST API. @@ -47,29 +51,27 @@ class SecureJoin(ComputationRunner): Exception: if the join_id is not set. """ - join_id: str = "" - - def create(self, target_columns: List[str], join_columns: List[str]): - """ Create a dataset from a distributed join. + """Create a dataset from a distributed join. Args: target_columns (List[str]): column names of target columns join_columns (List[str]): column names to join the data on + """ model = models.DistributedJoin(type=models.ComputationType.DISTRIBUTEDJOIN) model.target_columns = target_columns model.join_columns = join_columns model.project_id = self.project_id - model.missing_patterns = ["","NaN"] - dataobjects = super().run_computation(comp=model,local=False,release=True) + model.missing_patterns = ["", "NaN"] + dataobjects = super().run_computation(comp=model, local=False, release=True) self.join_id = dataobjects[0].get_id() def new_sample_extraction(self) -> SampleExtraction: - """ Create a Sample Extraction computation. + """Create a Sample Extraction computation. Raises: Exception: if the join_id is not set. 
@@ -79,6 +81,7 @@ def new_sample_extraction(self) -> SampleExtraction: """ if self.join_id == "": raise Exception("join must be created before extracting a sample") + sample_ext = SampleExtraction(client=self.client, project_id=self.project_id) sample_ext.join_id = self.join_id return sample_ext diff --git a/src/tuneinsight/computations/statistical_aggregation.py b/src/tuneinsight/computations/statistical_aggregation.py index 3de39e4..cdfd6fb 100644 --- a/src/tuneinsight/computations/statistical_aggregation.py +++ b/src/tuneinsight/computations/statistical_aggregation.py @@ -1,4 +1,4 @@ -from typing import Callable, Dict,List, Tuple, Union +from typing import Callable, Dict, List, Tuple, Union import pandas as pd from tuneinsight.api.sdk import models @@ -6,50 +6,56 @@ from tuneinsight.api.sdk.types import UNSET from tuneinsight.client.computations import ComputationRunner -class Aggregation(ComputationRunner): +class Aggregation(ComputationRunner): float_precision: int = 2 target_column: str = "" join_id: str = "" values: List[str] = None aggregated_columns: List[str] = [] - count_columns: Dict[str,List[str]] = {} - interval:List[float] = None + keep_non_categorized_items: bool = True + count_columns: Dict[str, List[str]] = {} + interval: List[float] = None @staticmethod def new_model() -> models.StatisticalAggregation: - return models.StatisticalAggregation(type=models.ComputationType.STATISTICALAGGREGATION) - + return models.StatisticalAggregation( + type=models.ComputationType.STATISTICALAGGREGATION + ) - def interval_to_categorical_label(self,interval) -> Callable[[str],str]: + def interval_to_categorical_label(self, interval) -> Callable[[str], str]: def res(cat: str) -> str: - return self.interval_str(int(cat),interval) + return self.interval_str(int(cat), interval) + return res @staticmethod - def value_to_categorical_label() -> Callable[[str],str]: - def res(cat:str) -> str: + def value_to_categorical_label() -> Callable[[str], str]: + def res(cat: str) -> str: return cat + return res - def run(self,comp: models.StatisticalAggregation,local: bool) -> models.FloatMatrix: - dataobjects = super().run_computation(comp=comp,local=local,release=True) + def run( + self, comp: models.StatisticalAggregation, local: bool + ) -> models.FloatMatrix: + dataobjects = super().run_computation(comp=comp, local=local, release=True) return dataobjects[0].get_float_matrix() @staticmethod - def interval_str(interval_index: int,interval: List[float]) -> str: + def interval_str(interval_index: int, interval: List[float]) -> str: res = "-" if interval_index < len(interval): res = res + str(interval[interval_index]) if interval_index > 0: - res = str(interval[interval_index -1]) + res + res = str(interval[interval_index - 1]) + res return res @staticmethod def parse_aggregation(result: float_matrix) -> Dict: vals = result.data[0] totals = {} - for i,col_info in enumerate(result.contextual_info.columns_info): + for i, col_info in enumerate(result.contextual_info.columns_info): if col_info.value_type == models.ColumnInfoValueType.ROWCOUNT: totals["row_count"] = vals[i] else: @@ -57,7 +63,22 @@ def parse_aggregation(result: float_matrix) -> Dict: return totals - def count(self, local: bool = False) -> Union[int,pd.DataFrame]: + @staticmethod + def sanitize_output(df: pd.DataFrame) -> pd.DataFrame: + """ + sanitize_output removes any invalid values (can be due to aggregation overflows) from the output dataset + + Args: + df (pd.DataFrame): the output dataset to sanitize + + Returns: + pd.DataFrame: the 
sanitized version of the dataset + """ + # Filters any rows that contain negative values (due to overflow) + df = df[~df.select_dtypes(include="number").lt(0).any(axis=1)] + return df + + def count(self, local: bool = False) -> Union[int, pd.DataFrame]: """count returns the number of records Args: @@ -69,7 +90,9 @@ def count(self, local: bool = False) -> Union[int,pd.DataFrame]: columns_to_extract = [] if len(self.count_columns) > 0: - extracted_dict = [vals + ['other ' + k] for k,vals in self.count_columns.items()] + extracted_dict = [ + vals + ["other " + k] for k, vals in self.count_columns.items() + ] flattened = [v for sublist in extracted_dict for v in sublist] columns_to_extract.extend(flattened) if self.values is not None: @@ -83,7 +106,7 @@ def count(self, local: bool = False) -> Union[int,pd.DataFrame]: model = self.new_model() model.include_dataset_length = True - result = self.run(comp=model,local=local) + result = self.run(comp=model, local=local) parsed = self.parse_aggregation(result) @@ -100,24 +123,28 @@ def sum(self, columns: List[str], local: bool = False) -> pd.DataFrame: pd.DataFrame: the sum for each column """ if self.values is not None: - self.aggregated_columns = columns # pylint: disable=W0201 + self.aggregated_columns = columns # pylint: disable=W0201 result = self.group_by_value(local=local) - columns_to_extract = [self.target_column] + ["total " + col for col in columns] + columns_to_extract = [self.target_column] + [ + "total " + col for col in columns + ] return result[columns_to_extract] if self.interval is not None: - self.aggregated_columns = columns # pylint: disable=W0201 + self.aggregated_columns = columns # pylint: disable=W0201 result = self.group_by_interval(local=local) - columns_to_extract = [self.target_column] + ["total " + col for col in columns] + columns_to_extract = [self.target_column] + [ + "total " + col for col in columns + ] return result[columns_to_extract] model = self.new_model() model.aggregation_columns = columns - result = self.run(comp=model,local=local) + result = self.run(comp=model, local=local) parsed = self.parse_aggregation(result) - return pd.DataFrame.from_dict(parsed, orient='index', columns=["Total"]) + return pd.DataFrame.from_dict(parsed, orient="index", columns=["Total"]) def average(self, columns: List[str], local: bool = False) -> pd.DataFrame: """average returns the average value for each given column @@ -131,37 +158,46 @@ def average(self, columns: List[str], local: bool = False) -> pd.DataFrame: """ if self.values is not None: - self.aggregated_columns = columns # pylint: disable=W0201 + self.aggregated_columns = columns # pylint: disable=W0201 result = self.group_by_value(local=local) - columns_to_extract = [self.target_column] + ["average " + col for col in columns] + columns_to_extract = [self.target_column] + [ + "average " + col for col in columns + ] return result[columns_to_extract] if self.interval is not None: - self.aggregated_columns = columns # pylint: disable=W0201 + self.aggregated_columns = columns # pylint: disable=W0201 result = self.group_by_interval(local=local) - columns_to_extract = [self.target_column] + ["average " + col for col in columns] + columns_to_extract = [self.target_column] + [ + "average " + col for col in columns + ] return result[columns_to_extract] model = self.new_model() model.aggregation_columns = columns model.include_dataset_length = True - result = self.run(comp=model,local=local) + result = self.run(comp=model, local=local) parsed = self.parse_aggregation(result) 
dataset_length = round(parsed["row_count"]) parsed.pop("row_count") cols = [] data = [] - for column_name,total in parsed.items(): + for column_name, total in parsed.items(): cols.append("average " + column_name) if dataset_length != 0: - data.append(round(total/float(dataset_length),self.float_precision)) + data.append(round(total / float(dataset_length), self.float_precision)) else: data.append(0) - return pd.DataFrame(data=[data],columns=cols) - - - def group_by(self, target_col:str = None, values: List[str] = None, interval: List[float] = None, count_columns: Dict[str,List[str]] = {}): # pylint: disable=W0102 + return pd.DataFrame(data=[data], columns=cols) + + def group_by( + self, + target_col: str = None, + values: List[str] = None, + interval: List[float] = None, + count_columns: Dict[str, List[str]] = {}, + ): # pylint: disable=W0102 """group_by sets attributes to perform grouped aggregations Args: @@ -176,12 +212,13 @@ def group_by(self, target_col:str = None, values: List[str] = None, interval: Li self.interval = interval self.count_columns = count_columns - - def group_by_to_dataframe(self,cat_labels: List[str],counts: Dict[str,int],totals: Dict[str,float]) -> pd.DataFrame: + def group_by_to_dataframe( + self, cat_labels: List[str], counts: Dict[str, int], totals: Dict[str, float] + ) -> pd.DataFrame: # Create the data frame columns - cols = [self.target_column,"count"] + cols = [self.target_column, "count"] # categorical data - for col,vals in self.count_columns.items(): + for col, vals in self.count_columns.items(): for v in vals: cols.append(v) cols.append("other " + col) @@ -190,92 +227,119 @@ def group_by_to_dataframe(self,cat_labels: List[str],counts: Dict[str,int],total cols.append("total " + col) cols.append("average " + col) - # create the rows data = [] # iterate over each category for cat in cat_labels: # add category + count count = counts[cat] - tmp = [cat,count] + tmp = [cat, count] # fetch the categorical counts - for col,vals in self.count_columns.items(): + for col, vals in self.count_columns.items(): for v in vals: if count != 0: - tmp.append(round(counts[cat + col + v] / count * 100,self.float_precision)) + tmp.append( + round( + counts[cat + col + v] / count * 100, + self.float_precision, + ) + ) else: tmp.append(0) - tmp.append(round(counts[cat + col + "other"] / count * 100,self.float_precision)) + tmp.append( + round( + counts[cat + col + "other"] / count * 100, self.float_precision + ) + ) # fetch the numerical aggregations for col in self.aggregated_columns: tot = totals[cat + col] - tmp.append(round(tot,self.float_precision)) + tmp.append(round(tot, self.float_precision)) if count != 0: - tmp.append(round(tot / float(count),self.float_precision)) + tmp.append(round(tot / float(count), self.float_precision)) else: tmp.append(0) data.append(tmp) - - return pd.DataFrame(data=data,columns=cols) + result = pd.DataFrame(data=data, columns=cols) + return self.sanitize_output(result) @staticmethod - def process_group_by_columns(column_infos: List[models.ColumnInfo], - vals: List[float],cat_to_label: Callable[[str],str]) -> Tuple[Dict[str,int],Dict[str,float],List[str]]: + def process_group_by_columns( + column_infos: List[models.ColumnInfo], + vals: List[float], + cat_to_label: Callable[[str], str], + ) -> Tuple[Dict[str, int], Dict[str, float], List[str]]: counts = {} totals = {} categories = {} - for i,col_info in enumerate(column_infos): + for i, col_info in enumerate(column_infos): cat_label = cat_to_label(col_info.group_info.category) 
categories[cat_label] = True if col_info.value_type == models.ColumnInfoValueType.ROWCOUNT: if col_info.origin_column == UNSET: counts[cat_label] = int(vals[i]) else: - counts[cat_label + col_info.origin_column + col_info.origin_value] = int(vals[i]) + counts[ + cat_label + col_info.origin_column + col_info.origin_value + ] = int(vals[i]) else: totals[cat_label + col_info.origin_column] = vals[i] - return counts,totals,categories.keys() - - + return counts, totals, categories.keys() - def group_by_value(self,local: bool = False) -> pd.DataFrame: + def group_by_value(self, local: bool = False) -> pd.DataFrame: # create groupBy Value model model = self.new_model() - cc :List[models.CategoricalColumn] = [] - for col,vals in self.count_columns.items(): - cc.append(models.CategoricalColumn(name=col,values=vals)) - bin_operation: models.BinningOperation = models.BinningOperation(aggregated_columns=self.aggregated_columns, - categories=self.values, - group_by_type=models.GroupByType.CATEGORY, - target_column=self.target_column, - count_columns=cc) + cc: List[models.CategoricalColumn] = [] + for col, vals in self.count_columns.items(): + cc.append(models.CategoricalColumn(name=col, values=vals)) + bin_operation: models.BinningOperation = models.BinningOperation( + aggregated_columns=self.aggregated_columns, + categories=self.values, + group_by_type=models.GroupByType.CATEGORY, + target_column=self.target_column, + keep_non_categorized_items=self.keep_non_categorized_items, + count_columns=cc, + ) model.binning_operations = [bin_operation] # Run the computation - result = self.run(comp=model,local=local) + result = self.run(comp=model, local=local) vals = result.data[0] # Process results - counts,totals,categories = self.process_group_by_columns(result.contextual_info.columns_info,result.data[0],self.value_to_categorical_label()) - return self.group_by_to_dataframe(cat_labels=categories,counts=counts,totals=totals) - - - def group_by_interval(self,local: bool = False) -> pd.DataFrame: - + counts, totals, categories = self.process_group_by_columns( + result.contextual_info.columns_info, + result.data[0], + self.value_to_categorical_label(), + ) + return self.group_by_to_dataframe( + cat_labels=categories, counts=counts, totals=totals + ) + + def group_by_interval(self, local: bool = False) -> pd.DataFrame: model = self.new_model() - cc :List[models.CategoricalColumn] = [] - for col,vals in self.count_columns.items(): - cc.append(models.CategoricalColumn(name=col,values=vals)) - bin_operation: models.BinningOperation = models.BinningOperation(aggregated_columns=self.aggregated_columns, - range_values=self.interval, - group_by_type=models.GroupByType.RANGE, - target_column=self.target_column, - count_columns=cc) + cc: List[models.CategoricalColumn] = [] + for col, vals in self.count_columns.items(): + cc.append(models.CategoricalColumn(name=col, values=vals)) + bin_operation: models.BinningOperation = models.BinningOperation( + aggregated_columns=self.aggregated_columns, + range_values=self.interval, + group_by_type=models.GroupByType.RANGE, + target_column=self.target_column, + keep_non_categorized_items=self.keep_non_categorized_items, + count_columns=cc, + ) model.binning_operations = [bin_operation] - result = self.run(comp=model,local=local) + result = self.run(comp=model, local=local) vals = result.data[0] - counts,totals,categories = self.process_group_by_columns(result.contextual_info.columns_info,result.data[0],self.interval_to_categorical_label(self.interval)) - return 
self.group_by_to_dataframe(cat_labels=categories,counts=counts,totals=totals) + counts, totals, categories = self.process_group_by_columns( + result.contextual_info.columns_info, + result.data[0], + self.interval_to_categorical_label(self.interval), + ) + return self.group_by_to_dataframe( + cat_labels=categories, counts=counts, totals=totals + ) diff --git a/src/tuneinsight/computations/stats.py b/src/tuneinsight/computations/stats.py index db7cec7..66344b1 100644 --- a/src/tuneinsight/computations/stats.py +++ b/src/tuneinsight/computations/stats.py @@ -1,4 +1,4 @@ -from typing import List,Dict,Any +from typing import List, Dict, Any import math import pandas as pd import matplotlib.pyplot as plt @@ -8,26 +8,33 @@ from tuneinsight.utils.plots import style_title, style_ylabel, add_ti_branding - class Statistics: - results: List[models.StatisticResult] - def __init__(self,results: List[models.StatisticResult]): + def __init__(self, results: List[models.StatisticResult]): self.results = results def as_table(self) -> pd.DataFrame: - cols = ["name","mean","variance","min","median","max","IQR"] + cols = ["name", "mean", "variance", "min", "median", "max", "IQR"] data = [] for res in self.results: - data.append([res.name,res.mean,res.variance,res.min_,res.median,res.max_,res.iqr]) - return pd.DataFrame(data=data,columns=cols) - - - def plot(self,metric: str = "", local=False): + data.append( + [ + res.name, + res.mean, + res.variance, + res.min_, + res.median, + res.max_, + res.iqr, + ] + ) + return pd.DataFrame(data=data, columns=cols) + + def plot(self, metric: str = "", local=False): plt.style.use("bmh") boxes = [] - fig, ax = plt.subplots(1, 2,sharey=True) + fig, ax = plt.subplots(1, 2, sharey=True) names = [] means = [] deviations = [] @@ -37,55 +44,79 @@ def plot(self,metric: str = "", local=False): c = "#DE5F5A" for res in self.results: tmp = res.name.split() - var_name = tmp[len(tmp) -1] - boxes.append({ - 'label' : var_name, - 'whislo': res.min_, # Bottom whisker position - 'q1' : res.quantiles[1], # First quartile (25th percentile) - 'med' : res.median, # Median (50th percentile) - 'q3' : res.quantiles[3], # Third quartile (75th percentile) - 'whishi': res.max_, # Top whisker position - 'fliers': [] # Outliers - }) + var_name = tmp[len(tmp) - 1] + boxes.append( + { + "label": var_name, + "whislo": res.min_, # Bottom whisker position + "q1": res.quantiles[1], # First quartile (25th percentile) + "med": res.median, # Median (50th percentile) + "q3": res.quantiles[3], # Third quartile (75th percentile) + "whishi": res.max_, # Top whisker position + "fliers": [], # Outliers + } + ) names.append(var_name) means.append(res.mean) deviations.append(math.sqrt(res.variance)) - ax[0].errorbar(names,means,deviations,linestyle='None', marker='o',color=c) + ax[0].errorbar(names, means, deviations, linestyle="None", marker="o", color=c) ax[1].bxp(boxes, showfliers=False, medianprops={"color": c}) style_title(ax[0], title="Mean & Standard Deviation") style_title(ax[1], title="Quantiles") style_ylabel(ax[0], y_label=metric) - add_ti_branding(ax[0], ha='right', local=local) + add_ti_branding(ax[0], ha="right", local=local) plt.show() - - class DatasetStatistics(ComputationRunner): + variables: Dict[str, models.StatisticDefinition] = {} - variables: Dict[str,models.StatisticDefinition] = {} - - - def create_subgroups(self,variable_name:str,column: str,values: List[str],numerical=False): + def create_subgroups( + self, variable_name: str, column: str, values: List[str], numerical=False + ): if 
variable_name not in self.variables: - raise Exception(f'no such variable: {variable_name}') + raise Exception(f"no such variable: {variable_name}") variable = self.variables[variable_name] for v in values: new_var_name = str(column) + "=" + str(v) - self.new_variable(name=new_var_name,variable=variable.variable,min_bound=variable.min_bound,max_bound=variable.max_bound) - self.set_filter(variable_name=new_var_name,column=column,comparator=models.ComparisonType.EQUAL,value=v,numerical=numerical) - - - def set_filter(self,variable_name: str,column: str,comparator:models.ComparisonType, value:Any, numerical:bool = True): + self.new_variable( + name=new_var_name, + variable=variable.variable, + min_bound=variable.min_bound, + max_bound=variable.max_bound, + ) + self.set_filter( + variable_name=new_var_name, + column=column, + comparator=models.ComparisonType.EQUAL, + value=v, + numerical=numerical, + ) + + def set_filter( + self, + variable_name: str, + column: str, + comparator: models.ComparisonType, + value: Any, + numerical: bool = True, + ): if variable_name not in self.variables: - raise Exception(f'no such variable: {variable_name}') - f = models.Filter(type=models.PreprocessingOperationType.FILTER, col_name=column, comparator=comparator, value=str(value), numerical=numerical) + raise Exception(f"no such variable: {variable_name}") + f = models.Filter( + type=models.PreprocessingOperationType.FILTER, + col_name=column, + comparator=comparator, + value=str(value), + numerical=numerical, + ) self.variables[variable_name].filter_ = f - - def new_variable(self,name: str,variable: str,min_bound: float = 0.0,max_bound: float = 200.0): + def new_variable( + self, name: str, variable: str, min_bound: float = 0.0, max_bound: float = 200.0 + ): """ new_variable adds a new variable for the statistics computation. @@ -95,24 +126,27 @@ def new_variable(self,name: str,variable: str,min_bound: float = 0.0,max_bound: min_bound (float, optional): The minimum bound for the variable. Defaults to 0.0. max_bound (float, optional): The maximum bound for the variable. Defaults to 200.0. """ - self.variables[name] = models.StatisticDefinition(name=name,variable=variable,min_bound=min_bound,max_bound=max_bound,quantiles_k_value=1) - - + self.variables[name] = models.StatisticDefinition( + name=name, + variable=variable, + min_bound=min_bound, + max_bound=max_bound, + quantiles_k_value=1, + ) def get_model(self) -> models.DatasetStatistics: - ''' + """ get_model returns the api model definition of this computation Returns: models.DatasetStatistics: the computation definition - ''' + """ model = models.DatasetStatistics(type=models.ComputationType.DATASETSTATISTICS) model.statistics = list(self.variables.values()) model.project_id = self.project_id return model - - def compute(self,local: bool=False) -> Statistics: + def compute(self, local: bool = False) -> Statistics: """ Computes the statistics for the variables added to the computation. 
@@ -126,12 +160,11 @@ def compute(self,local: bool=False) -> Statistics: raise Exception("at least one variable must be added to the computation") model = self.get_model() self.max_timeout = 30 * time.minute - results = super().run_computation(comp=model,local=local,release=True) + results = super().run_computation(comp=model, local=local, release=True) return Statistics(results[0].get_stats().results) - def display_workflow(self): - ''' + """ display_workflow displays a documentation of the computation workflow - ''' + """ return super().display_documentation(self.get_model()) diff --git a/src/tuneinsight/computations/survival.py b/src/tuneinsight/computations/survival.py index d54934c..b8f8565 100644 --- a/src/tuneinsight/computations/survival.py +++ b/src/tuneinsight/computations/survival.py @@ -1,26 +1,26 @@ -from typing import List,Dict +from typing import List, Dict import pandas as pd import matplotlib.pyplot as plt from tuneinsight.api.sdk import models from tuneinsight.utils.plots import style_plot from tuneinsight.api.sdk.types import UNSET + def at_risk_column(i): - return 'risk_' + str(int(i)) + return "risk_" + str(int(i)) + def event_column(i): - return 'event_' + str(int(i)) + return "event_" + str(int(i)) -def get_survival_prob_at(previous,num_at_risk,num_events): +def get_survival_prob_at(previous, num_at_risk, num_events): if num_at_risk == 0: return previous return previous * (1.0 - float(num_events) / float(num_at_risk)) class SurvivalParameters: - - duration_col: str event_col: str event_val: str @@ -30,7 +30,17 @@ class SurvivalParameters: unit_value: int num_frames: int - def __init__(self, duration_col: str = UNSET, event_col: str = UNSET, event_val: str = UNSET, start_event: str = UNSET, end_event: str = UNSET, num_frames: int = UNSET, unit: models.TimeUnit = models.TimeUnit.WEEKS,unit_value: int = 1): + def __init__( + self, + duration_col: str = UNSET, + event_col: str = UNSET, + event_val: str = UNSET, + start_event: str = UNSET, + end_event: str = UNSET, + num_frames: int = UNSET, + unit: models.TimeUnit = models.TimeUnit.WEEKS, + unit_value: int = 1, + ): self.duration_col = duration_col self.event_col = event_col self.event_val = event_val @@ -40,7 +50,6 @@ def __init__(self, duration_col: str = UNSET, event_col: str = UNSET, event_val: self.unit = unit self.unit_value = unit_value - def _resolve_duration_column(self) -> str: if self.duration_col is not UNSET: return self.duration_col @@ -48,36 +57,45 @@ def _resolve_duration_column(self) -> str: return str(self.unit) return str(models.TimeUnit.WEEKS) - - def compute_survival(self,aggregation_output: pd.DataFrame) -> pd.DataFrame: - + def compute_survival(self, aggregation_output: pd.DataFrame) -> pd.DataFrame: aggregation_output = aggregation_output.round(0) - tmp = pd.DataFrame(data=[aggregation_output["Total"].to_list()],columns=aggregation_output["Column"]) + tmp = pd.DataFrame( + data=[aggregation_output["Total"].to_list()], + columns=aggregation_output["Column"], + ) final = pd.DataFrame() duration_col = self._resolve_duration_column() final[duration_col] = range(self.num_frames) - final['n_at_risk'] = final.apply(lambda x: tmp[at_risk_column(x[duration_col])],axis=1) - final['n_events'] = final.apply(lambda x: tmp[event_column(x[duration_col])],axis=1) + final["n_at_risk"] = final.apply( + lambda x: tmp[at_risk_column(x[duration_col])], axis=1 + ) + final["n_events"] = final.apply( + lambda x: tmp[event_column(x[duration_col])], axis=1 + ) curr_prob = 1 survival_probabilities = [] for i in 
range(self.num_frames): - curr_prob = get_survival_prob_at(curr_prob,final.loc[i,"n_at_risk"],final.loc[i,"n_events"]) + curr_prob = get_survival_prob_at( + curr_prob, final.loc[i, "n_at_risk"], final.loc[i, "n_events"] + ) survival_probabilities.append(curr_prob) - final['survival_probability'] = survival_probabilities + final["survival_probability"] = survival_probabilities return final def get_preprocessing_op(self) -> models.PreprocessingOperation: - interval = models.Duration(unit=self.unit,value=self.unit_value) - return models.Survival(models.PreprocessingOperationType.SURVIVAL, - duration_col=self.duration_col, - event_col=self.event_col, - event_val=self.event_val, - start_event=self.start_event, - end_event=self.end_event, - interval=interval, - num_frames=self.num_frames) + interval = models.Duration(unit=self.unit, value=self.unit_value) + return models.Survival( + models.PreprocessingOperationType.SURVIVAL, + duration_col=self.duration_col, + event_col=self.event_col, + event_val=self.event_val, + start_event=self.start_event, + end_event=self.end_event, + interval=interval, + num_frames=self.num_frames, + ) def get_target_columns(self) -> List[str]: res = [] @@ -86,7 +104,13 @@ def get_target_columns(self) -> List[str]: res.append(event_column(i)) return res - def plot_survivals(self, results: Dict[str, pd.DataFrame], size:tuple=(8,4), duration_col: str = None, title="Survival curve"): + def plot_survivals( + self, + results: Dict[str, pd.DataFrame], + size: tuple = (8, 4), + duration_col: str = None, + title="Survival curve", + ): if duration_col is None: duration_col = self._resolve_duration_column() plt.style.use("bmh") @@ -96,7 +120,7 @@ def plot_survivals(self, results: Dict[str, pd.DataFrame], size:tuple=(8,4), dur for label, df in results.items(): x = df[duration_col] - y = df['survival_probability'] + y = df["survival_probability"] ax.step(x, y, linewidth=2.5, label=label) @@ -106,7 +130,13 @@ def plot_survivals(self, results: Dict[str, pd.DataFrame], size:tuple=(8,4), dur plt.show() - def plot_survival(self, df: pd.DataFrame, size:tuple=(8,4), duration_col: str = None, title="Survival curve"): + def plot_survival( + self, + df: pd.DataFrame, + size: tuple = (8, 4), + duration_col: str = None, + title="Survival curve", + ): if duration_col is None: duration_col = self.duration_col @@ -115,13 +145,11 @@ def plot_survival(self, df: pd.DataFrame, size:tuple=(8,4), duration_col: str = # plot fig, ax = plt.subplots() - x = df[duration_col] - y = df['survival_probability'] + y = df["survival_probability"] ax.step(x, y, linewidth=2.5, color="#DE5F5A") - style_plot(ax, fig, title, duration_col, "Survival Probability", size=size) plt.show() diff --git a/src/tuneinsight/computations/survival_aggregation.py b/src/tuneinsight/computations/survival_aggregation.py index eb78885..3f56398 100644 --- a/src/tuneinsight/computations/survival_aggregation.py +++ b/src/tuneinsight/computations/survival_aggregation.py @@ -1,4 +1,4 @@ -from typing import List,Dict +from typing import List, Dict import pandas as pd from tuneinsight.client.computations import ComputationRunner from tuneinsight.computations.survival import SurvivalParameters @@ -6,57 +6,83 @@ from tuneinsight.api.sdk import Client from tuneinsight.api.sdk.types import UNSET -class SurvivalAggregation(ComputationRunner): - +class SurvivalAggregation(ComputationRunner): subgroups: List[models.SurvivalAggregationSubgroupsItem] matching_organization: str secure_matching: bool matching_columns: List[models.MatchingColumn] - def 
__init__(self, project_id:str = "", client:Client = UNSET): + def __init__(self, project_id: str = "", client: Client = UNSET): self.subgroups = [] self.secure_matching = False self.matching_organization = "" self.matching_columns = [] - super().__init__(project_id,client) + super().__init__(project_id, client) - def add_categories(self,column:str,values: List[str]): + def add_categories(self, column: str, values: List[str]): for v in values: self.add_subgroup(v, column, models.ComparisonType.EQUAL, v) - def add_subgroup(self,name: str,target_column: str, comparator:models.ComparisonType, value:str, numerical:bool = False): - filter_operation = models.Filter(type=models.PreprocessingOperationType.FILTER, col_name=target_column, comparator=comparator, value=value, numerical=numerical) - item = models.SurvivalAggregationSubgroupsItem(filter_=filter_operation,name=name) + def add_subgroup( + self, + name: str, + target_column: str, + comparator: models.ComparisonType, + value: str, + numerical: bool = False, + ): + filter_operation = models.Filter( + type=models.PreprocessingOperationType.FILTER, + col_name=target_column, + comparator=comparator, + value=value, + numerical=numerical, + ) + item = models.SurvivalAggregationSubgroupsItem( + filter_=filter_operation, name=name + ) self.subgroups.append(item) - def set_matching(self,matching_organization: str, matching_columns: List[str],fuzzy_matching: bool = False): + def set_matching( + self, + matching_organization: str, + matching_columns: List[str], + fuzzy_matching: bool = False, + ): self.secure_matching = True self.matching_columns = [] self.matching_organization = matching_organization for c in matching_columns: - self.matching_columns.append(models.MatchingColumn(name=c,fuzzy=fuzzy_matching)) - + self.matching_columns.append( + models.MatchingColumn(name=c, fuzzy=fuzzy_matching) + ) - def compute_survival(self,survival_parameters: SurvivalParameters) -> Dict[str,pd.DataFrame]: + def compute_survival( + self, survival_parameters: SurvivalParameters + ) -> Dict[str, pd.DataFrame]: # Set parameters and run computation - model = models.SurvivalAggregation(type=models.ComputationType.SURVIVALAGGREGATION) + model = models.SurvivalAggregation( + type=models.ComputationType.SURVIVALAGGREGATION + ) model.subgroups = self.subgroups model.secure_matching = self.secure_matching model.matching_columns = self.matching_columns model.matching_organization = self.matching_organization model.project_id = self.project_id model.survival_parameters = survival_parameters.get_preprocessing_op() - dataobjects = super().run_computation(comp=model,local=False,release=True) + dataobjects = super().run_computation(comp=model, local=False, release=True) # Compute Mapping for survival results - fm = dataobjects[0].get_float_matrix() + fm = dataobjects[0].get_float_matrix() if len(fm.data) != len(self.subgroups) + 1: - raise Exception(f"result dimensions mismatch, expected {len(self.subgroups) + 1} rows, got {len(fm.data)}") + raise Exception( + f"result dimensions mismatch, expected {len(self.subgroups) + 1} rows, got {len(fm.data)}" + ) result_mapping = {} - for i,row in enumerate(fm.data): + for i, row in enumerate(fm.data): subgroup_name = "all" if i > 0: - subgroup_name = self.subgroups[i-1].name - df = pd.DataFrame({'Column': fm.columns,'Total': row}) + subgroup_name = self.subgroups[i - 1].name + df = pd.DataFrame({"Column": fm.columns, "Total": row}) result_mapping[subgroup_name] = survival_parameters.compute_survival(df) return result_mapping diff --git 
a/src/tuneinsight/computations/types.py b/src/tuneinsight/computations/types.py index 83e0da5..0a9763c 100644 --- a/src/tuneinsight/computations/types.py +++ b/src/tuneinsight/computations/types.py @@ -1,15 +1,16 @@ from enum import Enum from tuneinsight.api.sdk.models import ComputationType as ct + class Type(Enum): - ''' + """ Type Enumeration of all of the exposed distributed computations types - ''' + """ AGGREGATION = ct.ENCRYPTEDAGGREGATION REGRESSION = ct.ENCRYPTEDREGRESSION PREDICTION = ct.ENCRYPTEDPREDICTION - INTERSECTION = ct.SETINTERSECTION + INTERSECTION = ct.SETINTERSECTION STAT_AGGREGATION = ct.STATISTICALAGGREGATION JOIN = ct.DISTRIBUTEDJOIN SAMPLE_EXTRACTION = ct.SAMPLEEXTRACTION @@ -21,16 +22,15 @@ def to_computation_type(self) -> ct: return ct(self.value) - displayed_types = { - Type.AGGREGATION : "Aggregation", + Type.AGGREGATION: "Aggregation", Type.REGRESSION: "Regression", Type.PREDICTION: "Prediction", - Type.INTERSECTION: 'Private Set Intersection', - Type.STAT_AGGREGATION: 'Statistical Aggregation', + Type.INTERSECTION: "Private Set Intersection", + Type.STAT_AGGREGATION: "Statistical Aggregation", Type.JOIN: "Secure Join", Type.SAMPLE_EXTRACTION: "Sample Extraction", - Type.GWAS: 'GWAS', + Type.GWAS: "GWAS", Type.SURVIVAL_ANALYSIS: "Survival Analysis", Type.DATASET_STATISTICS: "Secure Quantiles Computation", } diff --git a/src/tuneinsight/cryptolib/cryptolib-linux_x86_64.so b/src/tuneinsight/cryptolib/cryptolib-linux_x86_64.so index 4897b3c..e1bc113 100644 Binary files a/src/tuneinsight/cryptolib/cryptolib-linux_x86_64.so and b/src/tuneinsight/cryptolib/cryptolib-linux_x86_64.so differ diff --git a/src/tuneinsight/cryptolib/cryptolib.py b/src/tuneinsight/cryptolib/cryptolib.py index 3c3ad97..918b1c1 100644 --- a/src/tuneinsight/cryptolib/cryptolib.py +++ b/src/tuneinsight/cryptolib/cryptolib.py @@ -11,7 +11,9 @@ os = platform.system().lower() cryptolib_path = cwd / f"cryptolib-{os}_{arch}.so" if not exists(cryptolib_path): - raise FileNotFoundError("Could not find the cryptolib library. Your platform might not be supported.") + raise FileNotFoundError( + "Could not find the cryptolib library. Your platform might not be supported." + ) so = ctypes.cdll.LoadLibrary(cryptolib_path) @@ -25,7 +27,10 @@ def go_error() -> Exception: return ValueError(error_message) return Exception(error_message) -def new_hefloat_operator_from_b64_hefloat_parameters(hefloat_parameters_b64: str) -> bytes: + +def new_hefloat_operator_from_b64_hefloat_parameters( + hefloat_parameters_b64: str, +) -> bytes: """Instantiates a new HeFloat Operator from a base64 encoded hefloat parameters. Args: @@ -41,6 +46,7 @@ def new_hefloat_operator_from_b64_hefloat_parameters(hefloat_parameters_b64: str raise go_error() return hefloat_operator_id + def new_hefloat_operator_from_b64_scheme_context(scheme_context_b64: str) -> bytes: """Instantiates a new HEFloat Operator from a base64 encoded scheme.Context. 
@@ -57,8 +63,9 @@ def new_hefloat_operator_from_b64_scheme_context(scheme_context_b64: str) -> byt raise go_error() return hefloat_operator_id + def get_relin_key_bytes(hefloat_operator_id: bytes) -> bytes: - ''' + """ get_relin_key_bytes retrieves the relinearization key bytes from the cryptosystem Args: @@ -66,14 +73,17 @@ def get_relin_key_bytes(hefloat_operator_id: bytes) -> bytes: Returns: bytes: the relinearization key bytes - ''' + """ get_relin_key = so.GetRelinearizationKeyBytes get_relin_key.restype = ctypes.c_void_p res = get_relin_key(hefloat_operator_id) return _handle_bytes_result(res) -def encrypt_prediction_dataset(hefloat_operator_id:bytes,csv_bytes: bytes,b64_params: str,remove_header: bool) -> bytes: - ''' + +def encrypt_prediction_dataset( + hefloat_operator_id: bytes, csv_bytes: bytes, b64_params: str, remove_header: bool +) -> bytes: + """ encrypt_prediction_dataset encrypts a provided dataset in prediction format using the secret key of the cryptosystem Args: @@ -84,15 +94,21 @@ def encrypt_prediction_dataset(hefloat_operator_id:bytes,csv_bytes: bytes,b64_pa Returns: bytes: the encrypted version of the dataset that can be used as input to a encrypted prediction computation - ''' + """ encrypt_pred = so.EncryptPredictionDataset encrypt_pred.restype = ctypes.c_void_p - res = encrypt_pred(hefloat_operator_id,csv_bytes,len(csv_bytes),b64_params.encode(),ctypes.c_bool(remove_header)) + res = encrypt_pred( + hefloat_operator_id, + csv_bytes, + len(csv_bytes), + b64_params.encode(), + ctypes.c_bool(remove_header), + ) return _handle_bytes_result(res) -def decrypt_prediction(hefloat_operator_id: bytes,ct: bytes) -> bytes: - ''' +def decrypt_prediction(hefloat_operator_id: bytes, ct: bytes) -> bytes: + """ decrypt_prediction decrypts the encrypted prediction ciphertext Args: @@ -101,12 +117,13 @@ def decrypt_prediction(hefloat_operator_id: bytes,ct: bytes) -> bytes: Returns: bytes: the decrypted predicted values as a csv in byte format - ''' + """ decrypt_pred = so.DecryptPredictionResult decrypt_pred.restype = ctypes.c_void_p - res = decrypt_pred(hefloat_operator_id,ct,len(ct)) + res = decrypt_pred(hefloat_operator_id, ct, len(ct)) return _handle_bytes_result(res) + def key_generation(hefloat_operator_id: bytes) -> bytes: """Generate a key for a given cryptosystem. @@ -123,6 +140,7 @@ def key_generation(hefloat_operator_id: bytes) -> bytes: raise go_error() return key_response + def relinearization_key_generation(hefloat_operator_id: bytes) -> bytes: """Generate a key for a given cryptosystem. @@ -194,9 +212,10 @@ def encrypt_matrix(hefloat_operator_id: bytes, csv_string: bytes) -> bytes: ciphertext = ciphertext[8:] return ciphertext -def decrypt_dataframe(hefloat_operator_id: bytes, - dataframe_ciphertext: bytes, - headers: List[str] = None) -> pd.DataFrame: + +def decrypt_dataframe( + hefloat_operator_id: bytes, dataframe_ciphertext: bytes, headers: List[str] = None +) -> pd.DataFrame: """Turn an encrypted pandas dataframe into a new decrypted pandas dataframe. Indices are recovered, column names can optionally be provided by the user. 
@@ -211,11 +230,11 @@ def decrypt_dataframe(hefloat_operator_id: bytes, plaintext_csv_bytes = decrypt_csv(hefloat_operator_id, dataframe_ciphertext) if plaintext_csv_bytes is None: raise go_error() - plaintext_csv = plaintext_csv_bytes.decode('utf8') + plaintext_csv = plaintext_csv_bytes.decode("utf8") plaintext_dataframe = pd.DataFrame( - [row.split(',') for row in plaintext_csv.split('\n')]) - plaintext_dataframe = plaintext_dataframe.set_index( - plaintext_dataframe.columns[0]) + [row.split(",") for row in plaintext_csv.split("\n")] + ) + plaintext_dataframe = plaintext_dataframe.set_index(plaintext_dataframe.columns[0]) plaintext_dataframe.index.name = None if headers is not None: if len(headers) != len(plaintext_dataframe.columns): @@ -240,7 +259,8 @@ def decrypt_csv(hefloat_operator_id: bytes, csv_ciphertext: bytes) -> bytes: decrypt_cipher_table.restype = ctypes.c_char_p ctxt_length = len(csv_ciphertext) csv_plaintext = decrypt_cipher_table( - hefloat_operator_id, csv_ciphertext, ctxt_length) + hefloat_operator_id, csv_ciphertext, ctxt_length + ) if csv_plaintext is None: raise go_error() return csv_plaintext @@ -273,9 +293,10 @@ def test_polynomial_evaluation_hefloat_params() -> str: raise go_error() return cryptoparameters_b64 -def encrypted_addition(hefloat_operator_id: bytes, - number1: bytes, - number2: bytes) -> bytes: + +def encrypted_addition( + hefloat_operator_id: bytes, number1: bytes, number2: bytes +) -> bytes: add = so.Add add.restype = ctypes.c_void_p number1_size = len(number1) @@ -288,7 +309,10 @@ def encrypted_addition(hefloat_operator_id: bytes, ciphertext = ciphertext[8:] return ciphertext -def encrypted_multiplication(hefloat_operator_id: bytes, number1: bytes, number2: bytes) -> bytes: + +def encrypted_multiplication( + hefloat_operator_id: bytes, number1: bytes, number2: bytes +) -> bytes: multiply = so.Multiply multiply.restype = ctypes.c_void_p number1_size = len(number1) @@ -301,17 +325,26 @@ def encrypted_multiplication(hefloat_operator_id: bytes, number1: bytes, number2 ciphertext = ciphertext[8:] return ciphertext -def encrypted_polynomial_evaluation(hefloat_operator_id: bytes, polynomial_coefficients: list[int, float], number: bytes) -> bytes: + +def encrypted_polynomial_evaluation( + hefloat_operator_id: bytes, polynomial_coefficients: list[int, float], number: bytes +) -> bytes: polynomial_evaluation = so.PolynomialEvaluation polynomial_evaluation.restype = ctypes.c_void_p number_size = len(number) polynomial_coefficients = [str(i) for i in polynomial_coefficients] polynomial_coefficients = ",".join(polynomial_coefficients) - polynomial_coefficients = polynomial_coefficients.encode('UTF-8') + polynomial_coefficients = polynomial_coefficients.encode("UTF-8") polynomial_size = len(polynomial_coefficients) - result = polynomial_evaluation(hefloat_operator_id, polynomial_coefficients, polynomial_size, number, number_size) + result = polynomial_evaluation( + hefloat_operator_id, + polynomial_coefficients, + polynomial_size, + number, + number_size, + ) if result is None: raise go_error() res_length = int.from_bytes(ctypes.string_at(result, 8), "little") @@ -319,10 +352,11 @@ def encrypted_polynomial_evaluation(hefloat_operator_id: bytes, polynomial_coeff ciphertext = ciphertext[8:] return ciphertext + def encrypt_number(hefloat_operator_id: bytes, number1: int) -> bytes: encrypt = so.EncryptNumber encrypt.restype = ctypes.c_void_p - byte_number = str(number1).encode('UTF-8') + byte_number = str(number1).encode("UTF-8") result = encrypt(hefloat_operator_id, 
byte_number) if result is None: raise go_error() @@ -331,6 +365,7 @@ def encrypt_number(hefloat_operator_id: bytes, number1: int) -> bytes: ciphertext = ciphertext[8:] return ciphertext + def decrypt_number(hefloat_operator_id: bytes, encrypted_number: bytes) -> int: decrypt = so.DecryptNumber decrypt.restype = ctypes.c_void_p @@ -341,6 +376,7 @@ def decrypt_number(hefloat_operator_id: bytes, encrypted_number: bytes) -> int: result_length = int.from_bytes(ctypes.string_at(result, 8), "little") return result_length + def _handle_bytes_result(result) -> bytes: if result is None: raise go_error() @@ -349,21 +385,22 @@ def _handle_bytes_result(result) -> bytes: result_bytes = result_bytes[8:] return result_bytes + ############################################### PIR ############################################### + class PIRContext: - ''' + """ Represents a PIR context for client side PIR operations Raises: go_error: upon getting invalid parameters - ''' - + """ ctx_id: bytes - def __init__(self,pir_b64:str,index_b64: str): - ''' + def __init__(self, pir_b64: str, index_b64: str): + """ __init__ initializes a PIR context Args: @@ -372,28 +409,27 @@ def __init__(self,pir_b64:str,index_b64: str): Raises: go_error: upon invalid parameters - ''' + """ func = so.NewPIRContext func.restype = ctypes.c_char_p - self.ctx_id = func(pir_b64.encode(),index_b64.encode()) + self.ctx_id = func(pir_b64.encode(), index_b64.encode()) if self.ctx_id is None: raise go_error() - def get_eva_key(self) -> bytes: - ''' + """ get_eva_key returns the bytes of the evaluation key set Returns: bytes: the bytes of the evaluation key set - ''' + """ get_func = so.GetPIREvaluationKeyBytes get_func.restype = ctypes.c_void_p result = get_func(self.ctx_id) return _handle_bytes_result(result) def encrypt_query(self, query: str) -> bytes: - ''' + """ encrypt_query encrypts the given query Args: @@ -401,14 +437,14 @@ def encrypt_query(self, query: str) -> bytes: Returns: bytes: the encrypted query as bytes ready to be uploaded - ''' + """ encrypt_pir = so.EncryptPIRQuery encrypt_pir.restype = ctypes.c_void_p result = encrypt_pir(self.ctx_id, query.encode()) return _handle_bytes_result(result) def decrypt_response(self, pir_result: bytes) -> bytes: - ''' + """ decrypt_response decrypts the encrypted bytes as a plaintext csv string Args: @@ -416,7 +452,7 @@ def decrypt_response(self, pir_result: bytes) -> bytes: Returns: bytes: the decrypted csv as a byte string - ''' + """ decrypt_pir = so.DecryptPIRResult decrypt_pir.restype = ctypes.c_void_p result = decrypt_pir(self.ctx_id, pir_result, len(pir_result)) diff --git a/src/tuneinsight/utils/benchmarks.py b/src/tuneinsight/utils/benchmarks.py index 1561dd6..7f84e04 100644 --- a/src/tuneinsight/utils/benchmarks.py +++ b/src/tuneinsight/utils/benchmarks.py @@ -1,4 +1,4 @@ -from typing import List,Any,Dict,Tuple +from typing import List, Any, Dict, Tuple from dateutil.parser import parse import numpy as np @@ -9,7 +9,6 @@ from tuneinsight.utils.plots import style_title, style_suptitle - bit = 1 byte = 8 * bit kilobit = 1024 * bit @@ -21,27 +20,27 @@ net_labels = { bit: "b", - byte: 'B', - kilobit: 'Kb', - kilobyte: 'KB', - megabit: 'Mb', - megabyte: 'MB', - gigabit: 'Gb', - gigabyte: 'GB', + byte: "B", + kilobit: "Kb", + kilobyte: "KB", + megabit: "Mb", + megabyte: "MB", + gigabit: "Gb", + gigabyte: "GB", } time_labels = { - time_tools.microsecond: 'μs', - time_tools.millisecond: 'ms', - time_tools.second: 's', - time_tools.minute: 'min', - time_tools.hour: 'hour', - time_tools.day: 
'day', + time_tools.microsecond: "μs", + time_tools.millisecond: "ms", + time_tools.second: "s", + time_tools.minute: "min", + time_tools.hour: "hour", + time_tools.day: "day", } def get_total_time(comp: models.Computation) -> int: - ''' + """ get_total_time returns the total running time of the computation in microseconds Args: @@ -49,15 +48,15 @@ def get_total_time(comp: models.Computation) -> int: Returns: int: the total running times in microseconds - ''' + """ start = parse(comp.started_at) end = parse(comp.ended_at) diff = end - start return int(round((diff).total_seconds() * time_tools.second)) -def get_total_communication(comp: models.Computation) ->int: - ''' +def get_total_communication(comp: models.Computation) -> int: + """ get_total_communication returns the total egress + ingress communication of the computation (recorded from the point of view of the node that returned the provided schema) Args: @@ -65,12 +64,21 @@ def get_total_communication(comp: models.Computation) ->int: Returns: int: the total recorded communication in bits - ''' + """ return comp.egress * byte + comp.ingress * byte -def plot_benchmarks(x_values: List[Any],times: Dict[str,np.ndarray],nets: Dict[str,np.ndarray],time_unit: int = time_tools.millisecond,net_unit: int = kilobyte,title:str = "",markers="",x_label=""): - ''' +def plot_benchmarks( + x_values: List[Any], + times: Dict[str, np.ndarray], + nets: Dict[str, np.ndarray], + time_unit: int = time_tools.millisecond, + net_unit: int = kilobyte, + title: str = "", + markers="", + x_label="", +): + """ plot_benchmarks plots the benchmarks results Args: @@ -81,14 +89,14 @@ def plot_benchmarks(x_values: List[Any],times: Dict[str,np.ndarray],nets: Dict[s net_unit (int, optional): the communication size unit used. Defaults to kilobyte. title (str, optional): optional title to provide to the plot. Defaults to "". markers (str, optional): optional marker values for plot points. Defaults to "". 
- ''' + """ fig, ax = plt.subplots(1, 2) - style_suptitle(fig,title=title,fontsize=18) + style_suptitle(fig, title=title, fontsize=18) fig.tight_layout() for label in times.keys(): - ax[0].plot(x_values,times[label],label=label,marker=markers) - ax[1].plot(x_values,nets[label],label=label,marker=markers) - ax[1].set_ylabel(f'total communication ({net_labels[net_unit]})') + ax[0].plot(x_values, times[label], label=label, marker=markers) + ax[1].plot(x_values, nets[label], label=label, marker=markers) + ax[1].set_ylabel(f"total communication ({net_labels[net_unit]})") ax[1].set_xlabel(x_label) style_title(ax[0], title=f"Time ({time_labels[time_unit]})") style_title(ax[1], title=f"Communication Cost ({net_labels[net_unit]})") @@ -96,8 +104,8 @@ def plot_benchmarks(x_values: List[Any],times: Dict[str,np.ndarray],nets: Dict[s plt.show() -def average_benchmarks(vals: List[Dict[str,np.ndarray]]) ->Dict[str,np.ndarray]: - ''' +def average_benchmarks(vals: List[Dict[str, np.ndarray]]) -> Dict[str, np.ndarray]: + """ average_benchmarks averages a list of benchmark values Args: @@ -105,10 +113,10 @@ def average_benchmarks(vals: List[Dict[str,np.ndarray]]) ->Dict[str,np.ndarray]: Returns: Dict[str,np.ndarray]: the dictionary that averages the list given as argument - ''' + """ result = {} for benchmarks in vals: - for key,values in benchmarks.items(): + for key, values in benchmarks.items(): if key not in result: result[key] = values else: @@ -119,9 +127,13 @@ def average_benchmarks(vals: List[Dict[str,np.ndarray]]) ->Dict[str,np.ndarray]: return final_res - -def compute_benchmarks(x_values: List[Any],recordings: List[List[models.Computation]],time_unit: int = time_tools.millisecond,net_unit: int = kilobyte) -> Tuple[Dict[str,np.ndarray],Dict[str,np.ndarray]]: - ''' +def compute_benchmarks( + x_values: List[Any], + recordings: List[List[models.Computation]], + time_unit: int = time_tools.millisecond, + net_unit: int = kilobyte, +) -> Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray]]: + """ compute_benchmarks computes the time/communication benchmarking results given a set of computation recordings Args: @@ -132,7 +144,7 @@ def compute_benchmarks(x_values: List[Any],recordings: List[List[models.Computat Returns: Tuple[Dict[str,np.ndarray],Dict[str,np.ndarray]]: a tuple (T,N) where T is the timings for each computation types and N is the communications for each computation types - ''' + """ all_comp_types = set() for recording in recordings: for computation in recording: @@ -144,7 +156,7 @@ def compute_benchmarks(x_values: List[Any],recordings: List[List[models.Computat nets[comp] = np.zeros(len(x_values)) total_time = np.zeros(len(x_values)) total_net = np.zeros(len(x_values)) - for i,recording in enumerate(recordings): + for i, recording in enumerate(recordings): for comp in recording: time = get_total_time(comp) / time_unit net = get_total_communication(comp) / net_unit @@ -152,6 +164,6 @@ def compute_benchmarks(x_values: List[Any],recordings: List[List[models.Computat total_time[i] += time nets[str(comp.definition.type)][i] = net total_net[i] += net - times['total'] = total_time - nets['total'] = total_net - return times,nets + times["total"] = total_time + nets["total"] = total_net + return times, nets diff --git a/src/tuneinsight/utils/client.py b/src/tuneinsight/utils/client.py index 8710ca6..cf68f3b 100644 --- a/src/tuneinsight/utils/client.py +++ b/src/tuneinsight/utils/client.py @@ -2,10 +2,9 @@ from tuneinsight.api.sdk.client import Client - @contextmanager -def with_timeout(c: 
Client,timeout: int): - ''' +def with_timeout(c: Client, timeout: int): + """ with_timeout sets a custom timeout to the client temporarily to be used in a with statement Args: @@ -14,7 +13,7 @@ def with_timeout(c: Client,timeout: int): Yields: Client: the client with updated timeout - ''' + """ old_timeout = c.timeout c.timeout = timeout yield c diff --git a/src/tuneinsight/utils/code.py b/src/tuneinsight/utils/code.py new file mode 100644 index 0000000..d4f1b7f --- /dev/null +++ b/src/tuneinsight/utils/code.py @@ -0,0 +1,26 @@ +"""Utilities to read and package Python code to be sent.""" + +import inspect +import textwrap + + +def get_code(function): + """Returns the cleaned code for a Python function. + + The cleaning involves (1) dedenting the code so that the `def` statement is not + indented, and (2) removing all decorators. + + """ + # Fetch and dedent the code of the custom function. + function_code = textwrap.dedent(inspect.getsource(function)) + # inspect.getsource also fetches decorators around the function. We remove these, as (1) this can cause issues, since + # the decorator code is not sent, (2) we have custom decorators implemented in remotedf. + lines = function_code.split("\n") + cutoff = 0 + for i, line in enumerate(lines): + if line.startswith("def "): + cutoff = i + break + # Ensure we are not cutting any non-decorator code. + assert line.startswith("@"), "Non-decorator line before function declaration" + return "\n".join(lines[cutoff:]) diff --git a/src/tuneinsight/utils/custom_generator.py b/src/tuneinsight/utils/custom_generator.py index 5a28f35..5632510 100644 --- a/src/tuneinsight/utils/custom_generator.py +++ b/src/tuneinsight/utils/custom_generator.py @@ -1,4 +1,4 @@ -from typing import List,Any +from typing import List, Any from random import Random from datetime import timedelta import uuid @@ -7,16 +7,15 @@ import numpy as np - -def inv_sigmoid(x,c1,c2): +def inv_sigmoid(x, c1, c2): return 1.0 / (1.0 + math.exp(c1 * (-x - c2))) -def get_sigmoids(c1,c2,num): - res = [] +def get_sigmoids(c1, c2, num): + res = [] for i in range(num): - x = (float(i) / float(num))*2 -1 - res.append(inv_sigmoid(x,c1,c2)) + x = (float(i) / float(num)) * 2 - 1 + res.append(inv_sigmoid(x, c1, c2)) return res @@ -26,178 +25,229 @@ def flatten(l): def cluster_weights(cluster_size: int): half = float(cluster_size) / 2.0 - 0.5 + def get_weight(i): dist = abs(half - i) if dist < 1: return 1 return 1.0 / (dist * 1.1) + return [get_weight(i) for i in range(cluster_size)] -def generate_survival_curve(num_months: int,death_rate: float,shift: float) -> List[float]: + +def generate_survival_curve( + num_months: int, death_rate: float, shift: float +) -> List[float]: c1 = -10 * death_rate c2 = shift - return get_sigmoids(c1,c2,num_months) + return get_sigmoids(c1, c2, num_months) class CustomGenerator(Random): - df: pd.DataFrame snp_weights: List[float] snp_choices: List[float] locuses: List[str] rows: int - def __init__(self,rows: int = 10): + def __init__(self, rows: int = 10): self.df = pd.DataFrame(index=range(rows)) self.rows = rows self.np_rng = np.random.default_rng() - self.snp_weights = [15,5,1] - self.snp_choices = [0,0.5,1] - + self.snp_weights = [15, 5, 1] + self.snp_choices = [0, 0.5, 1] - def seed(self,a=None, version=2): + def seed(self, a=None, version=2): self.np_rng = np.random.default_rng(a) - super().seed(a,version) - - - def generate_dummies(self,name: str): - dummies = pd.get_dummies(self.df[name],prefix=name,prefix_sep="_") - self.df = pd.concat([self.df,dummies],axis=1) - - 
def generate_genome(self,num_snps: int,locus_path: str,locus_column: str,keep: List[str] = None): - locuses = self._get_sample_locuses(num_snps,locus_path,locus_column,keep) - for i,locus in enumerate(locuses): - self.df[locus] = self.choices(self.snp_choices,weights=self.snp_weights,k=self.rows) + super().seed(a, version) + + def generate_dummies(self, name: str): + dummies = pd.get_dummies(self.df[name], prefix=name, prefix_sep="_") + self.df = pd.concat([self.df, dummies], axis=1) + + def generate_genome( + self, num_snps: int, locus_path: str, locus_column: str, keep: List[str] = None + ): + locuses = self._get_sample_locuses(num_snps, locus_path, locus_column, keep) + for i, locus in enumerate(locuses): + self.df[locus] = self.choices( + self.snp_choices, weights=self.snp_weights, k=self.rows + ) if i % 25 == 0: self.df = self.df.copy() self.locuses = locuses - def normalize_min_max(self,column): - self.df[column] = (self.df[column]-self.df[column].min())/(self.df[column].max()-self.df[column].min()) + def normalize_min_max(self, column): + self.df[column] = (self.df[column] - self.df[column].min()) / ( + self.df[column].max() - self.df[column].min() + ) - def clip(self,column,lower,upper): - self.df[column] = self.df[column].clip(lower=lower,upper=upper) + def clip(self, column, lower, upper): + self.df[column] = self.df[column].clip(lower=lower, upper=upper) - def add_random_event(self,event: str,start,end): + def add_random_event(self, event: str, start, end): delta = end - start + def random_date(): int_delta = (delta.days * 24 * 60 * 60) + delta.seconds random_second = self.randrange(int_delta) return start + timedelta(seconds=random_second) - self.df[event] = self.df.apply(lambda x: random_date(),axis=1) - def add_covariate(self,name:str,choices: List[float],weights: List[float]): - self.df[name] = self.choices(choices,weights=weights,k=self.rows) + self.df[event] = self.df.apply(lambda x: random_date(), axis=1) + + def add_covariate(self, name: str, choices: List[float], weights: List[float]): + self.df[name] = self.choices(choices, weights=weights, k=self.rows) self.normalize_min_max(name) - def encode_category(self,name: str,encoded_name:str,choices: List[Any]): + def encode_category(self, name: str, encoded_name: str, choices: List[Any]): mapping = {} - for i,choice in enumerate(choices): + for i, choice in enumerate(choices): mapping[choice] = i def encode(x): return mapping[x[name]] - self.df[encoded_name] = self.df.apply(encode,axis=1) + self.df[encoded_name] = self.df.apply(encode, axis=1) self.normalize_min_max(encoded_name) - - def add_survival_event(self,event: str,start_event: str,num_months: int =12 ,death_rate: float =0.9,shift: float =0.0,condition=None): + def add_survival_event( + self, + event: str, + start_event: str, + num_months: int = 12, + death_rate: float = 0.9, + shift: float = 0.0, + condition=None, + ): if not event in self.df: self.df[event] = pd.NaT - survivals = generate_survival_curve(num_months=num_months,death_rate=death_rate,shift=shift) + survivals = generate_survival_curve( + num_months=num_months, death_rate=death_rate, shift=shift + ) def survival_func(x): if condition is not None: if not condition(x): return x[event] - return self.survival_events(x[start_event],survivals) - - self.df[event] = self.df.apply(survival_func,axis=1) - - - def add_phenotype(self,name: str,correlated_snp_indexes: List[int],weights: List[float],covariates: List[str] = None,covariates_weights: List[float] = None): - + return self.survival_events(x[start_event], 
survivals) + + self.df[event] = self.df.apply(survival_func, axis=1) + + def add_phenotype( + self, + name: str, + correlated_snp_indexes: List[int], + weights: List[float], + covariates: List[str] = None, + covariates_weights: List[float] = None, + ): def compute_value(x): val = 0.0 - for i,index in enumerate(correlated_snp_indexes): + for i, index in enumerate(correlated_snp_indexes): val += x[self.locuses[index]] * weights[i] - for i,cov in enumerate(covariates): + for i, cov in enumerate(covariates): val += x[cov] * covariates_weights[i] return val - self.df[name] = self.df.apply(compute_value,axis=1) + self.df[name] = self.df.apply(compute_value, axis=1) self.normalize_min_max(name) + def apply_snp_correlation( + self, num_regions: int, condition, correlation_factor: float = 0.9 + ): + snp_indices, probabilities = self.get_random_snp_clusters( + num_clusters=num_regions, + num_vals=len(self.locuses), + scale=correlation_factor, + ) - def apply_snp_correlation(self,num_regions: int,condition,correlation_factor: float = 0.9): - snp_indices,probabilities = self.get_random_snp_clusters(num_clusters=num_regions,num_vals=len(self.locuses),scale=correlation_factor) - def correlate(x,snp,probability): + def correlate(x, snp, probability): if condition(x) and self.random() < probability: return 1 return x[snp] - for i,snp_index in enumerate(snp_indices): + for i, snp_index in enumerate(snp_indices): snp = self.locuses[snp_index] prob = probabilities[i] - self.df[snp] = self.df.apply(lambda x,y=snp,z=prob: correlate(x,y,z),axis=1) - - - def add_variable(self,variable_name: str,loc: float=0.0,scale:float= 1.0,normalize: bool = True): - self.df[variable_name] = self.np_rng.normal(loc,scale,size=self.df.shape[0]) + self.df[snp] = self.df.apply( + lambda x, y=snp, z=prob: correlate(x, y, z), axis=1 + ) + + def add_variable( + self, + variable_name: str, + loc: float = 0.0, + scale: float = 1.0, + normalize: bool = True, + ): + self.df[variable_name] = self.np_rng.normal(loc, scale, size=self.df.shape[0]) if normalize: self.normalize_min_max(variable_name) + def add_relation( + self, + new_variable: str, + related_variable: str, + loc: float = 0.0, + scale: float = 1.0, + ): + vals = self.np_rng.normal(loc, scale, size=self.df.shape[0]) - def add_relation(self,new_variable: str,related_variable:str,loc: float=0.0,scale:float= 1.0): - vals = self.np_rng.normal(loc,scale,size=self.df.shape[0]) def apply_func(x): return x[related_variable] + vals[x.name] - self.df[new_variable] = self.df.apply(apply_func,axis=1) - def apply_variable(self,variable_name: str,condition,loc: float=0.0,scale:float= 1.0): - vals = self.np_rng.normal(loc,scale,size=self.df.shape[0]) + self.df[new_variable] = self.df.apply(apply_func, axis=1) + + def apply_variable( + self, variable_name: str, condition, loc: float = 0.0, scale: float = 1.0 + ): + vals = self.np_rng.normal(loc, scale, size=self.df.shape[0]) + def apply_func(x): if condition(x): return vals[x.name] return x[variable_name] - self.df[variable_name] = self.df.apply(apply_func,axis=1) + self.df[variable_name] = self.df.apply(apply_func, axis=1) - def apply_correlation(self,column: str,condition,applied_value: float=1,correlation_factor: float= 0.9): + def apply_correlation( + self, + column: str, + condition, + applied_value: float = 1, + correlation_factor: float = 0.9, + ): def correlate(x): if condition(x) and self.random() < correlation_factor: return applied_value return x[column] - self.df[column] = self.df.apply(correlate,axis=1) + self.df[column] = 
self.df.apply(correlate, axis=1) - def get_random_snp_clusters(self,num_clusters: int,num_vals: int,scale: float): + def get_random_snp_clusters(self, num_clusters: int, num_vals: int, scale: float): allIndices = [] allWeights = [] - cluster_size = int(math.ceil(float(num_vals) / 500 )) + cluster_size = int(math.ceil(float(num_vals) / 500)) for _ in range(num_clusters): - random_index = self.randint(0,num_vals -1 -cluster_size) - indices = list(range(random_index,random_index + cluster_size)) + random_index = self.randint(0, num_vals - 1 - cluster_size) + indices = list(range(random_index, random_index + cluster_size)) weights = cluster_weights(cluster_size=cluster_size) allIndices.append(indices) allWeights.append(weights) allIndices = flatten(allIndices) allWeights = flatten(allWeights) - for i,_ in enumerate(allWeights): + for i, _ in enumerate(allWeights): allWeights[i] *= scale allWeights[i] -= self.random() * 0.1 * allWeights[i] - return allIndices,allWeights - + return allIndices, allWeights - def apply_choice(self,name: str,choices: List[Any],weights: List[float] = None): - self.df[name] = self.choices(choices,weights=weights,k=self.rows) + def apply_choice(self, name: str, choices: List[Any], weights: List[float] = None): + self.df[name] = self.choices(choices, weights=weights, k=self.rows) - - def survival_events(self,start_time,target_survival): + def survival_events(self, start_time, target_survival): endEventTime = start_time for prob in target_survival: - endEventTime += np.timedelta64(1,'M') + endEventTime += np.timedelta64(1, "M") if endEventTime > pd.Timestamp.now(): return pd.NaT t = self.random() @@ -205,19 +255,26 @@ def survival_events(self,start_time,target_survival): return endEventTime return pd.NaT - - def gen_ids(self,id_column: str): + def gen_ids(self, id_column: str): self.df[id_column] = "" - self.df[id_column] = self.df.apply(lambda x: self._random_id(),axis=1) - + self.df[id_column] = self.df.apply(lambda x: self._random_id(), axis=1) def _random_id(self): return str(uuid.UUID(int=self.getrandbits(128))) @staticmethod - def _get_sample_locuses(num: int,locus_values_csv_path: str,locus_csv_column: str,keep: List[str] = None) -> List[str]: + def _get_sample_locuses( + num: int, + locus_values_csv_path: str, + locus_csv_column: str, + keep: List[str] = None, + ) -> List[str]: tmp = pd.read_csv(locus_values_csv_path) - others = tmp[(~tmp[locus_csv_column].isin(keep))].sample(n=num -len(keep)).sort_index() + others = ( + tmp[(~tmp[locus_csv_column].isin(keep))] + .sample(n=num - len(keep)) + .sort_index() + ) kept = tmp[tmp[locus_csv_column].isin(keep)].sort_index() - res = pd.concat([others,kept]).sort_index() + res = pd.concat([others, kept]).sort_index() return res[locus_csv_column].to_list() diff --git a/src/tuneinsight/utils/errors.py b/src/tuneinsight/utils/errors.py index 1515ac3..9739736 100644 --- a/src/tuneinsight/utils/errors.py +++ b/src/tuneinsight/utils/errors.py @@ -9,38 +9,38 @@ try: ipython = get_ipython() - ipython_traceback = ipython._showtraceback # pylint: disable=W0212 + ipython_traceback = ipython._showtraceback # pylint: disable=W0212 ipython_set = True except AttributeError: warnings.warn("unable to get ipython, traceback suppression is disabled") + @contextmanager def except_handler(exc_handler): "Sets a custom exception handler for the scope of a 'with' block." 
sys.excepthook = exc_handler if ipython_set: - ipython._showtraceback = exc_handler # pylint: disable=W0212 + ipython._showtraceback = exc_handler  # pylint: disable=W0212 yield -def hide_traceback(err_type, value, traceback): # pylint: disable=W0613 - ''' +def hide_traceback(err_type, value, traceback):  # pylint: disable=W0613 + """ hide_traceback is a custom exception handler function which does not display the traceback Args: err_type (_type_): the type of the exception value (_type_): the value of the exception traceback (_type_): the traceback which is not used - ''' - print(': '.join([str(err_type.__name__), str(value)])) - sys.excepthook = sys.__excepthook__ # pylint: disable=W0212 + """ + print(": ".join([str(err_type.__name__), str(value)])) + sys.excepthook = sys.__excepthook__  # pylint: disable=W0212 if ipython_set: - ipython._showtraceback = ipython_traceback # pylint: disable=W0212 - + ipython._showtraceback = ipython_traceback  # pylint: disable=W0212 def hidden_traceback_scope(): - ''' + """ hidden_traceback_scope returns a scope in which any raised error has its traceback hidden - ''' + """ return except_handler(hide_traceback) diff --git a/src/tuneinsight/utils/generator.py b/src/tuneinsight/utils/generator.py index 1329049..2532fdb 100644 --- a/src/tuneinsight/utils/generator.py +++ b/src/tuneinsight/utils/generator.py @@ -3,102 +3,110 @@ import pandas as pd -class MixedGenerator(Random): +class MixedGenerator(Random): cols: List[str] - generators: List[Callable[[],Any]] - + generators: List[Callable[[], Any]] def __init__(self): self.cols = [] self.generators = [] - def addRangeColumn(self,column: str, r: List[float]): + def addRangeColumn(self, column: str, r: List[float]): self.cols.append(column) + def generator(): - min_val,max_val = r[0],r[1] + min_val, max_val = r[0], r[1] return self.random() * (max_val - min_val) + min_val + self.generators.append(generator) - def addCategoricalColumn(self,column: str,categories: List[str]): + def addCategoricalColumn(self, column: str, categories: List[str]): self.cols.append(column) + def generator(): - return categories[self.randint(0,len(categories) -1)] + return categories[self.randint(0, len(categories) - 1)] + self.generators.append(generator) - def generate(self,num_rows: int) -> pd.DataFrame: + def generate(self, num_rows: int) -> pd.DataFrame: data = [] for _ in range(num_rows): tmp = [] for g in self.generators: tmp.append(g()) data.append(tmp) - return pd.DataFrame(data=data,columns=self.cols) - - + return pd.DataFrame(data=data, columns=self.cols) class PatientGenerator(MixedGenerator): - - age_ranges: List[List[int]] = [[3,15],[16,45],[45,70],[70,105]] - age_weights: List[float] = [1,2,3,2] - gender_choices: List[str] = ["female","male","agender","bigender"] - gender_weights: List[float] = [0.45,0.45,0.05,0.05] - district_choices: List[str] = ["Geneva","Vaud","Bern","Fribourg","Neuchatel"] - district_weights: List[float] = [1,1,1,1,1] + age_ranges: List[List[int]] = [[3, 15], [16, 45], [45, 70], [70, 105]] + age_weights: List[float] = [1, 2, 3, 2] + gender_choices: List[str] = ["female", "male", "agender", "bigender"] + gender_weights: List[float] = [0.45, 0.45, 0.05, 0.05] + district_choices: List[str] = ["Geneva", "Vaud", "Bern", "Fribourg", "Neuchatel"] + district_weights: List[float] = [1, 1, 1, 1, 1] weight_factor: float = 1 height_factor: float = 1 - age_height_ranges = [5,10,20,60,80,200] - age_height_averages = [70,120,150,180,170,160] - origin_choices: List[str] =
["Swiss","Europe","North America","South America","Africa","Asia","Oceania"] - origin_weights: List[float] = [20,10,2,3,7,5,1] - + age_height_ranges = [5, 10, 20, 60, 80, 200] + age_height_averages = [70, 120, 150, 180, 170, 160] + origin_choices: List[str] = [ + "Swiss", + "Europe", + "North America", + "South America", + "Africa", + "Asia", + "Oceania", + ] + origin_weights: List[float] = [20, 10, 2, 3, 7, 5, 1] def __init__(self): - self.age_ranges = [[3,15],[16,45],[45,70],[70,105]] - self.age_weights = [1,2,3,2] - self.gender_choices = ["female","male","agender","bigender"] - self.gender_weights = [0.48,0.48,0.02,0.02] - self.district_weights = [0.1,0.3,0.3,0.3] - - def get_age_average(self,age: int) -> float: - for i,k in enumerate(self.age_height_ranges): + self.age_ranges = [[3, 15], [16, 45], [45, 70], [70, 105]] + self.age_weights = [1, 2, 3, 2] + self.gender_choices = ["female", "male", "agender", "bigender"] + self.gender_weights = [0.48, 0.48, 0.02, 0.02] + self.district_weights = [0.1, 0.3, 0.3, 0.3] + + def get_age_average(self, age: int) -> float: + for i, k in enumerate(self.age_height_ranges): if age < k: return self.age_height_averages[i] - return self.age_height_averages[len(self.age_height_averages) -1] - + return self.age_height_averages[len(self.age_height_averages) - 1] def random_district(self) -> str: - return self.choices(self.district_choices,weights=self.district_weights,k=1)[0] + return self.choices(self.district_choices, weights=self.district_weights, k=1)[ + 0 + ] def random_gender(self) -> str: - return self.choices(self.gender_choices,weights=self.gender_weights,k=1)[0] + return self.choices(self.gender_choices, weights=self.gender_weights, k=1)[0] + def age_to_height(self, age: int) -> float: + return ( + (self.random() * 0.2 + 0.8 + self.random() * 0.1) + * self.get_age_average(age) + * self.height_factor + ) - def age_to_height(self,age: int) -> float: - return (self.random() * 0.2 + 0.8 + self.random() * 0.1) * self.get_age_average(age) * self.height_factor - - - def height_to_weight(self,height: float) -> float: + def height_to_weight(self, height: float) -> float: return (self.random() * 0.4 + 0.8) * height * 0.35 - def random_age(self) -> int: - chosen_range = self.choices(self.age_ranges,weights=self.age_weights,k=1)[0] + chosen_range = self.choices(self.age_ranges, weights=self.age_weights, k=1)[0] return self.random_age_from_range(chosen_range) - - def random_origin(self,district: str) -> str: + def random_origin(self, district: str) -> str: weights = self.origin_weights.copy() if district == "Fribourg": weights[0] += 10 if district == "Vaud": weights[0] += 5 - return self.choices(self.origin_choices,weights=weights,k=1)[0] + return self.choices(self.origin_choices, weights=weights, k=1)[0] - def random_age_from_range(self,range_values: List[int]) -> int: - min_val,max_val = range_values[0],range_values[1] - return self.randint(min_val,max_val) + def random_age_from_range(self, range_values: List[int]) -> int: + min_val, max_val = range_values[0], range_values[1] + return self.randint(min_val, max_val) def random_patient(self) -> List[Any]: age = self.random_age() @@ -115,17 +123,17 @@ def random_patient(self) -> List[Any]: addFactor = 1.05 height = self.age_to_height(age) * addFactor weight = self.height_to_weight(height) - return [district,origin,gender,age,height,weight] + return [district, origin, gender, age, height, weight] @staticmethod def columns() -> List[str]: - return ["district","origin","gender","age","height","weight"] + return 
["district", "origin", "gender", "age", "height", "weight"] - def new_dataframe(self,num_rows: int) ->pd.DataFrame: + def new_dataframe(self, num_rows: int) -> pd.DataFrame: data = self.generate(num_rows=num_rows) - return pd.DataFrame(data=data,columns=self.columns()) + return pd.DataFrame(data=data, columns=self.columns()) - def generate(self,num_rows: int): + def generate(self, num_rows: int): data = [] for _ in range(num_rows): data.append(self.random_patient()) diff --git a/src/tuneinsight/utils/io.py b/src/tuneinsight/utils/io.py index 54881e8..d99f235 100644 --- a/src/tuneinsight/utils/io.py +++ b/src/tuneinsight/utils/io.py @@ -2,11 +2,8 @@ import pandas as pd - - - -def data_to_bytes(data,remove_header:bool = False) -> bytes: - ''' +def data_to_bytes(data, remove_header: bool = False) -> bytes: + """ data_to_bytes converts a dataset to csv bytes Args: @@ -15,16 +12,16 @@ def data_to_bytes(data,remove_header:bool = False) -> bytes: Returns: bytes: the csv bytes - ''' + """ df = pd.DataFrame(data) buf = io.BytesIO() - df.to_csv(buf,index=False,header=not remove_header) + df.to_csv(buf, index=False, header=not remove_header) return buf.getvalue() -def data_from_bytes(buf: bytes,no_header: bool = False) -> pd.DataFrame: - ''' +def data_from_bytes(buf: bytes, no_header: bool = False) -> pd.DataFrame: + """ data_from_bytes convert the provided csv bytes to a dataframe Args: @@ -33,7 +30,7 @@ def data_from_bytes(buf: bytes,no_header: bool = False) -> pd.DataFrame: Returns: pd.DataFrame: the resulting dataframe - ''' + """ if no_header: - return pd.read_csv(io.BytesIO(buf),header=None) + return pd.read_csv(io.BytesIO(buf), header=None) return pd.read_csv(io.BytesIO(buf)) diff --git a/src/tuneinsight/utils/model_performance_eval.py b/src/tuneinsight/utils/model_performance_eval.py index cbed3b3..87cfd93 100644 --- a/src/tuneinsight/utils/model_performance_eval.py +++ b/src/tuneinsight/utils/model_performance_eval.py @@ -2,13 +2,16 @@ import math import numpy as np -def mse(y_true: List[float]=None, y_pred: List[float]=None) -> float: - return np.square(np.subtract(y_true,y_pred)).mean() -def rmse(y_true: List[float]=None, y_pred: List[float]=None) -> float: - return math.sqrt(mse(y_true,y_pred)) +def mse(y_true: List[float] = None, y_pred: List[float] = None) -> float: + return np.square(np.subtract(y_true, y_pred)).mean() -def r2_score(y_true: List[float]=None, y_pred: List[float]=None) -> float: + +def rmse(y_true: List[float] = None, y_pred: List[float] = None) -> float: + return math.sqrt(mse(y_true, y_pred)) + + +def r2_score(y_true: List[float] = None, y_pred: List[float] = None) -> float: if len(y_pred) != len(y_true): raise Exception("Input sizes should be equal") size = len(y_pred) @@ -22,5 +25,5 @@ def r2_score(y_true: List[float]=None, y_pred: List[float]=None) -> float: ss_t += (y_true[i] - mean_y) ** 2 ss_r += (y_true[i] - y_pred[i]) ** 2 - r2 = 1 - (ss_r/ss_t) + r2 = 1 - (ss_r / ss_t) return r2 diff --git a/src/tuneinsight/utils/plaintext_gwas.py b/src/tuneinsight/utils/plaintext_gwas.py index a0d787c..2ef1d66 100644 --- a/src/tuneinsight/utils/plaintext_gwas.py +++ b/src/tuneinsight/utils/plaintext_gwas.py @@ -1,38 +1,40 @@ -from math import erf,sqrt +from math import erf, sqrt import numpy as np + def phi(x): #'Cumulative distribution function for the standard normal distribution' return (1.0 + erf(x / sqrt(2.0))) / 2.0 + def GWASLinReg(V, X, Y): """ - p: the number of patients - v: the number of variants - f: the number of covariates + p: the number of patients + v: the 
number of variants + f: the number of covariates - input: + input: - V: the (v * p) matrix of variants - X: the (p * (f+1)) matrix of covariates (with intercept) - Y: the (1 * p) vector of phenotypes/labels/conditions + V: the (v * p) matrix of variants + X: the (p * (f+1)) matrix of covariates (with intercept) + Y: the (1 * p) vector of phenotypes/labels/conditions - output: + output: - P: the (v * 1) vector of p-values + P: the (v * 1) vector of p-values """ v = V.shape[0] p = V.shape[1] - f = X.shape[1]-1 + f = X.shape[1] - 1 - assert f > -1 # X must include intercept + assert f > -1 # X must include intercept assert X.shape[0] == p assert Y.shape[0] == p P = [0 for i in range(v)] - #Iterates over all the variants + # Iterates over all the variants for i in range(v): # S = 1 || X || V S = np.c_[X, V[i].T] @@ -41,20 +43,19 @@ def GWASLinReg(V, X, Y): STSInv = np.linalg.inv(S.T @ S) # err - err = pow(STSInv[f+1][f+1], 0.5) + err = pow(STSInv[f + 1][f + 1], 0.5) # w = Y x S x (S^T x S)^-1 w = (Y @ S) @ STSInv - #yhat = Y x S x (S^T x S)^-1 x S^T + # yhat = Y x S x (S^T x S)^-1 x S^T Yhat = w @ S.T # mse = sqrt(sum((y[i] - yhat[i])^2) / p) mse = Y - Yhat - mse = pow(np.inner(mse, mse)/p, 0.5) + mse = pow(np.inner(mse, mse) / p, 0.5) # p-value = 2 * cdf(-|beta/(mse*err)|( - P[i] = 2 * phi(-abs(w[f+1] / (mse * err))) - + P[i] = 2 * phi(-abs(w[f + 1] / (mse * err))) return np.array(P) diff --git a/src/tuneinsight/utils/plots.py b/src/tuneinsight/utils/plots.py index 818972b..31d233f 100644 --- a/src/tuneinsight/utils/plots.py +++ b/src/tuneinsight/utils/plots.py @@ -7,8 +7,16 @@ this_path = str(Path(__file__).parent) -def style_plot(axis:plt.Axes, fig:plt.figure, title:str, x_label:str, y_label:str, size:tuple = (8,4), local=False): - """ Style a plot with Tune Insight branding +def style_plot( + axis: plt.Axes, + fig: plt.figure, + title: str, + x_label: str, + y_label: str, + size: tuple = (8, 4), + local=False, +): + """Style a plot with Tune Insight branding Args: axis (plt.Axes): axis on which to apply styling @@ -30,24 +38,29 @@ def style_plot(axis:plt.Axes, fig:plt.figure, title:str, x_label:str, y_label:st plt.xticks(font=font_regular, fontsize=8) plt.yticks(font=font_regular, fontsize=8) - fig.set_size_inches(size[0],size[1]) + fig.set_size_inches(size[0], size[1]) fig.tight_layout() if not local: - plt.text(0.5, -0.15, "The computation of these results was made possible by Tune Insight's Federated Confidential Computing.", horizontalalignment='center', - verticalalignment='top', - transform=axis.transAxes, - font=font_light, - fontsize=8) - + plt.text( + 0.5, + -0.15, + "The computation of these results was made possible by Tune Insight's Federated Confidential Computing.", + horizontalalignment="center", + verticalalignment="top", + transform=axis.transAxes, + font=font_light, + fontsize=8, + ) logo = Image.open(this_path + "/graphical/TuneInsight_logo.png") - rsize = logo.resize((np.array(logo.size)/4).astype(int)) + rsize = logo.resize((np.array(logo.size) / 4).astype(int)) axis.figure.figimage(rsize, 10, 0, alpha=0.9, zorder=1) -def style_title(axis: plt.Axes, title:str = "", fontsize:int = 15): - """ Style plot title with Tune Insight branding font. + +def style_title(axis: plt.Axes, title: str = "", fontsize: int = 15): + """Style plot title with Tune Insight branding font. 
Args: axis (plt.Axes): axis on which to apply styling @@ -58,13 +71,13 @@ def style_title(axis: plt.Axes, title:str = "", fontsize:int = 15): axis.set_title(title, font=font_regular, fontsize=fontsize) -def style_suptitle(fig,title= "",fontsize=15): +def style_suptitle(fig, title="", fontsize=15): font_regular = Path(this_path + "/graphical/MontserratRegular.ttf") - fig.suptitle(title,size=fontsize,font=font_regular) + fig.suptitle(title, size=fontsize, font=font_regular) def style_ylabel(axis: plt.Axes, y_label: str, fontsize: int = 10): - """ Style plot y label with Tune Insight branding font + """Style plot y label with Tune Insight branding font Args: axis (plt.Axes): axis on which to apply styling @@ -74,8 +87,9 @@ def style_ylabel(axis: plt.Axes, y_label: str, fontsize: int = 10): font_med = Path(this_path + "/graphical/MontserratMedium.ttf") axis.set_ylabel(y_label, font=font_med, fontsize=fontsize) -def add_ti_branding(axis: plt.Axes, x=1.5, ha='center', local=False): - """ Add Tune Insight logo and credits + +def add_ti_branding(axis: plt.Axes, x=1.5, ha="center", local=False): + """Add Tune Insight logo and credits Args: axis (plt.Axes): axis on which to add branding @@ -86,20 +100,25 @@ def add_ti_branding(axis: plt.Axes, x=1.5, ha='center', local=False): if not local: font_light = Path(this_path + "/graphical/MontserratLight.ttf") ymin, ymax = axis.get_ylim() - adjust = 0.1 * (ymax-ymin) - plt.text(x, ymin - adjust, "The computation of these results was made possible by Tune Insight's Federated Confidential Computing.", + adjust = 0.1 * (ymax - ymin) + plt.text( + x, + ymin - adjust, + "The computation of these results was made possible by Tune Insight's Federated Confidential Computing.", horizontalalignment=ha, - verticalalignment='top', + verticalalignment="top", font=font_light, - fontsize=8) + fontsize=8, + ) logo = Image.open(this_path + "/graphical/TuneInsight_logo.png") - rsize = logo.resize((np.array(logo.size)/4).astype(int)) + rsize = logo.resize((np.array(logo.size) / 4).astype(int)) axis.figure.figimage(rsize, 10, 0, alpha=0.9, zorder=1) -def hist(x, title, bins=30, hist_range=(0,30), local=True): - """ Create histogram with Tune Insight branding. + +def hist(x, title, bins=30, hist_range=(0, 30), local=True): + """Create histogram with Tune Insight branding. Args: x (_type_): values to plot @@ -114,13 +133,12 @@ def hist(x, title, bins=30, hist_range=(0,30), local=True): # plot fig, ax = plt.subplots() + ax.hist(x, bins=bins, range=hist_range, color="#D05F5C", edgecolor="black") - ax.hist(x,bins=bins,range=hist_range, color="#D05F5C", edgecolor='black') - - - style_plot(ax, fig, title, "","", size=(6,4), local=local) + style_plot(ax, fig, title, "", "", size=(6, 4), local=local) plt.show() + def get_path(): print(Path(__file__).parent) diff --git a/src/tuneinsight/utils/privacy.py b/src/tuneinsight/utils/privacy.py new file mode 100644 index 0000000..312572d --- /dev/null +++ b/src/tuneinsight/utils/privacy.py @@ -0,0 +1,118 @@ +"""Utilities for Differential Privacy.""" + +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt + +from tuneinsight.utils.plots import style_plot + + +class RatioEstimator: + """Compute confidence intevals for the ratio of two values computed with differential privacy. + + This class uses simulated samples to estimate various properties of the observed values. 
+ + """ + + def __init__( + self, + numerator: float, + denominator: float, + noise_scale: float, + noise_scale_denominator: float = None, + num_samples: int = int(1e6), + ): + """ + Create simulated samples of the ratio under DP. + + Args: + numerator: the numerator of the ratio, observed with Laplace noise. + denominator: the denominator of the ratio, observed with Laplace noise. + noise_scale: the scale of the Laplace noise added to the numerator (and the denominator, if not specified). + noise_scale_denominator: the scale of the Laplace noise added to the denominator (if None, noise_scale is used). + num_samples (int, default 1e6): number of samples to use in the Monte-Carlo estimation. + """ + + laplace_noises = np.random.laplace(loc=0, scale=1, size=(2, num_samples)) + + numerators = numerator + noise_scale * laplace_noises[0, :] + if noise_scale_denominator is None: + noise_scale_denominator = noise_scale + denominators = denominator + noise_scale_denominator * laplace_noises[1, :] + + self.observed = numerator / denominator + self.samples = numerators / denominators + + def confidence_intervals(self, p: list[float] = (95, 99)): + """ + Estimate confidence intervals for the ratio. + + Args: + p: the probabilities of the confidence interval (in percentages, in [0, 100]). + """ + results = [] + for perc in p: + d = ( + 100 - perc + ) / 2 # The fraction to "remove" on either side of the interval. + v_low, v_hi = np.percentile(self.samples, [d, (100 - d)]) + results.append([perc, v_low, v_hi]) + + return pd.DataFrame(results, columns=["Percentage", "CI (min)", "CI (max)"]) + + def draw_distribution(self, ci_color="k"): + """Display the shape of this distribution in a matplotlib figure. + + Args: + ci_color: if not None, the 95% and 99% confidence intervals are displayed in this color. + + """ + plt.style.use("bmh") + fig, axis = plt.subplots() + + # Extract a normalized histogram. + hist, bin_edges = np.histogram(self.samples, bins=100) + hist_norm = hist / hist.sum() + bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2 + bin_width = bin_edges[1:] - bin_edges[:-1] + # Several plot layers for visual effect. + max_height = hist_norm.max() + axis.bar( + bin_centers, + hist_norm, + width=bin_width, + alpha=0.8, + color="#D05F5C", + edgecolor="black", + ) + axis.plot( + [self.observed, self.observed], + [0, 1.2 * max_height], + "k--", + lw=1, + alpha=0.8, + ) + # plt.violinplot(self.samples, vert=False, showextrema=False, positions=[0], widths=max_height) + if ci_color is not None: + for p, w in zip([95, 99], [0.1, 0.05]): + ci = self.confidence_intervals([p]).iloc[0] + cmin = ci["CI (min)"] + cmax = ci["CI (max)"] + axis.plot([cmin] * 2, [0, w * max_height], c=ci_color) + axis.plot([cmax] * 2, [0, w * max_height], c=ci_color) + axis.plot( + [cmin, cmax], [0.99 * w * max_height] * 2, c=ci_color, alpha=0.3 + ) + axis.text(cmax, w * max_height * 1.1, f"{p}% ") + # Adjust visibility window. + plt.xlim([bin_edges.min(), bin_edges.max()]) + plt.ylim([0, 1.2 * max_height]) + + style_plot( + axis, + fig, + title="Distribution of true values", + x_label="Possible values", + y_label="Likelihood", + size=(6, 6), + ) diff --git a/src/tuneinsight/utils/remotedf.py b/src/tuneinsight/utils/remotedf.py new file mode 100644 index 0000000..4bc0bcc --- /dev/null +++ b/src/tuneinsight/utils/remotedf.py @@ -0,0 +1,362 @@ +"""Utilities to define preprocessing operations using only Pandas operations.
+ +This module defines the RemoteDataFrame class, which provides a Pandas-like interface +to the preprocessing operations supported by the API. + +The goal is to enable users to seemlessly swap between local DataFrame objects and +the RemoteDataFrame without changing the code. This enables them to test the code +locally and immediately use it remotely. + +""" + +from typing import Union + +import numpy as np +import pandas as pd + +from tuneinsight.api.sdk import models +from tuneinsight.computations.preprocessing import PreprocessingBuilder + +# Operations currently supported +# - add_columns +# - one_hot_encoding +# - counts +# - dropna +# - rename +# - set_index +# - reset_index +# - transpose +# - astype +# - apply_mapping +# - cut +# - set_columns // select (not sure what the difference is) +# - filter +# - custom (apply, sort of) + +# The following are not in native Pandas. We can add them to this file if we want, the easiest way +# being to copy-paste the code from the preprocessing module. +# - apply_regex +# - extract +# - time_diff +# - *deviation_squares +# - *quantiles + +# The following operations are not even in preprocessing.py! +# - PHONETICENCODING +# - DROP +# - SURVIVAL (create_survival_columns is already a nice interface). + + +# Internal classes that serve as holders for pending operations. + + +class _SelectedColumn: + """The output of a selection operation (df[column_name]).""" + + def __init__(self, remotedf, name: str): + self.remotedf = remotedf + self.name = name + + def __add__(self, other_column): + assert isinstance(other_column, _SelectedColumn) + return _SumOfColumns(self, other_column) + + # Some operations only apply to one column at a time. + # The actual operation is not applied until the variable is set. + def replace(self, to_replace: dict, inplace=False, default=""): + assert inplace is False, "replace cannot be done in place." + return _ApplyMappingOperation(self, to_replace, default) + + # All the following magic operations enable the filter operations (for comparisons). + def __eq__(self, value): + return _FilterOperation(self, models.ComparisonType.EQUAL, value, False) + + def __ne__(self, value): + return _FilterOperation(self, models.ComparisonType.NEQUAL, value, False) + + def __lt__(self, value): + return _FilterOperation(self, models.ComparisonType.LESS, value, True) + + def __le__(self, value): + return _FilterOperation(self, models.ComparisonType.LESSEQ, value, True) + + def __gt__(self, value): + return _FilterOperation(self, models.ComparisonType.GREATER, value, True) + + def __ge__(self, value): + return _FilterOperation(self, models.ComparisonType.GREATEREQ, value, True) + + +class _PendingOperation: + """Operations that have been initiated but still need to be added to the chain. + + _PendingOperations are typically created when operations are performed on a + _SelectedColumn object (e.g., a sum of two variables). These operations still + need a "destination" (the name of the column created for their output) before + they can be committed to the chain. This occurs when the user assigns an item + to the value, e.g., remotedf["new_column"] = p: _PendingOperation. + + """ + + def commit(self, output_name: str): + raise NotImplementedError() + + +class _SumOfColumns(_PendingOperation): + """The output of the sum of one or more selected columns.""" + + def __init__(self, operand1, operand2, sep="", numerical=True): + self.columns = [operand1, operand2] + assert operand1.remotedf == operand2.remotedf, "Must have the same dataset." 
+ # These variables are typically unaffected in the usual syntax, but can be + # modified manually if the user wants to. + self.numerical = numerical + self.sep = sep + + def commit(self, output_name: str): + df = self.columns[0].remotedf + df.builder.add_columns( + input_cols=[c.name for c in self.columns], + output=output_name, + sep=self.sep, + numerical=self.numerical, + ) + + +class _ApplyMappingOperation(_PendingOperation): + """A pending apply_mapping operation on a column.""" + + def __init__(self, column: _SelectedColumn, to_replace: dict, default: str = ""): + self.column = column + self.to_replace = to_replace + self.default = default + + def commit(self, output_name: str): + df = self.column.remotedf + df.builder.apply_mapping( + self.column.name, output_name, self.to_replace, self.default + ) + + +class _CutOperation(_PendingOperation): + """A pending cut operation on a column.""" + + def __init__(self, column: _SelectedColumn, bins: list[float], labels: list[str]): + self.column = column + self.bins = bins + self.labels = labels + + def commit(self, output_name: str): + df = self.column.remotedf + df.builder.cut(self.column.name, output_name, self.bins, self.labels) + + +class _FilterOperation: + """A filter operation on a column. + + This one is different from other pending operations in that it will be used + as the "output_column" operand: df[f: _FilterOperation] outputs df with + the filter operation applied. + + """ + + def __init__( + self, + column: _SelectedColumn, + comparator: models.ComparisonType, + value: str, + numerical: bool = False, + ): + self.column = column + self.comparator = comparator + self.value = value + self.numerical = numerical + + def apply(self): + df = self.column.remotedf + df.builder.filter( + self.column.name, self.comparator, str(self.value), self.numerical + ) + + +# Main class: + + +class RemoteDataFrame: + """DataFrame-like interface for remote post-processing operations.""" + + def __init__(self, builder: PreprocessingBuilder): + self.builder = builder + + # Magic methods to mimic Pandas objects. + def __getitem__(self, column): # self[column] + # If the argument of the selection is a _FilterOperation, apply the filter and return self. + if isinstance(column, _FilterOperation): + column.apply() + return self + # Otherwise, this is a selection operation for a single column. + assert isinstance(column, str), "Can only select one column." + return _SelectedColumn(self, column) + + def __setitem__(self, column, value): # self[column] = value + assert isinstance(column, str), "Can only set columns." + # Setting a column = creating a column using a different operation depending on the operand. + # Special case: value 1 (create a column of counts). + if value == 1: + self.builder.counts(output_column_name=column) + # Pending operations are operations that concern one or more columns, and whose output must + # be stored in a newly-created column. + elif isinstance(value, _PendingOperation): + value.commit(column) + else: + raise ValueError(f"Invalid type for value {value} in assignment.") + + # Re-implementing Pandas methods. + def dropna(self, subset: list[str] = None, inplace=True): + assert inplace is True, "dropna must be done inplace." + self.builder.dropna(subset) + return self + + def rename( + self, mapper: dict, axis="columns", copy=True, errors="raise", inplace=True + ): + assert inplace is True, "rename must be done inplace." + # Convert Pandas inputs into inputs compatible with the API. 
+ axis = {"columns": models.RenameAxis.COLUMNS, "index": models.RenameAxis.INDEX}[ + axis + ] + errors = {"raise": True, "ignore": False}[errors] + self.builder.rename(mapper, axis=axis, copy=copy, errors=errors) + return self + + def set_index( + self, + keys: Union[str, list[str]], + drop: bool = True, + append: bool = False, + inplace=True, + ): + assert inplace is True, "set_index must be done inplace." + if isinstance(keys, str): + keys = [keys] + assert isinstance(keys, list) and all( + isinstance(k, str) for k in keys + ), "keys must be a list of column names, or a single column name." + self.builder.set_index(columns=keys, drop=drop, append=append) + return self + + def reset_index(self, drop: bool = False, level: list[str] = None, inplace=True): + assert inplace is True, "reset_index must be done inplace." + self.builder.reset_index(drop=drop, level=level) + return self + + def transpose(self, copy=False): + self.builder.transpose(copy) + return self + + def astype(self, dtype, copy=True, errors="raise"): + assert isinstance(dtype, dict), "Must provide types as a dictionary." + converted = {str: "str", int: "int", float: "float"} + dtype = {k: converted.get(v, v) for k, v in dtype.items()} + errors = {"raise": True, "ignore": False}[errors] + self.builder.astype(dtype, copy, errors) + return self + + +# We also wrap Pandas operations with our interface. The goal is to be able to use these +# variables with either Pandas Dataframes or RemoteDataFrames. + + +def get_dummies( + df: Union[pd.DataFrame, RemoteDataFrame], + target_column: str, + prefix: str, + specified_types: list[str], +): + """Create dummies (one-hot encoding) for a given column and collection of values.""" + if isinstance(df, RemoteDataFrame): + df.builder.one_hot_encoding(target_column, prefix, specified_types) + return df + if isinstance(df, pd.DataFrame): + # Instead of using pd.get_dummies, manually create the columns for each specified value. + prefix_sep = "" + if prefix != "": + prefix_sep = "_" + for value in specified_types: + df[f"{prefix}{prefix_sep}{value}"] = df[target_column] == value + return df + # If neither a Remote or Pandas DataFrame. + raise ValueError(f"Invalid type for get_dummies: {type(df)}.") + + +def cut( + df: Union[pd.DataFrame, pd.Series, _SelectedColumn, RemoteDataFrame], + bins=list[float], + labels: list[str] = None, + column: str = None, +): + """Discretize a continuous column into bins. Similar to pd.cut. + + Args: + df: the [Remote]DataFrame to cut. This should be either a column of the data, or the column argument should be specified. + bins: edges of the bins for the discretization. There will be len(bins)-1 bins. + labels (optional): how to label each bin in the output. + column (optional): the column to cut. + + """ + assert np.iterable(bins), "Only iterable `bins` are supported." + if column is not None: + df = df[column] + if isinstance(df, pd.Series): + return pd.cut(df, bins, labels=labels) + if isinstance(df, _SelectedColumn): + return _CutOperation(df, bins, labels) + if isinstance(df, RemoteDataFrame): + raise ValueError("Cannot cut a whole dataset: select a column.") + raise ValueError(f"Invalid type for cut: {type(df)}.") + + +def select( + df: Union[pd.DataFrame, RemoteDataFrame], + columns: list[str], + create_if_missing: bool = False, + dummy_value: str = "", +): + """Select a set of columns. 
Equivalent to df = df[columns], with additional functionalities.""" + if isinstance(df, pd.DataFrame): + if create_if_missing: + for col in columns: + if col not in df.columns: + df[col] = dummy_value + return df[columns] + if isinstance(df, RemoteDataFrame): + df.builder.select(columns) + return df + raise ValueError(f"Invalid type for select: {type(df)}") + + +def custom(name: str = "", description: str = ""): + """Decorator for custom operations with signature pd.DataFrame -> pd.DataFrame. + + Wraps a function from DataFrame to DataFrame to transparently handle RemoteDataFrames. + If the input is a RemoteDataFrame, the function call to func is added to the + preprocessing chain as a custom operation. + + Args: + - name: the name of the function, for documentation purposes. + - description: the description of the function, for documentation purposes. + + """ + + def decorator(func): + def wrappedfunc(df): + if isinstance(df, pd.DataFrame): + return func(df) + if isinstance(df, RemoteDataFrame): + df.builder.custom(function=func, name=name, description=description) + return df + raise ValueError(f"Invalid type for custom function: {type(df)}.") + + return wrappedfunc + + return decorator diff --git a/src/tuneinsight/utils/testing.py b/src/tuneinsight/utils/testing.py index c3b750c..ae4f498 100644 --- a/src/tuneinsight/utils/testing.py +++ b/src/tuneinsight/utils/testing.py @@ -7,12 +7,12 @@ from tuneinsight.utils.generator import PatientGenerator from tuneinsight.client.diapason import Diapason + class TestCase3Nodes(unittest.TestCase): """ TestCase3Nodes is a base test case class to test the SDK with a 3 node configuration """ - generator: PatientGenerator = None clients: List[Diapason] = None root_node: Diapason = None @@ -20,13 +20,16 @@ class TestCase3Nodes(unittest.TestCase): delta: float = 0.0001 test_data_path: str = "test/data/tiny.csv" - def verify_success(self,response: Response): + def verify_success(self, response: Response): status_codes_min = 200 status_codes_max = 210 - if response.status_code < status_codes_min or response.status_code > status_codes_max: - print("wrong status code: ",response.content) - self.assertGreaterEqual(response.status_code,status_codes_min) - self.assertGreater(status_codes_max,response.status_code) + if ( + response.status_code < status_codes_min + or response.status_code > status_codes_max + ): + print("wrong status code: ", response.content) + self.assertGreaterEqual(response.status_code, status_codes_min) + self.assertGreater(status_codes_max, response.status_code) @staticmethod def aggregate_data(data): @@ -36,12 +39,11 @@ def aggregate_data(data): for i in range(vec_length): result_row.append(float(0)) for row in data: - for i,v in enumerate(row): + for i, v in enumerate(row): result_row[i] += float(v) new_data.append(result_row) return new_data - @staticmethod def concatenate_rows(datas): new_data = [] @@ -50,14 +52,11 @@ def concatenate_rows(datas): new_data.append(row) return new_data - - def aggregate_matrices(self,datas): + def aggregate_matrices(self, datas): tmp = self.concatenate_rows(datas) return self.aggregate_data(tmp) - - - def floats_equal(self,v1: float,v2: float): + def floats_equal(self, v1: float, v2: float): """ floats_equal compares two float values and allows an error equal to delta @@ -65,10 +64,9 @@ def floats_equal(self,v1: float,v2: float): v1 (float): the first value to compare v2 (float): the second value to compare """ - self.assertGreater(self.delta,abs(float(v1) - float(v2))) - + self.assertGreater(self.delta, 
abs(float(v1) - float(v2))) - def vals_equal(self,v1: Any,v2: Any): + def vals_equal(self, v1: Any, v2: Any): """ vals_equal compares equality between two values if the two values are declared as non equal, then it checks wether their float representations match @@ -78,9 +76,9 @@ def vals_equal(self,v1: Any,v2: Any): v2 (Any): the second value to compare """ if v1 != v2: - self.floats_equal(v1,v2) + self.floats_equal(v1, v2) - def compare_data(self,vals1: List[List[Any]],vals2: List[List[Any]]): + def compare_data(self, vals1: List[List[Any]], vals2: List[List[Any]]): """ compare_data compares two matrices and asserts equality for dimensions and values @@ -88,19 +86,26 @@ def compare_data(self,vals1: List[List[Any]],vals2: List[List[Any]]): vals1 (List[List[Any]]): the first matrix to compare vals2 (List[List[Any]]): the second matrix to compare """ - self.assertEqual(len(vals1),len(vals2)) - for i,row in enumerate(vals1): - self.assertEqual(len(row),len(vals2[i])) - for j,v in enumerate(row): - self.vals_equal(v,vals2[i][j]) - - - def compare_csv(self,expected_cols: List[str],actual_cols: List[str],expected_vals: List[List[Any]],actual_vals: List[List[Any]]): - self.compare_data([expected_cols],[actual_cols]) - self.compare_data(expected_vals,actual_vals) + self.assertEqual(len(vals1), len(vals2)) + for i, row in enumerate(vals1): + self.assertEqual(len(row), len(vals2[i])) + for j, v in enumerate(row): + self.vals_equal(v, vals2[i][j]) + + def compare_csv( + self, + expected_cols: List[str], + actual_cols: List[str], + expected_vals: List[List[Any]], + actual_vals: List[List[Any]], + ): + self.compare_data([expected_cols], [actual_cols]) + self.compare_data(expected_vals, actual_vals) @staticmethod - def get_csv(filename: str,with_header: bool = True) -> Tuple[List[str],List[List[str]]]: + def get_csv( + filename: str, with_header: bool = True + ) -> Tuple[List[str], List[List[str]]]: """ get_csv reads a csv @@ -113,8 +118,8 @@ def get_csv(filename: str,with_header: bool = True) -> Tuple[List[str],List[List """ columns = [] data = [] - with open(filename,encoding='utf-8') as csv_file: - reader = csv.reader(csv_file, delimiter=',') + with open(filename, encoding="utf-8") as csv_file: + reader = csv.reader(csv_file, delimiter=",") line_count = 0 for row in reader: if line_count == 0 and with_header: @@ -123,8 +128,7 @@ def get_csv(filename: str,with_header: bool = True) -> Tuple[List[str],List[List else: data.append(row) line_count += 1 - return columns,data - + return columns, data def setUp(self): """ @@ -140,32 +144,39 @@ def setUp(self): self.root_node = self.clients[0] self.generator = PatientGenerator() self.generator.seed("test") - self.generator.age_ranges = [[2,12],[13,20],[20,35],[35,55],[55,80],[80,110]] - self.generator.age_weights = [1,2,3,3,2,1] - self.generator.district_weights = [10,5,1,1,1] - self.generator.age_height_ranges = [10,20,50,80,110] - self.generator.age_height_averages = [100,165,185,165,160] - self.generator.gender_weights = [50,50,1,1] - self.generator.origin_weights = [5,10,1,1,10,10,1] - - - - -def partition_dataframe(df: pd.DataFrame,num: int = 3,seed: int=0) -> List[pd.DataFrame]: + self.generator.age_ranges = [ + [2, 12], + [13, 20], + [20, 35], + [35, 55], + [55, 80], + [80, 110], + ] + self.generator.age_weights = [1, 2, 3, 3, 2, 1] + self.generator.district_weights = [10, 5, 1, 1, 1] + self.generator.age_height_ranges = [10, 20, 50, 80, 110] + self.generator.age_height_averages = [100, 165, 185, 165, 160] + self.generator.gender_weights = [50, 50, 1, 
1] + self.generator.origin_weights = [5, 10, 1, 1, 10, 10, 1] + + +def partition_dataframe( + df: pd.DataFrame, num: int = 3, seed: int = 0 +) -> List[pd.DataFrame]: if num == 1: return [df] if num > len(df): raise ValueError("number of partitions cannot be higher then the row count") part_size = int(len(df) / num) - partition = df.sample(n=part_size,random_state=seed) + partition = df.sample(n=part_size, random_state=seed) remaining = df.drop(index=partition.index) - partitions = partition_dataframe(remaining,num-1,seed) + partitions = partition_dataframe(remaining, num - 1, seed) partitions.append(partition) return partitions def sigmoid(z: np.ndarray) -> np.ndarray: - ''' + """ sigmoid applies the sigmoid activation on z Args: @@ -173,11 +184,17 @@ def sigmoid(z: np.ndarray) -> np.ndarray: Returns: np.ndarray: the transformed array - ''' + """ return 1 / (1 + np.exp(-z)) -def regression_prediction(weights: np.ndarray,bias: np.ndarray,inputs: np.ndarray,activation: callable = None) -> np.ndarray: - ''' + +def regression_prediction( + weights: np.ndarray, + bias: np.ndarray, + inputs: np.ndarray, + activation: callable = None, +) -> np.ndarray: + """ regression_prediction computes the regression prediction given the weights, bias and input datasets. An additional optional activation function can be provided to be applied after the linear transformation @@ -189,8 +206,8 @@ def regression_prediction(weights: np.ndarray,bias: np.ndarray,inputs: np.ndarra Returns: np.ndarray: the numpy array of predicted values - ''' - z = np.dot(weights.T,inputs.T) + bias + """ + z = np.dot(weights.T, inputs.T) + bias if activation is not None: return activation(z) return z diff --git a/src/tuneinsight/utils/time_tools.py b/src/tuneinsight/utils/time_tools.py index fc1a9f6..974c314 100644 --- a/src/tuneinsight/utils/time_tools.py +++ b/src/tuneinsight/utils/time_tools.py @@ -5,7 +5,7 @@ nanosecond: int = 1 microsecond: int = 1000 * nanosecond millisecond: int = 1000 * microsecond -second : int = millisecond * 1000 +second: int = millisecond * 1000 minute: int = 60 * second hour: int = 60 * minute day: int = 24 * hour
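A few illustrative usage sketches for the new utility modules introduced in this changeset follow; none of this code is part of the diff itself. First, the decorator-stripping and dedenting behaviour of the new `utils/code.py` helper (the class and function names below are made up for the example):

```python
import inspect

from tuneinsight.utils.code import get_code


class Pipeline:
    # get_code drops the decorator line and removes the class-level indentation.
    @staticmethod
    def add_one(df):
        return df + 1


print(inspect.getsource(Pipeline.add_one))  # indented source, including "@staticmethod"
print(get_code(Pipeline.add_one))           # dedented body only, starting at "def add_one(df):"
```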
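Next, the array shapes expected by the plaintext GWAS reference implementation (`GWASLinReg`), using random data purely for illustration:

```python
import numpy as np

from tuneinsight.utils.plaintext_gwas import GWASLinReg

rng = np.random.default_rng(42)
p, v, f = 200, 10, 2                             # patients, variants, covariates
V = rng.choice([0.0, 0.5, 1.0], size=(v, p))     # (v x p) variant matrix
X = np.c_[np.ones(p), rng.normal(size=(p, f))]   # (p x (f+1)) covariates with intercept
Y = rng.normal(size=p)                           # length-p phenotype vector
p_values = GWASLinReg(V, X, Y)                   # one p-value per variant, shape (v,)
```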
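The new `utils/privacy.py` module exposes `RatioEstimator` for reasoning about ratios of Laplace-noised values. A short usage sketch (the counts and noise scale are illustrative, not taken from the changeset):

```python
from tuneinsight.utils.privacy import RatioEstimator

# e.g. a DP-released count of 42 matching records out of a DP-released total of 130,
# both perturbed with Laplace noise of scale 2.0 (illustrative numbers).
estimator = RatioEstimator(numerator=42, denominator=130, noise_scale=2.0)

print(estimator.observed)                        # ratio of the released (noisy) values
print(estimator.confidence_intervals([95, 99]))  # DataFrame: Percentage, CI (min), CI (max)
estimator.draw_distribution()                    # histogram of the simulated ratio distribution
```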
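Finally, a sketch of the Pandas-like syntax enabled by the new `utils/remotedf.py` module. How a `PreprocessingBuilder` is obtained in a real project is not shown in this changeset, so constructing one directly here is an assumption made purely for illustration:

```python
from tuneinsight.computations.preprocessing import PreprocessingBuilder
from tuneinsight.utils.remotedf import RemoteDataFrame, cut, get_dummies

builder = PreprocessingBuilder()  # assumption: direct construction, for the sketch only
df = RemoteDataFrame(builder)

df = df[df["age"] >= 18]          # recorded as a filter preprocessing step
df["count"] = 1                   # recorded as a counts step
df["age_group"] = cut(df, bins=[18, 40, 65, 120],
                      labels=["18-39", "40-64", "65+"], column="age")
df = get_dummies(df, "gender", prefix="gender", specified_types=["female", "male"])

# The same lines also run on a plain pandas.DataFrame, which is the point of the
# module: test the preprocessing locally, then swap in the RemoteDataFrame.
```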