diff --git a/README.md b/README.md index 47debb3..f79c342 100644 --- a/README.md +++ b/README.md @@ -21,17 +21,22 @@ pip install msfabricutils -### Fabric API -- Create, update, delete workspaces, lakehouses, notebooks using the Fabric REST API. +### Thin Fabric API wrapper +Integrate functions to manage workspaces, lakehouses, notebooks, environments, libraries, etc. +For more details, see the [Fabric API Reference](https://mrjsj.github.io/msfabricutils/core/fabric-api/) -### Command Line Interface -- Create, update, delete workspaces, lakehouses, notebooks using the built-in CLI. +### Fabric API as CLI +CLI for managing workspaces, lakehouses, notebooks, environments, libraries, etc. To get started, run: ```bash msfu --help ``` +![msfu CLI help](assets/images/cli-help.png) + + + ### Fabric DuckDB Connection Seamless integration between DuckDB and Microsoft Fabric Lakehouses for data exploration and analysis. diff --git a/assets/images/cli-help.png b/assets/images/cli-help.png new file mode 100644 index 0000000..7110980 Binary files /dev/null and b/assets/images/cli-help.png differ diff --git a/assets/images/cli-lakehouse-create.png b/assets/images/cli-lakehouse-create.png new file mode 100644 index 0000000..71dc982 Binary files /dev/null and b/assets/images/cli-lakehouse-create.png differ diff --git a/docs/core/fabric-api/capacity.md b/docs/core/fabric-api/capacity.md new file mode 100644 index 0000000..4259fed --- /dev/null +++ b/docs/core/fabric-api/capacity.md @@ -0,0 +1,9 @@ +# Capacity + +!!! warning + The functions are not fully tested yet. + Use with caution. + Please report any issues to the [GitHub repository](https://github.com/mrjsj/msfabricutils/issues). + +::: msfabricutils.rest_api.capacity + diff --git a/docs/core/fabric-api/dashboard.md b/docs/core/fabric-api/dashboard.md new file mode 100644 index 0000000..3c70000 --- --- /dev/null +++ b/docs/core/fabric-api/dashboard.md @@ -0,0 +1,9 @@ +# Dashboard + +!!! 
warning + The functions are not fully tested yet. + Use with caution. + Please report any issues to the [GitHub repository](https://github.com/mrjsj/msfabricutils/issues). + +::: msfabricutils.rest_api.dashboard + diff --git a/docs/core/fabric-api/data_pipeline.md b/docs/core/fabric-api/data_pipeline.md new file mode 100644 index 0000000..802e034 --- /dev/null +++ b/docs/core/fabric-api/data_pipeline.md @@ -0,0 +1,9 @@ +# Data Pipeline + +!!! warning + The functions are not fully tested yet. + Use with caution. + Please report any issues to the [GitHub repository](https://github.com/mrjsj/msfabricutils/issues). + +::: msfabricutils.rest_api.data_pipeline + diff --git a/docs/core/fabric-api/datamart.md b/docs/core/fabric-api/datamart.md new file mode 100644 index 0000000..9199e62 --- /dev/null +++ b/docs/core/fabric-api/datamart.md @@ -0,0 +1,9 @@ +# Datamart + +!!! warning + The functions are not fully tested yet. + Use with caution. + Please report any issues to the [GitHub repository](https://github.com/mrjsj/msfabricutils/issues). + +::: msfabricutils.rest_api.datamart + diff --git a/docs/core/fabric-api/environment.md b/docs/core/fabric-api/environment.md new file mode 100644 index 0000000..0dd4099 --- /dev/null +++ b/docs/core/fabric-api/environment.md @@ -0,0 +1,9 @@ +# Environment + +!!! warning + The functions are not fully tested yet. + Use with caution. + Please report any issues to the [GitHub repository](https://github.com/mrjsj/msfabricutils/issues). + +::: msfabricutils.rest_api.environment + diff --git a/docs/core/fabric-api/eventhouse.md b/docs/core/fabric-api/eventhouse.md new file mode 100644 index 0000000..af57f39 --- /dev/null +++ b/docs/core/fabric-api/eventhouse.md @@ -0,0 +1,9 @@ +# Eventhouse + +!!! warning + The functions are not fully tested yet. + Use with caution. + Please report any issues to the [GitHub repository](https://github.com/mrjsj/msfabricutils/issues). 
+ +::: msfabricutils.rest_api.eventhouse + diff --git a/docs/core/fabric-api/eventstream.md b/docs/core/fabric-api/eventstream.md new file mode 100644 index 0000000..00f5524 --- /dev/null +++ b/docs/core/fabric-api/eventstream.md @@ -0,0 +1,9 @@ +# Eventstream + +!!! warning + The functions are not fully tested yet. + Use with caution. + Please report any issues to the [GitHub repository](https://github.com/mrjsj/msfabricutils/issues). + +::: msfabricutils.rest_api.eventstream + diff --git a/docs/core/fabric-api/index.md b/docs/core/fabric-api/index.md index 2a38ed6..dfd8dec 100644 --- a/docs/core/fabric-api/index.md +++ b/docs/core/fabric-api/index.md @@ -1,4 +1,14 @@ # Fabric API -A collection of functions to interact with the Fabric API. Automatically handles pagination and authentication. -The functions can either be called with an `id` or `name` parameter, however it is recommended to use the `id` as using the name requires more API requests, and is thus slower. \ No newline at end of file +A collection of functions to interact with the Fabric API. I tried to mimic the API as closely as possible, however there are some differences, especially in relation to item definitions. + +While the APIs with item definitions take multiple item parts as base64 encoded strings, these wrapper functions take a path to the folder containing the item parts, e.g. + +- `path/to/myReport.Report` +- `path/to/mySemanticModel.SemanticModel` +- `path/to/myNotebook.Notebook`. + +!!! warning + The functions are not fully tested yet. + Use with caution. + Please report any issues to the [GitHub repository](https://github.com/mrjsj/msfabricutils/issues). \ No newline at end of file diff --git a/docs/core/fabric-api/kql_dashboard.md b/docs/core/fabric-api/kql_dashboard.md new file mode 100644 index 0000000..051f588 --- /dev/null +++ b/docs/core/fabric-api/kql_dashboard.md @@ -0,0 +1,9 @@ +# Kql Dashboard + +!!! warning + The functions are not fully tested yet. + Use with caution. 
+ Please report any issues to the [GitHub repository](https://github.com/mrjsj/msfabricutils/issues). + +::: msfabricutils.rest_api.kql_dashboard + diff --git a/docs/core/fabric-api/kql_database.md b/docs/core/fabric-api/kql_database.md new file mode 100644 index 0000000..7e4a5b8 --- /dev/null +++ b/docs/core/fabric-api/kql_database.md @@ -0,0 +1,9 @@ +# Kql Database + +!!! warning + The functions are not fully tested yet. + Use with caution. + Please report any issues to the [GitHub repository](https://github.com/mrjsj/msfabricutils/issues). + +::: msfabricutils.rest_api.kql_database + diff --git a/docs/core/fabric-api/kql_queryset.md b/docs/core/fabric-api/kql_queryset.md new file mode 100644 index 0000000..46f70ff --- /dev/null +++ b/docs/core/fabric-api/kql_queryset.md @@ -0,0 +1,9 @@ +# Kql Queryset + +!!! warning + The functions are not fully tested yet. + Use with caution. + Please report any issues to the [GitHub repository](https://github.com/mrjsj/msfabricutils/issues). + +::: msfabricutils.rest_api.kql_queryset + diff --git a/docs/core/fabric-api/lakehouse.md b/docs/core/fabric-api/lakehouse.md index e47e046..7430f99 100644 --- a/docs/core/fabric-api/lakehouse.md +++ b/docs/core/fabric-api/lakehouse.md @@ -1,3 +1,9 @@ # Lakehouse -::: msfabricutils.core.lakehouse \ No newline at end of file +!!! warning + The functions are not fully tested yet. + Use with caution. + Please report any issues to the [GitHub repository](https://github.com/mrjsj/msfabricutils/issues). + +::: msfabricutils.rest_api.lakehouse + diff --git a/docs/core/fabric-api/long_running_operation.md b/docs/core/fabric-api/long_running_operation.md new file mode 100644 index 0000000..37bc1b6 --- /dev/null +++ b/docs/core/fabric-api/long_running_operation.md @@ -0,0 +1,9 @@ +# Long Running Operation + +!!! warning + The functions are not fully tested yet. + Use with caution. + Please report any issues to the [GitHub repository](https://github.com/mrjsj/msfabricutils/issues). 
+ +::: msfabricutils.rest_api.long_running_operation + diff --git a/docs/core/fabric-api/mirrored_database.md b/docs/core/fabric-api/mirrored_database.md new file mode 100644 index 0000000..c8d2073 --- /dev/null +++ b/docs/core/fabric-api/mirrored_database.md @@ -0,0 +1,9 @@ +# Mirrored Database + +!!! warning + The functions are not fully tested yet. + Use with caution. + Please report any issues to the [GitHub repository](https://github.com/mrjsj/msfabricutils/issues). + +::: msfabricutils.rest_api.mirrored_database + diff --git a/docs/core/fabric-api/mirrored_warehouse.md b/docs/core/fabric-api/mirrored_warehouse.md new file mode 100644 index 0000000..4eab2db --- /dev/null +++ b/docs/core/fabric-api/mirrored_warehouse.md @@ -0,0 +1,9 @@ +# Mirrored Warehouse + +!!! warning + The functions are not fully tested yet. + Use with caution. + Please report any issues to the [GitHub repository](https://github.com/mrjsj/msfabricutils/issues). + +::: msfabricutils.rest_api.mirrored_warehouse + diff --git a/docs/core/fabric-api/ml_experiment.md b/docs/core/fabric-api/ml_experiment.md new file mode 100644 index 0000000..a44f509 --- /dev/null +++ b/docs/core/fabric-api/ml_experiment.md @@ -0,0 +1,9 @@ +# Ml Experiment + +!!! warning + The functions are not fully tested yet. + Use with caution. + Please report any issues to the [GitHub repository](https://github.com/mrjsj/msfabricutils/issues). + +::: msfabricutils.rest_api.ml_experiment + diff --git a/docs/core/fabric-api/ml_model.md b/docs/core/fabric-api/ml_model.md new file mode 100644 index 0000000..2b2a04e --- /dev/null +++ b/docs/core/fabric-api/ml_model.md @@ -0,0 +1,9 @@ +# Ml Model + +!!! warning + The functions are not fully tested yet. + Use with caution. + Please report any issues to the [GitHub repository](https://github.com/mrjsj/msfabricutils/issues). 
+ +::: msfabricutils.rest_api.ml_model + diff --git a/docs/core/fabric-api/notebook.md b/docs/core/fabric-api/notebook.md index c744c9e..e1a6c3b 100644 --- a/docs/core/fabric-api/notebook.md +++ b/docs/core/fabric-api/notebook.md @@ -1,3 +1,9 @@ # Notebook -::: msfabricutils.core.notebook \ No newline at end of file +!!! warning + The functions are not fully tested yet. + Use with caution. + Please report any issues to the [GitHub repository](https://github.com/mrjsj/msfabricutils/issues). + +::: msfabricutils.rest_api.notebook + diff --git a/docs/core/fabric-api/paginated_report.md b/docs/core/fabric-api/paginated_report.md new file mode 100644 index 0000000..7faf347 --- /dev/null +++ b/docs/core/fabric-api/paginated_report.md @@ -0,0 +1,9 @@ +# Paginated Report + +!!! warning + The functions are not fully tested yet. + Use with caution. + Please report any issues to the [GitHub repository](https://github.com/mrjsj/msfabricutils/issues). + +::: msfabricutils.rest_api.paginated_report + diff --git a/docs/core/fabric-api/reflex.md b/docs/core/fabric-api/reflex.md new file mode 100644 index 0000000..d888ea4 --- /dev/null +++ b/docs/core/fabric-api/reflex.md @@ -0,0 +1,9 @@ +# Reflex + +!!! warning + The functions are not fully tested yet. + Use with caution. + Please report any issues to the [GitHub repository](https://github.com/mrjsj/msfabricutils/issues). + +::: msfabricutils.rest_api.reflex + diff --git a/docs/core/fabric-api/report.md b/docs/core/fabric-api/report.md new file mode 100644 index 0000000..e5cb1ed --- /dev/null +++ b/docs/core/fabric-api/report.md @@ -0,0 +1,9 @@ +# Report + +!!! warning + The functions are not fully tested yet. + Use with caution. + Please report any issues to the [GitHub repository](https://github.com/mrjsj/msfabricutils/issues). 
+ +::: msfabricutils.rest_api.report + diff --git a/docs/core/fabric-api/semantic_model.md b/docs/core/fabric-api/semantic_model.md new file mode 100644 index 0000000..099823a --- /dev/null +++ b/docs/core/fabric-api/semantic_model.md @@ -0,0 +1,9 @@ +# Semantic Model + +!!! warning + The functions are not fully tested yet. + Use with caution. + Please report any issues to the [GitHub repository](https://github.com/mrjsj/msfabricutils/issues). + +::: msfabricutils.rest_api.semantic_model + diff --git a/docs/core/fabric-api/spark_job_definition.md b/docs/core/fabric-api/spark_job_definition.md new file mode 100644 index 0000000..742c24e --- /dev/null +++ b/docs/core/fabric-api/spark_job_definition.md @@ -0,0 +1,9 @@ +# Spark Job Definition + +!!! warning + The functions are not fully tested yet. + Use with caution. + Please report any issues to the [GitHub repository](https://github.com/mrjsj/msfabricutils/issues). + +::: msfabricutils.rest_api.spark_job_definition + diff --git a/docs/core/fabric-api/sql_endpoint.md b/docs/core/fabric-api/sql_endpoint.md index 781e870..ef06fcf 100644 --- a/docs/core/fabric-api/sql_endpoint.md +++ b/docs/core/fabric-api/sql_endpoint.md @@ -1,3 +1,9 @@ -# SQL Endpoint +# Sql Endpoint + +!!! warning + The functions are not fully tested yet. + Use with caution. + Please report any issues to the [GitHub repository](https://github.com/mrjsj/msfabricutils/issues). + +::: msfabricutils.rest_api.sql_endpoint -::: msfabricutils.core.sql_endpoint \ No newline at end of file diff --git a/docs/core/fabric-api/warehouse.md b/docs/core/fabric-api/warehouse.md new file mode 100644 index 0000000..55e9f68 --- /dev/null +++ b/docs/core/fabric-api/warehouse.md @@ -0,0 +1,9 @@ +# Warehouse + +!!! warning + The functions are not fully tested yet. + Use with caution. + Please report any issues to the [GitHub repository](https://github.com/mrjsj/msfabricutils/issues). 
+ +::: msfabricutils.rest_api.warehouse + diff --git a/docs/core/fabric-api/workspace.md b/docs/core/fabric-api/workspace.md index 27e90c2..3c4694b 100644 --- a/docs/core/fabric-api/workspace.md +++ b/docs/core/fabric-api/workspace.md @@ -1,3 +1,9 @@ # Workspace -::: msfabricutils.core.workspace \ No newline at end of file +!!! warning + The functions are not fully tested yet. + Use with caution. + Please report any issues to the [GitHub repository](https://github.com/mrjsj/msfabricutils/issues). + +::: msfabricutils.rest_api.workspace + diff --git a/docs/usage/cli.md b/docs/usage/cli.md index 130ca1c..4cd6f7c 100644 --- a/docs/usage/cli.md +++ b/docs/usage/cli.md @@ -4,23 +4,15 @@ The CLI is a way to interact with the Microsoft Fabric REST API. It includes com For complete documentation, run `msfu --help`. -## Examples - -### Workspace +!!! warning + The functions are not fully tested yet. + Use with caution. + Please report any issues to the [GitHub repository](https://github.com/mrjsj/msfabricutils/issues). 
-```bash -msfu workspace create --name "My Workspace" --description "My workspace description" -``` -### Lakehouse - -```bash -msfu lakehouse create --name "My Lakehouse" --workspace-id "beefbeef-beef-beef-beef-beefbeefbeef" --enable-schemas -``` +## Examples -### Notebook +![msfu CLI help](/assets/images/cli-help.png) -```bash -msfu notebook create --path "path/to/notebook.Notebook" --workspace-id "beefbeef-beef-beef-beef-beefbeefbeef" -``` +![msfu CLI lakehouse create](/assets/images/cli-lakehouse-create.png) diff --git a/mkdocs.yml b/mkdocs.yml index b56a422..6396447 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -34,6 +34,7 @@ theme: plugins: - search: lang: en + - include_dir_to_nav - gen-files: scripts: - docs/gen_ref_pages.py @@ -67,12 +68,7 @@ nav: - API Reference: - Core: - core/authentication.md - - Fabric API: - - core/fabric-api/index.md - - core/fabric-api/workspace.md - - core/fabric-api/lakehouse.md - - core/fabric-api/notebook.md - - core/fabric-api/sql_endpoint.md + - Fabric API: core/fabric-api - ETL: - etl/index.md - etl/read.md diff --git a/scripts/cli/commands/capacity.yaml b/scripts/cli/commands/capacity.yaml new file mode 100644 index 0000000..0a29e26 --- /dev/null +++ b/scripts/cli/commands/capacity.yaml @@ -0,0 +1,13 @@ +command: capacity +subcommands: + list: + endpoint: capacities + method: get + description: List capacities for a workspace. + panel: Capacity + args: + - name: continuation-token + type: str + required: false + arg_type: query + description: A token for retrieving the next page of results. diff --git a/scripts/cli/commands/dashboard.yaml b/scripts/cli/commands/dashboard.yaml new file mode 100644 index 0000000..3172015 --- /dev/null +++ b/scripts/cli/commands/dashboard.yaml @@ -0,0 +1,18 @@ +command: dashboard +subcommands: + list: + endpoint: workspaces/{workspace_id}/dashboards + method: get + description: List dashboards for a workspace. 
+ panel: Dashboard + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to list dashboards for. + - name: continuation-token + type: str + required: false + arg_type: query + description: A token for retrieving the next page of results. diff --git a/scripts/cli/commands/data_pipeline.yaml b/scripts/cli/commands/data_pipeline.yaml new file mode 100644 index 0000000..1a050a5 --- /dev/null +++ b/scripts/cli/commands/data_pipeline.yaml @@ -0,0 +1,109 @@ +command: data-pipeline +subcommands: + create: + endpoint: workspaces/{workspace_id}/dataPipelines + method: post + description: Create a data pipeline. + panel: Data Pipeline + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to create the data pipeline in. + - name: display-name + type: str + required: true + arg_type: payload + description: The display name of the data pipeline. + - name: description + type: str + required: false + arg_type: payload + description: The description of the data pipeline. + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. + + get: + endpoint: workspaces/{workspace_id}/dataPipelines/{data_pipeline_id} + method: get + description: Get a data pipeline. + panel: Data Pipeline + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to get the data pipeline from. + - name: data-pipeline-id + type: str + required: true + arg_type: path + description: The id of the data pipeline to get. + + list: + endpoint: workspaces/{workspace_id}/dataPipelines + method: get + description: List data pipelines for a workspace. + panel: Data Pipeline + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to list data pipelines for. 
+ - name: continuation-token + type: str + required: false + arg_type: query + description: A token for retrieving the next page of results. + + update: + endpoint: workspaces/{workspace_id}/dataPipelines/{data_pipeline_id} + method: patch + description: Update a data pipeline. + panel: Data Pipeline + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to update the data pipeline in. + - name: data-pipeline-id + type: str + required: true + arg_type: path + description: The id of the data pipeline to update. + - name: display-name + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The display name of the data pipeline. + - name: description + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The description of the data pipeline. + + delete: + endpoint: workspaces/{workspace_id}/dataPipelines/{data_pipeline_id} + method: delete + description: Delete a data pipeline. + panel: Data Pipeline + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to delete the data pipeline from. + - name: data-pipeline-id + type: str + required: true + arg_type: path + description: The id of the data pipeline to delete. diff --git a/scripts/cli/commands/datamart.yaml b/scripts/cli/commands/datamart.yaml new file mode 100644 index 0000000..2d85a5d --- /dev/null +++ b/scripts/cli/commands/datamart.yaml @@ -0,0 +1,18 @@ +command: datamart +subcommands: + list: + endpoint: workspaces/{workspace_id}/datamarts + method: get + description: List datamarts for a workspace. + panel: Datamart + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to list datamarts for. + - name: continuation-token + type: str + required: false + arg_type: query + description: A token for retrieving the next page of results. 
diff --git a/scripts/cli/commands/environment.yaml b/scripts/cli/commands/environment.yaml new file mode 100644 index 0000000..b3f0a48 --- /dev/null +++ b/scripts/cli/commands/environment.yaml @@ -0,0 +1,338 @@ +command: environment +subcommands: + create: + endpoint: workspaces/{workspace_id}/environments + method: post + panel: Environment + description: Create an environment. + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to create the environment in. + - name: display-name + type: str + required: true + arg_type: payload + description: The display name of the environment. + - name: description + type: str + required: false + arg_type: payload + description: The description of the environment. + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. + + get: + endpoint: workspaces/{workspace_id}/environments/{environment_id} + method: get + description: Get an environment. + panel: Environment + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to get the environment from. + - name: environment-id + type: str + required: true + arg_type: path + description: The id of the environment to get. + + list: + endpoint: workspaces/{workspace_id}/environments + method: get + description: List environments for a workspace. + panel: Environment + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to list environments for. + - name: continuation-token + type: str + required: false + arg_type: query + description: A token for retrieving the next page of results. + + update: + endpoint: workspaces/{workspace_id}/environments/{environment_id} + method: patch + description: Update an environment. 
+ panel: Environment + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to update the environment in. + - name: environment-id + type: str + required: true + arg_type: path + description: The id of the environment to update. + - name: display-name + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The display name of the environment. + - name: description + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The description of the environment. + + delete: + endpoint: workspaces/{workspace_id}/environments/{environment_id} + method: delete + description: Delete an environment. + panel: Environment + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to delete the environment from. + - name: environment-id + type: str + required: true + arg_type: path + description: The id of the environment to delete. + + get-spark-compute-published-settings: + endpoint: workspaces/{workspace_id}/environments/{environment_id}/sparkComputePublishedSettings + method: get + description: Get spark compute published settings for an environment. + panel: Spark Compute + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to get the spark compute published settings for. + - name: environment-id + type: str + required: true + arg_type: path + description: The id of the environment to get the spark compute published settings for. + + get-spark-compute-staging-settings: + endpoint: workspaces/{workspace_id}/environments/{environment_id}/staging/sparkcompute + method: get + description: Get spark compute staging settings for an environment. 
+ panel: Spark Compute + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to get the spark compute staging settings for. + - name: environment-id + type: str + required: true + arg_type: path + description: The id of the environment to get the spark compute staging settings for. + + update-spark-compute-staging-settings: + endpoint: workspaces/{workspace_id}/environments/{environment_id}/staging/sparkcompute + method: patch + description: Update spark compute staging settings for an environment. + panel: Spark Compute + custom_payload: + value: | + { + "instancePool": { + "name": instance_pool_name, + "type": instance_pool_type + }, + "driverCores": driver_cores, + "driverMemory": driver_memory, + "executorCores": executor_cores, + "executorMemory": executor_memory, + "dynamicExecutorAllocation": { + "enabled": dynamic_executor_allocation_enabled, + "minExecutors": min_executors, + "maxExecutors": max_executors + }, + "sparkProperties": { + "spark.acls.enable": spark_acls_enable, + "spark.admin.acls": spark_admin_acls + }, + "runtimeVersion": runtime_version + } + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to update the spark compute staging settings for. + - name: environment-id + type: str + required: true + arg_type: path + description: The id of the environment to update the spark compute staging settings for. + - name: instance-pool-name + type: str + required: false + description: The name of the instance pool to use for Spark Compute settings. + - name: instance-pool-type + type: str + required: false + description: The type of the instance pool to use for Spark Compute settings. + - name: driver-cores + type: int + required: false + description: The number of cores to use for the driver. + - name: driver-memory + type: str + required: false + description: The memory to use for the driver. 
+ - name: executor-cores + type: int + required: false + description: The number of cores to use for the executors. + - name: executor-memory + type: str + required: false + description: The memory to use for the executors. + - name: dynamic-executor-allocation-enabled + type: bool + required: false + description: Whether to enable dynamic executor allocation. + - name: min-executors + type: int + required: false + description: The minimum number of executors to use for dynamic executor allocation. + - name: max-executors + type: int + required: false + description: The maximum number of executors to use for dynamic executor allocation. + - name: spark-acls-enable + type: str + required: false + description: Whether to enable Spark ACLs. + - name: spark-admin-acls + type: str + required: false + description: The admin ACLs to use for Spark. + - name: runtime-version + type: str + required: false + description: The runtime version to use for Spark Compute settings. + + get-published-libraries: + endpoint: workspaces/{workspace_id}/environments/{environment_id}/libraries + method: get + description: Get published libraries for an environment. + panel: Libraries + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to get the published libraries for. + - name: environment-id + type: str + required: true + arg_type: path + description: The id of the environment to get the published libraries for. + + + get-staging-libraries: + endpoint: workspaces/{workspace_id}/environments/{environment_id}/staging/libraries + method: get + description: Get staging libraries for an environment. + panel: Libraries + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to get the staging libraries for. + - name: environment-id + type: str + required: true + arg_type: path + description: The id of the environment to get the staging libraries for. 
+ + delete-staging-library: + endpoint: workspaces/{workspace_id}/environments/{environment_id}/staging/libraries + method: delete + description: Delete a staging library for an environment. + panel: Libraries + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to delete the staging library from. + - name: environment-id + type: str + required: true + arg_type: path + description: The id of the environment to delete the staging library from. + - name: library-to-delete + type: str + required: true + arg_type: query + description: The library file to delete. Must include the file extension. + + upload-staging-library: + endpoint: workspaces/{workspace_id}/environments/{environment_id}/staging/libraries + method: post + description: Upload a staging library for an environment. + panel: Libraries + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to upload the staged library to. + - name: environment-id + type: str + required: true + arg_type: path + description: The id of the environment to upload the staging library to. + + publish: + endpoint: workspaces/{workspace_id}/environments/{environment_id}/staging/publish + method: post + description: Publish an environment. + panel: Environment + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to publish the environment for. + - name: environment-id + type: str + required: true + arg_type: path + description: The id of the environment to publish. + + cancel-publish: + endpoint: workspaces/{workspace_id}/environments/{environment_id}/staging/cancelPublish + method: post + description: Cancel a publish operation for an environment. + panel: Environment + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to cancel the publish operation for. 
+ - name: environment-id + type: str + required: true + arg_type: path + description: The id of the environment to cancel the publish operation for. diff --git a/scripts/cli/commands/eventhouse.yaml b/scripts/cli/commands/eventhouse.yaml new file mode 100644 index 0000000..15e2438 --- /dev/null +++ b/scripts/cli/commands/eventhouse.yaml @@ -0,0 +1,215 @@ +command: eventhouse +subcommands: + create: + endpoint: workspaces/{workspace_id}/eventhouses + method: post + description: Create an eventhouse. + panel: Eventhouse + custom_payload: + value: | + "definition": { + "parts": [ + { + "path": "EventhouseProperties.json", + "payload": EventhouseProperties, + "payloadType": "InlineBase64" + }, + { + "path": ".platform", + "payload": platform, + "payloadType": "InlineBase64" + } + ] + } + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to create the eventhouse in. + - name: display-name + type: str + required: true + arg_type: payload + description: The display name of the eventhouse. + - name: eventhouse-path + type: str + required: true + arg_type: load_content + content_files: + - EventhouseProperties.json + - .platform + description: The path to the eventhouse to load content from. + - name: description + type: str + required: false + arg_type: payload + description: The description of the eventhouse. + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. + + get: + endpoint: workspaces/{workspace_id}/eventhouses/{eventhouse_id} + method: get + description: Get an eventhouse. + panel: Eventhouse + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to get the eventhouse from. + - name: eventhouse-id + type: str + required: true + arg_type: path + description: The id of the eventhouse to get. 
+ + list: + endpoint: workspaces/{workspace_id}/eventhouses + method: get + description: List eventhouses for a workspace. + panel: Eventhouse + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to list eventhouses for. + - name: continuation-token + type: str + required: false + arg_type: query + description: A token for retrieving the next page of results. + + update: + endpoint: workspaces/{workspace_id}/eventhouses/{eventhouse_id} + method: patch + description: Update an eventhouse. + panel: Eventhouse + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to update. + - name: eventhouse-id + type: str + required: true + arg_type: path + description: The id of the eventhouse to update. + - name: display-name + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The display name of the eventhouse. + - name: description + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The description of the eventhouse. + + delete: + endpoint: workspaces/{workspace_id}/eventhouses/{eventhouse_id} + method: delete + description: Delete an eventhouse. + panel: Eventhouse + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to delete. + - name: eventhouse-id + type: str + required: true + arg_type: path + description: The id of the eventhouse to delete. + + get-definition: + endpoint: workspaces/{workspace_id}/eventhouses/{eventhouse_id}/getDefinition + method: get + description: Get the definition of an eventhouse. + panel: EventhouseDefinition + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to get the eventhouse definition from. 
+ - name: eventhouse-id + type: str + required: true + arg_type: path + description: The id of the eventhouse to get the definition from. + - name: format + type: str + required: false + arg_type: query + description: The format of the Eventhouse definition. Supported format is \"eventhouse\". + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. + + update-definition: + endpoint: workspaces/{workspace_id}/eventhouses/{eventhouse_id}/updateDefinition + method: post + description: Update the definition of an eventhouse. + panel: EventhouseDefinition + custom_payload: + value: | + "definition": { + "parts": [ + { + "path": "EventhouseProperties.json", + "payload": EventhouseProperties, + "payloadType": "InlineBase64" + }, + { + "path": ".platform", + "payload": platform, + "payloadType": "InlineBase64" + } + ] + } + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to update. + - name: eventhouse-id + type: str + required: true + arg_type: path + description: The id of the eventhouse to update. + - name: eventhouse-path + type: str + required: true + arg_type: load_content + content_files: + - EventhouseProperties.json + - .platform + description: The path to the eventhouse to load content from. + - name: update-metadata + type: bool + required: false + default: false + arg_type: query + description: When set to true, the item's metadata is updated using the metadata in the .platform file. + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. 
+ diff --git a/scripts/cli/commands/eventstream.yaml b/scripts/cli/commands/eventstream.yaml new file mode 100644 index 0000000..fadb621 --- /dev/null +++ b/scripts/cli/commands/eventstream.yaml @@ -0,0 +1,215 @@ +command: eventstream +subcommands: + create: + endpoint: workspaces/{workspace_id}/eventstreams + method: post + description: Create an eventstream. + panel: Eventstream + custom_payload: + value: | + "definition": { + "parts": [ + { + "path": "eventstream.json", + "payload": eventstream, + "payloadType": "InlineBase64" + }, + { + "path": ".platform", + "payload": platform, + "payloadType": "InlineBase64" + } + ] + } + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to create the eventstream in. + - name: display-name + type: str + required: true + arg_type: payload + description: The display name of the eventstream. + - name: eventstream-path + type: str + required: true + arg_type: load_content + content_files: + - eventstream.json + - .platform + description: The path to the eventstream to load content from. + - name: description + type: str + required: false + arg_type: payload + description: The description of the eventstream. + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. + + get: + endpoint: workspaces/{workspace_id}/eventstreams/{eventstream_id} + method: get + description: Get an eventstream. + panel: Eventstream + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to get the eventstream from. + - name: eventstream-id + type: str + required: true + arg_type: path + description: The id of the eventstream to get. + + list: + endpoint: workspaces/{workspace_id}/eventstreams + method: get + description: List eventstreams for a workspace. 
+ panel: Eventstream + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to list eventstreams for. + - name: continuation-token + type: str + required: false + arg_type: query + description: A token for retrieving the next page of results. + + update: + endpoint: workspaces/{workspace_id}/eventstreams/{eventstream_id} + method: patch + description: Update an eventstream. + panel: Eventstream + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to update. + - name: eventstream-id + type: str + required: true + arg_type: path + description: The id of the eventstream to update. + - name: display-name + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The display name of the eventstream. + - name: description + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The description of the eventstream. + + delete: + endpoint: workspaces/{workspace_id}/eventstreams/{eventstream_id} + method: delete + description: Delete an eventstream. + panel: Eventstream + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to delete. + - name: eventstream-id + type: str + required: true + arg_type: path + description: The id of the eventstream to delete. + + get-definition: + endpoint: workspaces/{workspace_id}/eventstreams/{eventstream_id}/getDefinition + method: get + description: Get the definition of an eventstream. + panel: EventstreamDefinition + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to get the eventstream definition from. + - name: eventstream-id + type: str + required: true + arg_type: path + description: The id of the eventstream to get the definition from. 
+ - name: format + type: str + required: false + arg_type: query + description: The format of the Eventstream definition. Supported format is \"eventstream\". + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. + + update-definition: + endpoint: workspaces/{workspace_id}/eventstreams/{eventstream_id}/updateDefinition + method: post + description: Update the definition of an eventstream. + panel: EventstreamDefinition + custom_payload: + value: | + "definition": { + "parts": [ + { + "path": "eventstream.json", + "payload": eventstream, + "payloadType": "InlineBase64" + }, + { + "path": ".platform", + "payload": platform, + "payloadType": "InlineBase64" + } + ] + } + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to update. + - name: eventstream-id + type: str + required: true + arg_type: path + description: The id of the eventstream to update. + - name: eventstream-path + type: str + required: true + arg_type: load_content + content_files: + - eventstream.json + - .platform + description: The path to the eventstream to load content from. + - name: update-metadata + type: bool + required: false + default: false + arg_type: query + description: When set to true, the item's metadata is updated using the metadata in the .platform file. + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. + diff --git a/scripts/cli/commands/kql_dashboard.yaml b/scripts/cli/commands/kql_dashboard.yaml new file mode 100644 index 0000000..195a450 --- /dev/null +++ b/scripts/cli/commands/kql_dashboard.yaml @@ -0,0 +1,215 @@ +command: kql-dashboard +subcommands: + create: + endpoint: workspaces/{workspace_id}/kqlDashboards + method: post + description: Create a kql dashboard. 
+ panel: KqlDashboard + custom_payload: + value: | + "definition": { + "parts": [ + { + "path": "RealTimeDashboard.json", + "payload": RealTimeDashboard, + "payloadType": "InlineBase64" + }, + { + "path": ".platform", + "payload": platform, + "payloadType": "InlineBase64" + } + ] + } + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to create the kql dashboard in. + - name: display-name + type: str + required: true + arg_type: payload + description: The display name of the kql dashboard. + - name: kql-dashboard-path + type: str + required: true + arg_type: load_content + content_files: + - RealTimeDashboard.json + - .platform + description: The path to the kql dashboard to load content from. + - name: description + type: str + required: false + arg_type: payload + description: The description of the kql dashboard. + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. + + get: + endpoint: workspaces/{workspace_id}/kqlDashboards/{kql_dashboard_id} + method: get + description: Get a kql dashboard. + panel: KqlDashboard + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to get the kql dashboard from. + - name: kql-dashboard-id + type: str + required: true + arg_type: path + description: The id of the kql dashboard to get. + + list: + endpoint: workspaces/{workspace_id}/kqlDashboards + method: get + description: List kql dashboards for a workspace. + panel: KqlDashboard + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to list kql dashboards for. + - name: continuation-token + type: str + required: false + arg_type: query + description: A token for retrieving the next page of results. 
+ + update: + endpoint: workspaces/{workspace_id}/kqlDashboards/{kql_dashboard_id} + method: patch + description: Update a kql dashboard. + panel: KqlDashboard + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to update. + - name: kql-dashboard-id + type: str + required: true + arg_type: path + description: The id of the kql dashboard to update. + - name: display-name + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The display name of the kql dashboard. + - name: description + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The description of the kql dashboard. + + delete: + endpoint: workspaces/{workspace_id}/kqlDashboards/{kql_dashboard_id} + method: delete + description: Delete a kql dashboard. + panel: KqlDashboard + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to delete. + - name: kql-dashboard-id + type: str + required: true + arg_type: path + description: The id of the kql dashboard to delete. + + get-definition: + endpoint: workspaces/{workspace_id}/kqlDashboards/{kql_dashboard_id}/getDefinition + method: get + description: Get the definition of a kql dashboard. + panel: KqlDashboardDefinition + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to get the kql dashboard definition from. + - name: kql-dashboard-id + type: str + required: true + arg_type: path + description: The id of the kql dashboard to get the definition from. + # - name: format + # type: str + # required: false + # arg_type: query + # description: The format of the KqlDashboard definition. Supported format is \"kqlDashboard\". + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. 
+ + update-definition: + endpoint: workspaces/{workspace_id}/kqlDashboards/{kql_dashboard_id}/updateDefinition + method: post + description: Update the definition of a kql dashboard. + panel: KqlDashboardDefinition + custom_payload: + value: | + "definition": { + "parts": [ + { + "path": "RealTimeDashboard.json", + "payload": RealTimeDashboard, + "payloadType": "InlineBase64" + }, + { + "path": ".platform", + "payload": platform, + "payloadType": "InlineBase64" + } + ] + } + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to update. + - name: kql-dashboard-id + type: str + required: true + arg_type: path + description: The id of the kql dashboard to update. + - name: kql-dashboard-path + type: str + required: true + arg_type: load_content + content_files: + - RealTimeDashboard.json + - .platform + description: The path to the kql dashboard to load content from. + - name: update-metadata + type: bool + required: false + default: false + arg_type: query + description: When set to true, the item's metadata is updated using the metadata in the .platform file. + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. + diff --git a/scripts/cli/commands/kql_database.yaml b/scripts/cli/commands/kql_database.yaml new file mode 100644 index 0000000..1ea7be0 --- /dev/null +++ b/scripts/cli/commands/kql_database.yaml @@ -0,0 +1,227 @@ +command: kql-database +subcommands: + create: + endpoint: workspaces/{workspace_id}/kqlDatabases + method: post + description: Create a kql database. 
+ panel: KqlDatabase + custom_payload: + value: | + "definition": { + "parts": [ + { + "path": "DatabaseProperties.json", + "payload": DatabaseProperties, + "payloadType": "InlineBase64" + }, + { + "path": "DatabaseSchema.kql", + "payload": DatabaseSchema, + "payloadType": "InlineBase64" + }, + { + "path": ".platform", + "payload": platform, + "payloadType": "InlineBase64" + } + ] + } + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to create the kql database in. + - name: display-name + type: str + required: true + arg_type: payload + description: The display name of the kql database. + - name: kql-database-path + type: str + required: true + arg_type: load_content + content_files: + - DatabaseProperties.json + - DatabaseSchema.kql + - .platform + description: The path to the kql database to load content from. + - name: description + type: str + required: false + arg_type: payload + description: The description of the kql database. + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. + + get: + endpoint: workspaces/{workspace_id}/kqlDatabases/{kql_database_id} + method: get + description: Get a kql database. + panel: KqlDatabase + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to get the kql database from. + - name: kql-database-id + type: str + required: true + arg_type: path + description: The id of the kql database to get. + + list: + endpoint: workspaces/{workspace_id}/kqlDatabases + method: get + description: List kql databases for a workspace. + panel: KqlDatabase + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to list kql databases for. 
+ - name: continuation-token + type: str + required: false + arg_type: query + description: A token for retrieving the next page of results. + + update: + endpoint: workspaces/{workspace_id}/kqlDatabases/{kql_database_id} + method: patch + description: Update a kql database. + panel: KqlDatabase + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to update. + - name: kql-database-id + type: str + required: true + arg_type: path + description: The id of the kql database to update. + - name: display-name + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The display name of the kql database. + - name: description + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The description of the kql database. + + delete: + endpoint: workspaces/{workspace_id}/kqlDatabases/{kql_database_id} + method: delete + description: Delete a kql database. + panel: KqlDatabase + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to delete. + - name: kql-database-id + type: str + required: true + arg_type: path + description: The id of the kql database to delete. + + get-definition: + endpoint: workspaces/{workspace_id}/kqlDatabases/{kql_database_id}/getDefinition + method: get + description: Get the definition of a kql database. + panel: KqlDatabaseDefinition + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to get the kql database definition from. + - name: kql-database-id + type: str + required: true + arg_type: path + description: The id of the kql database to get the definition from. + # - name: format + # type: str + # required: false + # arg_type: query + # description: The format of the KqlDatabase definition. Supported format is \"kqlDatabase\". 
+ - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. + + update-definition: + endpoint: workspaces/{workspace_id}/kqlDatabases/{kql_database_id}/updateDefinition + method: post + description: Update the definition of a kql database. + panel: KqlDatabaseDefinition + custom_payload: + value: | + "definition": { + "parts": [ + { + "path": "DatabaseProperties.json", + "payload": DatabaseProperties, + "payloadType": "InlineBase64" + }, + { + "path": "DatabaseSchema.kql", + "payload": DatabaseSchema, + "payloadType": "InlineBase64" + }, + { + "path": ".platform", + "payload": platform, + "payloadType": "InlineBase64" + } + ] + } + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to update. + - name: kql-database-id + type: str + required: true + arg_type: path + description: The id of the kql database to update. + - name: kql-database-path + type: str + required: true + arg_type: load_content + content_files: + - DatabaseProperties.json + - DatabaseSchema.kql + - .platform + description: The path to the kql database to load content from. + - name: update-metadata + type: bool + required: false + default: false + arg_type: query + description: When set to true, the item's metadata is updated using the metadata in the .platform file. + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. + diff --git a/scripts/cli/commands/kql_queryset.yaml b/scripts/cli/commands/kql_queryset.yaml new file mode 100644 index 0000000..f2ddd0c --- /dev/null +++ b/scripts/cli/commands/kql_queryset.yaml @@ -0,0 +1,215 @@ +command: kql-queryset +subcommands: + create: + endpoint: workspaces/{workspace_id}/kqlQuerysets + method: post + description: Create a kql queryset. 
+    panel: KqlQueryset
+    custom_payload:
+      value: |
+        "definition": {
+          "parts": [
+            {
+              "path": "RealtimeQueryset.json",
+              "payload": RealtimeQueryset,
+              "payloadType": "InlineBase64"
+            },
+            {
+              "path": ".platform",
+              "payload": platform,
+              "payloadType": "InlineBase64"
+            }
+          ]
+        }
+    args:
+      - name: workspace-id
+        type: str
+        required: true
+        arg_type: path
+        description: The id of the workspace to create the kql queryset in.
+      - name: display-name
+        type: str
+        required: true
+        arg_type: payload
+        description: The display name of the kql queryset.
+      - name: kql-queryset-path
+        type: str
+        required: true
+        arg_type: load_content
+        content_files:
+          - RealtimeQueryset.json
+          - .platform
+        description: The path to the kql queryset to load content from.
+      - name: description
+        type: str
+        required: false
+        arg_type: payload
+        description: The description of the kql queryset.
+      - name: await-lro
+        type: bool
+        required: false
+        default: false
+        arg_type: lro
+        description: Whether to await the long running operation.
+
+  get:
+    endpoint: workspaces/{workspace_id}/kqlQuerysets/{kql_queryset_id}
+    method: get
+    description: Get a kql queryset.
+    panel: KqlQueryset
+    args:
+      - name: workspace-id
+        type: str
+        required: true
+        arg_type: path
+        description: The id of the workspace to get the kql queryset from.
+      - name: kql-queryset-id
+        type: str
+        required: true
+        arg_type: path
+        description: The id of the kql queryset to get.
+
+  list:
+    endpoint: workspaces/{workspace_id}/kqlQuerysets
+    method: get
+    description: List kql querysets for a workspace.
+    panel: KqlQueryset
+    args:
+      - name: workspace-id
+        type: str
+        required: true
+        arg_type: path
+        description: The id of the workspace to list kql querysets for.
+      - name: continuation-token
+        type: str
+        required: false
+        arg_type: query
+        description: A token for retrieving the next page of results.
+ + update: + endpoint: workspaces/{workspace_id}/kqlQuerysets/{kql_queryset_id} + method: patch + description: Update a kql queryset. + panel: KqlQueryset + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to update. + - name: kql-queryset-id + type: str + required: true + arg_type: path + description: The id of the kql queryset to update. + - name: display-name + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The display name of the kql queryset. + - name: description + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The description of the kql queryset. + + delete: + endpoint: workspaces/{workspace_id}/kqlQuerysets/{kql_queryset_id} + method: delete + description: Delete a kql queryset. + panel: KqlQueryset + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to delete. + - name: kql-queryset-id + type: str + required: true + arg_type: path + description: The id of the kql queryset to delete. + + get-definition: + endpoint: workspaces/{workspace_id}/kqlQuerysets/{kql_queryset_id}/getDefinition + method: get + description: Get the definition of a kql queryset. + panel: KqlQuerysetDefinition + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to get the kql queryset definition from. + - name: kql-queryset-id + type: str + required: true + arg_type: path + description: The id of the kql queryset to get the definition from. + # - name: format + # type: str + # required: false + # arg_type: query + # description: The format of the KqlDatabase definition. Supported format is \"kqlDatabase\". + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. 
+ + update-definition: + endpoint: workspaces/{workspace_id}/kqlQuerysets/{kql_queryset_id}/updateDefinition + method: post + description: Update the definition of a kql queryset. + panel: KqlQuerysetDefinition + custom_payload: + value: | + "definition": { + "parts": [ + { + "path": "RealtimeQueryset.json", + "payload": RealtimeQueryset, + "payloadType": "InlineBase64" + }, + { + "path": ".platform", + "payload": platform, + "payloadType": "InlineBase64" + } + ] + } + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to update. + - name: kql-queryset-id + type: str + required: true + arg_type: path + description: The id of the kql queryset to update. + - name: kql-queryset-path + type: str + required: true + arg_type: load_content + content_files: + - RealtimeQueryset.json + - .platform + description: The path to the kql queryset to load content from. + - name: update-metadata + type: bool + required: false + default: false + arg_type: query + description: When set to true, the item's metadata is updated using the metadata in the .platform file. + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. + diff --git a/scripts/cli/commands/lakehouse.yaml b/scripts/cli/commands/lakehouse.yaml new file mode 100644 index 0000000..0f68f9f --- /dev/null +++ b/scripts/cli/commands/lakehouse.yaml @@ -0,0 +1,274 @@ +command: lakehouse +subcommands: + create: + endpoint: workspaces/{workspace_id}/lakehouses + method: post + description: Create a lakehouse. + panel: Lakehouse + custom_payload: + condition: enable_schemas is True + value: | + "enableSchemas": True, + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to create the lakehouse in. + - name: display-name + type: str + required: true + arg_type: payload + description: The display name of the lakehouse. 
+ - name: description + type: str + required: false + arg_type: payload + description: The description of the lakehouse. + - name: enable-schemas + type: bool + required: false + default: false + description: Whether the lakehouse is schema enabled. + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. + + get: + endpoint: workspaces/{workspace_id}/lakehouses/{lakehouse_id} + method: get + description: Get a lakehouse. + panel: Lakehouse + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to get the lakehouse from. + - name: lakehouse-id + type: str + required: true + arg_type: path + description: The id of the lakehouse to get. + + list: + endpoint: workspaces/{workspace_id}/lakehouses + method: get + description: List lakehouses for a workspace. + panel: Lakehouse + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to list lakehouses for. + - name: continuation-token + type: str + required: false + arg_type: query + description: A token for retrieving the next page of results. + + update: + endpoint: workspaces/{workspace_id}/lakehouses/{lakehouse_id} + method: patch + description: Update a lakehouse. + panel: Lakehouse + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to update. + - name: lakehouse-id + type: str + required: true + arg_type: path + description: The id of the lakehouse to update. + - name: display-name + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The display name of the lakehouse. + - name: description + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The description of the lakehouse. 
+
+  delete:
+    endpoint: workspaces/{workspace_id}/lakehouses/{lakehouse_id}
+    method: delete
+    description: Delete a lakehouse.
+    panel: Lakehouse
+    args:
+      - name: workspace-id
+        type: str
+        required: true
+        arg_type: path
+        description: The id of the workspace to delete.
+      - name: lakehouse-id
+        type: str
+        required: true
+        arg_type: path
+        description: The id of the lakehouse to delete.
+
+  run-background-job:
+    endpoint: workspaces/{workspace_id}/lakehouses/{lakehouse_id}/jobs/instances
+    method: post
+    description: Run on-demand table maintenance job instance.
+    panel: Job
+    custom_payload:
+      value: |
+        "executionData": {
+          "tableName": table_name,
+          "schemaName": schema_name,
+          "optimizeSettings": {
+            "vOrder": v_order,
+            "zOrderBy": z_order_columns
+          },
+          "vacuumSettings": {
+            "retentionPeriod": retention_period
+          }
+        }
+    args:
+      - name: workspace-id
+        type: str
+        required: true
+        arg_type: path
+        description: The id of the workspace to create a job for.
+      - name: lakehouse-id
+        type: str
+        required: true
+        arg_type: path
+        description: The id of the lakehouse to create a job for.
+      - name: job-type
+        type: str
+        required: true
+        arg_type: query
+        description: The type of the job to create. Must be \"TableMaintenance\".
+      - name: table-name
+        type: str
+        required: true
+        description: The name of the table to run the job on.
+      - name: schema-name
+        type: str
+        required: false
+        description: The name of the schema to run the job on. Only applicable for schema enabled lakehouses.
+      - name: v-order
+        type: bool
+        required: false
+        description: If table should be v-ordered.
+      - name: z-order-columns
+        type: List[str]
+        required: false
+        description: List of columns to z-order by.
+      - name: retention-period
+        type: str
+        required: false
+        description: Retention period in format d:hh:mm:ss. Overrides the default retention period.
+ - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. + + list-tables: + endpoint: workspaces/{workspace_id}/lakehouses/{lakehouse_id}/tables + method: get + description: List tables in a lakehouse. + panel: Table + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to list tables for. + - name: lakehouse-id + type: str + required: true + arg_type: path + description: The id of the lakehouse to list tables for. + - name: continuation-token + type: str + required: false + arg_type: query + description: A token for retrieving the next page of results. + - name: max-results + type: int + required: false + arg_type: query + description: The maximum number of results to return. + + load-table: + endpoint: workspaces/{workspace_id}/lakehouses/{lakehouse_id}/tables/{table_name}/load + method: post + description: Load a table. + panel: Table + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to load the table for. + - name: lakehouse-id + type: str + required: true + arg_type: path + description: The id of the lakehouse to load the table for. + - name: table-name + type: str + required: true + arg_type: path + description: The name of the table to load. + - name: relative-path + type: str + required: true + arg_type: payload + description: The relative path to the table to load. + - name: path-type + type: str + required: true + arg_type: payload + description: The type of the path to load. Either \"File\" or \"Folder\". + - name: format + type: str + required: false + arg_type: payload + description: The format of the files to load. Must be \"Parquet\" or \"Csv\". + - name: header + type: bool + required: false + arg_type: payload + description: Whether the file has a header row. Only applicable for csv files. 
+ - name: delimiter + type: str + required: false + arg_type: payload + description: The delimiter of the csv files. Only applicable for csv files. + - name: mode + type: str + required: false + arg_type: payload + description: The mode to load the table in. Either \"Overwrite\" or \"Append\". + - name: file-extension + type: str + required: false + arg_type: payload + description: The file extension of the files to load. + - name: recursive + type: bool + required: false + arg_type: payload + description: Whether to search data files recursively or not, when loading from a folder. + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. diff --git a/scripts/cli/commands/long_running_operation.yaml b/scripts/cli/commands/long_running_operation.yaml new file mode 100644 index 0000000..9128dbf --- /dev/null +++ b/scripts/cli/commands/long_running_operation.yaml @@ -0,0 +1,25 @@ +command: long-running-operation +subcommands: + get-state: + endpoint: operations/{operation_id} + method: get + panel: Long Running Operation + description: Get the state of the long running operation. + args: + - name: operation-id + required: true + type: str + arg_type: path + description: The ID of the long running operation. + + get-result: + endpoint: operations/{operation_id}/result + method: get + panel: Long Running Operation + description: Get the result of the long running operation. Only available when the operation status is `Succeeded`. + args: + - name: operation-id + required: true + type: str + arg_type: path + description: The ID of the long running operation. 
diff --git a/scripts/cli/commands/mirrored_database.yaml b/scripts/cli/commands/mirrored_database.yaml new file mode 100644 index 0000000..a30971d --- /dev/null +++ b/scripts/cli/commands/mirrored_database.yaml @@ -0,0 +1,215 @@ +command: mirrored-database +subcommands: + create: + endpoint: workspaces/{workspace_id}/mirroredDatabases + method: post + description: Create a mirrored database. + panel: mirroredDatabase + custom_payload: + value: | + "definition": { + "parts": [ + { + "path": "mirroredDatabase.json", + "payload": mirroredDatabase, + "payloadType": "InlineBase64" + }, + { + "path": ".platform", + "payload": platform, + "payloadType": "InlineBase64" + } + ] + } + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to create the mirrored database in. + - name: display-name + type: str + required: true + arg_type: payload + description: The display name of the mirrored database. + - name: mirrored-database-path + type: str + required: true + arg_type: load_content + content_files: + - mirroredDatabase.json + - .platform + description: The path to the mirrored database to load content from. + - name: description + type: str + required: false + arg_type: payload + description: The description of the mirrored database. + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. + + get: + endpoint: workspaces/{workspace_id}/mirroredDatabases/{mirrored_database_id} + method: get + description: Get a mirrored database. + panel: mirroredDatabase + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to get the mirrored database from. + - name: mirrored-database-id + type: str + required: true + arg_type: path + description: The id of the mirrored database to get. 
+ + list: + endpoint: workspaces/{workspace_id}/mirroredDatabases + method: get + description: List mirrored databases for a workspace. + panel: mirroredDatabase + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to list mirrored databases for. + - name: continuation-token + type: str + required: false + arg_type: query + description: A token for retrieving the next page of results. + + update: + endpoint: workspaces/{workspace_id}/mirroredDatabases/{mirrored_database_id} + method: patch + description: Update a mirrored database. + panel: mirroredDatabase + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to update. + - name: mirrored-database-id + type: str + required: true + arg_type: path + description: The id of the mirrored database to update. + - name: display-name + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The display name of the mirrored database. + - name: description + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The description of the mirrored database. + + delete: + endpoint: workspaces/{workspace_id}/mirroredDatabases/{mirrored_database_id} + method: delete + description: Delete a mirrored database. + panel: mirroredDatabase + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to delete. + - name: mirrored-database-id + type: str + required: true + arg_type: path + description: The id of the mirrored database to delete. + + get-definition: + endpoint: workspaces/{workspace_id}/mirroredDatabases/{mirrored_database_id}/getDefinition + method: get + description: Get the definition of a mirrored database. 
+ panel: mirroredDatabaseDefinition + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to get the mirrored database definition from. + - name: mirrored-database-id + type: str + required: true + arg_type: path + description: The id of the mirrored database to get the definition from. + # - name: format + # type: str + # required: false + # arg_type: query + # description: The format of the mirroredDatabase definition. Supported format is \"mirroredDatabase\". + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. + + update-definition: + endpoint: workspaces/{workspace_id}/mirroredDatabases/{mirrored_database_id}/updateDefinition + method: post + description: Update the definition of a mirrored database. + panel: mirroredDatabaseDefinition + custom_payload: + value: | + "definition": { + "parts": [ + { + "path": "mirroredDatabase.json", + "payload": mirroredDatabase, + "payloadType": "InlineBase64" + }, + { + "path": ".platform", + "payload": platform, + "payloadType": "InlineBase64" + } + ] + } + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to update. + - name: mirrored-database-id + type: str + required: true + arg_type: path + description: The id of the mirrored database to update. + - name: mirrored-database-path + type: str + required: true + arg_type: load_content + content_files: + - mirroredDatabase.json + - .platform + description: The path to the mirrored database to load content from. + - name: update-metadata + type: bool + required: false + default: false + arg_type: query + description: When set to true, the item's metadata is updated using the metadata in the .platform file. + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. 
+ diff --git a/scripts/cli/commands/mirrored_warehouse.yaml b/scripts/cli/commands/mirrored_warehouse.yaml new file mode 100644 index 0000000..de2d181 --- /dev/null +++ b/scripts/cli/commands/mirrored_warehouse.yaml @@ -0,0 +1,18 @@ +command: mirrored-warehouse +subcommands: + list: + endpoint: workspaces/{workspace_id}/mirroredWarehouses + method: get + description: List mirrored warehouses for a workspace. + panel: MirroredWarehouse + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to list mirrored warehouses for. + - name: continuation-token + type: str + required: false + arg_type: query + description: A token for retrieving the next page of results. diff --git a/scripts/cli/commands/ml_experiment.yaml b/scripts/cli/commands/ml_experiment.yaml new file mode 100644 index 0000000..94c7b3f --- /dev/null +++ b/scripts/cli/commands/ml_experiment.yaml @@ -0,0 +1,105 @@ +command: ml-experiment +subcommands: + create: + endpoint: workspaces/{workspace_id}/mlExperiments + method: post + description: Create a ml experiment. + panel: Ml Experiment + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to create the ml experiment in. + - name: display-name + type: str + required: true + arg_type: payload + description: The display name of the ml experiment. + - name: description + type: str + required: false + arg_type: payload + description: The description of the ml experiment. + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. + + get: + endpoint: workspaces/{workspace_id}/mlExperiments/{ml_experiment_id} + method: get + description: Get a ml experiment. + panel: Ml Experiment + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to get the ml experiment from. 
+ - name: ml-experiment-id + type: str + required: true + arg_type: path + description: The id of the ml experiment to get. + + list: + endpoint: workspaces/{workspace_id}/mlExperiments + method: get + description: List ml experiments for a workspace. + panel: Ml Experiment + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to list ml experiments for. + - name: continuation-token + type: str + required: false + arg_type: query + description: A token for retrieving the next page of results. + + update: + endpoint: workspaces/{workspace_id}/mlExperiments/{ml_experiment_id} + method: patch + description: Update a ml experiment. + panel: Ml Experiment + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to update the ml experiment in. + - name: display-name + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The display name of the ml experiment. + - name: description + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The description of the ml experiment. + + delete: + endpoint: workspaces/{workspace_id}/mlExperiments/{ml_experiment_id} + method: delete + description: Delete a ml experiment. + panel: Ml Experiment + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to delete the ml experiment from. + - name: ml-experiment-id + type: str + required: true + arg_type: path + description: The id of the ml experiment to delete. + diff --git a/scripts/cli/commands/ml_model.yaml b/scripts/cli/commands/ml_model.yaml new file mode 100644 index 0000000..0433e93 --- /dev/null +++ b/scripts/cli/commands/ml_model.yaml @@ -0,0 +1,105 @@ +command: ml-model +subcommands: + create: + endpoint: workspaces/{workspace_id}/mlModels + method: post + description: Create a ml model.
+ panel: Ml Model + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to create the ml model in. + - name: display-name + type: str + required: true + arg_type: payload + description: The display name of the ml model. + - name: description + type: str + required: false + arg_type: payload + description: The description of the ml model. + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. + + get: + endpoint: workspaces/{workspace_id}/mlModels/{ml_model_id} + method: get + description: Get a ml model. + panel: Ml Model + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to get the ml model from. + - name: ml-model-id + type: str + required: true + arg_type: path + description: The id of the ml model to get. + + list: + endpoint: workspaces/{workspace_id}/mlModels + method: get + description: List ml models for a workspace. + panel: Ml Model + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to list ml models for. + - name: continuation-token + type: str + required: false + arg_type: query + description: A token for retrieving the next page of results. + + update: + endpoint: workspaces/{workspace_id}/mlModels/{ml_model_id} + method: patch + description: Update a ml model. + panel: Ml Model + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to update the ml model in. + - name: display-name + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The display name of the ml model. + - name: description + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The description of the ml model.
+ + delete: + endpoint: workspaces/{workspace_id}/mlModels/{ml_model_id} + method: delete + description: Delete a ml model. + panel: Ml Model + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to delete the ml model from. + - name: ml-model-id + type: str + required: true + arg_type: path + description: The id of the ml model to delete. + diff --git a/scripts/cli/commands/notebook.yaml b/scripts/cli/commands/notebook.yaml new file mode 100644 index 0000000..65d29f9 --- /dev/null +++ b/scripts/cli/commands/notebook.yaml @@ -0,0 +1,217 @@ +command: notebook +subcommands: + create: + endpoint: workspaces/{workspace_id}/notebooks + method: post + description: Create a notebook. + panel: Notebook + custom_payload: + value: | + "definition": { + "format": "ipynb", + "parts": [ + { + "path": "notebook-content.py", + "payload": notebook_content, + "payloadType": "InlineBase64" + }, + { + "path": ".platform", + "payload": platform, + "payloadType": "InlineBase64" + } + ] + } + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to create the notebook in. + - name: display-name + type: str + required: true + arg_type: payload + description: The display name of the notebook. + - name: notebook-path + type: str + required: true + arg_type: load_content + content_files: + - notebook-content.py + - .platform + description: The path to the notebook to load content from. + - name: description + type: str + required: false + arg_type: payload + description: The description of the notebook. + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. + + get: + endpoint: workspaces/{workspace_id}/notebooks/{notebook_id} + method: get + description: Get a notebook. 
+ panel: Notebook + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to get the notebook from. + - name: notebook-id + type: str + required: true + arg_type: path + description: The id of the notebook to get. + + list: + endpoint: workspaces/{workspace_id}/notebooks + method: get + description: List notebooks for a workspace. + panel: Notebook + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to list notebooks for. + - name: continuation-token + type: str + required: false + arg_type: query + description: A token for retrieving the next page of results. + + update: + endpoint: workspaces/{workspace_id}/notebooks/{notebook_id} + method: patch + description: Update a notebook. + panel: Notebook + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to update. + - name: notebook-id + type: str + required: true + arg_type: path + description: The id of the notebook to update. + - name: display-name + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The display name of the notebook. + - name: description + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The description of the notebook. + + delete: + endpoint: workspaces/{workspace_id}/notebooks/{notebook_id} + method: delete + description: Delete a notebook. + panel: Notebook + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to delete. + - name: notebook-id + type: str + required: true + arg_type: path + description: The id of the notebook to delete. + + get-definition: + endpoint: workspaces/{workspace_id}/notebooks/{notebook_id}/getDefinition + method: get + description: Get the definition of a notebook. 
+ panel: NotebookDefinition + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to get the notebook definition from. + - name: notebook-id + type: str + required: true + arg_type: path + description: The id of the notebook to get the definition from. + - name: format + type: str + required: false + arg_type: query + description: The format of the Notebook definition. Supported format is \"ipynb\". + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. + + update-definition: + endpoint: workspaces/{workspace_id}/notebooks/{notebook_id}/updateDefinition + method: post + description: Update the definition of a notebook. + panel: NotebookDefinition + custom_payload: + value: | + "definition": { + "format": "ipynb", + "parts": [ + { + "path": "notebook-content.py", + "payload": notebook_content, + "payloadType": "InlineBase64" + }, + { + "path": ".platform", + "payload": platform, + "payloadType": "InlineBase64" + } + ] + } + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to update. + - name: notebook-id + type: str + required: true + arg_type: path + description: The id of the notebook to update. + - name: notebook-path + type: str + required: true + arg_type: load_content + content_files: + - notebook-content.py + - .platform + description: The path to the notebook to load content from. + - name: update-metadata + type: bool + required: false + default: false + arg_type: query + description: When set to true, the item's metadata is updated using the metadata in the .platform file. + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. 
+ diff --git a/scripts/cli/commands/paginated_report.yaml b/scripts/cli/commands/paginated_report.yaml new file mode 100644 index 0000000..3d13722 --- /dev/null +++ b/scripts/cli/commands/paginated_report.yaml @@ -0,0 +1,35 @@ +command: paginated-report +subcommands: + list: + endpoint: paginated-reports + method: get + description: List paginated reports for a workspace. + panel: Paginated Report + args: + - name: continuation-token + type: str + required: false + arg_type: query + description: A token for retrieving the next page of results. + + update: + endpoint: paginated-reports/{paginated_report_id} + method: patch + description: Update a paginated report. + panel: Paginated Report + args: + - name: paginated-report-id + type: str + required: true + arg_type: path + description: The id of the paginated report to update. + - name: display-name + type: str + required: false + arg_type: payload + description: The display name of the paginated report. + - name: description + type: str + required: false + arg_type: payload + description: The description of the paginated report. diff --git a/scripts/cli/commands/reflex.yaml b/scripts/cli/commands/reflex.yaml new file mode 100644 index 0000000..1d3766c --- /dev/null +++ b/scripts/cli/commands/reflex.yaml @@ -0,0 +1,215 @@ +command: reflex +subcommands: + create: + endpoint: workspaces/{workspace_id}/reflexes + method: post + description: Create a reflex. + panel: reflex + custom_payload: + value: | + "definition": { + "parts": [ + { + "path": "ReflexEntities.json", + "payload": ReflexEntities, + "payloadType": "InlineBase64" + }, + { + "path": ".platform", + "payload": platform, + "payloadType": "InlineBase64" + } + ] + } + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to create the reflex in. + - name: display-name + type: str + required: true + arg_type: payload + description: The display name of the reflex. 
+ - name: reflex-path + type: str + required: true + arg_type: load_content + content_files: + - ReflexEntities.json + - .platform + description: The path to the reflex to load content from. + - name: description + type: str + required: false + arg_type: payload + description: The description of the reflex. + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. + + get: + endpoint: workspaces/{workspace_id}/reflexes/{reflex_id} + method: get + description: Get a reflex. + panel: reflex + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to get the reflex from. + - name: reflex-id + type: str + required: true + arg_type: path + description: The id of the reflex to get. + + list: + endpoint: workspaces/{workspace_id}/reflexes + method: get + description: List reflexes for a workspace. + panel: reflex + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to list reflexes for. + - name: continuation-token + type: str + required: false + arg_type: query + description: A token for retrieving the next page of results. + + update: + endpoint: workspaces/{workspace_id}/reflexes/{reflex_id} + method: patch + description: Update a reflex. + panel: reflex + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to update. + - name: reflex-id + type: str + required: true + arg_type: path + description: The id of the reflex to update. + - name: display-name + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The display name of the reflex. + - name: description + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The description of the reflex. 
+ + delete: + endpoint: workspaces/{workspace_id}/reflexes/{reflex_id} + method: delete + description: Delete a reflex. + panel: reflex + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to delete. + - name: reflex-id + type: str + required: true + arg_type: path + description: The id of the reflex to delete. + + get-definition: + endpoint: workspaces/{workspace_id}/reflexes/{reflex_id}/getDefinition + method: get + description: Get the definition of a reflex. + panel: reflexDefinition + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to get the reflex definition from. + - name: reflex-id + type: str + required: true + arg_type: path + description: The id of the reflex to get the definition from. + - name: format + type: str + required: false + arg_type: query + description: The format of the reflex definition. + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. + + update-definition: + endpoint: workspaces/{workspace_id}/reflexes/{reflex_id}/updateDefinition + method: post + description: Update the definition of a reflex. + panel: reflexDefinition + custom_payload: + value: | + "definition": { + "parts": [ + { + "path": "ReflexEntities.json", + "payload": ReflexEntities, + "payloadType": "InlineBase64" + }, + { + "path": ".platform", + "payload": platform, + "payloadType": "InlineBase64" + } + ] + } + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to update. + - name: reflex-id + type: str + required: true + arg_type: path + description: The id of the reflex to update.
+ - name: reflex-path + type: str + required: true + arg_type: load_content + content_files: + - ReflexEntities.json + - .platform + description: The path to the reflex to load content from. + - name: update-metadata + type: bool + required: false + default: false + arg_type: query + description: When set to true, the item's metadata is updated using the metadata in the .platform file. + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. + diff --git a/scripts/cli/commands/report.yaml b/scripts/cli/commands/report.yaml new file mode 100644 index 0000000..2ae51ae --- /dev/null +++ b/scripts/cli/commands/report.yaml @@ -0,0 +1,227 @@ +command: report +subcommands: + create: + endpoint: workspaces/{workspace_id}/reports + method: post + description: Create a report. + panel: report + custom_payload: + value: | + "definition": { + "parts": [ + { + "path": "definition.pbir", + "payload": definition, + "payloadType": "InlineBase64" + }, + { + "path": "report.json", + "payload": report, + "payloadType": "InlineBase64" + }, + { + "path": ".platform", + "payload": platform, + "payloadType": "InlineBase64" + } + ] + } + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to create the report in. + - name: display-name + type: str + required: true + arg_type: payload + description: The display name of the report. + - name: report-path + type: str + required: true + arg_type: load_content + content_files: + - definition.pbir + - report.json + - .platform + description: The path to the report to load content from. + - name: description + type: str + required: false + arg_type: payload + description: The description of the report. + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. 
+ + get: + endpoint: workspaces/{workspace_id}/reports/{report_id} + method: get + description: Get a report. + panel: report + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to get the report from. + - name: report-id + type: str + required: true + arg_type: path + description: The id of the report to get. + + list: + endpoint: workspaces/{workspace_id}/reports + method: get + description: List reports for a workspace. + panel: report + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to list reports for. + - name: continuation-token + type: str + required: false + arg_type: query + description: A token for retrieving the next page of results. + + update: + endpoint: workspaces/{workspace_id}/reports/{report_id} + method: patch + description: Update a report. + panel: report + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to update. + - name: report-id + type: str + required: true + arg_type: path + description: The id of the report to update. + - name: display-name + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The display name of the report. + - name: description + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The description of the report. + + delete: + endpoint: workspaces/{workspace_id}/reports/{report_id} + method: delete + description: Delete a report. + panel: report + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to delete. + - name: report-id + type: str + required: true + arg_type: path + description: The id of the report to delete. + + get-definition: + endpoint: workspaces/{workspace_id}/reports/{report_id}/getDefinition + method: get + description: Get the definition of a report. 
+ panel: reportDefinition + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to get the report definition from. + - name: report-id + type: str + required: true + arg_type: path + description: The id of the report to get the definition from. + - name: format + type: str + required: false + arg_type: query + description: The format of the report definition. + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. + + update-definition: + endpoint: workspaces/{workspace_id}/reports/{report_id}/updateDefinition + method: post + description: Update the definition of a report. + panel: reportDefinition + custom_payload: + value: | + "definition": { + "parts": [ + { + "path": "definition.pbir", + "payload": definition, + "payloadType": "InlineBase64" + }, + { + "path": "report.json", + "payload": report, + "payloadType": "InlineBase64" + }, + { + "path": ".platform", + "payload": platform, + "payloadType": "InlineBase64" + } + ] + } + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to update. + - name: report-id + type: str + required: true + arg_type: path + description: The id of the report to update. + - name: report-path + type: str + required: true + arg_type: load_content + content_files: + - definition.pbir + - report.json + - .platform + description: The path to the report to load content from. + - name: update-metadata + type: bool + required: false + default: false + arg_type: query + description: When set to true, the item's metadata is updated using the metadata in the .platform file. + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation.
+ diff --git a/scripts/cli/commands/semantic_model.yaml b/scripts/cli/commands/semantic_model.yaml new file mode 100644 index 0000000..a521a82 --- /dev/null +++ b/scripts/cli/commands/semantic_model.yaml @@ -0,0 +1,227 @@ +command: semantic-model +subcommands: + create: + endpoint: workspaces/{workspace_id}/semanticModels + method: post + description: Create a semantic model. + panel: Semantic model + custom_payload: + value: | + "definition": { + "parts": [ + { + "path": "definition.pbism", + "payload": definition, + "payloadType": "InlineBase64" + }, + { + "path": "model.bim", + "payload": model, + "payloadType": "InlineBase64" + }, + { + "path": ".platform", + "payload": platform, + "payloadType": "InlineBase64" + } + ] + } + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to create the semantic model in. + - name: display-name + type: str + required: true + arg_type: payload + description: The display name of the semantic model. + - name: semantic-model-path + type: str + required: true + arg_type: load_content + content_files: + - definition.pbism + - model.bim + - .platform + description: The path to the semantic model to load content from. + - name: description + type: str + required: false + arg_type: payload + description: The description of the semantic model. + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. + + get: + endpoint: workspaces/{workspace_id}/semanticModels/{semantic_model_id} + method: get + description: Get a semantic model. + panel: Semantic model + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to get the semantic model from. + - name: semantic-model-id + type: str + required: true + arg_type: path + description: The id of the semantic model to get. 
+ + list: + endpoint: workspaces/{workspace_id}/semanticModels + method: get + description: List semantic models for a workspace. + panel: Semantic model + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to list semantic models for. + - name: continuation-token + type: str + required: false + arg_type: query + description: A token for retrieving the next page of results. + + update: + endpoint: workspaces/{workspace_id}/semanticModels/{semantic_model_id} + method: patch + description: Update a semantic model. + panel: Semantic model + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to update. + - name: semantic-model-id + type: str + required: true + arg_type: path + description: The id of the semantic model to update. + - name: display-name + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The display name of the semantic model. + - name: description + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The description of the semantic model. + + delete: + endpoint: workspaces/{workspace_id}/semanticModels/{semantic_model_id} + method: delete + description: Delete a semantic model. + panel: Semantic model + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to delete. + - name: semantic-model-id + type: str + required: true + arg_type: path + description: The id of the semantic model to delete. + + get-definition: + endpoint: workspaces/{workspace_id}/semanticModels/{semantic_model_id}/getDefinition + method: get + description: Get the definition of a semantic model. + panel: Semantic model definition + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to get the semantic model definition from. 
+ - name: semantic-model-id + type: str + required: true + arg_type: path + description: The id of the semantic model to get the definition from. + - name: format + type: str + required: false + arg_type: query + description: The format of the semantic model definition. + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. + + update-definition: + endpoint: workspaces/{workspace_id}/semanticModels/{semantic_model_id}/updateDefinition + method: post + description: Update the definition of a semantic model. + panel: Semantic model definition + custom_payload: + value: | + "definition": { + "parts": [ + { + "path": "definition.pbism", + "payload": definition, + "payloadType": "InlineBase64" + }, + { + "path": "model.bim", + "payload": model, + "payloadType": "InlineBase64" + }, + { + "path": ".platform", + "payload": platform, + "payloadType": "InlineBase64" + } + ] + } + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to update. + - name: semantic-model-id + type: str + required: true + arg_type: path + description: The id of the semantic model to update. + - name: semantic-model-path + type: str + required: true + arg_type: load_content + content_files: + - definition.pbism + - model.bim + - .platform + description: The path to the semantic model to load content from. + - name: update-metadata + type: bool + required: false + default: false + arg_type: query + description: When set to true, the item's metadata is updated using the metadata in the .platform file. + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation.
+ diff --git a/scripts/cli/commands/spark_job_definition.yaml b/scripts/cli/commands/spark_job_definition.yaml new file mode 100644 index 0000000..9a79f12 --- /dev/null +++ b/scripts/cli/commands/spark_job_definition.yaml @@ -0,0 +1,242 @@ +command: spark-job-definition +subcommands: + create: + endpoint: workspaces/{workspace_id}/sparkJobDefinitions + method: post + description: Create a spark job definition. + panel: Spark Job Definition + custom_payload: + value: | + "definition": { + "parts": [ + { + "path": "SparkJobDefinitionV1.json", + "payload": SparkJobDefinitionV1, + "payloadType": "InlineBase64" + }, + { + "path": ".platform", + "payload": platform, + "payloadType": "InlineBase64" + } + ] + } + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to create the spark job definition in. + - name: display-name + type: str + required: true + arg_type: payload + description: The display name of the spark job definition. + - name: spark-job-definition-path + type: str + required: true + arg_type: load_content + content_files: + - SparkJobDefinitionV1.json + - .platform + description: The path to the spark job definition to load content from. + - name: description + type: str + required: false + arg_type: payload + description: The description of the spark job definition. + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. + + get: + endpoint: workspaces/{workspace_id}/sparkJobDefinitions/{spark_job_definition_id} + method: get + description: Get a spark job definition. + panel: Spark Job Definition + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to get the spark job definition from. + - name: spark-job-definition-id + type: str + required: true + arg_type: path + description: The id of the spark job definition to get. 
+ + list: + endpoint: workspaces/{workspace_id}/sparkJobDefinitions + method: get + description: List spark job definitions for a workspace. + panel: Spark Job Definition + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to list spark job definitions for. + - name: continuation-token + type: str + required: false + arg_type: query + description: A token for retrieving the next page of results. + + update: + endpoint: workspaces/{workspace_id}/sparkJobDefinitions/{spark_job_definition_id} + method: patch + description: Update a spark job definition. + panel: Spark Job Definition + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to update. + - name: spark-job-definition-id + type: str + required: true + arg_type: path + description: The id of the spark job definition to update. + - name: display-name + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The display name of the spark job definition. + - name: description + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The description of the spark job definition. + + delete: + endpoint: workspaces/{workspace_id}/sparkJobDefinitions/{spark_job_definition_id} + method: delete + description: Delete a spark job definition. + panel: Spark Job Definition + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to delete. + - name: spark-job-definition-id + type: str + required: true + arg_type: path + description: The id of the spark job definition to delete. + + get-definition: + endpoint: workspaces/{workspace_id}/sparkJobDefinitions/{spark_job_definition_id}/getDefinition + method: get + description: Get the definition of a spark job definition. 
+ panel: Spark Job Definition + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to get the spark job definition definition from. + - name: spark-job-definition-id + type: str + required: true + arg_type: path + description: The id of the spark job definition to get the definition from. + # - name: format + # type: str + # required: false + # arg_type: query + # description: The format of the spark job definition definition. Supported format is \"SparkJobDefinitionV1\". + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. + + update-definition: + endpoint: workspaces/{workspace_id}/sparkJobDefinitions/{spark_job_definition_id}/updateDefinition + method: post + description: Update the definition of a spark job definition. + panel: Spark Job Definition + custom_payload: + value: | + "definition": { + "parts": [ + { + "path": "SparkJobDefinitionV1.json", + "payload": SparkJobDefinitionV1, + "payloadType": "InlineBase64" + }, + { + "path": ".platform", + "payload": platform, + "payloadType": "InlineBase64" + } + ] + } + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to update. + - name: spark-job-definition-id + type: str + required: true + arg_type: path + description: The id of the spark job definition to update. + - name: spark-job-definition-path + type: str + required: true + arg_type: load_content + content_files: + - SparkJobDefinitionV1.json + - .platform + description: The path to the spark job definition to load content from. + - name: update-metadata + type: bool + required: false + default: false + arg_type: query + description: When set to true, the item's metadata is updated using the metadata in the .platform file. 
+ - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. + + run-background-job: + endpoint: workspaces/{workspace_id}/sparkJobDefinitions/{spark_job_definition_id}/jobs/instances + method: post + description: Run on-demand spark job instance. + panel: Job + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to create a job for. + - name: spark-job-definition-id + type: str + required: true + arg_type: path + description: The id of the spark job definition to create a job for. + - name: job-type + type: str + required: true + arg_type: query + description: The type of the job to create. Must be \"sparkJob\". + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. \ No newline at end of file diff --git a/scripts/cli/commands/sql_endpoint.yaml b/scripts/cli/commands/sql_endpoint.yaml new file mode 100644 index 0000000..6fb0e91 --- /dev/null +++ b/scripts/cli/commands/sql_endpoint.yaml @@ -0,0 +1,18 @@ +command: sql-endpoint +subcommands: + list: + endpoint: workspaces/{workspace_id}/sqlEndpoints + method: get + description: List SQL endpoints for a workspace. + panel: SQL Endpoint + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to list SQL endpoints for. + - name: continuation-token + type: str + required: false + arg_type: query + description: A token for retrieving the next page of results. diff --git a/scripts/cli/commands/warehouse.yaml b/scripts/cli/commands/warehouse.yaml new file mode 100644 index 0000000..0a416a1 --- /dev/null +++ b/scripts/cli/commands/warehouse.yaml @@ -0,0 +1,104 @@ +command: warehouse +subcommands: + create: + endpoint: workspaces/{workspace_id}/warehouses + method: post + description: Create a warehouse. 
+ panel: Warehouse + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to create the warehouse in. + - name: display-name + type: str + required: true + arg_type: payload + description: The display name of the warehouse. + - name: description + type: str + required: false + arg_type: payload + description: The description of the warehouse. + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. + + get: + endpoint: workspaces/{workspace_id}/warehouses/{warehouse_id} + method: get + description: Get a warehouse. + panel: Warehouse + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to get the warehouse from. + - name: warehouse-id + type: str + required: true + arg_type: path + description: The id of the warehouse to get. + + list: + endpoint: workspaces/{workspace_id}/warehouses + method: get + description: List warehouses for a workspace. + panel: Warehouse + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to list warehouses for. + - name: continuation-token + type: str + required: false + arg_type: query + description: A token for retrieving the next page of results. + + update: + endpoint: workspaces/{workspace_id}/warehouses/{warehouse_id} + method: patch + description: Update a warehouse. + panel: Warehouse + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to update the warehouse in. + - name: display-name + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The display name of the warehouse. + - name: description + type: str + required: false + arg_type: payload + arg_group_type_id: at-least-1 + description: The description of the warehouse. 
+      - name: warehouse-id
+        type: str
+        required: true
+        arg_type: path
+        description: The id of the warehouse to update.
+
+  delete:
+    endpoint: workspaces/{workspace_id}/warehouses/{warehouse_id}
+    method: delete
+    description: Delete a warehouse.
+    panel: Warehouse
+    args:
+      - name: workspace-id
+        type: str
+        required: true
+        arg_type: path
+        description: The id of the workspace to delete the warehouse from.
+      - name: warehouse-id
+        type: str
+        required: true
+        arg_type: path
+        description: The id of the warehouse to delete.
diff --git a/scripts/cli/commands/workspace.yaml b/scripts/cli/commands/workspace.yaml
new file mode 100644
index 0000000..7c714bc
--- /dev/null
+++ b/scripts/cli/commands/workspace.yaml
@@ -0,0 +1,239 @@
+command: workspace
+subcommands:
+  create:
+    endpoint: workspaces
+    method: post
+    description: Create a workspace.
+    panel: Workspace
+    args:
+      - name: display-name
+        type: str
+        required: true
+        arg_type: payload
+        description: The display name of the workspace.
+      - name: description
+        type: str
+        required: false
+        arg_type: payload
+        description: The description of the workspace.
+      - name: capacity-id
+        type: str
+        required: false
+        arg_type: payload
+        description: The capacity id to assign the workspace to.
+
+  get:
+    endpoint: workspaces/{workspace_id}
+    method: get
+    description: Get a workspace.
+    panel: Workspace
+    args:
+      - name: workspace-id
+        type: str
+        required: true
+        arg_type: path
+        description: The id of the workspace to get.
+
+  list:
+    endpoint: workspaces
+    method: get
+    description: List workspaces.
+    panel: Workspace
+    args:
+      - name: continuation-token
+        type: str
+        required: false
+        arg_type: query
+        description: A token for retrieving the next page of results.
+      - name: roles
+        type: str
+        required: false
+        arg_type: query
+        description: A list of roles. Separate values using a comma. If not provided, all workspaces are returned.
+
+  update:
+    endpoint: "workspaces/{workspace_id}"
+    method: patch
+    description: Update a workspace.
+ panel: Workspace + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to update. + - name: display-name + type: str + required: false + arg_type: payload + description: The display name of the workspace. + - name: description + type: str + required: false + arg_type: payload + description: The description of the workspace. + + delete: + endpoint: workspaces/{workspace_id} + method: delete + description: Delete a workspace. + panel: Workspace + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to delete. + + assign-to-capacity: + endpoint: workspaces/{workspace_id}/assignToCapacity + method: post + description: Assign a workspace to a capacity. + panel: Capacity + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to assign to a capacity. + - name: capacity-id + type: str + required: true + arg_type: payload + description: The id of the capacity to assign the workspace to. + + unassign-from-capacity: + endpoint: workspaces/{workspace_id}/unassignFromCapacity + method: post + description: Unassign a workspace from a capacity. + panel: Capacity + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to unassign from a capacity. + + add-role-assignment: + endpoint: workspaces/{workspace_id}/roleAssignments + method: post + description: Add a role assignment to a workspace. + panel: Role Assignment + custom_payload: + value: | + "principal": { + "id": principal_id, + "type": principal_type, + } + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to add a role assignment to. + - name: role + type: str + required: true + arg_type: payload + description: The role to add to the workspace. 
+ - name: principal-id + type: str + required: true + description: The id of the principal. + - name: principal-type + type: str + required: true + description: The type of the principal. + + get-role-assignment: + endpoint: workspaces/{workspace_id}/roleAssignments/{role_assignment_id} + method: get + description: Get a role assignment for a workspace. + panel: Role Assignment + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to get a role assignment for. + - name: role-assignment-id + type: str + required: true + arg_type: path + description: The id of the role assignment to get. + + list-role-assignments: + endpoint: workspaces/{workspace_id}/roleAssignments + method: get + description: List role assignments for a workspace. + panel: Role Assignment + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to list role assignments for. + + update-role-assignment: + endpoint: workspaces/{workspace_id}/roleAssignments/{role_assignment_id} + method: patch + description: Update a role assignment for a workspace. + panel: Role Assignment + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The workspace ID. + - name: role-assignment-id + type: str + required: true + arg_type: path + description: The workspace role assignment ID. + + delete-role-assignment: + endpoint: workspaces/{workspace_id}/roleAssignments/{role_assignment_id} + method: delete + description: Delete a role assignment from a workspace. + panel: Role Assignment + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to add a role assignment to. + - name: role-assignment-id + type: str + required: true + arg_type: path + description: The id of the role assignment to delete. 
+ + deprovision-identity: + endpoint: workspaces/{workspace_id}/deprovisionIdentity + method: post + description: Deprovision an identity from a workspace. + panel: Identity + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The id of the workspace to deprovision an identity from. + + provision-identity: + endpoint: workspaces/{workspace_id}/provisionIdentity + method: post + description: Provision an identity to a workspace. + panel: Identity + args: + - name: workspace-id + type: str + required: true + arg_type: path + description: The ID of the workspace. + - name: await-lro + type: bool + required: false + default: false + arg_type: lro + description: Whether to await the long running operation. diff --git a/scripts/cli/generate_commands.py b/scripts/cli/generate_commands.py new file mode 100644 index 0000000..9563456 --- /dev/null +++ b/scripts/cli/generate_commands.py @@ -0,0 +1,413 @@ +import os +import re +from enum import Enum +from typing import Any, Dict, List, Optional + +from jinja2 import Template +from pydantic import BaseModel, field_validator +from yaml import safe_load + +BASE_URL = "https://api.fabric.microsoft.com/v1" + + +class ArgType(str, Enum): + PAYLOAD = "payload" + PATH = "path" + QUERY = "query" + LRO = "lro" + LOAD_CONTENT = "load_content" + SAVE_CONTENT = "save_content" + + +class Type(str, Enum): + STRING = "str" + BOOLEAN = "bool" + INTEGER = "int" + + +class Method(str, Enum): + GET = "get" + POST = "post" + PATCH = "patch" + DELETE = "delete" + + +class SubcommandArg(BaseModel): + name: str + type: str + description: str + required: Optional[bool] = None + arg_type: Optional[ArgType] = None + arg_group_type_id: Optional[str] = None + default: Optional[Any] = None + + @field_validator("name") + def validate_name(cls, v): + if not re.match(r"^[a-z-]+$", v): + raise ValueError("name must contain only lowercase letters and hyphens") + return v + + @field_validator("description") + def 
validate_description(cls, v): + if not v[-1] == ".": + raise ValueError("description must end with a period") + return v + + +class CustomPayload(BaseModel): + condition: Optional[str] = None + value: str + + +class Subcommand(BaseModel): + endpoint: str + method: str + description: str + panel: str + args: List[SubcommandArg] + custom_payload: Optional[CustomPayload] = None + + @field_validator("description") + def validate_description(cls, v): + if not v[-1] == ".": + raise ValueError("description must end with a period") + return v + + +class Command(BaseModel): + command: str + subcommands: Dict[str, Subcommand] + + +api_template = """ +import json +import logging +from uuid import UUID +from typing import List +from msfabricutils.enums import PrincipalType, WorkspaceRole + +import logging +import base64 +import requests +import typer +from typing_extensions import Annotated + +from msfabricutils import get_fabric_bearer_token +from msfabricutils.core.operations import wait_for_long_running_operation +from msfabricutils.common.remove_none import remove_none +from msfabricutils.common.shorten_dict_values import shorten_dict_values +{%- for name, subcommand in subcommands.items() %} + +def {{ module_name | replace('-', '_') }}_{{ name | replace('-', '_') }}( + {%- for arg in subcommand.args %} + {{ arg.snake_case }}: {{ arg.type.split('|')[1] if '|' in arg.type else arg.type }}{{ ' = None' if not arg.required else '' }}, + {%- endfor %} + {%- for arg in subcommand.args if arg.arg_type == 'lro' %} + timeout: int = 60 * 5, + {%- endfor %} + preview: bool = True, +) -> requests.Response: + \"\"\" + {{ subcommand.description }} + + Args: + {%- for arg in subcommand.args %} + {{ arg.snake_case }} ({{ (arg.type.split('|')[1] if '|' in arg.type else arg.type).strip() }}{{ ' | None' if not arg.required else '' }}): {{ arg.description }} + {%- endfor %} + {%- for arg in subcommand.args if arg.arg_type == 'lro' %} + timeout (int): Timeout for the long running operation 
(seconds). Defaults to 5 minutes. + {%- endfor %} + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + \"\"\" + + url = f"{{ base_url.strip('/') }}/{{ subcommand.endpoint.strip('/') }}" # noqa + url = f"{url}?" + {%- for arg in subcommand.args if arg.arg_type == 'query' %} + if {{ arg.snake_case }} is not None: + url = f"{url}{{arg.camel_case}}={% raw %}{{% endraw %}{{arg.snake_case}}{% raw %}}{% endraw %}&" + {%- endfor %} + url = url.rstrip('&?') + + method = "{{ subcommand.method }}" + token = get_fabric_bearer_token() + headers = { + "Content-Type": "application/json", + "Authorization": f"Bearer {token}" + } + + {%- for arg in subcommand.args if arg.arg_type == 'load_content' %} + {%- for content_file in arg.content_files %} + + with open({{ arg.snake_case }}.rstrip('/') + "/{{ content_file }}", "r") as f: + {{ content_file.strip('.').split('.')[0].replace('-', '_') }} = base64.b64encode(f.read().encode()).decode() + {%- endfor %} + + {%- endfor %} + + data = {} + {%- for arg in subcommand.args if arg.arg_type == 'payload' %} + data["{{ arg.camel_case }}"] = {{ arg.snake_case }} + {%- endfor %} + {%- if subcommand.custom_payload %} + {%- if subcommand.custom_payload.condition %} + if {{ subcommand.custom_payload.condition }}: + custom_payload = { + {{subcommand.custom_payload.value | trim}} + } + data = { + **data, + **custom_payload + } + {%- else %} + custom_payload = { + {{subcommand.custom_payload.value | trim}}} + data = { + **data, + **custom_payload + } + {%- endif %} + {%- endif %} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\\n{method.upper()}\\n") + typer.echo(f"URL:\\n{url}\\n") + typer.echo(f"Data:\\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\\n") + typer.echo(f"Headers:\\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\\n") + typer.confirm("Do you want to run the command?", 
abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + {%- for arg in subcommand.args if arg.arg_type == 'lro' %} + case 202: + if {{ arg.snake_case }} is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, + retry_after=retry_after, + timeout=timeout + ) + return response + {%- endfor %} + case _: + return response + +{%- endfor %} +""".strip() + +command_template = """ +import json +import logging +from uuid import UUID +from typing import List +from msfabricutils.enums import PrincipalType, WorkspaceRole + +import requests +import typer +from typing_extensions import Annotated + +from msfabricutils import get_fabric_bearer_token +from msfabricutils.core.operations import wait_for_long_running_operation +from msfabricutils.common.remove_none import remove_none +{%- for name, subcommand in subcommands.items() %} +from msfabricutils.rest_api import {{ module_name | replace('-', '_') }}_{{ name | replace('-', '_') }} +{%- endfor %} + +app = typer.Typer( + help="[bold]{{ (subcommands.keys() | list)[:5] | join(', ') }}[/bold]", + rich_markup_mode="rich", +) + + +{%- for name, subcommand in subcommands.items() %} + +@app.command(help="{{ subcommand.description }}", rich_help_panel="{{ subcommand.panel }}") +def {{ name | replace('-', '_') }}( + {%- for arg in subcommand.args %} + {{ arg.snake_case }}: Annotated[{{ arg.type.split('|')[0] }}, typer.Option("--{{ arg.name }}", rich_help_panel="Arguments", show_default={{ 'default' in arg }}, help="{{ arg.description }}")]{{ ' = ' + arg.default | string if 'default' in arg else ' = None' if not arg.required else '' }}, + {%- endfor %} + {%- for arg in subcommand.args if arg.arg_type == 'lro' %} + timeout: Annotated[int, typer.Option("--timeout", 
show_default=True, help="Timeout for the long running operation (seconds)")] = 60 * 5,
+    {%- endfor %}
+    no_preview: Annotated[bool, typer.Option("--no-preview", "--yes", "-y", rich_help_panel="Arguments", show_default=True, help="Preview the command before executing it. You will be asked to confirm the request before it is executed.")] = False,
+    quiet: Annotated[bool, typer.Option("--quiet", show_default=True, help="Whether to run in quiet mode. Sets the logging level to WARNING.")] = False,
+):
+    logger = logging.getLogger()
+    if quiet:
+        logger.setLevel(logging.WARNING)
+
+
+    {%- for group in subcommand.at_least_groups.values() %}
+    {%- set args_snake_case = [] %}
+    {%- set args_kebab_case = [] %}
+    {%- for arg in group %}
+    {%- set args_snake_case = args_snake_case.append(arg.snake_case) %}
+    {%- set args_kebab_case = args_kebab_case.append("--" + arg.name) %}
+    {%- endfor %}
+
+    if not any([{{args_snake_case|join(', ')}}]):
+        raise typer.BadParameter("At least one of the following arguments is required: {{args_kebab_case|join(', ')}}")
+    {%- endfor %}
+
+    {%- for group in subcommand.mut_groups.values() %}
+    {%- set args_snake_case = [] %}
+    {%- set args_kebab_case = [] %}
+    {%- for arg in group %}
+    {%- set args_snake_case = args_snake_case.append(arg.snake_case) %}
+    {%- set args_kebab_case = args_kebab_case.append("--" + arg.name) %}
+    {%- endfor %}
+
+    if sum(arg is not None for arg in [{{args_snake_case|join(', ')}}]) > 1:
+        raise typer.BadParameter("At most one of the following arguments is allowed: {{args_kebab_case|join(', ')}}")
+    {%- endfor %}
+
+
+
+    response = {{ module_name | replace('-', '_') }}_{{ name | replace('-', '_') }}(
+    {%- for arg in subcommand.args %}
+        {{ arg.snake_case }}={{ arg.snake_case }},
+    {%- endfor %}
+    {%- for arg in subcommand.args if arg.arg_type == 'lro' %}
+        timeout=timeout,
+    {%- endfor %}
+        preview=not no_preview,
+    )
+
+    try:
+        content = response.json()
+    except json.JSONDecodeError:
+        content = response.text
+
+    output = {
+        "url": response.url,
"method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output +{%- endfor %} +""".strip() + + +def generate_module(): + subdir = "commands" + commands_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), subdir) + commands = [f for f in os.listdir(commands_dir) if f.endswith(".yaml") or f.endswith(".yml")] + + modules = [] + for command in commands: + with open(os.path.join(commands_dir, command), "r") as f: + yaml_data = safe_load(f) + + try: + Command.model_validate(yaml_data) + except Exception as e: + print(f"Validation error in {command}: {e}") + raise e + # print("YAML Data:", yaml_data) + # print("Subcommands:", yaml_data.get("subcommands", {})) + command_name = yaml_data.get("command") + if command_name is None: + raise ValueError(f"Command not found in {command}!") + if "subcommands" in yaml_data: + subcommands = [] + for name, details in yaml_data["subcommands"].items(): + mut_groups = {} + + at_least_groups = {} + + for arg in details.get("args", []): + arg["snake_case"] = arg["name"].replace("-", "_") + arg["camel_case"] = arg["name"].split("-")[0] + "".join( + word.capitalize() for word in arg["name"].split("-")[1:] + ) + + for arg in details.get("args", []): + if arg.get("arg_group_type_id"): + if arg.get("arg_group_type_id").startswith("mut"): + if arg.get("arg_group_type_id") not in mut_groups: + mut_groups[arg.get("arg_group_type_id")] = [] + mut_groups[arg.get("arg_group_type_id")].append(arg) + elif arg.get("arg_group_type_id").startswith("at-least"): + if arg.get("arg_group_type_id") not in at_least_groups: + at_least_groups[arg.get("arg_group_type_id")] = [] + at_least_groups[arg.get("arg_group_type_id")].append(arg) + + # print(arg.get("custom_payload")) + # print("Args:", details.get("args", [])) + details["mut_groups"] = mut_groups + details["at_least_groups"] = 
at_least_groups + # print(f"Mut groups: {mut_groups}") + # print(f"At least groups: {at_least_groups}") + # print(f"\nSubcommand {name}:") + + subcommands.append(name) + template = Template(command_template) + command_module = template.render( + subcommands=yaml_data["subcommands"], module_name=command_name, base_url=BASE_URL + ) + with open(f"src/msfabricutils/cli/commands/{command_name.replace('-', '_')}.py", "w") as f: + f.write(command_module) + modules.append( + (command_name, [f"{command_name}_{subcommand}" for subcommand in subcommands]) + ) + + template = Template(api_template) + api_module = template.render( + subcommands=yaml_data["subcommands"], module_name=command_name, base_url=BASE_URL + ) + with open(f"src/msfabricutils/rest_api/{command_name.replace('-', '_')}.py", "w") as f: + f.write(api_module) + + with open("src/msfabricutils/rest_api/__init__.py", "w") as f: + for command, subcommands in modules: + for subcommand in subcommands: + f.write( + f"from .{command.replace('-', '_')} import {subcommand.replace('-', '_')}\n" + ) + f.write("\n") + f.write("__all__ = (\n") + for command, subcommands in modules: + for subcommand in subcommands: + f.write(f' "{subcommand.replace("-", "_")}",\n') + f.write(")\n") + + with open("src/msfabricutils/cli/commands/__init__.py", "w") as f: + for module, subcommands in sorted(modules): + f.write( + f"from .{module.replace('-', '_')} import app as {module.replace('-', '_')}_app\n" + ) + + f.write("\n") + f.write("COMMANDS = {\n") + for module, subcommands in sorted(modules): + f.write(f" '{module}': {module.replace('-', '_')}_app,\n") + f.write("}") + + for module, subcommands in sorted(modules): + with open(f"docs/core/fabric-api/{module.replace('-', '_')}.md", "w") as f: + f.write(f"# {module.replace('-', ' ').title()}\n\n") + f.write("!!! 
warning\n") + f.write(" The functions are not fully tested yet.\n") + f.write(" Use with caution.\n") + f.write( + " Please report any issues to the [GitHub repository](https://github.com/mrjsj/msfabricutils/issues).\n\n" + ) + f.write(f"::: msfabricutils.rest_api.{module.replace('-', '_')}\n\n") + + +if __name__ == "__main__": + generate_module() diff --git a/src/msfabricutils/cli/cli.py b/src/msfabricutils/cli/cli.py index fb237f0..c1e4823 100644 --- a/src/msfabricutils/cli/cli.py +++ b/src/msfabricutils/cli/cli.py @@ -1,255 +1,51 @@ -import argparse import logging -import sys -from typing import Callable - -from msfabricutils import __version__ -from msfabricutils.cli.lakehouse import create_lakehouse_command, delete_lakehouse_command -from msfabricutils.cli.notebook import ( - bulk_create_notebook_command, - create_notebook_command, - delete_notebook_command, +from importlib.metadata import PackageNotFoundError, version + +import typer +from typing_extensions import Annotated + +from msfabricutils.cli.commands import COMMANDS + +app = typer.Typer( + name="msfabricutils", + help="""[bold red]Authentication:[/bold red] +This tool uses automatic authentication. +You must be logged in to Azure (e.g., via 'az login') before using this tool. +[bold yellow]Important:[/bold yellow] +This tool is in active development. The commands and subcommands are subject to change. 
+""", + no_args_is_help=True, + rich_markup_mode="rich", ) -from msfabricutils.cli.workspace import create_workspace_command, delete_workspace_command - - -def create_parser(): - """Creates the main parser and subparsers.""" - examples = """ -Examples: - Create a workspace: - msfu workspace create --name "My Workspace" --description "My Workspace Description" --capacity-id "beefbeef-beef-beef-beef-beefbeefbeef" --on-conflict "update" - - Create a lakehouse: - msfu lakehouse create --name "My Lakehouse" --description "My Lakehouse Description" --workspace-id "beefbeef-beef-beef-beef-beefbeefbeef" --on-conflict "update" - - Create a single notebook: - msfu notebook create --path "path/to/notebook.Notebook" --workspace-id "beefbeef-beef-beef-beef-beefbeefbeef" - - Create multiple notebooks: - msfu notebook create --path "directory/of/notebooks" "path/to/notebook.Notebook" --workspace-id "beefbeef-beef-beef-beef-beefbeefbeef" - """ - - parser = argparse.ArgumentParser( - prog="msfabricutils", - description="Utility CLI for Microsoft Fabric REST API operations", - epilog=examples, - formatter_class=argparse.RawDescriptionHelpFormatter, - ) - parser.add_argument("--version", "-v", action="version", version=__version__) - parser.add_argument( - "--log-level", - "-l", - type=str, - default="INFO", - choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], - help="The log level to use. Defaults to INFO.", - ) - parser.add_argument( - "--show-azure-identity-logs", - action="store_true", - default=False, - help="Show Azure Identity logs. 
Defaults to False.", - ) - - subparsers = parser.add_subparsers(dest="command", help="Subcommands") - - register_workspace_commands(subparsers) - register_lakehouse_commands(subparsers) - register_notebook_commands(subparsers) - - return parser -def register_workspace_commands(subparsers: argparse._SubParsersAction): - """Registers the workspace commands.""" - workspace_parser = subparsers.add_parser("workspace", help="Workspace commands") - workspace_subparsers = workspace_parser.add_subparsers( - dest="workspace", help="Workspace commands" - ) - add_subcommand( - subparsers=workspace_subparsers, - name="create", - handler=create_workspace_command, - required_args=["--name"], - choices_args={"--on-conflict": ["error", "ignore", "update"]}, - optional_args=["--description", "--capacity-id"], - ) - add_subcommand( - subparsers=workspace_subparsers, - name="delete", - handler=delete_workspace_command, - mutually_exclusive_args=["--id", "--name"], - choices_args={"--on-conflict": ["error", "ignore"]}, - ) +def version_callback(value: bool): + if value: + try: + ver = version("msfabricutils") + except PackageNotFoundError: + ver = "0.0.0" + typer.echo(f"msfabricutils: v{ver}") + raise typer.Exit() -def register_lakehouse_commands(subparsers: argparse._SubParsersAction): - """Registers the lakehouse commands.""" - lakehouse_parser = subparsers.add_parser("lakehouse", help="Lakehouse commands") - lakehouse_subparsers = lakehouse_parser.add_subparsers( - dest="lakehouse", help="Lakehouse commands" - ) - - add_subcommand( - subparsers=lakehouse_subparsers, - name="create", - handler=create_lakehouse_command, - required_args=["--name", "--workspace-id"], - has_long_running_operation=True, - choices_args={ - "--on-conflict": ["error", "ignore", "update"], - }, - optional_args=["--description"], - flags=["--enable-schemas"], - ) - add_subcommand( - subparsers=lakehouse_subparsers, - name="delete", - handler=delete_lakehouse_command, - required_args=["--workspace-id"], - 
mutually_exclusive_args=["--id", "--name"], - choices_args={"--on-conflict": ["error", "ignore"]}, - ) - - -def register_notebook_commands(subparsers: argparse._SubParsersAction): - """Registers the notebook commands.""" - notebook_parser = subparsers.add_parser("notebook", help="Notebook commands") - notebook_subparsers = notebook_parser.add_subparsers(dest="notebook", help="Notebook commands") - - add_subcommand( - subparsers=notebook_subparsers, - name="create", - handler=create_notebook_command, - required_args=["--workspace-id", "--path"], - optional_args=["--name", "--description"], - has_long_running_operation=True, - choices_args={"--on-conflict": ["error", "ignore", "update"]}, - ) - add_subcommand( - subparsers=notebook_subparsers, - name="bulk-create", - handler=bulk_create_notebook_command, - required_args=["--workspace-id"], - nargs=["--path"], - has_long_running_operation=True, - choices_args={"--on-conflict": ["error", "ignore", "update"]}, - ) - add_subcommand( - subparsers=notebook_subparsers, - name="delete", - handler=delete_notebook_command, - required_args=["--workspace-id"], - mutually_exclusive_args=["--id", "--name"], - choices_args={"--on-conflict": ["error", "ignore"]}, - ) - - -def add_subcommand( - subparsers: argparse._SubParsersAction, - name: str, - handler: Callable, - required_args: list[str] | None = None, - nargs: list[str] | None = None, - choices_args: dict[str, list[str]] | None = None, - mutually_exclusive_args: list[str] | None = None, - optional_args: list[str] | None = None, - has_long_running_operation: bool = False, - flags: list[str] | None = None, +@app.callback() +def common_options( + version: Annotated[ + bool, + typer.Option("--version", "-v", help="Show version and exit", callback=version_callback), + ] = False, ): - """Adds a subcommand to the parser. - - Args: - subparsers (argparse._SubParsersAction): The subparsers to add the subcommand to. - name (str): The name of the subcommand. 
- handler (Callable): The handler function to call when the subcommand is invoked. - required_args (list[str] | None): The required arguments for the subcommand. - nargs (list[str] | None): The nargs arguments for the subcommand. - choices_args (dict[str, list[str]] | None): The choices arguments for the subcommand. The default choice is the first in the list. - optional_args (list[str] | None): The optional arguments for the subcommand. - """ - - if not required_args: - required_args = [] - - if not choices_args: - choices_args = {} - - if not optional_args: - optional_args = [] - - if not nargs: - nargs = [] - - if not flags: - flags = [] - - create_parser = subparsers.add_parser(name, help=f"{name.capitalize()} commands") - - for arg in required_args: - create_parser.add_argument( - arg, required=True, help=f"The {arg.lstrip('-')} of the {subparsers.dest} to {name}." - ) - - for arg in nargs: - create_parser.add_argument( - arg, nargs="+", help=f"The {arg.lstrip('-')} of the {subparsers.dest}s to {name}." - ) - - for arg in optional_args: - create_parser.add_argument( - arg, required=False, help=f"The {arg.lstrip('-')} of the {subparsers.dest} to {name}." - ) - - for flag in flags: - create_parser.add_argument( - flag, action="store_true", default=False, help=f"{flag.lstrip('-')} flag for the {subparsers.dest} to {name}." - ) - - if has_long_running_operation: - create_parser.add_argument( - "--no-wait", action="store_true", default=False, help="Do not wait for the long running operation to complete." - ) - - if mutually_exclusive_args: - argument_group = create_parser.add_mutually_exclusive_group(required=True) - for arg in mutually_exclusive_args: - argument_group.add_argument( - arg, help=f"The {arg.lstrip('-')} of the {subparsers.dest} to {name}." - ) - - for arg, choices in choices_args.items(): - create_parser.add_argument( - arg, - type=str, - choices=choices, - default=choices[0], - help=f"The {arg.lstrip('-')} of the {subparsers.dest} to {name}. 
Defaults to `{choices[0]}`.", - ) - - create_parser.set_defaults(func=handler) + pass def main(): - parser = create_parser() - args = parser.parse_args() - - logging.basicConfig( - level=args.log_level, - format='{"timestamp": "%(asctime)s", "level": "%(levelname)s", "message": "%(message)s"}', - ) - - try: - azure_log_level = args.log_level if args.show_azure_identity_logs else logging.CRITICAL - logging.getLogger("azure").setLevel(azure_log_level) - args.func(args) - except Exception as e: - logging.error(e) - sys.stderr.write(str(e)) - sys.exit(1) + logging.basicConfig(level=logging.INFO) + logging.getLogger("azure").setLevel(logging.CRITICAL) - sys.exit(0) + for command, sub_app in COMMANDS.items(): + app.add_typer(sub_app, name=command) + app() if __name__ == "__main__": diff --git a/src/msfabricutils/cli/commands/__init__.py b/src/msfabricutils/cli/commands/__init__.py new file mode 100644 index 0000000..d5d2414 --- /dev/null +++ b/src/msfabricutils/cli/commands/__init__.py @@ -0,0 +1,53 @@ +from .capacity import app as capacity_app +from .dashboard import app as dashboard_app +from .data_pipeline import app as data_pipeline_app +from .datamart import app as datamart_app +from .environment import app as environment_app +from .eventhouse import app as eventhouse_app +from .eventstream import app as eventstream_app +from .kql_dashboard import app as kql_dashboard_app +from .kql_database import app as kql_database_app +from .kql_queryset import app as kql_queryset_app +from .lakehouse import app as lakehouse_app +from .long_running_operation import app as long_running_operation_app +from .mirrored_database import app as mirrored_database_app +from .mirrored_warehouse import app as mirrored_warehouse_app +from .ml_experiment import app as ml_experiment_app +from .ml_model import app as ml_model_app +from .notebook import app as notebook_app +from .paginated_report import app as paginated_report_app +from .reflex import app as reflex_app +from .report import app 
as report_app +from .semantic_model import app as semantic_model_app +from .spark_job_definition import app as spark_job_definition_app +from .sql_endpoint import app as sql_endpoint_app +from .warehouse import app as warehouse_app +from .workspace import app as workspace_app + +COMMANDS = { + "capacity": capacity_app, + "dashboard": dashboard_app, + "data-pipeline": data_pipeline_app, + "datamart": datamart_app, + "environment": environment_app, + "eventhouse": eventhouse_app, + "eventstream": eventstream_app, + "kql-dashboard": kql_dashboard_app, + "kql-database": kql_database_app, + "kql-queryset": kql_queryset_app, + "lakehouse": lakehouse_app, + "long-running-operation": long_running_operation_app, + "mirrored-database": mirrored_database_app, + "mirrored-warehouse": mirrored_warehouse_app, + "ml-experiment": ml_experiment_app, + "ml-model": ml_model_app, + "notebook": notebook_app, + "paginated-report": paginated_report_app, + "reflex": reflex_app, + "report": report_app, + "semantic-model": semantic_model_app, + "spark-job-definition": spark_job_definition_app, + "sql-endpoint": sql_endpoint_app, + "warehouse": warehouse_app, + "workspace": workspace_app, +} diff --git a/src/msfabricutils/cli/commands/capacity.py b/src/msfabricutils/cli/commands/capacity.py new file mode 100644 index 0000000..391a660 --- /dev/null +++ b/src/msfabricutils/cli/commands/capacity.py @@ -0,0 +1,70 @@ +import json +import logging + +import typer +from typing_extensions import Annotated + +from msfabricutils.rest_api import capacity_list + +app = typer.Typer( + help="[bold]list[/bold]", + rich_markup_mode="rich", +) + + +@app.command(help="List capacities for a workspace.", rich_help_panel="Capacity") +def list( + continuation_token: Annotated[ + str, + typer.Option( + "--continuation-token", + rich_help_panel="Arguments", + show_default=False, + help="A token for retrieving the next page of results.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + 
"--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = capacity_list( + continuation_token=continuation_token, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output diff --git a/src/msfabricutils/cli/commands/dashboard.py b/src/msfabricutils/cli/commands/dashboard.py new file mode 100644 index 0000000..b07a04c --- /dev/null +++ b/src/msfabricutils/cli/commands/dashboard.py @@ -0,0 +1,80 @@ +import json +import logging + +import typer +from typing_extensions import Annotated + +from msfabricutils.rest_api import dashboard_list + +app = typer.Typer( + help="[bold]list[/bold]", + rich_markup_mode="rich", +) + + +@app.command(help="List dashboards for a workspace.", rich_help_panel="Dashboard") +def list( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to list dashboards for.", + ), + ], + continuation_token: Annotated[ + str, + typer.Option( + "--continuation-token", + rich_help_panel="Arguments", + show_default=False, + help="A token for retrieving the next page of results.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", 
+ show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = dashboard_list( + workspace_id=workspace_id, + continuation_token=continuation_token, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output diff --git a/src/msfabricutils/cli/commands/data_pipeline.py b/src/msfabricutils/cli/commands/data_pipeline.py new file mode 100644 index 0000000..4c4becf --- /dev/null +++ b/src/msfabricutils/cli/commands/data_pipeline.py @@ -0,0 +1,410 @@ +import json +import logging + +import typer +from typing_extensions import Annotated + +from msfabricutils.rest_api import ( + data_pipeline_create, + data_pipeline_delete, + data_pipeline_get, + data_pipeline_list, + data_pipeline_update, +) + +app = typer.Typer( + help="[bold]create, get, list, update, delete[/bold]", + rich_markup_mode="rich", +) + + +@app.command(help="Create a data pipeline.", rich_help_panel="Data Pipeline") +def create( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to create the data pipeline in.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the data pipeline.", + ), + ], + description: Annotated[ + 
str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the data pipeline.", + ), + ] = None, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = data_pipeline_create( + workspace_id=workspace_id, + display_name=display_name, + description=description, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Get a data pipeline.", rich_help_panel="Data Pipeline") +def get( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get the data pipeline from.", + ), + ], + data_pipeline_id: Annotated[ + str, + typer.Option( + "--data-pipeline-id", + rich_help_panel="Arguments", + show_default=False, 
+ help="The id of the data pipeline to get.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = data_pipeline_get( + workspace_id=workspace_id, + data_pipeline_id=data_pipeline_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="List data pipelines for a workspace.", rich_help_panel="Data Pipeline") +def list( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to list data pipelines for.", + ), + ], + continuation_token: Annotated[ + str, + typer.Option( + "--continuation-token", + rich_help_panel="Arguments", + show_default=False, + help="A token for retrieving the next page of results.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = data_pipeline_list( + workspace_id=workspace_id, + continuation_token=continuation_token, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Update a data pipeline.", rich_help_panel="Data Pipeline") +def update( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to update the data pipeline in.", + ), + ], + data_pipeline_id: Annotated[ + str, + typer.Option( + "--data-pipeline-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the data pipeline to update.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the data pipeline.", + ), + ] = None, + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the data pipeline.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + if not any([display_name, description]): + raise typer.BadParameter( + "At least one of the following arguments is required: --display-name, --description" + ) + + response = data_pipeline_update( + workspace_id=workspace_id, + data_pipeline_id=data_pipeline_id, + display_name=display_name, + description=description, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Delete a data pipeline.", rich_help_panel="Data Pipeline") +def delete( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to delete the data pipeline from.", + ), + ], + data_pipeline_id: Annotated[ + str, + typer.Option( + "--data-pipeline-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the data pipeline to delete.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = data_pipeline_delete( + workspace_id=workspace_id, + data_pipeline_id=data_pipeline_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output diff --git a/src/msfabricutils/cli/commands/datamart.py b/src/msfabricutils/cli/commands/datamart.py new file mode 100644 index 0000000..c217190 --- /dev/null +++ b/src/msfabricutils/cli/commands/datamart.py @@ -0,0 +1,80 @@ +import json +import logging + +import typer +from typing_extensions import Annotated + +from msfabricutils.rest_api import datamart_list + +app = typer.Typer( + help="[bold]list[/bold]", + rich_markup_mode="rich", +) + + +@app.command(help="List datamarts for a workspace.", rich_help_panel="Datamart") +def list( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to list datamarts for.", + ), + ], + continuation_token: Annotated[ + str, + typer.Option( + "--continuation-token", + rich_help_panel="Arguments", + show_default=False, + help="A token for retrieving the next page of results.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = datamart_list( + workspace_id=workspace_id, + continuation_token=continuation_token, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output diff --git a/src/msfabricutils/cli/commands/environment.py b/src/msfabricutils/cli/commands/environment.py new file mode 100644 index 0000000..de75372 --- /dev/null +++ b/src/msfabricutils/cli/commands/environment.py @@ -0,0 +1,1168 @@ +import json +import logging + +import typer +from typing_extensions import Annotated + +from msfabricutils.rest_api import ( + environment_cancel_publish, + environment_create, + environment_delete, + environment_delete_staging_library, + environment_get, + environment_get_published_libraries, + environment_get_spark_compute_published_settings, + environment_get_spark_compute_staging_settings, + environment_get_staging_libraries, + environment_list, + environment_publish, + environment_update, + environment_update_spark_compute_staging_settings, + environment_upload_staging_library, +) + +app = typer.Typer( + help="[bold]create, get, list, update, delete[/bold]", + rich_markup_mode="rich", +) + + +@app.command(help="Create an environment.", rich_help_panel="Environment") +def create( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to create the environment in.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display 
name of the environment.", + ), + ], + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the environment.", + ), + ] = None, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = environment_create( + workspace_id=workspace_id, + display_name=display_name, + description=description, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Get an environment.", rich_help_panel="Environment") +def get( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get the environment from.", + ), + ], + environment_id: Annotated[ + str, + typer.Option( + "--environment-id", 
+ rich_help_panel="Arguments", + show_default=False, + help="The id of the environment to get.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = environment_get( + workspace_id=workspace_id, + environment_id=environment_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="List environments for a workspace.", rich_help_panel="Environment") +def list( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to list environments for.", + ), + ], + continuation_token: Annotated[ + str, + typer.Option( + "--continuation-token", + rich_help_panel="Arguments", + show_default=False, + help="A token for retrieving the next page of results.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. 
You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = environment_list( + workspace_id=workspace_id, + continuation_token=continuation_token, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Update an environment.", rich_help_panel="Environment") +def update( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to update the environment in.", + ), + ], + environment_id: Annotated[ + str, + typer.Option( + "--environment-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the environment to update.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the environment.", + ), + ] = None, + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the environment.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. 
You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + if not any([display_name, description]): + raise typer.BadParameter( + "At least one of the following arguments is required: --display-name, --description" + ) + + response = environment_update( + workspace_id=workspace_id, + environment_id=environment_id, + display_name=display_name, + description=description, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Delete an environment.", rich_help_panel="Environment") +def delete( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to delete the environment from.", + ), + ], + environment_id: Annotated[ + str, + typer.Option( + "--environment-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the environment to delete.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = environment_delete( + workspace_id=workspace_id, + environment_id=environment_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command( + help="Get spark compute published settings for an environment.", rich_help_panel="Spark Compute" +) +def get_spark_compute_published_settings( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get the spark compute published settings for.", + ), + ], + environment_id: Annotated[ + str, + typer.Option( + "--environment-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the environment to get the spark compute published settings for.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = environment_get_spark_compute_published_settings( + workspace_id=workspace_id, + environment_id=environment_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command( + help="Get spark compute staging settings for an environment.", rich_help_panel="Spark Compute" +) +def get_spark_compute_staging_settings( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get the spark compute staging settings for.", + ), + ], + environment_id: Annotated[ + str, + typer.Option( + "--environment-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the environment to get the spark compute staging settings for.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = environment_get_spark_compute_staging_settings( + workspace_id=workspace_id, + environment_id=environment_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command( + help="Update spark compute staging settings for an environment.", + rich_help_panel="Spark Compute", +) +def update_spark_compute_staging_settings( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to update the spark compute staging settings for.", + ), + ], + environment_id: Annotated[ + str, + typer.Option( + "--environment-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the environment to update the spark compute staging settings for.", + ), + ], + instance_pool_name: Annotated[ + str, + typer.Option( + "--instance-pool-name", + rich_help_panel="Arguments", + show_default=False, + help="The name of the instance pool to use for Spark Compute settings.", + ), + ] = None, + instance_pool_type: Annotated[ + str, + typer.Option( + "--instance-pool-type", + rich_help_panel="Arguments", + show_default=False, + help="The type of the instance pool to use for Spark Compute settings.", + ), + ] = None, + driver_cores: Annotated[ + int, + typer.Option( + "--driver-cores", + rich_help_panel="Arguments", + show_default=False, + help="The number of cores to use for the driver.", + ), + ] = None, + driver_memory: Annotated[ + str, + typer.Option( + "--driver-memory", + 
rich_help_panel="Arguments", + show_default=False, + help="The memory to use for the driver.", + ), + ] = None, + executor_cores: Annotated[ + int, + typer.Option( + "--executor-cores", + rich_help_panel="Arguments", + show_default=False, + help="The number of cores to use for the executors.", + ), + ] = None, + executor_memory: Annotated[ + str, + typer.Option( + "--executor-memory", + rich_help_panel="Arguments", + show_default=False, + help="The memory to use for the executors.", + ), + ] = None, + dynamic_executor_allocation_enabled: Annotated[ + bool, + typer.Option( + "--dynamic-executor-allocation-enabled", + rich_help_panel="Arguments", + show_default=False, + help="Whether to enable dynamic executor allocation.", + ), + ] = None, + min_executors: Annotated[ + int, + typer.Option( + "--min-executors", + rich_help_panel="Arguments", + show_default=False, + help="The minimum number of executors to use for dynamic executor allocation.", + ), + ] = None, + max_executors: Annotated[ + int, + typer.Option( + "--max-executors", + rich_help_panel="Arguments", + show_default=False, + help="The maximum number of executors to use for dynamic executor allocation.", + ), + ] = None, + spark_acls_enable: Annotated[ + str, + typer.Option( + "--spark-acls-enable", + rich_help_panel="Arguments", + show_default=False, + help="Whether to enable Spark ACLs.", + ), + ] = None, + spark_admin_acls: Annotated[ + str, + typer.Option( + "--spark-admin-acls", + rich_help_panel="Arguments", + show_default=False, + help="The admin ACLs to use for Spark.", + ), + ] = None, + runtime_version: Annotated[ + str, + typer.Option( + "--runtime-version", + rich_help_panel="Arguments", + show_default=False, + help="The runtime version to use for Spark Compute settings.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. 
You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = environment_update_spark_compute_staging_settings( + workspace_id=workspace_id, + environment_id=environment_id, + instance_pool_name=instance_pool_name, + instance_pool_type=instance_pool_type, + driver_cores=driver_cores, + driver_memory=driver_memory, + executor_cores=executor_cores, + executor_memory=executor_memory, + dynamic_executor_allocation_enabled=dynamic_executor_allocation_enabled, + min_executors=min_executors, + max_executors=max_executors, + spark_acls_enable=spark_acls_enable, + spark_admin_acls=spark_admin_acls, + runtime_version=runtime_version, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Get published libraries for an environment.", rich_help_panel="Libraries") +def get_published_libraries( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get the published libraries for.", + ), + ], + environment_id: Annotated[ + str, + typer.Option( + "--environment-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the environment to get the published libraries for.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + 
help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = environment_get_published_libraries( + workspace_id=workspace_id, + environment_id=environment_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Get staging libraries for an environment.", rich_help_panel="Libraries") +def get_staging_libraries( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get the staging libraries for.", + ), + ], + environment_id: Annotated[ + str, + typer.Option( + "--environment-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the environment to get the staging libraries for.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = environment_get_staging_libraries( + workspace_id=workspace_id, + environment_id=environment_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Delete a staging library for an environment.", rich_help_panel="Libraries") +def delete_staging_library( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to delete the staging library from.", + ), + ], + environment_id: Annotated[ + str, + typer.Option( + "--environment-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the environment to delete the staging library from.", + ), + ], + library_to_delete: Annotated[ + str, + typer.Option( + "--library-to-delete", + rich_help_panel="Arguments", + show_default=False, + help="The library file to delete. Must include the file extension.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = environment_delete_staging_library( + workspace_id=workspace_id, + environment_id=environment_id, + library_to_delete=library_to_delete, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Upload a staging library for an environment.", rich_help_panel="Libraries") +def upload_staging_library( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to upload the staged library to.", + ), + ], + environment_id: Annotated[ + str, + typer.Option( + "--environment-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the environment to upload the staging library to.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = environment_upload_staging_library( + workspace_id=workspace_id, + environment_id=environment_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Publish an environment.", rich_help_panel="Environment") +def publish( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to publish the environment for.", + ), + ], + environment_id: Annotated[ + str, + typer.Option( + "--environment-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the environment to publish.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = environment_publish( + workspace_id=workspace_id, + environment_id=environment_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Cancel a publish operation for an environment.", rich_help_panel="Environment") +def cancel_publish( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to cancel the publish operation for.", + ), + ], + environment_id: Annotated[ + str, + typer.Option( + "--environment-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the environment to cancel the publish operation for.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = environment_cancel_publish( + workspace_id=workspace_id, + environment_id=environment_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output diff --git a/src/msfabricutils/cli/commands/eventhouse.py b/src/msfabricutils/cli/commands/eventhouse.py new file mode 100644 index 0000000..d6e895d --- /dev/null +++ b/src/msfabricutils/cli/commands/eventhouse.py @@ -0,0 +1,622 @@ +import json +import logging + +import typer +from typing_extensions import Annotated + +from msfabricutils.rest_api import ( + eventhouse_create, + eventhouse_delete, + eventhouse_get, + eventhouse_get_definition, + eventhouse_list, + eventhouse_update, + eventhouse_update_definition, +) + +app = typer.Typer( + help="[bold]create, get, list, update, delete[/bold]", + rich_markup_mode="rich", +) + + +@app.command(help="Create an eventhouse.", rich_help_panel="Eventhouse") +def create( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to create the eventhouse in.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the eventhouse.", + ), + ], + eventhouse_path: Annotated[ + str, + typer.Option( + "--eventhouse-path", + rich_help_panel="Arguments", + show_default=False, + help="The path to the eventhouse to load content from.", + ), + ], + description: Annotated[ + str, + typer.Option( + "--description", + 
rich_help_panel="Arguments", + show_default=False, + help="The description of the eventhouse.", + ), + ] = None, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = eventhouse_create( + workspace_id=workspace_id, + display_name=display_name, + eventhouse_path=eventhouse_path, + description=description, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Get an eventhouse.", rich_help_panel="Eventhouse") +def get( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get the eventhouse from.", + ), + ], + eventhouse_id: Annotated[ + str, + typer.Option( + "--eventhouse-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the 
eventhouse to get.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = eventhouse_get( + workspace_id=workspace_id, + eventhouse_id=eventhouse_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="List eventhouses for a workspace.", rich_help_panel="Eventhouse") +def list( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to list eventhouses for.", + ), + ], + continuation_token: Annotated[ + str, + typer.Option( + "--continuation-token", + rich_help_panel="Arguments", + show_default=False, + help="A token for retrieving the next page of results.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = eventhouse_list( + workspace_id=workspace_id, + continuation_token=continuation_token, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Update an eventhouse.", rich_help_panel="Eventhouse") +def update( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to update.", + ), + ], + eventhouse_id: Annotated[ + str, + typer.Option( + "--eventhouse-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the eventhouse to update.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the eventhouse.", + ), + ] = None, + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the eventhouse.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + if not any([display_name, description]): + raise typer.BadParameter( + "At least one of the following arguments is required: --display-name, --description" + ) + + response = eventhouse_update( + workspace_id=workspace_id, + eventhouse_id=eventhouse_id, + display_name=display_name, + description=description, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Delete an eventhouse.", rich_help_panel="Eventhouse") +def delete( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to delete.", + ), + ], + eventhouse_id: Annotated[ + str, + typer.Option( + "--eventhouse-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the eventhouse to delete.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = eventhouse_delete( + workspace_id=workspace_id, + eventhouse_id=eventhouse_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Get the definition of an eventhouse.", rich_help_panel="EventhouseDefinition") +def get_definition( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get the eventhouse definition from.", + ), + ], + eventhouse_id: Annotated[ + str, + typer.Option( + "--eventhouse-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the eventhouse to get the definition from.", + ), + ], + format: Annotated[ + str, + typer.Option( + "--format", + rich_help_panel="Arguments", + show_default=False, + help='The format of the Eventhouse definition. Supported format is "eventhouse".', + ), + ] = None, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. 
You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = eventhouse_get_definition( + workspace_id=workspace_id, + eventhouse_id=eventhouse_id, + format=format, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Update the definition of an eventhouse.", rich_help_panel="EventhouseDefinition") +def update_definition( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to update.", + ), + ], + eventhouse_id: Annotated[ + str, + typer.Option( + "--eventhouse-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the eventhouse to update.", + ), + ], + eventhouse_path: Annotated[ + str, + typer.Option( + "--eventhouse-path", + rich_help_panel="Arguments", + show_default=False, + help="The path to the eventhouse to load content from.", + ), + ], + update_metadata: Annotated[ + bool, + typer.Option( + "--update-metadata", + rich_help_panel="Arguments", + show_default=True, + help="When set to true, the item's metadata is updated using the metadata in the .platform file.", + ), + ] = False, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + 
), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = eventhouse_update_definition( + workspace_id=workspace_id, + eventhouse_id=eventhouse_id, + eventhouse_path=eventhouse_path, + update_metadata=update_metadata, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output diff --git a/src/msfabricutils/cli/commands/eventstream.py b/src/msfabricutils/cli/commands/eventstream.py new file mode 100644 index 0000000..94beecd --- /dev/null +++ b/src/msfabricutils/cli/commands/eventstream.py @@ -0,0 +1,624 @@ +import json +import logging + +import typer +from typing_extensions import Annotated + +from msfabricutils.rest_api import ( + eventstream_create, + eventstream_delete, + eventstream_get, + eventstream_get_definition, + eventstream_list, + eventstream_update, + eventstream_update_definition, +) + +app = typer.Typer( + help="[bold]create, get, list, update, delete[/bold]", + rich_markup_mode="rich", +) + + +@app.command(help="Create an eventstream.", 
rich_help_panel="Eventstream") +def create( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to create the eventstream in.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the eventstream.", + ), + ], + eventstream_path: Annotated[ + str, + typer.Option( + "--eventstream-path", + rich_help_panel="Arguments", + show_default=False, + help="The path to the eventstream to load content from.", + ), + ], + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the eventstream.", + ), + ] = None, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = eventstream_create( + workspace_id=workspace_id, + display_name=display_name, + eventstream_path=eventstream_path, + description=description, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Get an eventstream.", rich_help_panel="Eventstream") +def get( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get the eventstream from.", + ), + ], + eventstream_id: Annotated[ + str, + typer.Option( + "--eventstream-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the eventstream to get.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = eventstream_get( + workspace_id=workspace_id, + eventstream_id=eventstream_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="List eventstreams for a workspace.", rich_help_panel="Eventstream") +def list( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to list eventstreams for.", + ), + ], + continuation_token: Annotated[ + str, + typer.Option( + "--continuation-token", + rich_help_panel="Arguments", + show_default=False, + help="A token for retrieving the next page of results.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = eventstream_list( + workspace_id=workspace_id, + continuation_token=continuation_token, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Update an eventstream.", rich_help_panel="Eventstream") +def update( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to update.", + ), + ], + eventstream_id: Annotated[ + str, + typer.Option( + "--eventstream-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the eventstream to update.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the eventstream.", + ), + ] = None, + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the eventstream.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + if not any([display_name, description]): + raise typer.BadParameter( + "At least one of the following arguments is required: --display-name, --description" + ) + + response = eventstream_update( + workspace_id=workspace_id, + eventstream_id=eventstream_id, + display_name=display_name, + description=description, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Delete an eventstream.", rich_help_panel="Eventstream") +def delete( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to delete.", + ), + ], + eventstream_id: Annotated[ + str, + typer.Option( + "--eventstream-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the eventstream to delete.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = eventstream_delete( + workspace_id=workspace_id, + eventstream_id=eventstream_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Get the definition of an eventstream.", rich_help_panel="EventstreamDefinition") +def get_definition( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get the eventstream definition from.", + ), + ], + eventstream_id: Annotated[ + str, + typer.Option( + "--eventstream-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the eventstream to get the definition from.", + ), + ], + format: Annotated[ + str, + typer.Option( + "--format", + rich_help_panel="Arguments", + show_default=False, + help='The format of the Eventstream definition. Supported format is "eventstream".', + ), + ] = None, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. 
You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = eventstream_get_definition( + workspace_id=workspace_id, + eventstream_id=eventstream_id, + format=format, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command( + help="Update the definition of an eventstream.", rich_help_panel="EventstreamDefinition" +) +def update_definition( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to update.", + ), + ], + eventstream_id: Annotated[ + str, + typer.Option( + "--eventstream-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the eventstream to update.", + ), + ], + eventstream_path: Annotated[ + str, + typer.Option( + "--eventstream-path", + rich_help_panel="Arguments", + show_default=False, + help="The path to the eventstream to load content from.", + ), + ], + update_metadata: Annotated[ + bool, + typer.Option( + "--update-metadata", + rich_help_panel="Arguments", + show_default=True, + help="When set to true, the item's metadata is updated using the metadata in the .platform file.", + ), + ] = False, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running 
operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = eventstream_update_definition( + workspace_id=workspace_id, + eventstream_id=eventstream_id, + eventstream_path=eventstream_path, + update_metadata=update_metadata, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output diff --git a/src/msfabricutils/cli/commands/kql_dashboard.py b/src/msfabricutils/cli/commands/kql_dashboard.py new file mode 100644 index 0000000..43c9943 --- /dev/null +++ b/src/msfabricutils/cli/commands/kql_dashboard.py @@ -0,0 +1,616 @@ +import json +import logging + +import typer +from typing_extensions import Annotated + +from msfabricutils.rest_api import ( + kql_dashboard_create, + kql_dashboard_delete, + kql_dashboard_get, + kql_dashboard_get_definition, + kql_dashboard_list, + kql_dashboard_update, + kql_dashboard_update_definition, +) + +app = typer.Typer( + help="[bold]create, get, list, update, delete[/bold]", + rich_markup_mode="rich", +) + + 
+@app.command(help="Create a kql dashboard.", rich_help_panel="KqlDashboard") +def create( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to create the kql dashboard in.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the kql dashboard.", + ), + ], + kql_dashboard_path: Annotated[ + str, + typer.Option( + "--kql-dashboard-path", + rich_help_panel="Arguments", + show_default=False, + help="The path to the kql dashboard to load content from.", + ), + ], + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the kql dashboard.", + ), + ] = None, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = kql_dashboard_create( + workspace_id=workspace_id, + display_name=display_name, + kql_dashboard_path=kql_dashboard_path, + description=description, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Get a kql dashboard.", rich_help_panel="KqlDashboard") +def get( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get the kql dashboard from.", + ), + ], + kql_dashboard_id: Annotated[ + str, + typer.Option( + "--kql-dashboard-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the kql dashboard to get.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = kql_dashboard_get( + workspace_id=workspace_id, + kql_dashboard_id=kql_dashboard_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="List kql dashboards for a workspace.", rich_help_panel="KqlDashboard") +def list( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to list kql dashboards for.", + ), + ], + continuation_token: Annotated[ + str, + typer.Option( + "--continuation-token", + rich_help_panel="Arguments", + show_default=False, + help="A token for retrieving the next page of results.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = kql_dashboard_list( + workspace_id=workspace_id, + continuation_token=continuation_token, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Update a kql dashboard.", rich_help_panel="KqlDashboard") +def update( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to update.", + ), + ], + kql_dashboard_id: Annotated[ + str, + typer.Option( + "--kql-dashboard-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the kql dashboard to update.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the kql dashboard.", + ), + ] = None, + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the kql dashboard.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + if not any([display_name, description]): + raise typer.BadParameter( + "At least one of the following arguments is required: --display-name, --description" + ) + + response = kql_dashboard_update( + workspace_id=workspace_id, + kql_dashboard_id=kql_dashboard_id, + display_name=display_name, + description=description, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Delete a kql dashboard.", rich_help_panel="KqlDashboard") +def delete( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to delete.", + ), + ], + kql_dashboard_id: Annotated[ + str, + typer.Option( + "--kql-dashboard-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the kql dashboard to delete.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = kql_dashboard_delete( + workspace_id=workspace_id, + kql_dashboard_id=kql_dashboard_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command( + help="Get the definition of a kql dashboard.", rich_help_panel="KqlDashboardDefinition" +) +def get_definition( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get the kql dashboard definition from.", + ), + ], + kql_dashboard_id: Annotated[ + str, + typer.Option( + "--kql-dashboard-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the kql dashboard to get the definition from.", + ), + ], + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = kql_dashboard_get_definition( + workspace_id=workspace_id, + kql_dashboard_id=kql_dashboard_id, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command( + help="Update the definition of a kql dashboard.", rich_help_panel="KqlDashboardDefinition" +) +def update_definition( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to update.", + ), + ], + kql_dashboard_id: Annotated[ + str, + typer.Option( + "--kql-dashboard-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the kql dashboard to update.", + ), + ], + kql_dashboard_path: Annotated[ + str, + typer.Option( + "--kql-dashboard-path", + rich_help_panel="Arguments", + show_default=False, + help="The path to the kql dashboard to load content from.", + ), + ], + update_metadata: Annotated[ + bool, + typer.Option( + "--update-metadata", + rich_help_panel="Arguments", + show_default=True, + help="When set to true, the item's metadata is updated using the metadata in the .platform file.", + ), + ] = False, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + 
no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = kql_dashboard_update_definition( + workspace_id=workspace_id, + kql_dashboard_id=kql_dashboard_id, + kql_dashboard_path=kql_dashboard_path, + update_metadata=update_metadata, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output diff --git a/src/msfabricutils/cli/commands/kql_database.py b/src/msfabricutils/cli/commands/kql_database.py new file mode 100644 index 0000000..6eafb96 --- /dev/null +++ b/src/msfabricutils/cli/commands/kql_database.py @@ -0,0 +1,614 @@ +import json +import logging + +import typer +from typing_extensions import Annotated + +from msfabricutils.rest_api import ( + kql_database_create, + kql_database_delete, + kql_database_get, + kql_database_get_definition, + kql_database_list, + kql_database_update, + kql_database_update_definition, +) + +app = typer.Typer( + help="[bold]create, get, list, update, delete[/bold]", + rich_markup_mode="rich", +) + + +@app.command(help="Create a kql database.", rich_help_panel="KqlDatabase") +def create( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + 
show_default=False, + help="The id of the workspace to create the kql database in.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the kql database.", + ), + ], + kql_database_path: Annotated[ + str, + typer.Option( + "--kql-database-path", + rich_help_panel="Arguments", + show_default=False, + help="The path to the kql database to load content from.", + ), + ], + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the kql database.", + ), + ] = None, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = kql_database_create( + workspace_id=workspace_id, + display_name=display_name, + kql_database_path=kql_database_path, + description=description, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Get a kql database.", rich_help_panel="KqlDatabase") +def get( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get the kql database from.", + ), + ], + kql_database_id: Annotated[ + str, + typer.Option( + "--kql-database-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the kql database to get.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = kql_database_get( + workspace_id=workspace_id, + kql_database_id=kql_database_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="List kql databases for a workspace.", rich_help_panel="KqlDatabase") +def list( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to list kql databases for.", + ), + ], + continuation_token: Annotated[ + str, + typer.Option( + "--continuation-token", + rich_help_panel="Arguments", + show_default=False, + help="A token for retrieving the next page of results.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = kql_database_list( + workspace_id=workspace_id, + continuation_token=continuation_token, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Update a kql database.", rich_help_panel="KqlDatabase") +def update( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to update.", + ), + ], + kql_database_id: Annotated[ + str, + typer.Option( + "--kql-database-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the kql database to update.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the kql database.", + ), + ] = None, + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the kql database.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + if not any([display_name, description]): + raise typer.BadParameter( + "At least one of the following arguments is required: --display-name, --description" + ) + + response = kql_database_update( + workspace_id=workspace_id, + kql_database_id=kql_database_id, + display_name=display_name, + description=description, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Delete a kql database.", rich_help_panel="KqlDatabase") +def delete( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to delete.", + ), + ], + kql_database_id: Annotated[ + str, + typer.Option( + "--kql-database-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the kql database to delete.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = kql_database_delete( + workspace_id=workspace_id, + kql_database_id=kql_database_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Get the definition of a kql database.", rich_help_panel="KqlDatabaseDefinition") +def get_definition( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get the kql database definition from.", + ), + ], + kql_database_id: Annotated[ + str, + typer.Option( + "--kql-database-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the kql database to get the definition from.", + ), + ], + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = kql_database_get_definition( + workspace_id=workspace_id, + kql_database_id=kql_database_id, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command( + help="Update the definition of a kql database.", rich_help_panel="KqlDatabaseDefinition" +) +def update_definition( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to update.", + ), + ], + kql_database_id: Annotated[ + str, + typer.Option( + "--kql-database-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the kql database to update.", + ), + ], + kql_database_path: Annotated[ + str, + typer.Option( + "--kql-database-path", + rich_help_panel="Arguments", + show_default=False, + help="The path to the kql database to load content from.", + ), + ], + update_metadata: Annotated[ + bool, + typer.Option( + "--update-metadata", + rich_help_panel="Arguments", + show_default=True, + help="When set to true, the item's metadata is updated using the metadata in the .platform file.", + ), + ] = False, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: 
Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = kql_database_update_definition( + workspace_id=workspace_id, + kql_database_id=kql_database_id, + kql_database_path=kql_database_path, + update_metadata=update_metadata, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output diff --git a/src/msfabricutils/cli/commands/kql_queryset.py b/src/msfabricutils/cli/commands/kql_queryset.py new file mode 100644 index 0000000..990a07c --- /dev/null +++ b/src/msfabricutils/cli/commands/kql_queryset.py @@ -0,0 +1,614 @@ +import json +import logging + +import typer +from typing_extensions import Annotated + +from msfabricutils.rest_api import ( + kql_queryset_create, + kql_queryset_delete, + kql_queryset_get, + kql_queryset_get_definition, + kql_queryset_list, + kql_queryset_update, + kql_queryset_update_definition, +) + +app = typer.Typer( + help="[bold]create, get, list, update, delete[/bold]", + rich_markup_mode="rich", +) + + +@app.command(help="Create a kql queryset.", rich_help_panel="KqlQueryset") +def create( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + 
help="The id of the workspace to create the kql queryset in.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the kql queryset.", + ), + ], + kql_database_path: Annotated[ + str, + typer.Option( + "--kql-database-path", + rich_help_panel="Arguments", + show_default=False, + help="The path to the kql queryset to load content from.", + ), + ], + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the kql queryset.", + ), + ] = None, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = kql_queryset_create( + workspace_id=workspace_id, + display_name=display_name, + kql_database_path=kql_database_path, + description=description, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Get a kql queryset.", rich_help_panel="KqlQueryset") +def get( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get the kql queryset from.", + ), + ], + kql_queryset_id: Annotated[ + str, + typer.Option( + "--kql-queryset-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the kql queryset to get.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = kql_queryset_get( + workspace_id=workspace_id, + kql_queryset_id=kql_queryset_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="List kql querysets for a workspace.", rich_help_panel="KqlQueryset") +def list( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to list kql querysets for.", + ), + ], + continuation_token: Annotated[ + str, + typer.Option( + "--continuation-token", + rich_help_panel="Arguments", + show_default=False, + help="A token for retrieving the next page of results.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = kql_queryset_list( + workspace_id=workspace_id, + continuation_token=continuation_token, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Update a kql queryset.", rich_help_panel="KqlQueryset") +def update( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to update.", + ), + ], + kql_queryset_id: Annotated[ + str, + typer.Option( + "--kql-queryset-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the kql queryset to update.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the kql queryset.", + ), + ] = None, + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the kql queryset.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + if not any([display_name, description]): + raise typer.BadParameter( + "At least one of the following arguments is required: --display-name, --description" + ) + + response = kql_queryset_update( + workspace_id=workspace_id, + kql_queryset_id=kql_queryset_id, + display_name=display_name, + description=description, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Delete a kql queryset.", rich_help_panel="KqlQueryset") +def delete( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to delete.", + ), + ], + kql_queryset_id: Annotated[ + str, + typer.Option( + "--kql-queryset-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the kql queryset to delete.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = kql_queryset_delete( + workspace_id=workspace_id, + kql_queryset_id=kql_queryset_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Get the definition of a kql queryset.", rich_help_panel="KqlQuerysetDefinition") +def get_definition( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get the kql queryset definition from.", + ), + ], + kql_queryset_id: Annotated[ + str, + typer.Option( + "--kql-queryset-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the kql queryset to get the definition from.", + ), + ], + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = kql_queryset_get_definition( + workspace_id=workspace_id, + kql_queryset_id=kql_queryset_id, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command( + help="Update the definition of a kql queryset.", rich_help_panel="KqlQuerysetDefinition" +) +def update_definition( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to update.", + ), + ], + kql_queryset_id: Annotated[ + str, + typer.Option( + "--kql-queryset-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the kql queryset to update.", + ), + ], + kql_queryset_path: Annotated[ + str, + typer.Option( + "--kql-queryset-path", + rich_help_panel="Arguments", + show_default=False, + help="The path to the kql queryset to load content from.", + ), + ], + update_metadata: Annotated[ + bool, + typer.Option( + "--update-metadata", + rich_help_panel="Arguments", + show_default=True, + help="When set to true, the item's metadata is updated using the metadata in the .platform file.", + ), + ] = False, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: 
Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = kql_queryset_update_definition( + workspace_id=workspace_id, + kql_queryset_id=kql_queryset_id, + kql_queryset_path=kql_queryset_path, + update_metadata=update_metadata, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output diff --git a/src/msfabricutils/cli/commands/lakehouse.py b/src/msfabricutils/cli/commands/lakehouse.py new file mode 100644 index 0000000..036aded --- /dev/null +++ b/src/msfabricutils/cli/commands/lakehouse.py @@ -0,0 +1,832 @@ +import json +import logging +from typing import List + +import typer +from typing_extensions import Annotated + +from msfabricutils.rest_api import ( + lakehouse_create, + lakehouse_delete, + lakehouse_get, + lakehouse_list, + lakehouse_list_tables, + lakehouse_load_table, + lakehouse_run_background_job, + lakehouse_update, +) + +app = typer.Typer( + help="[bold]create, get, list, update, delete[/bold]", + rich_markup_mode="rich", +) + + +@app.command(help="Create a lakehouse.", rich_help_panel="Lakehouse") +def create( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + 
show_default=False, + help="The id of the workspace to create the lakehouse in.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the lakehouse.", + ), + ], + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the lakehouse.", + ), + ] = None, + enable_schemas: Annotated[ + bool, + typer.Option( + "--enable-schemas", + rich_help_panel="Arguments", + show_default=True, + help="Whether the lakehouse is schema enabled.", + ), + ] = False, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = lakehouse_create( + workspace_id=workspace_id, + display_name=display_name, + description=description, + enable_schemas=enable_schemas, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Get a lakehouse.", rich_help_panel="Lakehouse") +def get( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get the lakehouse from.", + ), + ], + lakehouse_id: Annotated[ + str, + typer.Option( + "--lakehouse-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the lakehouse to get.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = lakehouse_get( + workspace_id=workspace_id, + lakehouse_id=lakehouse_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="List lakehouses for a workspace.", rich_help_panel="Lakehouse") +def list( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to list lakehouses for.", + ), + ], + continuation_token: Annotated[ + str, + typer.Option( + "--continuation-token", + rich_help_panel="Arguments", + show_default=False, + help="A token for retrieving the next page of results.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = lakehouse_list( + workspace_id=workspace_id, + continuation_token=continuation_token, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Update a lakehouse.", rich_help_panel="Lakehouse") +def update( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to update.", + ), + ], + lakehouse_id: Annotated[ + str, + typer.Option( + "--lakehouse-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the lakehouse to update.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the lakehouse.", + ), + ] = None, + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the lakehouse.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + if not any([display_name, description]): + raise typer.BadParameter( + "At least one of the following arguments is required: --display-name, --description" + ) + + response = lakehouse_update( + workspace_id=workspace_id, + lakehouse_id=lakehouse_id, + display_name=display_name, + description=description, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Delete a lakehouse.", rich_help_panel="Lakehouse") +def delete( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to delete.", + ), + ], + lakehouse_id: Annotated[ + str, + typer.Option( + "--lakehouse-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the lakehouse to delete.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = lakehouse_delete( + workspace_id=workspace_id, + lakehouse_id=lakehouse_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Run on-demand table maintenance job instance.", rich_help_panel="Job") +def run_background_job( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to create a job for.", + ), + ], + lakehouse_id: Annotated[ + str, + typer.Option( + "--lakehouse-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the lakehouse to create a job for.", + ), + ], + job_type: Annotated[ + str, + typer.Option( + "--job-type", + rich_help_panel="Arguments", + show_default=False, + help='The type of the job to create. Must be "TableMaintenance".', + ), + ], + table_name: Annotated[ + str, + typer.Option( + "--table-name", + rich_help_panel="Arguments", + show_default=False, + help="The name of the table to run the job on.", + ), + ], + schema_name: Annotated[ + str, + typer.Option( + "--schema-name", + rich_help_panel="Arguments", + show_default=False, + help="The name of the schema to run the job on. 
Only applicable for schema enabled lakehouses.", + ), + ] = None, + v_order: Annotated[ + bool, + typer.Option( + "--v-order", + rich_help_panel="Arguments", + show_default=False, + help="If table should be v-ordered.", + ), + ] = None, + z_order_columns: Annotated[ + List[str], + typer.Option( + "--z-order-columns", + rich_help_panel="Arguments", + show_default=False, + help="List of columns to z-order by.", + ), + ] = None, + retention_period: Annotated[ + str, + typer.Option( + "--retention-period", + rich_help_panel="Arguments", + show_default=False, + help="Retention period in format d:hh:mm:ss. Overrides the default retention period.", + ), + ] = None, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = lakehouse_run_background_job( + workspace_id=workspace_id, + lakehouse_id=lakehouse_id, + job_type=job_type, + table_name=table_name, + schema_name=schema_name, + v_order=v_order, + z_order_columns=z_order_columns, + retention_period=retention_period, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="List tables in a lakehouse.", rich_help_panel="Table") +def list_tables( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to list tables for.", + ), + ], + lakehouse_id: Annotated[ + str, + typer.Option( + "--lakehouse-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the lakehouse to list tables for.", + ), + ], + continuation_token: Annotated[ + str, + typer.Option( + "--continuation-token", + rich_help_panel="Arguments", + show_default=False, + help="A token for retrieving the next page of results.", + ), + ] = None, + max_results: Annotated[ + int, + typer.Option( + "--max-results", + rich_help_panel="Arguments", + show_default=False, + help="The maximum number of results to return.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. 
You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = lakehouse_list_tables( + workspace_id=workspace_id, + lakehouse_id=lakehouse_id, + continuation_token=continuation_token, + max_results=max_results, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Load a table.", rich_help_panel="Table") +def load_table( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to load the table for.", + ), + ], + lakehouse_id: Annotated[ + str, + typer.Option( + "--lakehouse-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the lakehouse to load the table for.", + ), + ], + table_name: Annotated[ + str, + typer.Option( + "--table-name", + rich_help_panel="Arguments", + show_default=False, + help="The name of the table to load.", + ), + ], + relative_path: Annotated[ + str, + typer.Option( + "--relative-path", + rich_help_panel="Arguments", + show_default=False, + help="The relative path to the table to load.", + ), + ], + path_type: Annotated[ + str, + typer.Option( + "--path-type", + rich_help_panel="Arguments", + show_default=False, + help='The type of the path to load. 
Either "File" or "Folder".', + ), + ], + format: Annotated[ + str, + typer.Option( + "--format", + rich_help_panel="Arguments", + show_default=False, + help='The format of the files to load. Must be "Parquet" or "Csv".', + ), + ] = None, + header: Annotated[ + bool, + typer.Option( + "--header", + rich_help_panel="Arguments", + show_default=False, + help="Whether the file has a header row. Only applicable for csv files.", + ), + ] = None, + delimiter: Annotated[ + str, + typer.Option( + "--delimiter", + rich_help_panel="Arguments", + show_default=False, + help="The delimiter of the csv files. Only applicable for csv files.", + ), + ] = None, + mode: Annotated[ + str, + typer.Option( + "--mode", + rich_help_panel="Arguments", + show_default=False, + help='The mode to load the table in. Either "Overwrite" or "Append".', + ), + ] = None, + file_extension: Annotated[ + str, + typer.Option( + "--file-extension", + rich_help_panel="Arguments", + show_default=False, + help="The file extension of the files to load.", + ), + ] = None, + recursive: Annotated[ + bool, + typer.Option( + "--recursive", + rich_help_panel="Arguments", + show_default=False, + help="Whether to search data files recursively or not, when loading from a folder.", + ), + ] = None, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = lakehouse_load_table( + workspace_id=workspace_id, + lakehouse_id=lakehouse_id, + table_name=table_name, + relative_path=relative_path, + path_type=path_type, + format=format, + header=header, + delimiter=delimiter, + mode=mode, + file_extension=file_extension, + recursive=recursive, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output diff --git a/src/msfabricutils/cli/commands/long_running_operation.py b/src/msfabricutils/cli/commands/long_running_operation.py new file mode 100644 index 0000000..44c75e9 --- /dev/null +++ b/src/msfabricutils/cli/commands/long_running_operation.py @@ -0,0 +1,136 @@ +import json +import logging + +import typer +from typing_extensions import Annotated + +from msfabricutils.rest_api import ( + long_running_operation_get_result, + long_running_operation_get_state, +) + +app = typer.Typer( + help="[bold]get-state, get-result[/bold]", + rich_markup_mode="rich", +) + + +@app.command( + help="Get the state of the long running operation.", rich_help_panel="Long Running Operation" +) +def get_state( + operation_id: Annotated[ + str, + typer.Option( + "--operation-id", + rich_help_panel="Arguments", + show_default=False, + help="The ID of the long running operation.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. 
You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = long_running_operation_get_state( + operation_id=operation_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command( + help="Get the result of the long running operation. Only available when the operation status is `Succeeded`.", + rich_help_panel="Long Running Operation", +) +def get_result( + operation_id: Annotated[ + str, + typer.Option( + "--operation-id", + rich_help_panel="Arguments", + show_default=False, + help="The ID of the long running operation.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = long_running_operation_get_result( + operation_id=operation_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output diff --git a/src/msfabricutils/cli/commands/mirrored_database.py b/src/msfabricutils/cli/commands/mirrored_database.py new file mode 100644 index 0000000..4a81abd --- /dev/null +++ b/src/msfabricutils/cli/commands/mirrored_database.py @@ -0,0 +1,617 @@ +import json +import logging + +import typer +from typing_extensions import Annotated + +from msfabricutils.rest_api import ( + mirrored_database_create, + mirrored_database_delete, + mirrored_database_get, + mirrored_database_get_definition, + mirrored_database_list, + mirrored_database_update, + mirrored_database_update_definition, +) + +app = typer.Typer( + help="[bold]create, get, list, update, delete[/bold]", + rich_markup_mode="rich", +) + + +@app.command(help="Create a mirrored database.", rich_help_panel="mirroredDatabase") +def create( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to create the mirrored database in.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the mirrored database.", + ), + ], + mirrored_database_path: Annotated[ + str, + typer.Option( + "--mirrored-database-path", + rich_help_panel="Arguments", + show_default=False, + help="The path to the mirrored database to load 
content from.", + ), + ], + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the mirrored database.", + ), + ] = None, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = mirrored_database_create( + workspace_id=workspace_id, + display_name=display_name, + mirrored_database_path=mirrored_database_path, + description=description, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Get a mirrored database.", rich_help_panel="mirroredDatabase") +def get( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get the mirrored database from.", + ), + ], + 
mirrored_database_id: Annotated[ + str, + typer.Option( + "--mirrored-database-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the mirrored database to get.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = mirrored_database_get( + workspace_id=workspace_id, + mirrored_database_id=mirrored_database_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="List mirrored databases for a workspace.", rich_help_panel="mirroredDatabase") +def list( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to list mirrored databases for.", + ), + ], + continuation_token: Annotated[ + str, + typer.Option( + "--continuation-token", + rich_help_panel="Arguments", + show_default=False, + help="A token for retrieving the next page of results.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. 
You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = mirrored_database_list( + workspace_id=workspace_id, + continuation_token=continuation_token, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Update a mirrored database.", rich_help_panel="mirroredDatabase") +def update( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to update.", + ), + ], + mirrored_database_id: Annotated[ + str, + typer.Option( + "--mirrored-database-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the mirrored database to update.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the mirrored database.", + ), + ] = None, + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the mirrored database.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. 
You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + if not any([display_name, description]): + raise typer.BadParameter( + "At least one of the following arguments is required: --display-name, --description" + ) + + response = mirrored_database_update( + workspace_id=workspace_id, + mirrored_database_id=mirrored_database_id, + display_name=display_name, + description=description, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Delete a mirrored database.", rich_help_panel="mirroredDatabase") +def delete( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to delete.", + ), + ], + mirrored_database_id: Annotated[ + str, + typer.Option( + "--mirrored-database-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the mirrored database to delete.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = mirrored_database_delete( + workspace_id=workspace_id, + mirrored_database_id=mirrored_database_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command( + help="Get the definition of a mirrored database.", rich_help_panel="mirroredDatabaseDefinition" +) +def get_definition( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get the mirrored database definition from.", + ), + ], + mirrored_database_id: Annotated[ + str, + typer.Option( + "--mirrored-database-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the mirrored database to get the definition from.", + ), + ], + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = mirrored_database_get_definition( + workspace_id=workspace_id, + mirrored_database_id=mirrored_database_id, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command( + help="Update the definition of a mirrored database.", + rich_help_panel="mirroredDatabaseDefinition", +) +def update_definition( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to update.", + ), + ], + mirrored_database_id: Annotated[ + str, + typer.Option( + "--mirrored-database-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the mirrored database to update.", + ), + ], + mirrored_database_path: Annotated[ + str, + typer.Option( + "--mirrored-database-path", + rich_help_panel="Arguments", + show_default=False, + help="The path to the mirrored database to load content from.", + ), + ], + update_metadata: Annotated[ + bool, + typer.Option( + "--update-metadata", + rich_help_panel="Arguments", + show_default=True, + help="When set to true, the item's metadata is updated using the metadata in the .platform file.", + ), + ] = False, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running 
operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = mirrored_database_update_definition( + workspace_id=workspace_id, + mirrored_database_id=mirrored_database_id, + mirrored_database_path=mirrored_database_path, + update_metadata=update_metadata, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output diff --git a/src/msfabricutils/cli/commands/mirrored_warehouse.py b/src/msfabricutils/cli/commands/mirrored_warehouse.py new file mode 100644 index 0000000..120199c --- /dev/null +++ b/src/msfabricutils/cli/commands/mirrored_warehouse.py @@ -0,0 +1,80 @@ +import json +import logging + +import typer +from typing_extensions import Annotated + +from msfabricutils.rest_api import mirrored_warehouse_list + +app = typer.Typer( + help="[bold]list[/bold]", + rich_markup_mode="rich", +) + + +@app.command(help="List mirrored warehouses for a workspace.", rich_help_panel="MirroredWarehouse") +def list( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to list mirrored warehouses for.", + 
), + ], + continuation_token: Annotated[ + str, + typer.Option( + "--continuation-token", + rich_help_panel="Arguments", + show_default=False, + help="A token for retrieving the next page of results.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = mirrored_warehouse_list( + workspace_id=workspace_id, + continuation_token=continuation_token, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output diff --git a/src/msfabricutils/cli/commands/ml_experiment.py b/src/msfabricutils/cli/commands/ml_experiment.py new file mode 100644 index 0000000..97ac9ba --- /dev/null +++ b/src/msfabricutils/cli/commands/ml_experiment.py @@ -0,0 +1,400 @@ +import json +import logging + +import typer +from typing_extensions import Annotated + +from msfabricutils.rest_api import ( + ml_experiment_create, + ml_experiment_delete, + ml_experiment_get, + ml_experiment_list, + ml_experiment_update, +) + +app = typer.Typer( + help="[bold]create, get, list, update, delete[/bold]", + rich_markup_mode="rich", +) + + +@app.command(help="Create a ml experiment.", rich_help_panel="Ml Experiment") +def create( + workspace_id: Annotated[ + str, + typer.Option( + 
"--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to create the ml experiment in.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the ml experiment.", + ), + ], + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the ml experiment.", + ), + ] = None, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = ml_experiment_create( + workspace_id=workspace_id, + display_name=display_name, + description=description, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Get a ml experiment.", rich_help_panel="Ml Experiment") +def get( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get the ml experiment from.", + ), + ], + ml_experiment_id: Annotated[ + str, + typer.Option( + "--ml-experiment-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the ml experiment to get.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = ml_experiment_get( + workspace_id=workspace_id, + ml_experiment_id=ml_experiment_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="List ml experiments for a workspace.", rich_help_panel="Ml Experiment") +def list( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to list data pipelines for.", + ), + ], + continuation_token: Annotated[ + str, + typer.Option( + "--continuation-token", + rich_help_panel="Arguments", + show_default=False, + help="A token for retrieving the next page of results.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = ml_experiment_list( + workspace_id=workspace_id, + continuation_token=continuation_token, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Update a ml experiment.", rich_help_panel="Ml Experiment") +def update( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to update the ml experiment in.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the ml experiment.", + ), + ] = None, + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the ml experiment.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + if not any([display_name, description]): + raise typer.BadParameter( + "At least one of the following arguments is required: --display-name, --description" + ) + + response = ml_experiment_update( + workspace_id=workspace_id, + display_name=display_name, + description=description, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Delete a ml experiment.", rich_help_panel="Ml Experiment") +def delete( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to delete the ml experiment from.", + ), + ], + ml_experiment_id: Annotated[ + str, + typer.Option( + "--ml-experiment-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the ml experiment to delete.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = ml_experiment_delete( + workspace_id=workspace_id, + ml_experiment_id=ml_experiment_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output diff --git a/src/msfabricutils/cli/commands/ml_model.py b/src/msfabricutils/cli/commands/ml_model.py new file mode 100644 index 0000000..204e8bd --- /dev/null +++ b/src/msfabricutils/cli/commands/ml_model.py @@ -0,0 +1,400 @@ +import json +import logging + +import typer +from typing_extensions import Annotated + +from msfabricutils.rest_api import ( + ml_model_create, + ml_model_delete, + ml_model_get, + ml_model_list, + ml_model_update, +) + +app = typer.Typer( + help="[bold]create, get, list, update, delete[/bold]", + rich_markup_mode="rich", +) + + +@app.command(help="Create a ml model.", rich_help_panel="Ml Model") +def create( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to create the ml model in.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the ml model.", + ), + ], + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the ml model.", + ), + ] = None, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), 
+ ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = ml_model_create( + workspace_id=workspace_id, + display_name=display_name, + description=description, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Get a ml model.", rich_help_panel="Ml Model") +def get( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get the ml model from.", + ), + ], + ml_model_id: Annotated[ + str, + typer.Option( + "--ml-model-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the ml model to get.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. 
You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = ml_model_get( + workspace_id=workspace_id, + ml_model_id=ml_model_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="List ml models for a workspace.", rich_help_panel="Ml Model") +def list( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to list ml models for.", + ), + ], + continuation_token: Annotated[ + str, + typer.Option( + "--continuation-token", + rich_help_panel="Arguments", + show_default=False, + help="A token for retrieving the next page of results.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = ml_model_list( + workspace_id=workspace_id, + continuation_token=continuation_token, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Update a ml model.", rich_help_panel="Ml Model") +def update( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to update the ml model in.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the ml model.", + ), + ] = None, + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the ml model.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + if not any([display_name, description]): + raise typer.BadParameter( + "At least one of the following arguments is required: --display-name, --description" + ) + + response = ml_model_update( + workspace_id=workspace_id, + display_name=display_name, + description=description, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Delete a ml model.", rich_help_panel="Ml Model") +def delete( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to delete the ml model from.", + ), + ], + ml_model_id: Annotated[ + str, + typer.Option( + "--ml-model-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the ml model to delete.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = ml_model_delete( + workspace_id=workspace_id, + ml_model_id=ml_model_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output diff --git a/src/msfabricutils/cli/commands/notebook.py b/src/msfabricutils/cli/commands/notebook.py new file mode 100644 index 0000000..935c792 --- /dev/null +++ b/src/msfabricutils/cli/commands/notebook.py @@ -0,0 +1,622 @@ +import json +import logging + +import typer +from typing_extensions import Annotated + +from msfabricutils.rest_api import ( + notebook_create, + notebook_delete, + notebook_get, + notebook_get_definition, + notebook_list, + notebook_update, + notebook_update_definition, +) + +app = typer.Typer( + help="[bold]create, get, list, update, delete[/bold]", + rich_markup_mode="rich", +) + + +@app.command(help="Create a notebook.", rich_help_panel="Notebook") +def create( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to create the notebook in.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the notebook.", + ), + ], + notebook_path: Annotated[ + str, + typer.Option( + "--notebook-path", + rich_help_panel="Arguments", + show_default=False, + help="The path to the notebook to load content from.", + ), + ], + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + 
help="The description of the notebook.", + ), + ] = None, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = notebook_create( + workspace_id=workspace_id, + display_name=display_name, + notebook_path=notebook_path, + description=description, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Get a notebook.", rich_help_panel="Notebook") +def get( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get the notebook from.", + ), + ], + notebook_id: Annotated[ + str, + typer.Option( + "--notebook-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the notebook to get.", + ), + ], + no_preview: Annotated[ + bool, + 
typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = notebook_get( + workspace_id=workspace_id, + notebook_id=notebook_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="List notebooks for a workspace.", rich_help_panel="Notebook") +def list( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to list notebooks for.", + ), + ], + continuation_token: Annotated[ + str, + typer.Option( + "--continuation-token", + rich_help_panel="Arguments", + show_default=False, + help="A token for retrieving the next page of results.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = notebook_list( + workspace_id=workspace_id, + continuation_token=continuation_token, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Update a notebook.", rich_help_panel="Notebook") +def update( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to update.", + ), + ], + notebook_id: Annotated[ + str, + typer.Option( + "--notebook-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the notebook to update.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the notebook.", + ), + ] = None, + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the notebook.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + if not any([display_name, description]): + raise typer.BadParameter( + "At least one of the following arguments is required: --display-name, --description" + ) + + response = notebook_update( + workspace_id=workspace_id, + notebook_id=notebook_id, + display_name=display_name, + description=description, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Delete a notebook.", rich_help_panel="Notebook") +def delete( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to delete.", + ), + ], + notebook_id: Annotated[ + str, + typer.Option( + "--notebook-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the notebook to delete.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = notebook_delete( + workspace_id=workspace_id, + notebook_id=notebook_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Get the definition of a notebook.", rich_help_panel="NotebookDefinition") +def get_definition( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get the notebook definition from.", + ), + ], + notebook_id: Annotated[ + str, + typer.Option( + "--notebook-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the notebook to get the definition from.", + ), + ], + format: Annotated[ + str, + typer.Option( + "--format", + rich_help_panel="Arguments", + show_default=False, + help='The format of the Notebook definition. Supported format is "ipynb".', + ), + ] = None, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. 
You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = notebook_get_definition( + workspace_id=workspace_id, + notebook_id=notebook_id, + format=format, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Update the definition of a notebook.", rich_help_panel="NotebookDefinition") +def update_definition( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to update.", + ), + ], + notebook_id: Annotated[ + str, + typer.Option( + "--notebook-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the notebook to update.", + ), + ], + notebook_path: Annotated[ + str, + typer.Option( + "--notebook-path", + rich_help_panel="Arguments", + show_default=False, + help="The path to the notebook to load content from.", + ), + ], + update_metadata: Annotated[ + bool, + typer.Option( + "--update-metadata", + rich_help_panel="Arguments", + show_default=True, + help="When set to true, the item's metadata is updated using the metadata in the .platform file.", + ), + ] = False, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + 
timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = notebook_update_definition( + workspace_id=workspace_id, + notebook_id=notebook_id, + notebook_path=notebook_path, + update_metadata=update_metadata, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output diff --git a/src/msfabricutils/cli/commands/paginated_report.py b/src/msfabricutils/cli/commands/paginated_report.py new file mode 100644 index 0000000..87b8103 --- /dev/null +++ b/src/msfabricutils/cli/commands/paginated_report.py @@ -0,0 +1,148 @@ +import json +import logging + +import typer +from typing_extensions import Annotated + +from msfabricutils.rest_api import paginated_report_list, paginated_report_update + +app = typer.Typer( + help="[bold]list, update[/bold]", + rich_markup_mode="rich", +) + + +@app.command(help="List paginated reports for a workspace.", rich_help_panel="Paginated Report") +def list( + continuation_token: Annotated[ + str, + typer.Option( + "--continuation-token", + 
rich_help_panel="Arguments", + show_default=False, + help="A token for retrieving the next page of results.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = paginated_report_list( + continuation_token=continuation_token, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Update a paginated report.", rich_help_panel="Paginated Report") +def update( + paginated_report_id: Annotated[ + str, + typer.Option( + "--paginated-report-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the paginated report to update.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the paginated report.", + ), + ] = None, + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the paginated report.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. 
You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = paginated_report_update( + paginated_report_id=paginated_report_id, + display_name=display_name, + description=description, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output diff --git a/src/msfabricutils/cli/commands/reflex.py b/src/msfabricutils/cli/commands/reflex.py new file mode 100644 index 0000000..7899351 --- /dev/null +++ b/src/msfabricutils/cli/commands/reflex.py @@ -0,0 +1,622 @@ +import json +import logging + +import typer +from typing_extensions import Annotated + +from msfabricutils.rest_api import ( + reflex_create, + reflex_delete, + reflex_get, + reflex_get_definition, + reflex_list, + reflex_update, + reflex_update_definition, +) + +app = typer.Typer( + help="[bold]create, get, list, update, delete[/bold]", + rich_markup_mode="rich", +) + + +@app.command(help="Create a reflex.", rich_help_panel="reflex") +def create( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to create the reflex in.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the reflex.", + ), + ], + reflex_path: Annotated[ + str, + typer.Option( + "--reflex-path", + 
rich_help_panel="Arguments", + show_default=False, + help="The path to the reflex to load content from.", + ), + ], + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the reflex.", + ), + ] = None, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = reflex_create( + workspace_id=workspace_id, + display_name=display_name, + reflex_path=reflex_path, + description=description, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Get a reflex.", rich_help_panel="reflex") +def get( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get the reflex from.", + ), + ], + reflex_id: Annotated[ + str, + typer.Option( + "--reflex-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the reflex to get.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = reflex_get( + workspace_id=workspace_id, + reflex_id=reflex_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="List reflexes for a workspace.", rich_help_panel="reflex") +def list( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to list reflexes for.", + ), + ], + continuation_token: Annotated[ + str, + typer.Option( + "--continuation-token", + rich_help_panel="Arguments", + show_default=False, + help="A token for retrieving the next page of results.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = reflex_list( + workspace_id=workspace_id, + continuation_token=continuation_token, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Update a reflex.", rich_help_panel="reflex") +def update( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to update.", + ), + ], + reflex_id: Annotated[ + str, + typer.Option( + "--reflex-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the reflex to update.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the reflex.", + ), + ] = None, + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the reflex.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + if not any([display_name, description]): + raise typer.BadParameter( + "At least one of the following arguments is required: --display-name, --description" + ) + + response = reflex_update( + workspace_id=workspace_id, + reflex_id=reflex_id, + display_name=display_name, + description=description, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Delete a reflex.", rich_help_panel="reflex") +def delete( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to delete.", + ), + ], + reflex_id: Annotated[ + str, + typer.Option( + "--reflex-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the reflex to delete.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = reflex_delete( + workspace_id=workspace_id, + reflex_id=reflex_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Get the definition of a reflex.", rich_help_panel="reflexDefinition") +def get_definition( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get the reflex definition from.", + ), + ], + reflex_id: Annotated[ + str, + typer.Option( + "--reflex-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the reflex to get the definition from.", + ), + ], + format: Annotated[ + str, + typer.Option( + "--format", + rich_help_panel="Arguments", + show_default=False, + help='The format of the reflex definition. Supported format is "ipynb".', + ), + ] = None, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. 
You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = reflex_get_definition( + workspace_id=workspace_id, + reflex_id=reflex_id, + format=format, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Update the definition of a reflex.", rich_help_panel="reflexDefinition") +def update_definition( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to update.", + ), + ], + reflex_id: Annotated[ + str, + typer.Option( + "--reflex-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the reflex to update.", + ), + ], + reflex_path: Annotated[ + str, + typer.Option( + "--reflex-path", + rich_help_panel="Arguments", + show_default=False, + help="The path to the reflex to load content from.", + ), + ], + update_metadata: Annotated[ + bool, + typer.Option( + "--update-metadata", + rich_help_panel="Arguments", + show_default=True, + help="When set to true, the item's metadata is updated using the metadata in the .platform file.", + ), + ] = False, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + 
typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = reflex_update_definition( + workspace_id=workspace_id, + reflex_id=reflex_id, + reflex_path=reflex_path, + update_metadata=update_metadata, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output diff --git a/src/msfabricutils/cli/commands/report.py b/src/msfabricutils/cli/commands/report.py new file mode 100644 index 0000000..e01af7e --- /dev/null +++ b/src/msfabricutils/cli/commands/report.py @@ -0,0 +1,622 @@ +import json +import logging + +import typer +from typing_extensions import Annotated + +from msfabricutils.rest_api import ( + report_create, + report_delete, + report_get, + report_get_definition, + report_list, + report_update, + report_update_definition, +) + +app = typer.Typer( + help="[bold]create, get, list, update, delete[/bold]", + rich_markup_mode="rich", +) + + +@app.command(help="Create a report.", rich_help_panel="report") +def create( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + 
rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to create the report in.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the report.", + ), + ], + report_path: Annotated[ + str, + typer.Option( + "--report-path", + rich_help_panel="Arguments", + show_default=False, + help="The path to the report to load content from.", + ), + ], + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the report.", + ), + ] = None, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = report_create( + workspace_id=workspace_id, + display_name=display_name, + report_path=report_path, + description=description, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Get a report.", rich_help_panel="report") +def get( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get the report from.", + ), + ], + report_id: Annotated[ + str, + typer.Option( + "--report-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the report to get.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = report_get( + workspace_id=workspace_id, + report_id=report_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="List reports for a workspace.", rich_help_panel="report") +def list( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to list reports for.", + ), + ], + continuation_token: Annotated[ + str, + typer.Option( + "--continuation-token", + rich_help_panel="Arguments", + show_default=False, + help="A token for retrieving the next page of results.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = report_list( + workspace_id=workspace_id, + continuation_token=continuation_token, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Update a report.", rich_help_panel="report") +def update( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to update.", + ), + ], + report_id: Annotated[ + str, + typer.Option( + "--report-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the report to update.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the report.", + ), + ] = None, + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the report.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + if not any([display_name, description]): + raise typer.BadParameter( + "At least one of the following arguments is required: --display-name, --description" + ) + + response = report_update( + workspace_id=workspace_id, + report_id=report_id, + display_name=display_name, + description=description, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Delete a report.", rich_help_panel="report") +def delete( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to delete.", + ), + ], + report_id: Annotated[ + str, + typer.Option( + "--report-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the report to delete.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = report_delete( + workspace_id=workspace_id, + report_id=report_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Get the definition of a report.", rich_help_panel="reportDefinition") +def get_definition( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get the report definition from.", + ), + ], + report_id: Annotated[ + str, + typer.Option( + "--report-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the report to get the definition from.", + ), + ], + format: Annotated[ + str, + typer.Option( + "--format", + rich_help_panel="Arguments", + show_default=False, + help='The format of the report definition. Supported format is "ipynb".', + ), + ] = None, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. 
You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = report_get_definition( + workspace_id=workspace_id, + report_id=report_id, + format=format, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Update the definition of a report.", rich_help_panel="reportDefinition") +def update_definition( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to update.", + ), + ], + report_id: Annotated[ + str, + typer.Option( + "--report-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the report to update.", + ), + ], + report_path: Annotated[ + str, + typer.Option( + "--report-path", + rich_help_panel="Arguments", + show_default=False, + help="The path to the report to load content from.", + ), + ], + update_metadata: Annotated[ + bool, + typer.Option( + "--update-metadata", + rich_help_panel="Arguments", + show_default=True, + help="When set to true, the item's metadata is updated using the metadata in the .platform file.", + ), + ] = False, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + 
typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = report_update_definition( + workspace_id=workspace_id, + report_id=report_id, + report_path=report_path, + update_metadata=update_metadata, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output diff --git a/src/msfabricutils/cli/commands/semantic_model.py b/src/msfabricutils/cli/commands/semantic_model.py new file mode 100644 index 0000000..3a9e341 --- /dev/null +++ b/src/msfabricutils/cli/commands/semantic_model.py @@ -0,0 +1,626 @@ +import json +import logging + +import typer +from typing_extensions import Annotated + +from msfabricutils.rest_api import ( + semantic_model_create, + semantic_model_delete, + semantic_model_get, + semantic_model_get_definition, + semantic_model_list, + semantic_model_update, + semantic_model_update_definition, +) + +app = typer.Typer( + help="[bold]create, get, list, update, delete[/bold]", + rich_markup_mode="rich", +) + + +@app.command(help="Create a semantic model.", rich_help_panel="Semantic model") +def 
create( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to create the semantic model in.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the semantic model.", + ), + ], + semantic_model_path: Annotated[ + str, + typer.Option( + "--semantic-model-path", + rich_help_panel="Arguments", + show_default=False, + help="The path to the semantic model to load content from.", + ), + ], + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the semantic model.", + ), + ] = None, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = semantic_model_create( + workspace_id=workspace_id, + display_name=display_name, + semantic_model_path=semantic_model_path, + description=description, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Get a semantic model.", rich_help_panel="Semantic model") +def get( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get the semantic model from.", + ), + ], + semantic_model_id: Annotated[ + str, + typer.Option( + "--semantic-model-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the semantic model to get.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = semantic_model_get( + workspace_id=workspace_id, + semantic_model_id=semantic_model_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="List semantic models for a workspace.", rich_help_panel="Semantic model") +def list( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to list semantic models for.", + ), + ], + continuation_token: Annotated[ + str, + typer.Option( + "--continuation-token", + rich_help_panel="Arguments", + show_default=False, + help="A token for retrieving the next page of results.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = semantic_model_list( + workspace_id=workspace_id, + continuation_token=continuation_token, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Update a semantic model.", rich_help_panel="Semantic model") +def update( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to update.", + ), + ], + semantic_model_id: Annotated[ + str, + typer.Option( + "--semantic-model-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the semantic model to update.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the semantic model.", + ), + ] = None, + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the semantic model.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + if not any([display_name, description]): + raise typer.BadParameter( + "At least one of the following arguments is required: --display-name, --description" + ) + + response = semantic_model_update( + workspace_id=workspace_id, + semantic_model_id=semantic_model_id, + display_name=display_name, + description=description, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Delete a semantic model.", rich_help_panel="Semantic model") +def delete( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to delete.", + ), + ], + semantic_model_id: Annotated[ + str, + typer.Option( + "--semantic-model-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the semantic model to delete.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = semantic_model_delete( + workspace_id=workspace_id, + semantic_model_id=semantic_model_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command( + help="Get the definition of a semantic model.", rich_help_panel="Semantic model definition" +) +def get_definition( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get the semantic model definition from.", + ), + ], + semantic_model_id: Annotated[ + str, + typer.Option( + "--semantic-model-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the semantic model to get the definition from.", + ), + ], + format: Annotated[ + str, + typer.Option( + "--format", + rich_help_panel="Arguments", + show_default=False, + help='The format of the semantic model definition. Supported format is "ipynb".', + ), + ] = None, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. 
You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = semantic_model_get_definition( + workspace_id=workspace_id, + semantic_model_id=semantic_model_id, + format=format, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command( + help="Update the definition of a semantic model.", rich_help_panel="Semantic model definition" +) +def update_definition( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to update.", + ), + ], + semantic_model_id: Annotated[ + str, + typer.Option( + "--semantic-model-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the semantic model to update.", + ), + ], + semantic_model_path: Annotated[ + str, + typer.Option( + "--semantic-model-path", + rich_help_panel="Arguments", + show_default=False, + help="The path to the semantic model to load content from.", + ), + ], + update_metadata: Annotated[ + bool, + typer.Option( + "--update-metadata", + rich_help_panel="Arguments", + show_default=True, + help="When set to true, the item's metadata is updated using the metadata in the .platform file.", + ), + ] = False, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + 
help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = semantic_model_update_definition( + workspace_id=workspace_id, + semantic_model_id=semantic_model_id, + semantic_model_path=semantic_model_path, + update_metadata=update_metadata, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output diff --git a/src/msfabricutils/cli/commands/spark_job_definition.py b/src/msfabricutils/cli/commands/spark_job_definition.py new file mode 100644 index 0000000..914d8ef --- /dev/null +++ b/src/msfabricutils/cli/commands/spark_job_definition.py @@ -0,0 +1,714 @@ +import json +import logging + +import typer +from typing_extensions import Annotated + +from msfabricutils.rest_api import ( + spark_job_definition_create, + spark_job_definition_delete, + spark_job_definition_get, + spark_job_definition_get_definition, + spark_job_definition_list, + spark_job_definition_run_background_job, + spark_job_definition_update, + 
spark_job_definition_update_definition, +) + +app = typer.Typer( + help="[bold]create, get, list, update, delete[/bold]", + rich_markup_mode="rich", +) + + +@app.command(help="Create a spark job definition.", rich_help_panel="Spark Job Definition") +def create( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to create the spark job definition in.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the spark job definition.", + ), + ], + spark_job_definition_path: Annotated[ + str, + typer.Option( + "--spark-job-definition-path", + rich_help_panel="Arguments", + show_default=False, + help="The path to the spark job definition to load content from.", + ), + ], + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the spark job definition.", + ), + ] = None, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = spark_job_definition_create( + workspace_id=workspace_id, + display_name=display_name, + spark_job_definition_path=spark_job_definition_path, + description=description, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Get a spark job definition.", rich_help_panel="Spark Job Definition") +def get( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get the spark job definition from.", + ), + ], + spark_job_definition_id: Annotated[ + str, + typer.Option( + "--spark-job-definition-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the spark job definition to get.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = spark_job_definition_get( + workspace_id=workspace_id, + spark_job_definition_id=spark_job_definition_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command( + help="List spark job definitions for a workspace.", rich_help_panel="Spark Job Definition" +) +def list( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to list spark job definitions for.", + ), + ], + continuation_token: Annotated[ + str, + typer.Option( + "--continuation-token", + rich_help_panel="Arguments", + show_default=False, + help="A token for retrieving the next page of results.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = spark_job_definition_list( + workspace_id=workspace_id, + continuation_token=continuation_token, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Update a spark job definition.", rich_help_panel="Spark Job Definition") +def update( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to update.", + ), + ], + spark_job_definition_id: Annotated[ + str, + typer.Option( + "--spark-job-definition-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the spark job definition to update.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the spark job definition.", + ), + ] = None, + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the spark job definition.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + if not any([display_name, description]): + raise typer.BadParameter( + "At least one of the following arguments is required: --display-name, --description" + ) + + response = spark_job_definition_update( + workspace_id=workspace_id, + spark_job_definition_id=spark_job_definition_id, + display_name=display_name, + description=description, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Delete a spark job definition.", rich_help_panel="Spark Job Definition") +def delete( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to delete.", + ), + ], + spark_job_definition_id: Annotated[ + str, + typer.Option( + "--spark-job-definition-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the spark job definition to delete.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = spark_job_definition_delete( + workspace_id=workspace_id, + spark_job_definition_id=spark_job_definition_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command( + help="Get the definition of a spark job definition.", rich_help_panel="Spark Job Definition" +) +def get_definition( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get the spark job definition definition from.", + ), + ], + spark_job_definition_id: Annotated[ + str, + typer.Option( + "--spark-job-definition-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the spark job definition to get the definition from.", + ), + ], + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = spark_job_definition_get_definition( + workspace_id=workspace_id, + spark_job_definition_id=spark_job_definition_id, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command( + help="Update the definition of a spark job definition.", rich_help_panel="Spark Job Definition" +) +def update_definition( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to update.", + ), + ], + spark_job_definition_id: Annotated[ + str, + typer.Option( + "--spark-job-definition-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the spark job definition to update.", + ), + ], + spark_job_definition_path: Annotated[ + str, + typer.Option( + "--spark-job-definition-path", + rich_help_panel="Arguments", + show_default=False, + help="The path to the spark job definition to load content from.", + ), + ], + update_metadata: Annotated[ + bool, + typer.Option( + "--update-metadata", + rich_help_panel="Arguments", + show_default=True, + help="When set to true, the item's metadata is updated using the metadata in the .platform file.", + ), + ] = False, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for 
the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = spark_job_definition_update_definition( + workspace_id=workspace_id, + spark_job_definition_id=spark_job_definition_id, + spark_job_definition_path=spark_job_definition_path, + update_metadata=update_metadata, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Run on-demand spark job instance.", rich_help_panel="Job") +def run_background_job( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to create a job for.", + ), + ], + spark_job_definition_id: Annotated[ + str, + typer.Option( + "--spark-job-definition-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the spark job definition to create a job for.", + ), + ], + job_type: Annotated[ + str, + typer.Option( + "--job-type", + rich_help_panel="Arguments", + show_default=False, + help='The type of the job to create. 
Must be "sparkJob".', + ), + ], + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = spark_job_definition_run_background_job( + workspace_id=workspace_id, + spark_job_definition_id=spark_job_definition_id, + job_type=job_type, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output diff --git a/src/msfabricutils/cli/commands/sql_endpoint.py b/src/msfabricutils/cli/commands/sql_endpoint.py new file mode 100644 index 0000000..86a07dd --- /dev/null +++ b/src/msfabricutils/cli/commands/sql_endpoint.py @@ -0,0 +1,80 @@ +import json +import logging + +import typer +from typing_extensions import Annotated + +from msfabricutils.rest_api import sql_endpoint_list + +app = typer.Typer( + help="[bold]list[/bold]", + rich_markup_mode="rich", +) + + +@app.command(help="List SQL endpoints for a 
workspace.", rich_help_panel="SQL Endpoint") +def list( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to list SQL endpoints for.", + ), + ], + continuation_token: Annotated[ + str, + typer.Option( + "--continuation-token", + rich_help_panel="Arguments", + show_default=False, + help="A token for retrieving the next page of results.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = sql_endpoint_list( + workspace_id=workspace_id, + continuation_token=continuation_token, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output diff --git a/src/msfabricutils/cli/commands/warehouse.py b/src/msfabricutils/cli/commands/warehouse.py new file mode 100644 index 0000000..08e08f0 --- /dev/null +++ b/src/msfabricutils/cli/commands/warehouse.py @@ -0,0 +1,400 @@ +import json +import logging + +import typer +from typing_extensions import Annotated + +from msfabricutils.rest_api import ( + warehouse_create, + warehouse_delete, + warehouse_get, + warehouse_list, + warehouse_update, +) + +app = typer.Typer( + help="[bold]create, get, list, update, 
delete[/bold]", + rich_markup_mode="rich", +) + + +@app.command(help="Create a warehouse.", rich_help_panel="Warehouse") +def create( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to create the warehouse in.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the warehouse.", + ), + ], + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the warehouse.", + ), + ] = None, + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = warehouse_create( + workspace_id=workspace_id, + display_name=display_name, + description=description, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Get a warehouse.", rich_help_panel="Warehouse") +def get( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get the warehouse from.", + ), + ], + warehouse_id: Annotated[ + str, + typer.Option( + "--warehouse-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the warehouse to get.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = warehouse_get( + workspace_id=workspace_id, + warehouse_id=warehouse_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="List warehouses for a workspace.", rich_help_panel="Warehouse") +def list( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to list warehouses for.", + ), + ], + continuation_token: Annotated[ + str, + typer.Option( + "--continuation-token", + rich_help_panel="Arguments", + show_default=False, + help="A token for retrieving the next page of results.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = warehouse_list( + workspace_id=workspace_id, + continuation_token=continuation_token, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Update a warehouse.", rich_help_panel="Warehouse") +def update( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to update the warehouse in.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the warehouse.", + ), + ] = None, + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the warehouse.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + if not any([display_name, description]): + raise typer.BadParameter( + "At least one of the following arguments is required: --display-name, --description" + ) + + response = warehouse_update( + workspace_id=workspace_id, + display_name=display_name, + description=description, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Delete a warehouse.", rich_help_panel="Warehouse") +def delete( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to delete the warehouse from.", + ), + ], + warehouse_id: Annotated[ + str, + typer.Option( + "--warehouse-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the warehouse to delete.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = warehouse_delete( + workspace_id=workspace_id, + warehouse_id=warehouse_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output diff --git a/src/msfabricutils/cli/commands/workspace.py b/src/msfabricutils/cli/commands/workspace.py new file mode 100644 index 0000000..7cc382d --- /dev/null +++ b/src/msfabricutils/cli/commands/workspace.py @@ -0,0 +1,976 @@ +import json +import logging + +import typer +from typing_extensions import Annotated + +from msfabricutils.rest_api import ( + workspace_add_role_assignment, + workspace_assign_to_capacity, + workspace_create, + workspace_delete, + workspace_delete_role_assignment, + workspace_deprovision_identity, + workspace_get, + workspace_get_role_assignment, + workspace_list, + workspace_list_role_assignments, + workspace_provision_identity, + workspace_unassign_from_capacity, + workspace_update, + workspace_update_role_assignment, +) + +app = typer.Typer( + help="[bold]create, get, list, update, delete[/bold]", + rich_markup_mode="rich", +) + + +@app.command(help="Create a workspace.", rich_help_panel="Workspace") +def create( + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the workspace.", + ), + ], + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the workspace.", + ), + ] = None, + capacity_id: Annotated[ + str, + typer.Option( + 
"--capacity-id", + rich_help_panel="Arguments", + show_default=False, + help="The capacity id to assign the workspace to.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = workspace_create( + display_name=display_name, + description=description, + capacity_id=capacity_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Get a workspace.", rich_help_panel="Workspace") +def get( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = workspace_get( + workspace_id=workspace_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="List workspaces.", rich_help_panel="Workspace") +def list( + continuation_token: Annotated[ + str, + typer.Option( + "--continuation-token", + rich_help_panel="Arguments", + show_default=False, + help="A token for retrieving the next page of results.", + ), + ] = None, + roles: Annotated[ + str, + typer.Option( + "--roles", + rich_help_panel="Arguments", + show_default=False, + help="A list of roles. Separate values using a comma. If not provided, all workspaces are returned.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = workspace_list( + continuation_token=continuation_token, + roles=roles, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Update a workspace.", rich_help_panel="Workspace") +def update( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to update.", + ), + ], + display_name: Annotated[ + str, + typer.Option( + "--display-name", + rich_help_panel="Arguments", + show_default=False, + help="The display name of the workspace.", + ), + ] = None, + description: Annotated[ + str, + typer.Option( + "--description", + rich_help_panel="Arguments", + show_default=False, + help="The description of the workspace.", + ), + ] = None, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = workspace_update( + workspace_id=workspace_id, + display_name=display_name, + description=description, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Delete a workspace.", rich_help_panel="Workspace") +def delete( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to delete.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = workspace_delete( + workspace_id=workspace_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Assign a workspace to a capacity.", rich_help_panel="Capacity") +def assign_to_capacity( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to assign to a capacity.", + ), + ], + capacity_id: Annotated[ + str, + typer.Option( + "--capacity-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the capacity to assign the workspace to.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = workspace_assign_to_capacity( + workspace_id=workspace_id, + capacity_id=capacity_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Unassign a workspace from a capacity.", rich_help_panel="Capacity") +def unassign_from_capacity( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to unassign from a capacity.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = workspace_unassign_from_capacity( + workspace_id=workspace_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Add a role assignment to a workspace.", rich_help_panel="Role Assignment") +def add_role_assignment( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to add a role assignment to.", + ), + ], + role: Annotated[ + str, + typer.Option( + "--role", + rich_help_panel="Arguments", + show_default=False, + help="The role to add to the workspace.", + ), + ], + principal_id: Annotated[ + str, + typer.Option( + "--principal-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the principal.", + ), + ], + principal_type: Annotated[ + str, + typer.Option( + "--principal-type", + rich_help_panel="Arguments", + show_default=False, + help="The type of the principal.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = workspace_add_role_assignment( + workspace_id=workspace_id, + role=role, + principal_id=principal_id, + principal_type=principal_type, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Get a role assignment for a workspace.", rich_help_panel="Role Assignment") +def get_role_assignment( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to get a role assignment for.", + ), + ], + role_assignment_id: Annotated[ + str, + typer.Option( + "--role-assignment-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the role assignment to get.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = workspace_get_role_assignment( + workspace_id=workspace_id, + role_assignment_id=role_assignment_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="List role assignments for a workspace.", rich_help_panel="Role Assignment") +def list_role_assignments( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to list role assignments for.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = workspace_list_role_assignments( + workspace_id=workspace_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Update a role assignment for a workspace.", rich_help_panel="Role Assignment") +def update_role_assignment( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The workspace ID.", + ), + ], + role_assignment_id: Annotated[ + str, + typer.Option( + "--role-assignment-id", + rich_help_panel="Arguments", + show_default=False, + help="The workspace role assignment ID.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = workspace_update_role_assignment( + workspace_id=workspace_id, + role_assignment_id=role_assignment_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Delete a role assignment from a workspace.", rich_help_panel="Role Assignment") +def delete_role_assignment( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to add a role assignment to.", + ), + ], + role_assignment_id: Annotated[ + str, + typer.Option( + "--role-assignment-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the role assignment to delete.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = workspace_delete_role_assignment( + workspace_id=workspace_id, + role_assignment_id=role_assignment_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Deprovision an identity from a workspace.", rich_help_panel="Identity") +def deprovision_identity( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The id of the workspace to deprovision an identity from.", + ), + ], + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = workspace_deprovision_identity( + workspace_id=workspace_id, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output + + +@app.command(help="Provision an identity to a workspace.", rich_help_panel="Identity") +def provision_identity( + workspace_id: Annotated[ + str, + typer.Option( + "--workspace-id", + rich_help_panel="Arguments", + show_default=False, + help="The ID of the workspace.", + ), + ], + await_lro: Annotated[ + bool, + typer.Option( + "--await-lro", + rich_help_panel="Arguments", + show_default=True, + help="Whether to await the long running operation.", + ), + ] = False, + timeout: Annotated[ + int, + typer.Option( + "--timeout", show_default=True, help="Timeout for the long running operation (seconds)" + ), + ] = 60 * 5, + no_preview: Annotated[ + bool, + typer.Option( + "--no-preview", + "--yes", + "-y", + rich_help_panel="Arguments", + show_default=True, + help="Preview the command before executing it. You will be asked to confirm the request before it is executed.", + ), + ] = False, + quiet: Annotated[ + bool, + typer.Option( + "--quiet", + show_default=True, + help="Whether to run in quiet mode. 
Sets the logging level to WARNING.", + ), + ] = False, +): + logger = logging.getLogger() + if quiet: + logger.setLevel(logging.WARNING) + + response = workspace_provision_identity( + workspace_id=workspace_id, + await_lro=await_lro, + timeout=timeout, + preview=not no_preview, + ) + + try: + content = response.json() + except json.JSONDecodeError: + content = response.text + + output = { + "url": response.url, + "method": response.request.method, + "status_code": response.status_code, + "reason": response.reason, + "headers": dict(response.headers), + "content": content, + } + + typer.echo(json.dumps(output, indent=2)) + return output diff --git a/src/msfabricutils/cli/lakehouse.py b/src/msfabricutils/cli/lakehouse.py deleted file mode 100644 index bac0595..0000000 --- a/src/msfabricutils/cli/lakehouse.py +++ /dev/null @@ -1,100 +0,0 @@ -import logging -from dataclasses import dataclass - -from msfabricutils.core.lakehouse import ( - create_workspace_lakehouse, - delete_workspace_lakehouse, - get_workspace_lakehouse, - update_workspace_lakehouse, -) - - -@dataclass -class CreateLakehouseArgs: - name: str - workspace_id: str - enable_schemas: bool - on_conflict: str - description: str | None = None - - -@dataclass -class DeleteLakehouseArgs: - workspace_id: str - on_conflict: str - id: str | None = None - name: str | None = None - - -def create_lakehouse_command(args: CreateLakehouseArgs): - """Creates a lakehouse.""" - - name = args.name - workspace_id = args.workspace_id - enable_schemas = bool(args.enable_schemas) - description = args.description - on_conflict = args.on_conflict - - lakehouse_id = None - try: - lakehouse = get_workspace_lakehouse(workspace_id, lakehouse_name=name) - lakehouse_id = lakehouse["id"] - logging.info(f"Lakehouse {name} created successfully with id {lakehouse_id}") - except ValueError: - logging.info(f"Lakehouse {name} does not exist") - pass - - if lakehouse_id is not None: - - if on_conflict == "ignore": - logging.info(f"Lakehouse 
`{name}` already exists, skipping update") - return lakehouse - - if on_conflict == "error": - raise ValueError(f"Lakehouse {name} already exists") - - if on_conflict == "update": - logging.info(f"Updating lakehouse with `{name}` with description `{description}`") - update_workspace_lakehouse(workspace_id, lakehouse_id, name, enable_schemas, description) - lakehouse = get_workspace_lakehouse(workspace_id, name, enable_schemas, description) - lakehouse_id = lakehouse["id"] - logging.info(f"Lakehouse `{name}` successfully updated") - - else: - logging.info(f"Creating lakehouse with `{name}` with description `{description}`") - lakehouse = create_workspace_lakehouse(workspace_id, name, enable_schemas, description) - logging.info(f"Lakehouse `{name}` successfully created") - lakehouse_id = lakehouse["id"] - - return lakehouse - - -def delete_lakehouse_command(args: DeleteLakehouseArgs): - """Deletes a lakehouse.""" - workspace_id = args.workspace_id - lakehouse_id = args.id - lakehouse_name = args.name - on_conflict = args.on_conflict - - if lakehouse_id is None and lakehouse_name is None: - raise ValueError("Either `lakehouse_id` or `lakehouse_name` must be provided") - - if lakehouse_id is None: - try: - lakehouse = get_workspace_lakehouse(workspace_id, lakehouse_name=lakehouse_name) - lakehouse_id = lakehouse["id"] - logging.info(f"Lakehouse {lakehouse_name} exists") - except ValueError: - logging.info(f"Lakehouse {lakehouse_name} does not exist") - - if workspace_id is None and on_conflict == "error": - raise ValueError(f"Lakehouse {lakehouse_name} does not exist") - - if workspace_id is None and on_conflict == "ignore": - logging.info(f"Lakehouse {lakehouse_name} does not exist, skipping deletion") - return - - logging.info(f"Deleting lakehouse {lakehouse_id}") - response = delete_workspace_lakehouse(workspace_id, lakehouse_id) - response.raise_for_status() - logging.info(f"Lakehouse {lakehouse_id} successfully deleted") \ No newline at end of file diff --git 
a/src/msfabricutils/cli/notebook.py b/src/msfabricutils/cli/notebook.py deleted file mode 100644 index c4ed2ba..0000000 --- a/src/msfabricutils/cli/notebook.py +++ /dev/null @@ -1,123 +0,0 @@ -import glob -import logging -import os -from dataclasses import dataclass - -from msfabricutils.core.notebook import ( - create_workspace_notebook, - delete_workspace_notebook, - get_workspace_notebook, - update_workspace_notebook_definition, -) - - -@dataclass -class CreateNotebookArgs: - workspace_id: str - path: str - name: str - description: str - -@dataclass -class BulkCreateNotebookArgs: - workspace_id: str - path: list[str] - on_conflict: str - no_wait: bool - -@dataclass -class DeleteNotebookArgs: - workspace_id: str - on_conflict: str - id: str | None = None - name: str | None = None - - -def create_notebook_command(args: CreateNotebookArgs): - """Creates a notebook.""" - - workspace_id = args.workspace_id - path = args.path - name = args.name - description = args.description - on_conflict = args.on_conflict - no_wait = args.no_wait - notebook_id = None - try: - notebook = get_workspace_notebook(workspace_id, notebook_name=name) - notebook_id = notebook["id"] - logging.info(f"Notebook {name} already exists") - except ValueError: - logging.info(f"Notebook {name} does not exist") - - - if notebook_id is not None: - - if on_conflict == "ignore": - logging.info(f"Notebook `{name}` already exists, skipping update") - return notebook - - if on_conflict == "error": - raise ValueError(f"Notebook {name} already exists") - - if on_conflict == "update": - logging.info(f"Updating notebook with `{name}` with description `{description}`") - notebook = update_workspace_notebook_definition(workspace_id, notebook_id, path, description) - logging.info(f"Notebook `{name}` successfully updated") - - else: - logging.info(f"Creating notebook with `{name}` with description `{description}`") - notebook = create_workspace_notebook(workspace_id, path, name, description, 
wait_for_completion=not no_wait) - logging.info(f"Notebook `{name}` successfully created") - notebook_id = notebook["id"] - - return notebook - -def bulk_create_notebook_command(args: BulkCreateNotebookArgs): - """Creates one or more notebooks.""" - - raise NotImplementedError("Bulk create notebooks is not implemented yet") - paths = [] - for path in args.path: - path = path if path.endswith(".Notebook") else path + ".Notebook" - # print(path) - paths.extend(glob.glob(path)) - - paths = list(set(paths)) - formatted_paths = ", ".join(paths) - - if len(paths) == 0: - logging.info(f"No notebooks found in current directory `{os.getcwd()}` given the provided paths: {formatted_paths}") - return "" - - - -def delete_notebook_command(args: DeleteNotebookArgs): - """Deletes one or more notebooks.""" - workspace_id = args.workspace_id - notebook_id = args.id - notebook_name = args.name - on_conflict = args.on_conflict - - if notebook_id is None and notebook_name is None: - raise ValueError("Either `notebook_id` or `notebook_name` must be provided") - - if notebook_id is None: - try: - notebook = get_workspace_notebook(workspace_id, notebook_name=notebook_name) - notebook_id = notebook["id"] - logging.info(f"Notebook {notebook_name} exists") - except ValueError: - logging.info(f"Notebook {notebook_name} does not exist") - - if workspace_id is None and on_conflict == "error": - raise ValueError(f"Notebook {notebook_name} does not exist") - - if workspace_id is None and on_conflict == "ignore": - logging.info(f"Notebook {notebook_name} does not exist, skipping deletion") - return - - logging.info(f"Deleting notebook {notebook_id}") - response = delete_workspace_notebook(workspace_id, notebook_id) - response.raise_for_status() - logging.info(f"Notebook {notebook_id} successfully deleted") \ No newline at end of file diff --git a/src/msfabricutils/cli/workspace.py b/src/msfabricutils/cli/workspace.py deleted file mode 100644 index b139971..0000000 --- 
a/src/msfabricutils/cli/workspace.py +++ /dev/null @@ -1,135 +0,0 @@ -import logging -from dataclasses import dataclass - -from msfabricutils.core.workspace import ( - assign_workspace_to_capacity, - create_workspace, - delete_workspace, - get_workspace, - update_workspace, -) - - -@dataclass -class WorkspaceCreateArgs: - """Arguments for creating a workspace - - Args: - name (str): The name of the workspace - on_conflict (str): The action to take if the workspace already exists. - description (str | None): The description of the workspace - capacity_id (str | None): The capacity ID of the workspace - """ - - name: str - on_conflict: str - description: str | None = None - capacity_id: str | None = None - - -@dataclass -class WorkspaceDeleteArgs: - """Arguments for deleting a workspace - - Args: - id (str): The ID of the workspace to delete - """ - - on_conflict: str - id: str | None = None - name: str | None = None - -def create_workspace_command(args: WorkspaceCreateArgs) -> dict[str, str]: - """Create a new workspace with the specified configuration. 
- - Args: - args (WorkspaceCreateArgs): The arguments to create a workspace - - Returns: - Workspace information as a dictionary - """ - - logging.info(f"Creating workspace {args.__dict__}") - - name = args.name - description = args.description - capacity_id = args.capacity_id - on_conflict = args.on_conflict - - workspace_id = None - try: - workspace = get_workspace(workspace_name=name) - workspace_id = workspace["id"] - logging.info(f"Workspace {name} already exists") - except ValueError: - logging.info(f"Workspace {name} does not exist") - - - if workspace_id is not None: - - if on_conflict == "ignore": - logging.info(f"Workspace `{name}` already exists, skipping update") - return workspace - - if on_conflict == "error": - raise ValueError(f"Workspace {name} already exists") - - if on_conflict == "update": - logging.info(f"Updating workspace with `{name}` with description `{description}`") - update_workspace(workspace_id, name, description) - workspace = get_workspace(workspace_name=name) - workspace_id = workspace["id"] - logging.info(f"Workspace `{name}` successfully updated") - - else: - logging.info(f"Creating workspace with `{name}` with description `{description}`") - workspace = create_workspace(name, description) - logging.info(f"Workspace `{name}` successfully created") - workspace_id = workspace["id"] - - if capacity_id is not None: - logging.info(f"Assigning workspace `{workspace_id}` to capacity `{capacity_id}`") - assign_workspace_to_capacity(workspace_id, capacity_id) - logging.info(f"Workspace `{workspace_id}` successfully assigned to capacity `{capacity_id}`") - - return workspace - - -def delete_workspace_command(args: WorkspaceDeleteArgs) -> dict[str, str]: - """Delete a workspace with the specified configuration. 
- - Args: - args (WorkspaceDeleteArgs): The arguments to delete a workspace - - Returns: - Workspace information as a dictionary - """ - - logging.info(f"Deleting workspace {args.__dict__}") - - workspace_id = args.id - name = args.name - on_conflict = args.on_conflict - - if workspace_id is None and name is None: - raise ValueError("Either `id` or `name` must be provided") - - if workspace_id is None: - try: - workspace = get_workspace(workspace_name=name) - workspace_id = workspace["id"] - logging.info(f"Workspace {name} exists") - except ValueError: - logging.info(f"Workspace {name} does not exist") - - if workspace_id is None and on_conflict == "error": - raise ValueError(f"Workspace {name} does not exist") - - if workspace_id is None and on_conflict == "ignore": - logging.info(f"Workspace {name} does not exist, skipping deletion") - return - - logging.info(f"Deleting workspace {workspace_id}") - response = delete_workspace(workspace_id) - response.raise_for_status() - logging.info(f"Workspace {workspace_id} successfully deleted") \ No newline at end of file diff --git a/src/msfabricutils/common/remove_none.py b/src/msfabricutils/common/remove_none.py new file mode 100644 index 0000000..113ccf8 --- /dev/null +++ b/src/msfabricutils/common/remove_none.py @@ -0,0 +1,19 @@ +from typing import Any + + +def remove_none(obj: Any) -> Any: + """ + Recursively remove None values from dictionaries and lists. + + Args: + obj (Any): The data structure to clean. + + Returns: + Any: A new data structure with None values removed. 
+ """ + if isinstance(obj, dict): + return {k: remove_none(v) for k, v in obj.items() if v is not None} + elif isinstance(obj, list): + return [remove_none(item) for item in obj if item is not None] + else: + return obj diff --git a/src/msfabricutils/common/shorten_dict_values.py b/src/msfabricutils/common/shorten_dict_values.py new file mode 100644 index 0000000..20bab29 --- /dev/null +++ b/src/msfabricutils/common/shorten_dict_values.py @@ -0,0 +1,22 @@ +from typing import Any + + +def shorten_dict_values(obj: Any, max_length: int = 20) -> Any: + """ + Recursively shorten string values in dictionaries and lists. + Useful for printing out data structures in a readable format. + + Args: + obj (Any): The data structure to shorten. + max_length (int): The maximum length of string values to shorten. + Returns: + Any: A new data structure with string values shortened. + """ + if isinstance(obj, dict): + return {k: shorten_dict_values(v) for k, v in obj.items()} + elif isinstance(obj, list): + return [shorten_dict_values(item) for item in obj] + elif isinstance(obj, str): + return obj[:max_length] + "... (truncated)" if len(obj) > max_length else obj + else: + return obj diff --git a/src/msfabricutils/core/operations.py b/src/msfabricutils/core/operations.py index 870c09b..b1f450a 100644 --- a/src/msfabricutils/core/operations.py +++ b/src/msfabricutils/core/operations.py @@ -7,85 +7,56 @@ def get_long_running_operation(operation_id: str) -> requests.Response: - endpoint = f"operations/{operation_id}" return get_request(endpoint, content_only=False) def wait_for_long_running_operation( - operation_id: str, - retry_after: str, - initial_delay: float = 1.0, - max_delay: float = 32.0, - max_attempts: int = 10, - timeout: float = 60.0 * 5 + operation_id: str, retry_after: str, timeout: float = 60.0 * 5, polling_interval: float = 5.0 ) -> requests.Response: - """Wait for a long running operation to complete with exponential backoff. 
- + """Wait for a long running operation to complete within timeout period. + Args: operation_id: The operation ID to check - initial_delay: Starting delay in seconds (default: 1s) - max_delay: Maximum delay between retries in seconds (default: 32s) - max_attempts: Maximum number of retry attempts (default: 10) - timeout: Optional total timeout in seconds (default: None) - + retry_after: Initial wait time in seconds + timeout: Total timeout in seconds (default: 300s/5min) + polling_interval: Time in seconds between status checks (default: 5.0) + Returns: Response from the operation - + Raises: - TimeoutError: If the operation times out - Exception: If the operation fails or max retries exceeded + TimeoutError: If the operation does not complete within the timeout period + Exception: If the operation fails """ logging.info(f"Waiting {retry_after} seconds for operation {operation_id} to complete...") time.sleep(float(retry_after)) start_time = time.time() - current_delay = initial_delay - attempts = 0 while True: - attempts += 1 response = get_long_running_operation(operation_id) - - if response.status_code != 200: - if attempts < max_attempts: - logging.warning( - f"Request failed (attempt {attempts}/{max_attempts}), retrying...", - extra={ - "operation_id": operation_id, - "status_code": response.status_code, - "delay": current_delay - } - ) - time.sleep(current_delay) - current_delay = min(current_delay * 2, max_delay) - continue - else: - raise Exception( - f"Operation {operation_id} failed after {max_attempts} attempts: {response.json()['error']}" - ) + content = response.json() - match response.json()["status"]: + response.raise_for_status() + + match content["status"]: case "Succeeded": logging.info(f"Operation {operation_id} completed successfully") - return response + break case "Failed": - raise Exception(f"Operation {operation_id} failed: {response.json()['error']}") + raise Exception(f"Operation {operation_id} failed: {content['error']}") case _: - if 
timeout and (time.time() - start_time) > timeout: + if (time.time() - start_time) > timeout: raise TimeoutError( f"Operation {operation_id} timed out after {timeout} seconds" ) - + logging.info( - "Operation in progress, waiting...", - extra={ - "operation_id": operation_id, - "status": response.json()["status"], - "delay": current_delay, - "elapsed": time.time() - start_time - } + f"Operation {operation_id} is {content['percentComplete']} percent complete, waiting..." ) - time.sleep(current_delay) - current_delay = min(current_delay * 2, max_delay) + time.sleep(polling_interval) + created_item = get_long_running_operation(f"{operation_id}/result") + created_item.raise_for_status() + return created_item diff --git a/src/msfabricutils/rest_api/__init__.py b/src/msfabricutils/rest_api/__init__.py new file mode 100644 index 0000000..a8bc6e4 --- /dev/null +++ b/src/msfabricutils/rest_api/__init__.py @@ -0,0 +1,320 @@ +from .capacity import capacity_list +from .dashboard import dashboard_list +from .data_pipeline import ( + data_pipeline_create, + data_pipeline_delete, + data_pipeline_get, + data_pipeline_list, + data_pipeline_update, +) +from .datamart import datamart_list +from .environment import ( + environment_cancel_publish, + environment_create, + environment_delete, + environment_delete_staging_library, + environment_get, + environment_get_published_libraries, + environment_get_spark_compute_published_settings, + environment_get_spark_compute_staging_settings, + environment_get_staging_libraries, + environment_list, + environment_publish, + environment_update, + environment_update_spark_compute_staging_settings, + environment_upload_staging_library, +) +from .eventhouse import ( + eventhouse_create, + eventhouse_delete, + eventhouse_get, + eventhouse_get_definition, + eventhouse_list, + eventhouse_update, + eventhouse_update_definition, +) +from .eventstream import ( + eventstream_create, + eventstream_delete, + eventstream_get, + eventstream_get_definition, + 
eventstream_list, + eventstream_update, + eventstream_update_definition, +) +from .kql_dashboard import ( + kql_dashboard_create, + kql_dashboard_delete, + kql_dashboard_get, + kql_dashboard_get_definition, + kql_dashboard_list, + kql_dashboard_update, + kql_dashboard_update_definition, +) +from .kql_database import ( + kql_database_create, + kql_database_delete, + kql_database_get, + kql_database_get_definition, + kql_database_list, + kql_database_update, + kql_database_update_definition, +) +from .kql_queryset import ( + kql_queryset_create, + kql_queryset_delete, + kql_queryset_get, + kql_queryset_get_definition, + kql_queryset_list, + kql_queryset_update, + kql_queryset_update_definition, +) +from .lakehouse import ( + lakehouse_create, + lakehouse_delete, + lakehouse_get, + lakehouse_list, + lakehouse_list_tables, + lakehouse_load_table, + lakehouse_run_background_job, + lakehouse_update, +) +from .long_running_operation import ( + long_running_operation_get_result, + long_running_operation_get_state, +) +from .mirrored_database import ( + mirrored_database_create, + mirrored_database_delete, + mirrored_database_get, + mirrored_database_get_definition, + mirrored_database_list, + mirrored_database_update, + mirrored_database_update_definition, +) +from .mirrored_warehouse import mirrored_warehouse_list +from .ml_experiment import ( + ml_experiment_create, + ml_experiment_delete, + ml_experiment_get, + ml_experiment_list, + ml_experiment_update, +) +from .ml_model import ml_model_create, ml_model_delete, ml_model_get, ml_model_list, ml_model_update +from .notebook import ( + notebook_create, + notebook_delete, + notebook_get, + notebook_get_definition, + notebook_list, + notebook_update, + notebook_update_definition, +) +from .paginated_report import paginated_report_list, paginated_report_update +from .reflex import ( + reflex_create, + reflex_delete, + reflex_get, + reflex_get_definition, + reflex_list, + reflex_update, + reflex_update_definition, +) +from 
.report import ( + report_create, + report_delete, + report_get, + report_get_definition, + report_list, + report_update, + report_update_definition, +) +from .semantic_model import ( + semantic_model_create, + semantic_model_delete, + semantic_model_get, + semantic_model_get_definition, + semantic_model_list, + semantic_model_update, + semantic_model_update_definition, +) +from .spark_job_definition import ( + spark_job_definition_create, + spark_job_definition_delete, + spark_job_definition_get, + spark_job_definition_get_definition, + spark_job_definition_list, + spark_job_definition_run_background_job, + spark_job_definition_update, + spark_job_definition_update_definition, +) +from .sql_endpoint import sql_endpoint_list +from .warehouse import ( + warehouse_create, + warehouse_delete, + warehouse_get, + warehouse_list, + warehouse_update, +) +from .workspace import ( + workspace_add_role_assignment, + workspace_assign_to_capacity, + workspace_create, + workspace_delete, + workspace_delete_role_assignment, + workspace_deprovision_identity, + workspace_get, + workspace_get_role_assignment, + workspace_list, + workspace_list_role_assignments, + workspace_provision_identity, + workspace_unassign_from_capacity, + workspace_update, + workspace_update_role_assignment, +) + +__all__ = ( + "eventhouse_create", + "eventhouse_get", + "eventhouse_list", + "eventhouse_update", + "eventhouse_delete", + "eventhouse_get_definition", + "eventhouse_update_definition", + "report_create", + "report_get", + "report_list", + "report_update", + "report_delete", + "report_get_definition", + "report_update_definition", + "reflex_create", + "reflex_get", + "reflex_list", + "reflex_update", + "reflex_delete", + "reflex_get_definition", + "reflex_update_definition", + "paginated_report_list", + "paginated_report_update", + "warehouse_create", + "warehouse_get", + "warehouse_list", + "warehouse_update", + "warehouse_delete", + "ml_model_create", + "ml_model_get", + "ml_model_list", + 
"ml_model_update", + "ml_model_delete", + "workspace_create", + "workspace_get", + "workspace_list", + "workspace_update", + "workspace_delete", + "workspace_assign_to_capacity", + "workspace_unassign_from_capacity", + "workspace_add_role_assignment", + "workspace_get_role_assignment", + "workspace_list_role_assignments", + "workspace_update_role_assignment", + "workspace_delete_role_assignment", + "workspace_deprovision_identity", + "workspace_provision_identity", + "eventstream_create", + "eventstream_get", + "eventstream_list", + "eventstream_update", + "eventstream_delete", + "eventstream_get_definition", + "eventstream_update_definition", + "capacity_list", + "mirrored_database_create", + "mirrored_database_get", + "mirrored_database_list", + "mirrored_database_update", + "mirrored_database_delete", + "mirrored_database_get_definition", + "mirrored_database_update_definition", + "notebook_create", + "notebook_get", + "notebook_list", + "notebook_update", + "notebook_delete", + "notebook_get_definition", + "notebook_update_definition", + "mirrored_warehouse_list", + "lakehouse_create", + "lakehouse_get", + "lakehouse_list", + "lakehouse_update", + "lakehouse_delete", + "lakehouse_run_background_job", + "lakehouse_list_tables", + "lakehouse_load_table", + "dashboard_list", + "data_pipeline_create", + "data_pipeline_get", + "data_pipeline_list", + "data_pipeline_update", + "data_pipeline_delete", + "ml_experiment_create", + "ml_experiment_get", + "ml_experiment_list", + "ml_experiment_update", + "ml_experiment_delete", + "kql_dashboard_create", + "kql_dashboard_get", + "kql_dashboard_list", + "kql_dashboard_update", + "kql_dashboard_delete", + "kql_dashboard_get_definition", + "kql_dashboard_update_definition", + "sql_endpoint_list", + "environment_create", + "environment_get", + "environment_list", + "environment_update", + "environment_delete", + "environment_get_spark_compute_published_settings", + "environment_get_spark_compute_staging_settings", + 
"environment_update_spark_compute_staging_settings", + "environment_get_published_libraries", + "environment_get_staging_libraries", + "environment_delete_staging_library", + "environment_upload_staging_library", + "environment_publish", + "environment_cancel_publish", + "spark_job_definition_create", + "spark_job_definition_get", + "spark_job_definition_list", + "spark_job_definition_update", + "spark_job_definition_delete", + "spark_job_definition_get_definition", + "spark_job_definition_update_definition", + "spark_job_definition_run_background_job", + "semantic_model_create", + "semantic_model_get", + "semantic_model_list", + "semantic_model_update", + "semantic_model_delete", + "semantic_model_get_definition", + "semantic_model_update_definition", + "kql_database_create", + "kql_database_get", + "kql_database_list", + "kql_database_update", + "kql_database_delete", + "kql_database_get_definition", + "kql_database_update_definition", + "kql_queryset_create", + "kql_queryset_get", + "kql_queryset_list", + "kql_queryset_update", + "kql_queryset_delete", + "kql_queryset_get_definition", + "kql_queryset_update_definition", + "datamart_list", + "long_running_operation_get_state", + "long_running_operation_get_result", +) diff --git a/src/msfabricutils/rest_api/capacity.py b/src/msfabricutils/rest_api/capacity.py new file mode 100644 index 0000000..11c9329 --- /dev/null +++ b/src/msfabricutils/rest_api/capacity.py @@ -0,0 +1,54 @@ +import json + +import requests +import typer + +from msfabricutils import get_fabric_bearer_token +from msfabricutils.common.remove_none import remove_none +from msfabricutils.common.shorten_dict_values import shorten_dict_values + + +def capacity_list( + continuation_token: str = None, + preview: bool = True, +) -> requests.Response: + """ + List capacities for a workspace. + + Args: + continuation_token (str | None): A token for retrieving the next page of results. + preview (bool): Whether to preview the request. 
You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/capacities" # noqa + url = f"{url}?" + if continuation_token is not None: + url = f"{url}continuationToken={continuation_token}&" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response diff --git a/src/msfabricutils/rest_api/dashboard.py b/src/msfabricutils/rest_api/dashboard.py new file mode 100644 index 0000000..8af1391 --- /dev/null +++ b/src/msfabricutils/rest_api/dashboard.py @@ -0,0 +1,56 @@ +import json + +import requests +import typer + +from msfabricutils import get_fabric_bearer_token +from msfabricutils.common.remove_none import remove_none +from msfabricutils.common.shorten_dict_values import shorten_dict_values + + +def dashboard_list( + workspace_id: str, + continuation_token: str = None, + preview: bool = True, +) -> requests.Response: + """ + List dashboards for a workspace. + + Args: + workspace_id (str): The id of the workspace to list dashboards for. + continuation_token (str | None): A token for retrieving the next page of results. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. 
+ + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/dashboards" # noqa + url = f"{url}?" + if continuation_token is not None: + url = f"{url}continuationToken={continuation_token}&" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response diff --git a/src/msfabricutils/rest_api/data_pipeline.py b/src/msfabricutils/rest_api/data_pipeline.py new file mode 100644 index 0000000..2070195 --- /dev/null +++ b/src/msfabricutils/rest_api/data_pipeline.py @@ -0,0 +1,263 @@ +import json + +import requests +import typer + +from msfabricutils import get_fabric_bearer_token +from msfabricutils.common.remove_none import remove_none +from msfabricutils.common.shorten_dict_values import shorten_dict_values +from msfabricutils.core.operations import wait_for_long_running_operation + + +def data_pipeline_create( + workspace_id: str, + display_name: str, + description: str = None, + await_lro: bool = None, + timeout: int = 60 * 5, + preview: bool = True, +) -> requests.Response: + """ + Create a data pipeline. + + Args: + workspace_id (str): The id of the workspace to create the data pipeline in. + display_name (str): The display name of the data pipeline. + description (str | None): The description of the data pipeline. 
+ await_lro (bool | None): Whether to await the long running operation. + timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/dataPipelines" # noqa + url = f"{url}?" + url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + data["displayName"] = display_name + data["description"] = description + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case 202: + if await_lro is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, retry_after=retry_after, timeout=timeout + ) + return response + case _: + return response + + +def data_pipeline_get( + workspace_id: str, + data_pipeline_id: str, + preview: bool = True, +) -> requests.Response: + """ + Get a data pipeline. + + Args: + workspace_id (str): The id of the workspace to get the data pipeline from. + data_pipeline_id (str): The id of the data pipeline to get. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. 
+ + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/dataPipelines/{data_pipeline_id}" # noqa + url = f"{url}?" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def data_pipeline_list( + workspace_id: str, + continuation_token: str = None, + preview: bool = True, +) -> requests.Response: + """ + List data pipelines for a workspace. + + Args: + workspace_id (str): The id of the workspace to list data pipelines for. + continuation_token (str | None): A token for retrieving the next page of results. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/dataPipelines" # noqa + url = f"{url}?" 
+ if continuation_token is not None: + url = f"{url}continuationToken={continuation_token}&" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def data_pipeline_update( + workspace_id: str, + data_pipeline_id: str, + display_name: str = None, + description: str = None, + preview: bool = True, +) -> requests.Response: + """ + Update a data pipeline. + + Args: + workspace_id (str): The id of the workspace to update the data pipeline in. + data_pipeline_id (str): The id of the data pipeline to update. + display_name (str | None): The display name of the data pipeline. + description (str | None): The description of the data pipeline. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/dataPipelines/{data_pipeline_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "patch" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + data["displayName"] = display_name + data["description"] = description + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def data_pipeline_delete( + workspace_id: str, + data_pipeline_id: str, + preview: bool = True, +) -> requests.Response: + """ + Delete a data pipeline. + + Args: + workspace_id (str): The id of the workspace to delete the data pipeline from. + data_pipeline_id (str): The id of the data pipeline to delete. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/dataPipelines/{data_pipeline_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "delete" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response diff --git a/src/msfabricutils/rest_api/datamart.py b/src/msfabricutils/rest_api/datamart.py new file mode 100644 index 0000000..417a386 --- /dev/null +++ b/src/msfabricutils/rest_api/datamart.py @@ -0,0 +1,56 @@ +import json + +import requests +import typer + +from msfabricutils import get_fabric_bearer_token +from msfabricutils.common.remove_none import remove_none +from msfabricutils.common.shorten_dict_values import shorten_dict_values + + +def datamart_list( + workspace_id: str, + continuation_token: str = None, + preview: bool = True, +) -> requests.Response: + """ + List datamarts for a workspace. + + Args: + workspace_id (str): The id of the workspace to list datamarts for. + continuation_token (str | None): A token for retrieving the next page of results. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/datamarts" # noqa + url = f"{url}?" 
+ if continuation_token is not None: + url = f"{url}continuationToken={continuation_token}&" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response diff --git a/src/msfabricutils/rest_api/environment.py b/src/msfabricutils/rest_api/environment.py new file mode 100644 index 0000000..def1d1c --- /dev/null +++ b/src/msfabricutils/rest_api/environment.py @@ -0,0 +1,725 @@ +import json + +import requests +import typer + +from msfabricutils import get_fabric_bearer_token +from msfabricutils.common.remove_none import remove_none +from msfabricutils.common.shorten_dict_values import shorten_dict_values +from msfabricutils.core.operations import wait_for_long_running_operation + + +def environment_create( + workspace_id: str, + display_name: str, + description: str = None, + await_lro: bool = None, + timeout: int = 60 * 5, + preview: bool = True, +) -> requests.Response: + """ + Create an environment. + + Args: + workspace_id (str): The id of the workspace to create the environment in. + display_name (str): The display name of the environment. + description (str | None): The description of the environment. + await_lro (bool | None): Whether to await the long running operation. + timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes. 
+ preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/environments" # noqa + url = f"{url}?" + url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + data["displayName"] = display_name + data["description"] = description + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case 202: + if await_lro is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, retry_after=retry_after, timeout=timeout + ) + return response + case _: + return response + + +def environment_get( + workspace_id: str, + environment_id: str, + preview: bool = True, +) -> requests.Response: + """ + Get an environment. + + Args: + workspace_id (str): The id of the workspace to get the environment from. + environment_id (str): The id of the environment to get. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/environments/{environment_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def environment_list( + workspace_id: str, + continuation_token: str = None, + preview: bool = True, +) -> requests.Response: + """ + List environments for a workspace. + + Args: + workspace_id (str): The id of the workspace to list environments for. + continuation_token (str | None): A token for retrieving the next page of results. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/environments" # noqa + url = f"{url}?" 
+ if continuation_token is not None: + url = f"{url}continuationToken={continuation_token}&" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def environment_update( + workspace_id: str, + environment_id: str, + display_name: str = None, + description: str = None, + preview: bool = True, +) -> requests.Response: + """ + Update an environment. + + Args: + workspace_id (str): The id of the workspace to update the environment in. + environment_id (str): The id of the environment to update. + display_name (str | None): The display name of the environment. + description (str | None): The description of the environment. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/environments/{environment_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "patch" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + data["displayName"] = display_name + data["description"] = description + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def environment_delete( + workspace_id: str, + environment_id: str, + preview: bool = True, +) -> requests.Response: + """ + Delete an environment. + + Args: + workspace_id (str): The id of the workspace to delete the environment from. + environment_id (str): The id of the environment to delete. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/environments/{environment_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "delete" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def environment_get_spark_compute_published_settings( + workspace_id: str, + environment_id: str, + preview: bool = True, +) -> requests.Response: + """ + Get spark compute published settings for an environment. + + Args: + workspace_id (str): The id of the workspace to get the spark compute published settings for. + environment_id (str): The id of the environment to get the spark compute published settings for. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/environments/{environment_id}/sparkComputePublishedSettings" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def environment_get_spark_compute_staging_settings( + workspace_id: str, + environment_id: str, + preview: bool = True, +) -> requests.Response: + """ + Get spark compute staging settings for an environment. + + Args: + workspace_id (str): The id of the workspace to get the spark compute staging settings for. + environment_id (str): The id of the environment to get the spark compute staging settings for. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/environments/{environment_id}/staging/sparkcompute" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def environment_update_spark_compute_staging_settings( + workspace_id: str, + environment_id: str, + instance_pool_name: str = None, + instance_pool_type: str = None, + driver_cores: int = None, + driver_memory: str = None, + executor_cores: int = None, + executor_memory: str = None, + dynamic_executor_allocation_enabled: bool = None, + min_executors: int = None, + max_executors: int = None, + spark_acls_enable: str = None, + spark_admin_acls: str = None, + runtime_version: str = None, + preview: bool = True, +) -> requests.Response: + """ + Update spark compute staging settings for an environment. + + Args: + workspace_id (str): The id of the workspace to update the spark compute staging settings for. + environment_id (str): The id of the environment to update the spark compute staging settings for. + instance_pool_name (str | None): The name of the instance pool to use for Spark Compute settings. + instance_pool_type (str | None): The type of the instance pool to use for Spark Compute settings. + driver_cores (int | None): The number of cores to use for the driver. + driver_memory (str | None): The memory to use for the driver. + executor_cores (int | None): The number of cores to use for the executors. 
+        executor_memory (str | None): The memory to use for the executors.
+        dynamic_executor_allocation_enabled (bool | None): Whether to enable dynamic executor allocation.
+        min_executors (int | None): The minimum number of executors to use for dynamic executor allocation.
+        max_executors (int | None): The maximum number of executors to use for dynamic executor allocation.
+        spark_acls_enable (str | None): Whether to enable Spark ACLs.
+        spark_admin_acls (str | None): The admin ACLs to use for Spark.
+        runtime_version (str | None): The runtime version to use for Spark Compute settings.
+        preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True.
+
+    Returns:
+        The response from the request.
+    """
+
+    url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/environments/{environment_id}/staging/sparkcompute"  # noqa
+    url = f"{url}?"
+    url = url.rstrip("&?")
+
+    method = "patch"
+    token = get_fabric_bearer_token()
+    headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"}
+
+    data = {}
+    custom_payload = {
+        "instancePool": {
+            "name": instance_pool_name, "type": instance_pool_type,
+        },
+        "driverCores": driver_cores,
+        "driverMemory": driver_memory,
+        "executorCores": executor_cores,
+        "executorMemory": executor_memory,
+        "dynamicExecutorAllocation": {
+            "enabled": dynamic_executor_allocation_enabled,
+            "minExecutors": min_executors,
+            "maxExecutors": max_executors,
+        },
+        "sparkProperties": {
+            "spark.acls.enable": spark_acls_enable,
+            "spark.admin.acls": spark_admin_acls,
+        },
+        "runtimeVersion": runtime_version,
+    }
+    data = {**data, **custom_payload}
+
+    data = remove_none(data)
+
+    if preview:
+        typer.echo(f"Method:\n{method.upper()}\n")
+        typer.echo(f"URL:\n{url}\n")
+        typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n")
+        typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n")
+        typer.confirm("Do you 
want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def environment_get_published_libraries( + workspace_id: str, + environment_id: str, + preview: bool = True, +) -> requests.Response: + """ + Get published libraries for an environment. + + Args: + workspace_id (str): The id of the workspace to get the published libraries for. + environment_id (str): The id of the environment to get the published libraries for. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/environments/{environment_id}/libraries" # noqa + url = f"{url}?" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def environment_get_staging_libraries( + workspace_id: str, + environment_id: str, + preview: bool = True, +) -> requests.Response: + """ + Get staging libraries for an environment. + + Args: + workspace_id (str): The id of the workspace to get the staging libraries for. 
+ environment_id (str): The id of the environment to get the staging libraries for. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/environments/{environment_id}/staging/libraries" # noqa + url = f"{url}?" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def environment_delete_staging_library( + workspace_id: str, + environment_id: str, + library_to_delete: str, + preview: bool = True, +) -> requests.Response: + """ + Delete a staging library for an environment. + + Args: + workspace_id (str): The id of the workspace to delete the staging library from. + environment_id (str): The id of the environment to delete the staging library from. + library_to_delete (str): The library file to delete. Must include the file extension. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/environments/{environment_id}/staging/libraries" # noqa + url = f"{url}?" 
+ if library_to_delete is not None: + url = f"{url}libraryToDelete={library_to_delete}&" + url = url.rstrip("&?") + + method = "delete" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def environment_upload_staging_library( + workspace_id: str, + environment_id: str, + preview: bool = True, +) -> requests.Response: + """ + Upload a staging library for an environment. + + Args: + workspace_id (str): The id of the workspace to upload the staged library to. + environment_id (str): The id of the environment to upload the staging library to. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/environments/{environment_id}/staging/libraries" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def environment_publish( + workspace_id: str, + environment_id: str, + preview: bool = True, +) -> requests.Response: + """ + Publish an environment. + + Args: + workspace_id (str): The id of the workspace to publish the environment for. + environment_id (str): The id of the environment to publish. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/environments/{environment_id}/staging/publish" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def environment_cancel_publish( + workspace_id: str, + environment_id: str, + preview: bool = True, +) -> requests.Response: + """ + Cancel a publish operation for an environment. + + Args: + workspace_id (str): The id of the workspace to cancel the publish operation for. + environment_id (str): The id of the environment to cancel the publish operation for. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/environments/{environment_id}/staging/cancelPublish" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response diff --git a/src/msfabricutils/rest_api/eventhouse.py b/src/msfabricutils/rest_api/eventhouse.py new file mode 100644 index 0000000..95ce3b4 --- /dev/null +++ b/src/msfabricutils/rest_api/eventhouse.py @@ -0,0 +1,436 @@ +import base64 +import json + +import requests +import typer + +from msfabricutils import get_fabric_bearer_token +from msfabricutils.common.remove_none import remove_none +from msfabricutils.common.shorten_dict_values import shorten_dict_values +from msfabricutils.core.operations import wait_for_long_running_operation + + +def eventhouse_create( + workspace_id: str, + display_name: str, + eventhouse_path: str, + description: str = None, + await_lro: bool = None, + timeout: int = 60 * 5, + preview: bool = True, +) -> requests.Response: + """ + Create an eventhouse. + + Args: + workspace_id (str): The id of the workspace to create the eventhouse in. + display_name (str): The display name of the eventhouse. + eventhouse_path (str): The path to the eventhouse to load content from. + description (str | None): The description of the eventhouse. + await_lro (bool | None): Whether to await the long running operation. + timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes. 
+ preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/eventhouses" # noqa + url = f"{url}?" + url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + with open(eventhouse_path.rstrip("/") + "/EventhouseProperties.json", "r") as f: + EventhouseProperties = base64.b64encode(f.read().encode()).decode() + + with open(eventhouse_path.rstrip("/") + "/.platform", "r") as f: + platform = base64.b64encode(f.read().encode()).decode() + + data = {} + data["displayName"] = display_name + data["description"] = description + custom_payload = { + "definition": { + "parts": [ + { + "path": "EventhouseProperties.json", + "payload": EventhouseProperties, + "payloadType": "InlineBase64", + }, + {"path": ".platform", "payload": platform, "payloadType": "InlineBase64"}, + ] + } + } + data = {**data, **custom_payload} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case 202: + if await_lro is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, retry_after=retry_after, timeout=timeout + ) + return response + case _: + return response + + +def eventhouse_get( + workspace_id: 
str, + eventhouse_id: str, + preview: bool = True, +) -> requests.Response: + """ + Get an eventhouse. + + Args: + workspace_id (str): The id of the workspace to get the eventhouse from. + eventhouse_id (str): The id of the eventhouse to get. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = ( + f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/eventhouses/{eventhouse_id}" # noqa + ) + url = f"{url}?" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def eventhouse_list( + workspace_id: str, + continuation_token: str = None, + preview: bool = True, +) -> requests.Response: + """ + List eventhouses for a workspace. + + Args: + workspace_id (str): The id of the workspace to list eventhouses for. + continuation_token (str | None): A token for retrieving the next page of results. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/eventhouses" # noqa + url = f"{url}?" 
+ if continuation_token is not None: + url = f"{url}continuationToken={continuation_token}&" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def eventhouse_update( + workspace_id: str, + eventhouse_id: str, + display_name: str = None, + description: str = None, + preview: bool = True, +) -> requests.Response: + """ + Update an eventhouse. + + Args: + workspace_id (str): The id of the workspace to update. + eventhouse_id (str): The id of the eventhouse to update. + display_name (str | None): The display name of the eventhouse. + description (str | None): The description of the eventhouse. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = ( + f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/eventhouses/{eventhouse_id}" # noqa + ) + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "patch" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + data["displayName"] = display_name + data["description"] = description + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def eventhouse_delete( + workspace_id: str, + eventhouse_id: str, + preview: bool = True, +) -> requests.Response: + """ + Delete an eventhouse. + + Args: + workspace_id (str): The id of the workspace to delete. + eventhouse_id (str): The id of the eventhouse to delete. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = ( + f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/eventhouses/{eventhouse_id}" # noqa + ) + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "delete" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def eventhouse_get_definition( + workspace_id: str, + eventhouse_id: str, + format: str = None, + await_lro: bool = None, + timeout: int = 60 * 5, + preview: bool = True, +) -> requests.Response: + """ + Get the definition of an eventhouse. + + Args: + workspace_id (str): The id of the workspace to get the eventhouse definition from. + eventhouse_id (str): The id of the eventhouse to get the definition from. + format (str | None): The format of the Eventhouse definition. Supported format is \"eventhouse\". + await_lro (bool | None): Whether to await the long running operation. + timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/eventhouses/{eventhouse_id}/getDefinition" # noqa + url = f"{url}?" 
+    if format is not None:
+        url = f"{url}format={format}&"
+    url = url.rstrip("&?")
+
+    method = "post"
+    token = get_fabric_bearer_token()
+    headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"}
+
+    data = {}
+
+    data = remove_none(data)
+
+    if preview:
+        typer.echo(f"Method:\n{method.upper()}\n")
+        typer.echo(f"URL:\n{url}\n")
+        typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n")
+        typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n")
+        typer.confirm("Do you want to run the command?", abort=True)
+
+    response = requests.request(method=method, url=url, json=data, headers=headers)
+    # response.raise_for_status()
+
+    match response.status_code:
+        case 200 | 201:
+            return response
+        case 202:
+            if await_lro is True:
+                operation_id = response.headers["x-ms-operation-id"]
+                retry_after = response.headers["Retry-After"]
+                return wait_for_long_running_operation(
+                    operation_id=operation_id, retry_after=retry_after, timeout=timeout
+                )
+            return response
+        case _:
+            return response
+
+
+def eventhouse_update_definition(
+    workspace_id: str,
+    eventhouse_id: str,
+    eventhouse_path: str,
+    update_metadata: bool = None,
+    await_lro: bool = None,
+    timeout: int = 60 * 5,
+    preview: bool = True,
+) -> requests.Response:
+    """
+    Update the definition of an eventhouse.
+
+    Args:
+        workspace_id (str): The id of the workspace to update.
+        eventhouse_id (str): The id of the eventhouse to update.
+        eventhouse_path (str): The path to the eventhouse to load content from.
+        update_metadata (bool | None): When set to true, the item's metadata is updated using the metadata in the .platform file.
+        await_lro (bool | None): Whether to await the long running operation.
+        timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes.
+        preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. 
+ + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/eventhouses/{eventhouse_id}/updateDefinition" # noqa + url = f"{url}?" + if update_metadata is not None: + url = f"{url}updateMetadata={update_metadata}&" + url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + with open(eventhouse_path.rstrip("/") + "/EventhouseProperties.json", "r") as f: + EventhouseProperties = base64.b64encode(f.read().encode()).decode() + + with open(eventhouse_path.rstrip("/") + "/.platform", "r") as f: + platform = base64.b64encode(f.read().encode()).decode() + + data = {} + custom_payload = { + "definition": { + "parts": [ + { + "path": "EventhouseProperties.json", + "payload": EventhouseProperties, + "payloadType": "InlineBase64", + }, + {"path": ".platform", "payload": platform, "payloadType": "InlineBase64"}, + ] + } + } + data = {**data, **custom_payload} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case 202: + if await_lro is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, retry_after=retry_after, timeout=timeout + ) + return response + case _: + return response diff --git a/src/msfabricutils/rest_api/eventstream.py b/src/msfabricutils/rest_api/eventstream.py new file mode 100644 index 
0000000..de3140e --- /dev/null +++ b/src/msfabricutils/rest_api/eventstream.py @@ -0,0 +1,422 @@ +import base64 +import json + +import requests +import typer + +from msfabricutils import get_fabric_bearer_token +from msfabricutils.common.remove_none import remove_none +from msfabricutils.common.shorten_dict_values import shorten_dict_values +from msfabricutils.core.operations import wait_for_long_running_operation + + +def eventstream_create( + workspace_id: str, + display_name: str, + eventstream_path: str, + description: str = None, + await_lro: bool = None, + timeout: int = 60 * 5, + preview: bool = True, +) -> requests.Response: + """ + Create an eventstream. + + Args: + workspace_id (str): The id of the workspace to create the eventstream in. + display_name (str): The display name of the eventstream. + eventstream_path (str): The path to the eventstream to load content from. + description (str | None): The description of the eventstream. + await_lro (bool | None): Whether to await the long running operation. + timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/eventstreams" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + with open(eventstream_path.rstrip("/") + "/eventstream.json", "r") as f: + eventstream = base64.b64encode(f.read().encode()).decode() + + with open(eventstream_path.rstrip("/") + "/.platform", "r") as f: + platform = base64.b64encode(f.read().encode()).decode() + + data = {} + data["displayName"] = display_name + data["description"] = description + custom_payload = { + "definition": { + "parts": [ + {"path": "eventstream.json", "payload": eventstream, "payloadType": "InlineBase64"}, + {"path": ".platform", "payload": platform, "payloadType": "InlineBase64"}, + ] + } + } + data = {**data, **custom_payload} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case 202: + if await_lro is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, retry_after=retry_after, timeout=timeout + ) + return response + case _: + return response + + +def eventstream_get( + workspace_id: str, + eventstream_id: str, + preview: bool = True, +) -> requests.Response: + """ + Get an eventstream. + + Args: + workspace_id (str): The id of the workspace to get the eventstream from. + eventstream_id (str): The id of the eventstream to get. + preview (bool): Whether to preview the request. 
You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/eventstreams/{eventstream_id}" # noqa + url = f"{url}?" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def eventstream_list( + workspace_id: str, + continuation_token: str = None, + preview: bool = True, +) -> requests.Response: + """ + List eventstreams for a workspace. + + Args: + workspace_id (str): The id of the workspace to list eventstreams for. + continuation_token (str | None): A token for retrieving the next page of results. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/eventstreams" # noqa + url = f"{url}?" 
+ if continuation_token is not None: + url = f"{url}continuationToken={continuation_token}&" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def eventstream_update( + workspace_id: str, + eventstream_id: str, + display_name: str = None, + description: str = None, + preview: bool = True, +) -> requests.Response: + """ + Update an eventstream. + + Args: + workspace_id (str): The id of the workspace to update. + eventstream_id (str): The id of the eventstream to update. + display_name (str | None): The display name of the eventstream. + description (str | None): The description of the eventstream. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/eventstreams/{eventstream_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "patch" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + data["displayName"] = display_name + data["description"] = description + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def eventstream_delete( + workspace_id: str, + eventstream_id: str, + preview: bool = True, +) -> requests.Response: + """ + Delete an eventstream. + + Args: + workspace_id (str): The id of the workspace to delete. + eventstream_id (str): The id of the eventstream to delete. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/eventstreams/{eventstream_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "delete" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def eventstream_get_definition( + workspace_id: str, + eventstream_id: str, + format: str = None, + await_lro: bool = None, + timeout: int = 60 * 5, + preview: bool = True, +) -> requests.Response: + """ + Get the definition of an eventstream. + + Args: + workspace_id (str): The id of the workspace to get the eventstream definition from. + eventstream_id (str): The id of the eventstream to get the definition from. + format (str | None): The format of the Eventstream definition. Supported format is \"eventstream\". + await_lro (bool | None): Whether to await the long running operation. + timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/eventstreams/{eventstream_id}/getDefinition" # noqa + url = f"{url}?" 
+ if format is not None: + url = f"{url}format={format}&" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case 202: + if await_lro is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, retry_after=retry_after, timeout=timeout + ) + return response + case _: + return response + + +def eventstream_update_definition( + workspace_id: str, + eventstream_id: str, + eventstream_path: str, + update_metadata: bool = None, + await_lro: bool = None, + timeout: int = 60 * 5, + preview: bool = True, +) -> requests.Response: + """ + Update the definition of an eventstream. + + Args: + workspace_id (str): The id of the workspace to update. + eventstream_id (str): The id of the eventstream to update. + eventstream_path (str): The path to the eventstream to load content from. + update_metadata (bool | None): When set to true, the item's metadata is updated using the metadata in the .platform file. + await_lro (bool | None): Whether to await the long running operation. + timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. 
+ + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/eventstreams/{eventstream_id}/updateDefinition" # noqa + url = f"{url}?" + if update_metadata is not None: + url = f"{url}updateMetadata={update_metadata}&" + url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + with open(eventstream_path.rstrip("/") + "/eventstream.json", "r") as f: + eventstream = base64.b64encode(f.read().encode()).decode() + + with open(eventstream_path.rstrip("/") + "/.platform", "r") as f: + platform = base64.b64encode(f.read().encode()).decode() + + data = {} + custom_payload = { + "definition": { + "parts": [ + {"path": "eventstream.json", "payload": eventstream, "payloadType": "InlineBase64"}, + {"path": ".platform", "payload": platform, "payloadType": "InlineBase64"}, + ] + } + } + data = {**data, **custom_payload} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case 202: + if await_lro is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, retry_after=retry_after, timeout=timeout + ) + return response + case _: + return response diff --git a/src/msfabricutils/rest_api/kql_dashboard.py b/src/msfabricutils/rest_api/kql_dashboard.py new file mode 100644 index 0000000..103a354 --- /dev/null +++ 
b/src/msfabricutils/rest_api/kql_dashboard.py @@ -0,0 +1,426 @@ +import base64 +import json + +import requests +import typer + +from msfabricutils import get_fabric_bearer_token +from msfabricutils.common.remove_none import remove_none +from msfabricutils.common.shorten_dict_values import shorten_dict_values +from msfabricutils.core.operations import wait_for_long_running_operation + + +def kql_dashboard_create( + workspace_id: str, + display_name: str, + kql_dashboard_path: str, + description: str = None, + await_lro: bool = None, + timeout: int = 60 * 5, + preview: bool = True, +) -> requests.Response: + """ + Create a kql dashboard. + + Args: + workspace_id (str): The id of the workspace to create the kql dashboard in. + display_name (str): The display name of the kql dashboard. + kql_dashboard_path (str): The path to the kql dashboard to load content from. + description (str | None): The description of the kql dashboard. + await_lro (bool | None): Whether to await the long running operation. + timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/kqlDashboards" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + with open(kql_dashboard_path.rstrip("/") + "/RealTimeDashboard.json", "r") as f: + RealTimeDashboard = base64.b64encode(f.read().encode()).decode() + + with open(kql_dashboard_path.rstrip("/") + "/.platform", "r") as f: + platform = base64.b64encode(f.read().encode()).decode() + + data = {} + data["displayName"] = display_name + data["description"] = description + custom_payload = { + "definition": { + "parts": [ + { + "path": "RealTimeDashboard.json", + "payload": RealTimeDashboard, + "payloadType": "InlineBase64", + }, + {"path": ".platform", "payload": platform, "payloadType": "InlineBase64"}, + ] + } + } + data = {**data, **custom_payload} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case 202: + if await_lro is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, retry_after=retry_after, timeout=timeout + ) + return response + case _: + return response + + +def kql_dashboard_get( + workspace_id: str, + kql_dashboard_id: str, + preview: bool = True, +) -> requests.Response: + """ + Get a kql dashboard. + + Args: + workspace_id (str): The id of the workspace to get the kql dashboard from. + kql_dashboard_id (str): The id of the kql dashboard to get. 
+ preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/kqlDashboards/{kql_dashboard_id}" # noqa + url = f"{url}?" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def kql_dashboard_list( + workspace_id: str, + continuation_token: str = None, + preview: bool = True, +) -> requests.Response: + """ + List kql dashboards for a workspace. + + Args: + workspace_id (str): The id of the workspace to list kql dashboards for. + continuation_token (str | None): A token for retrieving the next page of results. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/kqlDashboards" # noqa + url = f"{url}?" 
+ if continuation_token is not None: + url = f"{url}continuationToken={continuation_token}&" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def kql_dashboard_update( + workspace_id: str, + kql_dashboard_id: str, + display_name: str = None, + description: str = None, + preview: bool = True, +) -> requests.Response: + """ + Update a kql dashboard. + + Args: + workspace_id (str): The id of the workspace to update. + kql_dashboard_id (str): The id of the kql dashboard to update. + display_name (str | None): The display name of the kql dashboard. + description (str | None): The description of the kql dashboard. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/kqlDashboards/{kql_dashboard_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "patch" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + data["displayName"] = display_name + data["description"] = description + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def kql_dashboard_delete( + workspace_id: str, + kql_dashboard_id: str, + preview: bool = True, +) -> requests.Response: + """ + Delete a kql dashboard. + + Args: + workspace_id (str): The id of the workspace to delete. + kql_dashboard_id (str): The id of the kql dashboard to delete. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/kqlDashboards/{kql_dashboard_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "delete" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def kql_dashboard_get_definition( + workspace_id: str, + kql_dashboard_id: str, + await_lro: bool = None, + timeout: int = 60 * 5, + preview: bool = True, +) -> requests.Response: + """ + Get the definition of a kql dashboard. + + Args: + workspace_id (str): The id of the workspace to get the kql dashboard definition from. + kql_dashboard_id (str): The id of the kql dashboard to get the definition from. + await_lro (bool | None): Whether to await the long running operation. + timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/kqlDashboards/{kql_dashboard_id}/getDefinition" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case 202: + if await_lro is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, retry_after=retry_after, timeout=timeout + ) + return response + case _: + return response + + +def kql_dashboard_update_definition( + workspace_id: str, + kql_dashboard_id: str, + kql_dashboard_path: str, + update_metadata: bool = None, + await_lro: bool = None, + timeout: int = 60 * 5, + preview: bool = True, +) -> requests.Response: + """ + Update the definition of a kql dashboard. + + Args: + workspace_id (str): The id of the workspace to update. + kql_dashboard_id (str): The id of the kql dashboard to update. + kql_dashboard_path (str): The path to the kql dashboard to load content from. + update_metadata (bool | None): When set to true, the item's metadata is updated using the metadata in the .platform file. + await_lro (bool | None): Whether to await the long running operation. + timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. 
+ """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/kqlDashboards/{kql_dashboard_id}/updateDefinition" # noqa + url = f"{url}?" + if update_metadata is not None: + url = f"{url}updateMetadata={update_metadata}&" + url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + with open(kql_dashboard_path.rstrip("/") + "/RealTimeDashboard.json", "r") as f: + RealTimeDashboard = base64.b64encode(f.read().encode()).decode() + + with open(kql_dashboard_path.rstrip("/") + "/.platform", "r") as f: + platform = base64.b64encode(f.read().encode()).decode() + + data = {} + custom_payload = { + "definition": { + "parts": [ + { + "path": "RealTimeDashboard.json", + "payload": RealTimeDashboard, + "payloadType": "InlineBase64", + }, + {"path": ".platform", "payload": platform, "payloadType": "InlineBase64"}, + ] + } + } + data = {**data, **custom_payload} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case 202: + if await_lro is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, retry_after=retry_after, timeout=timeout + ) + return response + case _: + return response diff --git a/src/msfabricutils/rest_api/kql_database.py b/src/msfabricutils/rest_api/kql_database.py new file mode 100644 index 0000000..291404f --- /dev/null +++ 
b/src/msfabricutils/rest_api/kql_database.py @@ -0,0 +1,442 @@ +import base64 +import json + +import requests +import typer + +from msfabricutils import get_fabric_bearer_token +from msfabricutils.common.remove_none import remove_none +from msfabricutils.common.shorten_dict_values import shorten_dict_values +from msfabricutils.core.operations import wait_for_long_running_operation + + +def kql_database_create( + workspace_id: str, + display_name: str, + kql_database_path: str, + description: str = None, + await_lro: bool = None, + timeout: int = 60 * 5, + preview: bool = True, +) -> requests.Response: + """ + Create a kql database. + + Args: + workspace_id (str): The id of the workspace to create the kql database in. + display_name (str): The display name of the kql database. + kql_database_path (str): The path to the kql database to load content from. + description (str | None): The description of the kql database. + await_lro (bool | None): Whether to await the long running operation. + timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/kqlDatabases" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + with open(kql_database_path.rstrip("/") + "/DatabaseProperties.json", "r") as f: + DatabaseProperties = base64.b64encode(f.read().encode()).decode() + + with open(kql_database_path.rstrip("/") + "/DatabaseSchema.kql", "r") as f: + DatabaseSchema = base64.b64encode(f.read().encode()).decode() + + with open(kql_database_path.rstrip("/") + "/.platform", "r") as f: + platform = base64.b64encode(f.read().encode()).decode() + + data = {} + data["displayName"] = display_name + data["description"] = description + custom_payload = { + "definition": { + "parts": [ + { + "path": "DatabaseProperties.json", + "payload": DatabaseProperties, + "payloadType": "InlineBase64", + }, + { + "path": "DatabaseSchema.kql", + "payload": DatabaseSchema, + "payloadType": "InlineBase64", + }, + {"path": ".platform", "payload": platform, "payloadType": "InlineBase64"}, + ] + } + } + data = {**data, **custom_payload} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case 202: + if await_lro is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, retry_after=retry_after, timeout=timeout + ) + return response + case _: + return response + + +def kql_database_get( + workspace_id: str, + kql_database_id: str, + preview: bool = 
True, +) -> requests.Response: + """ + Get a kql database. + + Args: + workspace_id (str): The id of the workspace to get the kql database from. + kql_database_id (str): The id of the kql database to get. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/kqlDatabases/{kql_database_id}" # noqa + url = f"{url}?" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def kql_database_list( + workspace_id: str, + continuation_token: str = None, + preview: bool = True, +) -> requests.Response: + """ + List kql databases for a workspace. + + Args: + workspace_id (str): The id of the workspace to list kql databases for. + continuation_token (str | None): A token for retrieving the next page of results. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/kqlDatabases" # noqa + url = f"{url}?" 
+ if continuation_token is not None: + url = f"{url}continuationToken={continuation_token}&" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def kql_database_update( + workspace_id: str, + kql_database_id: str, + display_name: str = None, + description: str = None, + preview: bool = True, +) -> requests.Response: + """ + Update a kql database. + + Args: + workspace_id (str): The id of the workspace to update. + kql_database_id (str): The id of the kql database to update. + display_name (str | None): The display name of the kql database. + description (str | None): The description of the kql database. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/kqlDatabases/{kql_database_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "patch" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + data["displayName"] = display_name + data["description"] = description + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def kql_database_delete( + workspace_id: str, + kql_database_id: str, + preview: bool = True, +) -> requests.Response: + """ + Delete a kql database. + + Args: + workspace_id (str): The id of the workspace to delete. + kql_database_id (str): The id of the kql database to delete. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/kqlDatabases/{kql_database_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "delete" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def kql_database_get_definition( + workspace_id: str, + kql_database_id: str, + await_lro: bool = None, + timeout: int = 60 * 5, + preview: bool = True, +) -> requests.Response: + """ + Get the definition of a kql database. + + Args: + workspace_id (str): The id of the workspace to get the kql database definition from. + kql_database_id (str): The id of the kql database to get the definition from. + await_lro (bool | None): Whether to await the long running operation. + timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/kqlDatabases/{kql_database_id}/getDefinition" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case 202: + if await_lro is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, retry_after=retry_after, timeout=timeout + ) + return response + case _: + return response + + +def kql_database_update_definition( + workspace_id: str, + kql_database_id: str, + kql_database_path: str, + update_metadata: bool = None, + await_lro: bool = None, + timeout: int = 60 * 5, + preview: bool = True, +) -> requests.Response: + """ + Update the definition of a kql database. + + Args: + workspace_id (str): The id of the workspace to update. + kql_database_id (str): The id of the kql database to update. + kql_database_path (str): The path to the kql database to load content from. + update_metadata (bool | None): When set to true, the item's metadata is updated using the metadata in the .platform file. + await_lro (bool | None): Whether to await the long running operation. + timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. 
+ """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/kqlDatabases/{kql_database_id}/updateDefinition" # noqa + url = f"{url}?" + if update_metadata is not None: + url = f"{url}updateMetadata={update_metadata}&" + url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + with open(kql_database_path.rstrip("/") + "/DatabaseProperties.json", "r") as f: + DatabaseProperties = base64.b64encode(f.read().encode()).decode() + + with open(kql_database_path.rstrip("/") + "/DatabaseSchema.kql", "r") as f: + DatabaseSchema = base64.b64encode(f.read().encode()).decode() + + with open(kql_database_path.rstrip("/") + "/.platform", "r") as f: + platform = base64.b64encode(f.read().encode()).decode() + + data = {} + custom_payload = { + "definition": { + "parts": [ + { + "path": "DatabaseProperties.json", + "payload": DatabaseProperties, + "payloadType": "InlineBase64", + }, + { + "path": "DatabaseSchema.kql", + "payload": DatabaseSchema, + "payloadType": "InlineBase64", + }, + {"path": ".platform", "payload": platform, "payloadType": "InlineBase64"}, + ] + } + } + data = {**data, **custom_payload} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case 202: + if await_lro is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, 
retry_after=retry_after, timeout=timeout + ) + return response + case _: + return response diff --git a/src/msfabricutils/rest_api/kql_queryset.py b/src/msfabricutils/rest_api/kql_queryset.py new file mode 100644 index 0000000..7c858ce --- /dev/null +++ b/src/msfabricutils/rest_api/kql_queryset.py @@ -0,0 +1,426 @@ +import base64 +import json + +import requests +import typer + +from msfabricutils import get_fabric_bearer_token +from msfabricutils.common.remove_none import remove_none +from msfabricutils.common.shorten_dict_values import shorten_dict_values +from msfabricutils.core.operations import wait_for_long_running_operation + + +def kql_queryset_create( + workspace_id: str, + display_name: str, + kql_database_path: str, + description: str = None, + await_lro: bool = None, + timeout: int = 60 * 5, + preview: bool = True, +) -> requests.Response: + """ + Create a kql queryset. + + Args: + workspace_id (str): The id of the workspace to create the kql queryset in. + display_name (str): The display name of the kql queryset. + kql_database_path (str): The path to the kql queryset to load content from. + description (str | None): The description of the kql queryset. + await_lro (bool | None): Whether to await the long running operation. + timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/kqlQuerysets" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + with open(kql_database_path.rstrip("/") + "/RealtimeQueryset.json", "r") as f: + RealtimeQueryset = base64.b64encode(f.read().encode()).decode() + + with open(kql_database_path.rstrip("/") + "/.platform", "r") as f: + platform = base64.b64encode(f.read().encode()).decode() + + data = {} + data["displayName"] = display_name + data["description"] = description + custom_payload = { + "definition": { + "parts": [ + { + "path": "RealtimeQueryset.json", + "payload": RealtimeQueryset, + "payloadType": "InlineBase64", + }, + {"path": ".platform", "payload": platform, "payloadType": "InlineBase64"}, + ] + } + } + data = {**data, **custom_payload} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case 202: + if await_lro is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, retry_after=retry_after, timeout=timeout + ) + return response + case _: + return response + + +def kql_queryset_get( + workspace_id: str, + kql_queryset_id: str, + preview: bool = True, +) -> requests.Response: + """ + Get a kql queryset. + + Args: + workspace_id (str): The id of the workspace to get the kql queryset from. + kql_queryset_id (str): The id of the kql queryset to get. + preview (bool): Whether to preview the request. 
You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/kqlQuerysets/{kql_queryset_id}" # noqa + url = f"{url}?" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def kql_queryset_list( + workspace_id: str, + continuation_token: str = None, + preview: bool = True, +) -> requests.Response: + """ + List kql querysets for a workspace. + + Args: + workspace_id (str): The id of the workspace to list kql querysets for. + continuation_token (str | None): A token for retrieving the next page of results. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/kqlQuerysets" # noqa + url = f"{url}?" 
+ if continuation_token is not None: + url = f"{url}continuationToken={continuation_token}&" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def kql_queryset_update( + workspace_id: str, + kql_queryset_id: str, + display_name: str = None, + description: str = None, + preview: bool = True, +) -> requests.Response: + """ + Update a kql queryset. + + Args: + workspace_id (str): The id of the workspace to update. + kql_queryset_id (str): The id of the kql queryset to update. + display_name (str | None): The display name of the kql queryset. + description (str | None): The description of the kql queryset. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/kqlQuerysets/{kql_queryset_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "patch" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + data["displayName"] = display_name + data["description"] = description + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def kql_queryset_delete( + workspace_id: str, + kql_queryset_id: str, + preview: bool = True, +) -> requests.Response: + """ + Delete a kql queryset. + + Args: + workspace_id (str): The id of the workspace to delete. + kql_queryset_id (str): The id of the kql queryset to delete. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/kqlQuerysets/{kql_queryset_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "delete" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def kql_queryset_get_definition( + workspace_id: str, + kql_queryset_id: str, + await_lro: bool = None, + timeout: int = 60 * 5, + preview: bool = True, +) -> requests.Response: + """ + Get the definition of a kql queryset. + + Args: + workspace_id (str): The id of the workspace to get the kql queryset definition from. + kql_queryset_id (str): The id of the kql queryset to get the definition from. + await_lro (bool | None): Whether to await the long running operation. + timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/kqlQuerysets/{kql_queryset_id}/getDefinition" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case 202: + if await_lro is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, retry_after=retry_after, timeout=timeout + ) + return response + case _: + return response + + +def kql_queryset_update_definition( + workspace_id: str, + kql_queryset_id: str, + kql_queryset_path: str, + update_metadata: bool = None, + await_lro: bool = None, + timeout: int = 60 * 5, + preview: bool = True, +) -> requests.Response: + """ + Update the definition of a kql queryset. + + Args: + workspace_id (str): The id of the workspace to update. + kql_queryset_id (str): The id of the kql queryset to update. + kql_queryset_path (str): The path to the kql queryset to load content from. + update_metadata (bool | None): When set to true, the item's metadata is updated using the metadata in the .platform file. + await_lro (bool | None): Whether to await the long running operation. + timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. 
+ """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/kqlQuerysets/{kql_queryset_id}/updateDefinition" # noqa + url = f"{url}?" + if update_metadata is not None: + url = f"{url}updateMetadata={update_metadata}&" + url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + with open(kql_queryset_path.rstrip("/") + "/RealtimeQueryset.json", "r") as f: + RealtimeQueryset = base64.b64encode(f.read().encode()).decode() + + with open(kql_queryset_path.rstrip("/") + "/.platform", "r") as f: + platform = base64.b64encode(f.read().encode()).decode() + + data = {} + custom_payload = { + "definition": { + "parts": [ + { + "path": "RealtimeQueryset.json", + "payload": RealtimeQueryset, + "payloadType": "InlineBase64", + }, + {"path": ".platform", "payload": platform, "payloadType": "InlineBase64"}, + ] + } + } + data = {**data, **custom_payload} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case 202: + if await_lro is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, retry_after=retry_after, timeout=timeout + ) + return response + case _: + return response diff --git a/src/msfabricutils/rest_api/lakehouse.py b/src/msfabricutils/rest_api/lakehouse.py new file mode 100644 index 0000000..6e81cea --- /dev/null +++ 
b/src/msfabricutils/rest_api/lakehouse.py @@ -0,0 +1,490 @@ +import json +from typing import List + +import requests +import typer + +from msfabricutils import get_fabric_bearer_token +from msfabricutils.common.remove_none import remove_none +from msfabricutils.common.shorten_dict_values import shorten_dict_values +from msfabricutils.core.operations import wait_for_long_running_operation + + +def lakehouse_create( + workspace_id: str, + display_name: str, + description: str = None, + enable_schemas: bool = None, + await_lro: bool = None, + timeout: int = 60 * 5, + preview: bool = True, +) -> requests.Response: + """ + Create a lakehouse. + + Args: + workspace_id (str): The id of the workspace to create the lakehouse in. + display_name (str): The display name of the lakehouse. + description (str | None): The description of the lakehouse. + enable_schemas (bool | None): Whether the lakehouse is schema enabled. + await_lro (bool | None): Whether to await the long running operation. + timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/lakehouses" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + data["displayName"] = display_name + data["description"] = description + if enable_schemas is True: + custom_payload = { + "enableSchemas": True, + } + data = {**data, **custom_payload} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case 202: + if await_lro is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, retry_after=retry_after, timeout=timeout + ) + return response + case _: + return response + + +def lakehouse_get( + workspace_id: str, + lakehouse_id: str, + preview: bool = True, +) -> requests.Response: + """ + Get a lakehouse. + + Args: + workspace_id (str): The id of the workspace to get the lakehouse from. + lakehouse_id (str): The id of the lakehouse to get. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/lakehouses/{lakehouse_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def lakehouse_list( + workspace_id: str, + continuation_token: str = None, + preview: bool = True, +) -> requests.Response: + """ + List lakehouses for a workspace. + + Args: + workspace_id (str): The id of the workspace to list lakehouses for. + continuation_token (str | None): A token for retrieving the next page of results. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/lakehouses" # noqa + url = f"{url}?" 
+ if continuation_token is not None: + url = f"{url}continuationToken={continuation_token}&" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def lakehouse_update( + workspace_id: str, + lakehouse_id: str, + display_name: str = None, + description: str = None, + preview: bool = True, +) -> requests.Response: + """ + Update a lakehouse. + + Args: + workspace_id (str): The id of the workspace to update. + lakehouse_id (str): The id of the lakehouse to update. + display_name (str | None): The display name of the lakehouse. + description (str | None): The description of the lakehouse. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/lakehouses/{lakehouse_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "patch" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + data["displayName"] = display_name + data["description"] = description + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def lakehouse_delete( + workspace_id: str, + lakehouse_id: str, + preview: bool = True, +) -> requests.Response: + """ + Delete a lakehouse. + + Args: + workspace_id (str): The id of the workspace to delete. + lakehouse_id (str): The id of the lakehouse to delete. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/lakehouses/{lakehouse_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "delete" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def lakehouse_run_background_job( + workspace_id: str, + lakehouse_id: str, + job_type: str, + table_name: str, + schema_name: str = None, + v_order: bool = None, + z_order_columns: List[str] = None, + retention_period: str = None, + await_lro: bool = None, + timeout: int = 60 * 5, + preview: bool = True, +) -> requests.Response: + """ + Run on-demand table maintenance job instance. + + Args: + workspace_id (str): The id of the workspace to create a job for. + lakehouse_id (str): The id of the lakehouse to create a job for. + job_type (str): The type of the job to create. Must be \"TableMaintenance\". + table_name (str): The name of the table to run the job on. + schema_name (str | None): The name of the schema to run the job on. Only applicable for schema enabled lakehouses. + v_order (bool | None): If table should be v-ordered. + z_order_columns (List[str] | None): List of columns to z-order by. + retention_period (str | None): Retention periode in format d:hh:mm:ss. Overrides the default retention period. + await_lro (bool | None): Whether to await the long running operation. + timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes. + preview (bool): Whether to preview the request. 
You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/lakehouses/{lakehouse_id}/jobs/instances" # noqa + url = f"{url}?" + if job_type is not None: + url = f"{url}jobType={job_type}&" + url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + custom_payload = { + "executionData": { + "tableName": table_name, + "schemaName": schema_name, + "optimizeSettings": {"vOrder": v_order, "zOrderBy": z_order_columns}, + "vacuumSettings": {"retentionPeriod": retention_period}, + } + } + data = {**data, **custom_payload} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case 202: + if await_lro is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, retry_after=retry_after, timeout=timeout + ) + return response + case _: + return response + + +def lakehouse_list_tables( + workspace_id: str, + lakehouse_id: str, + continuation_token: str = None, + max_results: int = None, + preview: bool = True, +) -> requests.Response: + """ + List tables in a lakehouse. + + Args: + workspace_id (str): The id of the workspace to list tables for. + lakehouse_id (str): The id of the lakehouse to list tables for. 
+ continuation_token (str | None): A token for retrieving the next page of results. + max_results (int | None): The maximum number of results to return. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/lakehouses/{lakehouse_id}/tables" # noqa + url = f"{url}?" + if continuation_token is not None: + url = f"{url}continuationToken={continuation_token}&" + if max_results is not None: + url = f"{url}maxResults={max_results}&" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def lakehouse_load_table( + workspace_id: str, + lakehouse_id: str, + table_name: str, + relative_path: str, + path_type: str, + format: str = None, + header: bool = None, + delimiter: str = None, + mode: str = None, + file_extension: str = None, + recursive: bool = None, + await_lro: bool = None, + timeout: int = 60 * 5, + preview: bool = True, +) -> requests.Response: + """ + Load a table. + + Args: + workspace_id (str): The id of the workspace to load the table for. + lakehouse_id (str): The id of the lakehouse to load the table for. + table_name (str): The name of the table to load. 
+ relative_path (str): The relative path to the table to load. + path_type (str): The type of the path to load. Either \"File\" or \"Folder\". + format (str | None): The format of the files to load. Must be \"Parquet\" or \"Csv\". + header (bool | None): Whether the file has a header row. Only applicable for csv files. + delimiter (str | None): The delimiter of the csv files. Only applicable for csv files. + mode (str | None): The mode to load the table in. Either \"Overwrite\" or \"Append\". + file_extension (str | None): The file extension of the files to load. + recursive (bool | None): Whether to search data files recursively or not, when loading from a folder. + await_lro (bool | None): Whether to await the long running operation. + timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/lakehouses/{lakehouse_id}/tables/{table_name}/load" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + data["relativePath"] = relative_path + data["pathType"] = path_type + data["format"] = format + data["header"] = header + data["delimiter"] = delimiter + data["mode"] = mode + data["fileExtension"] = file_extension + data["recursive"] = recursive + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case 202: + if await_lro is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, retry_after=retry_after, timeout=timeout + ) + return response + case _: + return response diff --git a/src/msfabricutils/rest_api/long_running_operation.py b/src/msfabricutils/rest_api/long_running_operation.py new file mode 100644 index 0000000..5bd4cd5 --- /dev/null +++ b/src/msfabricutils/rest_api/long_running_operation.py @@ -0,0 +1,96 @@ +import json + +import requests +import typer + +from msfabricutils import get_fabric_bearer_token +from msfabricutils.common.remove_none import remove_none +from msfabricutils.common.shorten_dict_values import shorten_dict_values + + +def long_running_operation_get_state( + operation_id: str, + preview: bool = True, +) -> requests.Response: + """ + Get the state of the long running operation. + + Args: + operation_id (str): The ID of the long running operation. 
+ preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/operations/{operation_id}" # noqa + url = f"{url}?" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def long_running_operation_get_result( + operation_id: str, + preview: bool = True, +) -> requests.Response: + """ + Get the result of the long running operation. Only available when the operation status is `Succeeded`. + + Args: + operation_id (str): The ID of the long running operation. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/operations/{operation_id}/result" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response diff --git a/src/msfabricutils/rest_api/mirrored_database.py b/src/msfabricutils/rest_api/mirrored_database.py new file mode 100644 index 0000000..90c5c70 --- /dev/null +++ b/src/msfabricutils/rest_api/mirrored_database.py @@ -0,0 +1,426 @@ +import base64 +import json + +import requests +import typer + +from msfabricutils import get_fabric_bearer_token +from msfabricutils.common.remove_none import remove_none +from msfabricutils.common.shorten_dict_values import shorten_dict_values +from msfabricutils.core.operations import wait_for_long_running_operation + + +def mirrored_database_create( + workspace_id: str, + display_name: str, + mirrored_database_path: str, + description: str = None, + await_lro: bool = None, + timeout: int = 60 * 5, + preview: bool = True, +) -> requests.Response: + """ + Create a mirrored database. + + Args: + workspace_id (str): The id of the workspace to create the mirrored database in. + display_name (str): The display name of the mirrored database. + mirrored_database_path (str): The path to the mirrored database to load content from. + description (str | None): The description of the mirrored database. + await_lro (bool | None): Whether to await the long running operation. 
+ timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/mirroredDatabases" # noqa + url = f"{url}?" + url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + with open(mirrored_database_path.rstrip("/") + "/mirroredDatabase.json", "r") as f: + mirroredDatabase = base64.b64encode(f.read().encode()).decode() + + with open(mirrored_database_path.rstrip("/") + "/.platform", "r") as f: + platform = base64.b64encode(f.read().encode()).decode() + + data = {} + data["displayName"] = display_name + data["description"] = description + custom_payload = { + "definition": { + "parts": [ + { + "path": "mirroredDatabase.json", + "payload": mirroredDatabase, + "payloadType": "InlineBase64", + }, + {"path": ".platform", "payload": platform, "payloadType": "InlineBase64"}, + ] + } + } + data = {**data, **custom_payload} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case 202: + if await_lro is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, retry_after=retry_after, 
timeout=timeout + ) + return response + case _: + return response + + +def mirrored_database_get( + workspace_id: str, + mirrored_database_id: str, + preview: bool = True, +) -> requests.Response: + """ + Get a mirrored database. + + Args: + workspace_id (str): The id of the workspace to get the mirrored database from. + mirrored_database_id (str): The id of the mirrored database to get. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/mirroredDatabases/{mirrored_database_id}" # noqa + url = f"{url}?" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def mirrored_database_list( + workspace_id: str, + continuation_token: str = None, + preview: bool = True, +) -> requests.Response: + """ + List mirrored databases for a workspace. + + Args: + workspace_id (str): The id of the workspace to list mirrored databases for. + continuation_token (str | None): A token for retrieving the next page of results. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. 
+ """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/mirroredDatabases" # noqa + url = f"{url}?" + if continuation_token is not None: + url = f"{url}continuationToken={continuation_token}&" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def mirrored_database_update( + workspace_id: str, + mirrored_database_id: str, + display_name: str = None, + description: str = None, + preview: bool = True, +) -> requests.Response: + """ + Update a mirrored database. + + Args: + workspace_id (str): The id of the workspace to update. + mirrored_database_id (str): The id of the mirrored database to update. + display_name (str | None): The display name of the mirrored database. + description (str | None): The description of the mirrored database. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/mirroredDatabases/{mirrored_database_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "patch" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + data["displayName"] = display_name + data["description"] = description + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def mirrored_database_delete( + workspace_id: str, + mirrored_database_id: str, + preview: bool = True, +) -> requests.Response: + """ + Delete a mirrored database. + + Args: + workspace_id (str): The id of the workspace to delete. + mirrored_database_id (str): The id of the mirrored database to delete. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/mirroredDatabases/{mirrored_database_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "delete" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def mirrored_database_get_definition( + workspace_id: str, + mirrored_database_id: str, + await_lro: bool = None, + timeout: int = 60 * 5, + preview: bool = True, +) -> requests.Response: + """ + Get the definition of a mirrored database. + + Args: + workspace_id (str): The id of the workspace to get the mirrored database definition from. + mirrored_database_id (str): The id of the mirrored database to get the definition from. + await_lro (bool | None): Whether to await the long running operation. + timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/mirroredDatabases/{mirrored_database_id}/getDefinition" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case 202: + if await_lro is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, retry_after=retry_after, timeout=timeout + ) + return response + case _: + return response + + +def mirrored_database_update_definition( + workspace_id: str, + mirrored_database_id: str, + mirrored_database_path: str, + update_metadata: bool = None, + await_lro: bool = None, + timeout: int = 60 * 5, + preview: bool = True, +) -> requests.Response: + """ + Update the definition of a mirrored database. + + Args: + workspace_id (str): The id of the workspace to update. + mirrored_database_id (str): The id of the mirrored database to update. + mirrored_database_path (str): The path to the mirrored database to load content from. + update_metadata (bool | None): When set to true, the item's metadata is updated using the metadata in the .platform file. + await_lro (bool | None): Whether to await the long running operation. + timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. 
+ + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/mirroredDatabases/{mirrored_database_id}/updateDefinition" # noqa + url = f"{url}?" + if update_metadata is not None: + url = f"{url}updateMetadata={update_metadata}&" + url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + with open(mirrored_database_path.rstrip("/") + "/mirroredDatabase.json", "r") as f: + mirroredDatabase = base64.b64encode(f.read().encode()).decode() + + with open(mirrored_database_path.rstrip("/") + "/.platform", "r") as f: + platform = base64.b64encode(f.read().encode()).decode() + + data = {} + custom_payload = { + "definition": { + "parts": [ + { + "path": "mirroredDatabase.json", + "payload": mirroredDatabase, + "payloadType": "InlineBase64", + }, + {"path": ".platform", "payload": platform, "payloadType": "InlineBase64"}, + ] + } + } + data = {**data, **custom_payload} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case 202: + if await_lro is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, retry_after=retry_after, timeout=timeout + ) + return response + case _: + return response diff --git a/src/msfabricutils/rest_api/mirrored_warehouse.py b/src/msfabricutils/rest_api/mirrored_warehouse.py new 
file mode 100644 index 0000000..461d58a --- /dev/null +++ b/src/msfabricutils/rest_api/mirrored_warehouse.py @@ -0,0 +1,56 @@ +import json + +import requests +import typer + +from msfabricutils import get_fabric_bearer_token +from msfabricutils.common.remove_none import remove_none +from msfabricutils.common.shorten_dict_values import shorten_dict_values + + +def mirrored_warehouse_list( + workspace_id: str, + continuation_token: str = None, + preview: bool = True, +) -> requests.Response: + """ + List mirrored warehouses for a workspace. + + Args: + workspace_id (str): The id of the workspace to list mirrored warehouses for. + continuation_token (str | None): A token for retrieving the next page of results. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/mirroredWarehouses" # noqa + url = f"{url}?" 
+ if continuation_token is not None: + url = f"{url}continuationToken={continuation_token}&" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response diff --git a/src/msfabricutils/rest_api/ml_experiment.py b/src/msfabricutils/rest_api/ml_experiment.py new file mode 100644 index 0000000..f07ed64 --- /dev/null +++ b/src/msfabricutils/rest_api/ml_experiment.py @@ -0,0 +1,261 @@ +import json + +import requests +import typer + +from msfabricutils import get_fabric_bearer_token +from msfabricutils.common.remove_none import remove_none +from msfabricutils.common.shorten_dict_values import shorten_dict_values +from msfabricutils.core.operations import wait_for_long_running_operation + + +def ml_experiment_create( + workspace_id: str, + display_name: str, + description: str = None, + await_lro: bool = None, + timeout: int = 60 * 5, + preview: bool = True, +) -> requests.Response: + """ + Create a ml experiment. + + Args: + workspace_id (str): The id of the workspace to create the ml experiment in. + display_name (str): The display name of the ml experiment. + description (str | None): The description of the ml experiment. + await_lro (bool | None): Whether to await the long running operation. + timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes. 
+ preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/mlExperiments" # noqa + url = f"{url}?" + url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + data["displayName"] = display_name + data["description"] = description + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case 202: + if await_lro is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, retry_after=retry_after, timeout=timeout + ) + return response + case _: + return response + + +def ml_experiment_get( + workspace_id: str, + ml_experiment_id: str, + preview: bool = True, +) -> requests.Response: + """ + Get a ml experiment. + + Args: + workspace_id (str): The id of the workspace to get the ml experiment from. + ml_experiment_id (str): The id of the ml experiment to get. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. 
+ """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/mlExperiments/{ml_experiment_id}" # noqa + url = f"{url}?" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def ml_experiment_list( + workspace_id: str, + continuation_token: str = None, + preview: bool = True, +) -> requests.Response: + """ + List ml experiments for a workspace. + + Args: + workspace_id (str): The id of the workspace to list data pipelines for. + continuation_token (str | None): A token for retrieving the next page of results. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/mlExperiments" # noqa + url = f"{url}?" 
+ if continuation_token is not None: + url = f"{url}continuationToken={continuation_token}&" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def ml_experiment_update( + workspace_id: str, + display_name: str = None, + description: str = None, + preview: bool = True, +) -> requests.Response: + """ + Update a ml experiment. + + Args: + workspace_id (str): The id of the workspace to update the ml experiment in. + display_name (str | None): The display name of the ml experiment. + description (str | None): The description of the ml experiment. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/mlExperiments/{ml_experiment_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "patch" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + data["displayName"] = display_name + data["description"] = description + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def ml_experiment_delete( + workspace_id: str, + ml_experiment_id: str, + preview: bool = True, +) -> requests.Response: + """ + Delete a ml experiment. + + Args: + workspace_id (str): The id of the workspace to delete the ml experiment from. + ml_experiment_id (str): The id of the ml experiment to delete. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/mlExperiments/{ml_experiment_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "delete" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response diff --git a/src/msfabricutils/rest_api/ml_model.py b/src/msfabricutils/rest_api/ml_model.py new file mode 100644 index 0000000..96880fe --- /dev/null +++ b/src/msfabricutils/rest_api/ml_model.py @@ -0,0 +1,261 @@ +import json + +import requests +import typer + +from msfabricutils import get_fabric_bearer_token +from msfabricutils.common.remove_none import remove_none +from msfabricutils.common.shorten_dict_values import shorten_dict_values +from msfabricutils.core.operations import wait_for_long_running_operation + + +def ml_model_create( + workspace_id: str, + display_name: str, + description: str = None, + await_lro: bool = None, + timeout: int = 60 * 5, + preview: bool = True, +) -> requests.Response: + """ + Create a ml model. + + Args: + workspace_id (str): The id of the workspace to create the ml model in. + display_name (str): The display name of the ml model. + description (str | None): The description of the ml model. + await_lro (bool | None): Whether to await the long running operation. + timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. 
+ + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/mlModels" # noqa + url = f"{url}?" + url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + data["displayName"] = display_name + data["description"] = description + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case 202: + if await_lro is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, retry_after=retry_after, timeout=timeout + ) + return response + case _: + return response + + +def ml_model_get( + workspace_id: str, + ml_model_id: str, + preview: bool = True, +) -> requests.Response: + """ + Get a ml model. + + Args: + workspace_id (str): The id of the workspace to get the ml model from. + ml_model_id (str): The id of the ml model to get. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/mlModels/{ml_model_id}" # noqa + url = f"{url}?" 
+    url = url.rstrip("&?")
+
+    method = "get"
+    token = get_fabric_bearer_token()
+    headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"}
+
+    data = {}
+
+    data = remove_none(data)
+
+    if preview:
+        typer.echo(f"Method:\n{method.upper()}\n")
+        typer.echo(f"URL:\n{url}\n")
+        typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n")
+        typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n")
+        typer.confirm("Do you want to run the command?", abort=True)
+
+    response = requests.request(method=method, url=url, json=data, headers=headers)
+    # response.raise_for_status()
+
+    match response.status_code:
+        case 200 | 201:
+            return response
+        case _:
+            return response
+
+
+def ml_model_list(
+    workspace_id: str,
+    continuation_token: str = None,
+    preview: bool = True,
+) -> requests.Response:
+    """
+    List ml models for a workspace.
+
+    Args:
+        workspace_id (str): The id of the workspace to list ml models for.
+        continuation_token (str | None): A token for retrieving the next page of results.
+        preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True.
+
+    Returns:
+        The response from the request.
+    """
+
+    url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/mlModels"  # noqa
+    url = f"{url}?"
+    if continuation_token is not None:
+        url = f"{url}continuationToken={continuation_token}&"
+    url = url.rstrip("&?")
+
+    method = "get"
+    token = get_fabric_bearer_token()
+    headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"}
+
+    data = {}
+
+    data = remove_none(data)
+
+    if preview:
+        typer.echo(f"Method:\n{method.upper()}\n")
+        typer.echo(f"URL:\n{url}\n")
+        typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n")
+        typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n")
+        typer.confirm("Do you want to run the command?", abort=True)
+
+    response = requests.request(method=method, url=url, json=data, headers=headers)
+    # response.raise_for_status()
+
+    match response.status_code:
+        case 200 | 201:
+            return response
+        case _:
+            return response
+
+
+def ml_model_update(
+    workspace_id: str,
+    ml_model_id: str,
+    display_name: str = None,
+    description: str = None,
+    preview: bool = True,
+) -> requests.Response:
+    """
+    Update a ml model.
+
+    Args:
+        workspace_id (str): The id of the workspace to update the ml model in.
+        ml_model_id (str): The id of the ml model to update.
+        display_name (str | None): The display name of the ml model.
+        description (str | None): The description of the ml model.
+        preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True.
+
+    Returns:
+        The response from the request.
+    """
+
+    url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/mlModels/{ml_model_id}"  # noqa
+    url = f"{url}?"
+ url = url.rstrip("&?") + + method = "patch" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + data["displayName"] = display_name + data["description"] = description + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def ml_model_delete( + workspace_id: str, + ml_model_id: str, + preview: bool = True, +) -> requests.Response: + """ + Delete a ml model. + + Args: + workspace_id (str): The id of the workspace to delete the ml model from. + ml_model_id (str): The id of the ml model to delete. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/mlModels/{ml_model_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "delete" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response diff --git a/src/msfabricutils/rest_api/notebook.py b/src/msfabricutils/rest_api/notebook.py new file mode 100644 index 0000000..5e3bcfe --- /dev/null +++ b/src/msfabricutils/rest_api/notebook.py @@ -0,0 +1,432 @@ +import base64 +import json + +import requests +import typer + +from msfabricutils import get_fabric_bearer_token +from msfabricutils.common.remove_none import remove_none +from msfabricutils.common.shorten_dict_values import shorten_dict_values +from msfabricutils.core.operations import wait_for_long_running_operation + + +def notebook_create( + workspace_id: str, + display_name: str, + notebook_path: str, + description: str = None, + await_lro: bool = None, + timeout: int = 60 * 5, + preview: bool = True, +) -> requests.Response: + """ + Create a notebook. + + Args: + workspace_id (str): The id of the workspace to create the notebook in. + display_name (str): The display name of the notebook. + notebook_path (str): The path to the notebook to load content from. + description (str | None): The description of the notebook. + await_lro (bool | None): Whether to await the long running operation. + timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes. 
+ preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/notebooks" # noqa + url = f"{url}?" + url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + with open(notebook_path.rstrip("/") + "/notebook-content.py", "r") as f: + notebook_content = base64.b64encode(f.read().encode()).decode() + + with open(notebook_path.rstrip("/") + "/.platform", "r") as f: + platform = base64.b64encode(f.read().encode()).decode() + + data = {} + data["displayName"] = display_name + data["description"] = description + custom_payload = { + "definition": { + "format": "ipynb", + "parts": [ + { + "path": "notebook-content.py", + "payload": notebook_content, + "payloadType": "InlineBase64", + }, + {"path": ".platform", "payload": platform, "payloadType": "InlineBase64"}, + ], + } + } + data = {**data, **custom_payload} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case 202: + if await_lro is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, retry_after=retry_after, timeout=timeout + ) + return response + case _: + return response + + +def notebook_get( + workspace_id: str, 
+ notebook_id: str, + preview: bool = True, +) -> requests.Response: + """ + Get a notebook. + + Args: + workspace_id (str): The id of the workspace to get the notebook from. + notebook_id (str): The id of the notebook to get. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/notebooks/{notebook_id}" # noqa + url = f"{url}?" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def notebook_list( + workspace_id: str, + continuation_token: str = None, + preview: bool = True, +) -> requests.Response: + """ + List notebooks for a workspace. + + Args: + workspace_id (str): The id of the workspace to list notebooks for. + continuation_token (str | None): A token for retrieving the next page of results. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/notebooks" # noqa + url = f"{url}?" 
+ if continuation_token is not None: + url = f"{url}continuationToken={continuation_token}&" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def notebook_update( + workspace_id: str, + notebook_id: str, + display_name: str = None, + description: str = None, + preview: bool = True, +) -> requests.Response: + """ + Update a notebook. + + Args: + workspace_id (str): The id of the workspace to update. + notebook_id (str): The id of the notebook to update. + display_name (str | None): The display name of the notebook. + description (str | None): The description of the notebook. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/notebooks/{notebook_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "patch" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + data["displayName"] = display_name + data["description"] = description + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def notebook_delete( + workspace_id: str, + notebook_id: str, + preview: bool = True, +) -> requests.Response: + """ + Delete a notebook. + + Args: + workspace_id (str): The id of the workspace to delete. + notebook_id (str): The id of the notebook to delete. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/notebooks/{notebook_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "delete" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def notebook_get_definition( + workspace_id: str, + notebook_id: str, + format: str = None, + await_lro: bool = None, + timeout: int = 60 * 5, + preview: bool = True, +) -> requests.Response: + """ + Get the definition of a notebook. + + Args: + workspace_id (str): The id of the workspace to get the notebook definition from. + notebook_id (str): The id of the notebook to get the definition from. + format (str | None): The format of the Notebook definition. Supported format is \"ipynb\". + await_lro (bool | None): Whether to await the long running operation. + timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/notebooks/{notebook_id}/getDefinition" # noqa + url = f"{url}?" 
+ if format is not None: + url = f"{url}format={format}&" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case 202: + if await_lro is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, retry_after=retry_after, timeout=timeout + ) + return response + case _: + return response + + +def notebook_update_definition( + workspace_id: str, + notebook_id: str, + notebook_path: str, + update_metadata: bool = None, + await_lro: bool = None, + timeout: int = 60 * 5, + preview: bool = True, +) -> requests.Response: + """ + Update the definition of a notebook. + + Args: + workspace_id (str): The id of the workspace to update. + notebook_id (str): The id of the notebook to update. + notebook_path (str): The path to the notebook to load content from. + update_metadata (bool | None): When set to true, the item's metadata is updated using the metadata in the .platform file. + await_lro (bool | None): Whether to await the long running operation. + timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. 
+ + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/notebooks/{notebook_id}/updateDefinition" # noqa + url = f"{url}?" + if update_metadata is not None: + url = f"{url}updateMetadata={update_metadata}&" + url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + with open(notebook_path.rstrip("/") + "/notebook-content.py", "r") as f: + notebook_content = base64.b64encode(f.read().encode()).decode() + + with open(notebook_path.rstrip("/") + "/.platform", "r") as f: + platform = base64.b64encode(f.read().encode()).decode() + + data = {} + custom_payload = { + "definition": { + "format": "ipynb", + "parts": [ + { + "path": "notebook-content.py", + "payload": notebook_content, + "payloadType": "InlineBase64", + }, + {"path": ".platform", "payload": platform, "payloadType": "InlineBase64"}, + ], + } + } + data = {**data, **custom_payload} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case 202: + if await_lro is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, retry_after=retry_after, timeout=timeout + ) + return response + case _: + return response diff --git a/src/msfabricutils/rest_api/paginated_report.py b/src/msfabricutils/rest_api/paginated_report.py new file mode 100644 
index 0000000..8de8601 --- /dev/null +++ b/src/msfabricutils/rest_api/paginated_report.py @@ -0,0 +1,104 @@ +import json + +import requests +import typer + +from msfabricutils import get_fabric_bearer_token +from msfabricutils.common.remove_none import remove_none +from msfabricutils.common.shorten_dict_values import shorten_dict_values + + +def paginated_report_list( + continuation_token: str = None, + preview: bool = True, +) -> requests.Response: + """ + List paginated reports for a workspace. + + Args: + continuation_token (str | None): A token for retrieving the next page of results. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/paginated-reports" # noqa + url = f"{url}?" + if continuation_token is not None: + url = f"{url}continuationToken={continuation_token}&" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def paginated_report_update( + paginated_report_id: str, + display_name: str = None, + description: str = None, + preview: bool = True, +) -> requests.Response: + """ + Update a paginated report. + + Args: + paginated_report_id (str): The id of the paginated report to update. 
+ display_name (str | None): The display name of the paginated report. + description (str | None): The description of the paginated report. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/paginated-reports/{paginated_report_id}" # noqa + url = f"{url}?" + url = url.rstrip("&?") + + method = "patch" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + data["displayName"] = display_name + data["description"] = description + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response diff --git a/src/msfabricutils/rest_api/reflex.py b/src/msfabricutils/rest_api/reflex.py new file mode 100644 index 0000000..eda076f --- /dev/null +++ b/src/msfabricutils/rest_api/reflex.py @@ -0,0 +1,430 @@ +import base64 +import json + +import requests +import typer + +from msfabricutils import get_fabric_bearer_token +from msfabricutils.common.remove_none import remove_none +from msfabricutils.common.shorten_dict_values import shorten_dict_values +from msfabricutils.core.operations import wait_for_long_running_operation + + +def reflex_create( + workspace_id: str, + display_name: str, + reflex_path: str, + description: str = None, + await_lro: bool = None, + timeout: int = 60 * 5, + preview: bool = True, +) -> requests.Response: + 
""" + Create a reflex. + + Args: + workspace_id (str): The id of the workspace to create the reflex in. + display_name (str): The display name of the reflex. + reflex_path (str): The path to the reflex to load content from. + description (str | None): The description of the reflex. + await_lro (bool | None): Whether to await the long running operation. + timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/reflexes" # noqa + url = f"{url}?" + url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + with open(reflex_path.rstrip("/") + "/ReflexEntities.json", "r") as f: + ReflexEntities = base64.b64encode(f.read().encode()).decode() + + with open(reflex_path.rstrip("/") + "/.platform", "r") as f: + platform = base64.b64encode(f.read().encode()).decode() + + data = {} + data["displayName"] = display_name + data["description"] = description + custom_payload = { + "definition": { + "parts": [ + { + "path": "ReflexEntities.json", + "payload": ReflexEntities, + "payloadType": "InlineBase64", + }, + {"path": ".platform", "payload": platform, "payloadType": "InlineBase64"}, + ] + } + } + data = {**data, **custom_payload} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() 
# NOTE(review): SOURCE is a whitespace-mangled diff paste. This span began
# mid-function (the 202/long-running-operation tail of an out-of-view
# function, presumably reflex_create); that truncated fragment cannot be
# reconstructed from here and is not reproduced.


def _reflex_request(
    method: str,
    url: str,
    data: dict,
    preview: bool,
    await_lro: bool | None = None,
    timeout: int = 60 * 5,
) -> requests.Response:
    """Shared request driver for the reflex endpoints.

    Fetches a bearer token, strips ``None`` values from the body, optionally
    echoes the request and asks for confirmation (aborting on "no"), sends the
    request, and — when the API answers 202 and ``await_lro`` is True — blocks
    until the long running operation completes.

    Args:
        method (str): HTTP method ("get", "post", "patch" or "delete").
        url (str): Fully built request URL, query string included.
        data (dict): JSON body; ``None`` values are removed before sending.
        preview (bool): Whether to preview the request before executing it.
        await_lro (bool | None): Whether to await a 202 long running operation.
        timeout (int): Timeout for the long running operation (seconds).

    Returns:
        The response from the request (or the awaited LRO result for 202s).
    """
    token = get_fabric_bearer_token()
    headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"}
    data = remove_none(data)

    if preview:
        typer.echo(f"Method:\n{method.upper()}\n")
        typer.echo(f"URL:\n{url}\n")
        typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n")
        typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n")
        typer.confirm("Do you want to run the command?", abort=True)

    # NOTE(review): like the original generated code, the JSON body is sent
    # even for GET/DELETE requests — confirm the Fabric API tolerates this.
    response = requests.request(method=method, url=url, json=data, headers=headers)

    if response.status_code == 202 and await_lro is True:
        return wait_for_long_running_operation(
            operation_id=response.headers["x-ms-operation-id"],
            retry_after=response.headers["Retry-After"],
            timeout=timeout,
        )
    return response


def _reflex_part(path: str) -> str:
    """Read the text file at ``path`` and return its content base64-encoded."""
    with open(path, "r") as f:
        return base64.b64encode(f.read().encode()).decode()


def reflex_get(
    workspace_id: str,
    reflex_id: str,
    preview: bool = True,
) -> requests.Response:
    """
    Get a reflex.

    Args:
        workspace_id (str): The id of the workspace to get the reflex from.
        reflex_id (str): The id of the reflex to get.
        preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True.

    Returns:
        The response from the request.
    """
    url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/reflexes/{reflex_id}"  # noqa
    return _reflex_request("get", url, {}, preview)


def reflex_list(
    workspace_id: str,
    continuation_token: str | None = None,
    preview: bool = True,
) -> requests.Response:
    """
    List reflexes for a workspace.

    Args:
        workspace_id (str): The id of the workspace to list reflexes for.
        continuation_token (str | None): A token for retrieving the next page of results.
        preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True.

    Returns:
        The response from the request.
    """
    url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/reflexes"  # noqa
    if continuation_token is not None:
        url = f"{url}?continuationToken={continuation_token}"
    return _reflex_request("get", url, {}, preview)


def reflex_update(
    workspace_id: str,
    reflex_id: str,
    display_name: str | None = None,
    description: str | None = None,
    preview: bool = True,
) -> requests.Response:
    """
    Update a reflex.

    Args:
        workspace_id (str): The id of the workspace to update.
        reflex_id (str): The id of the reflex to update.
        display_name (str | None): The display name of the reflex.
        description (str | None): The description of the reflex.
        preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True.

    Returns:
        The response from the request.
    """
    url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/reflexes/{reflex_id}"  # noqa
    data = {"displayName": display_name, "description": description}
    return _reflex_request("patch", url, data, preview)


def reflex_delete(
    workspace_id: str,
    reflex_id: str,
    preview: bool = True,
) -> requests.Response:
    """
    Delete a reflex.

    Args:
        workspace_id (str): The id of the workspace to delete the reflex from.
        reflex_id (str): The id of the reflex to delete.
        preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True.

    Returns:
        The response from the request.
    """
    url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/reflexes/{reflex_id}"  # noqa
    return _reflex_request("delete", url, {}, preview)


def reflex_get_definition(
    workspace_id: str,
    reflex_id: str,
    format: str | None = None,
    await_lro: bool | None = None,
    timeout: int = 60 * 5,
    preview: bool = True,
) -> requests.Response:
    """
    Get the definition of a reflex.

    Args:
        workspace_id (str): The id of the workspace to get the reflex definition from.
        reflex_id (str): The id of the reflex to get the definition from.
        format (str | None): The format of the reflex definition. NOTE(review): the original docstring said "ipynb", copied from the notebook endpoint — confirm the supported formats for reflexes against the Fabric API reference.
        await_lro (bool | None): Whether to await the long running operation.
        timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes.
        preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True.

    Returns:
        The response from the request.
    """
    url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/reflexes/{reflex_id}/getDefinition"  # noqa
    if format is not None:
        url = f"{url}?format={format}"
    # NOTE(review): the generated code issues GET here; the Fabric
    # "getDefinition" endpoints are documented as POST — verify before release.
    return _reflex_request("get", url, {}, preview, await_lro=await_lro, timeout=timeout)


def reflex_update_definition(
    workspace_id: str,
    reflex_id: str,
    reflex_path: str,
    update_metadata: bool | None = None,
    await_lro: bool | None = None,
    timeout: int = 60 * 5,
    preview: bool = True,
) -> requests.Response:
    """
    Update the definition of a reflex.

    Args:
        workspace_id (str): The id of the workspace to update.
        reflex_id (str): The id of the reflex to update.
        reflex_path (str): The path to the reflex to load content from.
        update_metadata (bool | None): When set to true, the item's metadata is updated using the metadata in the .platform file.
        await_lro (bool | None): Whether to await the long running operation.
        timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes.
        preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True.

    Returns:
        The response from the request.
    """
    url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/reflexes/{reflex_id}/updateDefinition"  # noqa
    if update_metadata is not None:
        # Fix: interpolating the Python bool directly produced
        # "updateMetadata=True"; REST query booleans are lowercase.
        url = f"{url}?updateMetadata={str(update_metadata).lower()}"

    base_path = reflex_path.rstrip("/")
    data = {
        "definition": {
            "parts": [
                {
                    "path": "ReflexEntities.json",
                    "payload": _reflex_part(f"{base_path}/ReflexEntities.json"),
                    "payloadType": "InlineBase64",
                },
                {
                    "path": ".platform",
                    "payload": _reflex_part(f"{base_path}/.platform"),
                    "payloadType": "InlineBase64",
                },
            ]
        }
    }
    return _reflex_request("post", url, data, preview, await_lro=await_lro, timeout=timeout)

# --- SOURCE continued with a new generated file:
# src/msfabricutils/rest_api/report.py ---
import base64
import json

import requests
import typer

from msfabricutils import get_fabric_bearer_token
from msfabricutils.common.remove_none import remove_none
from msfabricutils.common.shorten_dict_values import shorten_dict_values
from msfabricutils.core.operations import wait_for_long_running_operation


def _report_request(
    method: str,
    url: str,
    data: dict,
    preview: bool,
    await_lro: bool | None = None,
    timeout: int = 60 * 5,
) -> requests.Response:
    """Shared request driver for the report endpoints.

    Fetches a bearer token, strips ``None`` values from the body, optionally
    echoes the request and asks for confirmation (aborting on "no"), sends the
    request, and — when the API answers 202 and ``await_lro`` is True — blocks
    until the long running operation completes.

    Args:
        method (str): HTTP method ("get", "post", "patch" or "delete").
        url (str): Fully built request URL, query string included.
        data (dict): JSON body; ``None`` values are removed before sending.
        preview (bool): Whether to preview the request before executing it.
        await_lro (bool | None): Whether to await a 202 long running operation.
        timeout (int): Timeout for the long running operation (seconds).

    Returns:
        The response from the request (or the awaited LRO result for 202s).
    """
    token = get_fabric_bearer_token()
    headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"}
    data = remove_none(data)

    if preview:
        typer.echo(f"Method:\n{method.upper()}\n")
        typer.echo(f"URL:\n{url}\n")
        typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n")
        typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n")
        typer.confirm("Do you want to run the command?", abort=True)

    # NOTE(review): like the original generated code, the JSON body is sent
    # even for GET/DELETE requests — confirm the Fabric API tolerates this.
    response = requests.request(method=method, url=url, json=data, headers=headers)

    if response.status_code == 202 and await_lro is True:
        return wait_for_long_running_operation(
            operation_id=response.headers["x-ms-operation-id"],
            retry_after=response.headers["Retry-After"],
            timeout=timeout,
        )
    return response


def _report_part(path: str) -> str:
    """Read the text file at ``path`` and return its content base64-encoded."""
    with open(path, "r") as f:
        return base64.b64encode(f.read().encode()).decode()


def _report_definition(report_path: str) -> dict:
    """Build the inline-base64 definition payload from a report folder.

    Expects ``definition.pbir``, ``report.json`` and ``.platform`` inside
    ``report_path``.
    """
    base_path = report_path.rstrip("/")
    return {
        "definition": {
            "parts": [
                {
                    "path": "definition.pbir",
                    "payload": _report_part(f"{base_path}/definition.pbir"),
                    "payloadType": "InlineBase64",
                },
                {
                    "path": "report.json",
                    "payload": _report_part(f"{base_path}/report.json"),
                    "payloadType": "InlineBase64",
                },
                {
                    "path": ".platform",
                    "payload": _report_part(f"{base_path}/.platform"),
                    "payloadType": "InlineBase64",
                },
            ]
        }
    }


def report_create(
    workspace_id: str,
    display_name: str,
    report_path: str,
    description: str | None = None,
    await_lro: bool | None = None,
    timeout: int = 60 * 5,
    preview: bool = True,
) -> requests.Response:
    """
    Create a report.

    Args:
        workspace_id (str): The id of the workspace to create the report in.
        display_name (str): The display name of the report.
        report_path (str): The path to the report to load content from.
        description (str | None): The description of the report.
        await_lro (bool | None): Whether to await the long running operation.
        timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes.
        preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True.

    Returns:
        The response from the request.
    """
    url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/reports"  # noqa
    data = {"displayName": display_name, "description": description}
    data.update(_report_definition(report_path))
    return _report_request("post", url, data, preview, await_lro=await_lro, timeout=timeout)


def report_get(
    workspace_id: str,
    report_id: str,
    preview: bool = True,
) -> requests.Response:
    """
    Get a report.

    Args:
        workspace_id (str): The id of the workspace to get the report from.
        report_id (str): The id of the report to get.
        preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True.

    Returns:
        The response from the request.
    """
    url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/reports/{report_id}"  # noqa
    return _report_request("get", url, {}, preview)


def report_list(
    workspace_id: str,
    continuation_token: str | None = None,
    preview: bool = True,
) -> requests.Response:
    """
    List reports for a workspace.

    Args:
        workspace_id (str): The id of the workspace to list reports for.
        continuation_token (str | None): A token for retrieving the next page of results.
        preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True.

    Returns:
        The response from the request.
    """
    url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/reports"  # noqa
    if continuation_token is not None:
        url = f"{url}?continuationToken={continuation_token}"
    return _report_request("get", url, {}, preview)


def report_update(
    workspace_id: str,
    report_id: str,
    display_name: str | None = None,
    description: str | None = None,
    preview: bool = True,
) -> requests.Response:
    """
    Update a report.

    Args:
        workspace_id (str): The id of the workspace to update.
        report_id (str): The id of the report to update.
        display_name (str | None): The display name of the report.
        description (str | None): The description of the report.
        preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True.

    Returns:
        The response from the request.
    """
    url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/reports/{report_id}"  # noqa
    data = {"displayName": display_name, "description": description}
    return _report_request("patch", url, data, preview)


def report_delete(
    workspace_id: str,
    report_id: str,
    preview: bool = True,
) -> requests.Response:
    """
    Delete a report.

    Args:
        workspace_id (str): The id of the workspace to delete the report from.
        report_id (str): The id of the report to delete.
        preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True.

    Returns:
        The response from the request.
    """
    url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/reports/{report_id}"  # noqa
    return _report_request("delete", url, {}, preview)


def report_get_definition(
    workspace_id: str,
    report_id: str,
    format: str | None = None,
    await_lro: bool | None = None,
    timeout: int = 60 * 5,
    preview: bool = True,
) -> requests.Response:
    """
    Get the definition of a report.

    Args:
        workspace_id (str): The id of the workspace to get the report definition from.
        report_id (str): The id of the report to get the definition from.
        format (str | None): The format of the report definition. NOTE(review): the original docstring said "ipynb", copied from the notebook endpoint — confirm the supported report formats against the Fabric API reference.
        await_lro (bool | None): Whether to await the long running operation.
        timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes.
        preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True.

    Returns:
        The response from the request.
    """
    url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/reports/{report_id}/getDefinition"  # noqa
    if format is not None:
        url = f"{url}?format={format}"
    # NOTE(review): the generated code issues GET here; the Fabric
    # "getDefinition" endpoints are documented as POST — verify before release.
    return _report_request("get", url, {}, preview, await_lro=await_lro, timeout=timeout)


def report_update_definition(
    workspace_id: str,
    report_id: str,
    report_path: str,
    update_metadata: bool | None = None,
    await_lro: bool | None = None,
    timeout: int = 60 * 5,
    preview: bool = True,
) -> requests.Response:
    """
    Update the definition of a report.

    Args:
        workspace_id (str): The id of the workspace to update.
        report_id (str): The id of the report to update.
        report_path (str): The path to the report to load content from.
        update_metadata (bool | None): When set to true, the item's metadata is updated using the metadata in the .platform file.
        await_lro (bool | None): Whether to await the long running operation.
        timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes.
        preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True.

    Returns:
        The response from the request.
    """
    url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/reports/{report_id}/updateDefinition"  # noqa
    if update_metadata is not None:
        # Fix: interpolating the Python bool directly produced
        # "updateMetadata=True"; REST query booleans are lowercase.
        url = f"{url}?updateMetadata={str(update_metadata).lower()}"
    data = _report_definition(report_path)
    return _report_request("post", url, data, preview, await_lro=await_lro, timeout=timeout)

# --- SOURCE continued with a new generated file:
# src/msfabricutils/rest_api/semantic_model.py ---
# (diff-header residue for src/msfabricutils/rest_api/semantic_model.py removed)

import base64
import json

import requests
import typer

from msfabricutils import get_fabric_bearer_token
from msfabricutils.common.remove_none import remove_none
from msfabricutils.common.shorten_dict_values import shorten_dict_values
from msfabricutils.core.operations import wait_for_long_running_operation


def _semantic_model_request(
    method: str,
    url: str,
    data: dict,
    preview: bool,
    await_lro: bool | None = None,
    timeout: int = 60 * 5,
) -> requests.Response:
    """Shared request driver for the semantic model endpoints.

    Fetches a bearer token, strips ``None`` values from the body, optionally
    echoes the request and asks for confirmation (aborting on "no"), sends the
    request, and — when the API answers 202 and ``await_lro`` is True — blocks
    until the long running operation completes.

    Args:
        method (str): HTTP method ("get", "post", "patch" or "delete").
        url (str): Fully built request URL, query string included.
        data (dict): JSON body; ``None`` values are removed before sending.
        preview (bool): Whether to preview the request before executing it.
        await_lro (bool | None): Whether to await a 202 long running operation.
        timeout (int): Timeout for the long running operation (seconds).

    Returns:
        The response from the request (or the awaited LRO result for 202s).
    """
    token = get_fabric_bearer_token()
    headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"}
    data = remove_none(data)

    if preview:
        typer.echo(f"Method:\n{method.upper()}\n")
        typer.echo(f"URL:\n{url}\n")
        typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n")
        typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n")
        typer.confirm("Do you want to run the command?", abort=True)

    # NOTE(review): like the original generated code, the JSON body is sent
    # even for GET/DELETE requests — confirm the Fabric API tolerates this.
    response = requests.request(method=method, url=url, json=data, headers=headers)

    if response.status_code == 202 and await_lro is True:
        return wait_for_long_running_operation(
            operation_id=response.headers["x-ms-operation-id"],
            retry_after=response.headers["Retry-After"],
            timeout=timeout,
        )
    return response


def _semantic_model_part(path: str) -> str:
    """Read the text file at ``path`` and return its content base64-encoded."""
    with open(path, "r") as f:
        return base64.b64encode(f.read().encode()).decode()


def _semantic_model_definition(semantic_model_path: str) -> dict:
    """Build the inline-base64 definition payload from a semantic model folder.

    Expects ``definition.pbism``, ``model.bim`` and ``.platform`` inside
    ``semantic_model_path``.
    """
    base_path = semantic_model_path.rstrip("/")
    return {
        "definition": {
            "parts": [
                {
                    "path": "definition.pbism",
                    "payload": _semantic_model_part(f"{base_path}/definition.pbism"),
                    "payloadType": "InlineBase64",
                },
                {
                    "path": "model.bim",
                    "payload": _semantic_model_part(f"{base_path}/model.bim"),
                    "payloadType": "InlineBase64",
                },
                {
                    "path": ".platform",
                    "payload": _semantic_model_part(f"{base_path}/.platform"),
                    "payloadType": "InlineBase64",
                },
            ]
        }
    }


def semantic_model_create(
    workspace_id: str,
    display_name: str,
    semantic_model_path: str,
    description: str | None = None,
    await_lro: bool | None = None,
    timeout: int = 60 * 5,
    preview: bool = True,
) -> requests.Response:
    """
    Create a semantic model.

    Args:
        workspace_id (str): The id of the workspace to create the semantic model in.
        display_name (str): The display name of the semantic model.
        semantic_model_path (str): The path to the semantic model to load content from.
        description (str | None): The description of the semantic model.
        await_lro (bool | None): Whether to await the long running operation.
        timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes.
        preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True.

    Returns:
        The response from the request.
    """
    url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/semanticModels"  # noqa
    data = {"displayName": display_name, "description": description}
    data.update(_semantic_model_definition(semantic_model_path))
    return _semantic_model_request("post", url, data, preview, await_lro=await_lro, timeout=timeout)


def semantic_model_get(
    workspace_id: str,
    semantic_model_id: str,
    preview: bool = True,
) -> requests.Response:
    """
    Get a semantic model.

    Args:
        workspace_id (str): The id of the workspace to get the semantic model from.
        semantic_model_id (str): The id of the semantic model to get.
        preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True.

    Returns:
        The response from the request.
    """
    url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/semanticModels/{semantic_model_id}"  # noqa
    return _semantic_model_request("get", url, {}, preview)


def semantic_model_list(
    workspace_id: str,
    continuation_token: str | None = None,
    preview: bool = True,
) -> requests.Response:
    """
    List semantic models for a workspace.

    Args:
        workspace_id (str): The id of the workspace to list semantic models for.
        continuation_token (str | None): A token for retrieving the next page of results.
        preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True.

    Returns:
        The response from the request.
    """
    url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/semanticModels"  # noqa
    if continuation_token is not None:
        url = f"{url}?continuationToken={continuation_token}"
    return _semantic_model_request("get", url, {}, preview)


def semantic_model_update(
    workspace_id: str,
    semantic_model_id: str,
    display_name: str | None = None,
    description: str | None = None,
    preview: bool = True,
) -> requests.Response:
    """
    Update a semantic model.

    Args:
        workspace_id (str): The id of the workspace to update.
        semantic_model_id (str): The id of the semantic model to update.
        display_name (str | None): The display name of the semantic model.
        description (str | None): The description of the semantic model.
        preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True.

    Returns:
        The response from the request.
    """
    url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/semanticModels/{semantic_model_id}"  # noqa
    data = {"displayName": display_name, "description": description}
    return _semantic_model_request("patch", url, data, preview)


def semantic_model_delete(
    workspace_id: str,
    semantic_model_id: str,
    preview: bool = True,
) -> requests.Response:
    """
    Delete a semantic model.

    Args:
        workspace_id (str): The id of the workspace to delete the semantic model from.
        semantic_model_id (str): The id of the semantic model to delete.
        preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True.

    Returns:
        The response from the request.
    """
    url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/semanticModels/{semantic_model_id}"  # noqa
    return _semantic_model_request("delete", url, {}, preview)


def semantic_model_get_definition(
    workspace_id: str,
    semantic_model_id: str,
    format: str | None = None,
    await_lro: bool | None = None,
    timeout: int = 60 * 5,
    preview: bool = True,
) -> requests.Response:
    """
    Get the definition of a semantic model.

    Args:
        workspace_id (str): The id of the workspace to get the semantic model definition from.
        semantic_model_id (str): The id of the semantic model to get the definition from.
        format (str | None): The format of the semantic model definition. NOTE(review): the original docstring said "ipynb", copied from the notebook endpoint — confirm the supported semantic model formats against the Fabric API reference.
        await_lro (bool | None): Whether to await the long running operation.
        timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes.
        preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True.

    Returns:
        The response from the request.
    """
    url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/semanticModels/{semantic_model_id}/getDefinition"  # noqa
    if format is not None:
        url = f"{url}?format={format}"
    # NOTE(review): the generated code issues GET here; the Fabric
    # "getDefinition" endpoints are documented as POST — verify before release.
    return _semantic_model_request("get", url, {}, preview, await_lro=await_lro, timeout=timeout)


def semantic_model_update_definition(
    workspace_id: str,
    semantic_model_id: str,
    semantic_model_path: str,
    update_metadata: bool | None = None,
    await_lro: bool | None = None,
    timeout: int = 60 * 5,
    preview: bool = True,
) -> requests.Response:
    """
    Update the definition of a semantic model.

    Args:
        workspace_id (str): The id of the workspace to update.
        semantic_model_id (str): The id of the semantic model to update.
        semantic_model_path (str): The path to the semantic model to load content from.
        update_metadata (bool | None): When set to true, the item's metadata is updated using the metadata in the .platform file.
        await_lro (bool | None): Whether to await the long running operation.
        timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes.
        preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True.

    Returns:
        The response from the request.
    """
    url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/semanticModels/{semantic_model_id}/updateDefinition"  # noqa
    if update_metadata is not None:
        # Fix: interpolating the Python bool directly produced
        # "updateMetadata=True"; REST query booleans are lowercase.
        url = f"{url}?updateMetadata={str(update_metadata).lower()}"
    data = _semantic_model_definition(semantic_model_path)
    return _semantic_model_request("post", url, data, preview, await_lro=await_lro, timeout=timeout)

# --- SOURCE continued with a new generated file:
# src/msfabricutils/rest_api/spark_job_definition.py; only the truncated head
# of spark_job_definition_create was visible (it runs past the end of this
# view) and is therefore not reproduced here. ---
+ url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + with open(spark_job_definition_path.rstrip("/") + "/SparkJobDefinitionV1.json", "r") as f: + SparkJobDefinitionV1 = base64.b64encode(f.read().encode()).decode() + + with open(spark_job_definition_path.rstrip("/") + "/.platform", "r") as f: + platform = base64.b64encode(f.read().encode()).decode() + + data = {} + data["displayName"] = display_name + data["description"] = description + custom_payload = { + "definition": { + "parts": [ + { + "path": "SparkJobDefinitionV1.json", + "payload": SparkJobDefinitionV1, + "payloadType": "InlineBase64", + }, + {"path": ".platform", "payload": platform, "payloadType": "InlineBase64"}, + ] + } + } + data = {**data, **custom_payload} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case 202: + if await_lro is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, retry_after=retry_after, timeout=timeout + ) + return response + case _: + return response + + +def spark_job_definition_get( + workspace_id: str, + spark_job_definition_id: str, + preview: bool = True, +) -> requests.Response: + """ + Get a spark job definition. + + Args: + workspace_id (str): The id of the workspace to get the spark job definition from. 
+ spark_job_definition_id (str): The id of the spark job definition to get. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/sparkJobDefinitions/{spark_job_definition_id}" # noqa + url = f"{url}?" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def spark_job_definition_list( + workspace_id: str, + continuation_token: str = None, + preview: bool = True, +) -> requests.Response: + """ + List spark job definitions for a workspace. + + Args: + workspace_id (str): The id of the workspace to list spark job definitions for. + continuation_token (str | None): A token for retrieving the next page of results. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/sparkJobDefinitions" # noqa + url = f"{url}?" 
+ if continuation_token is not None: + url = f"{url}continuationToken={continuation_token}&" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def spark_job_definition_update( + workspace_id: str, + spark_job_definition_id: str, + display_name: str = None, + description: str = None, + preview: bool = True, +) -> requests.Response: + """ + Update a spark job definition. + + Args: + workspace_id (str): The id of the workspace to update. + spark_job_definition_id (str): The id of the spark job definition to update. + display_name (str | None): The display name of the spark job definition. + description (str | None): The description of the spark job definition. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/sparkJobDefinitions/{spark_job_definition_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "patch" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + data["displayName"] = display_name + data["description"] = description + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def spark_job_definition_delete( + workspace_id: str, + spark_job_definition_id: str, + preview: bool = True, +) -> requests.Response: + """ + Delete a spark job definition. + + Args: + workspace_id (str): The id of the workspace to delete. + spark_job_definition_id (str): The id of the spark job definition to delete. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/sparkJobDefinitions/{spark_job_definition_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "delete" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def spark_job_definition_get_definition( + workspace_id: str, + spark_job_definition_id: str, + await_lro: bool = None, + timeout: int = 60 * 5, + preview: bool = True, +) -> requests.Response: + """ + Get the definition of a spark job definition. + + Args: + workspace_id (str): The id of the workspace to get the spark job definition definition from. + spark_job_definition_id (str): The id of the spark job definition to get the definition from. + await_lro (bool | None): Whether to await the long running operation. + timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/sparkJobDefinitions/{spark_job_definition_id}/getDefinition" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case 202: + if await_lro is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, retry_after=retry_after, timeout=timeout + ) + return response + case _: + return response + + +def spark_job_definition_update_definition( + workspace_id: str, + spark_job_definition_id: str, + spark_job_definition_path: str, + update_metadata: bool = None, + await_lro: bool = None, + timeout: int = 60 * 5, + preview: bool = True, +) -> requests.Response: + """ + Update the definition of a spark job definition. + + Args: + workspace_id (str): The id of the workspace to update. + spark_job_definition_id (str): The id of the spark job definition to update. + spark_job_definition_path (str): The path to the spark job definition to load content from. + update_metadata (bool | None): When set to true, the item's metadata is updated using the metadata in the .platform file. + await_lro (bool | None): Whether to await the long running operation. + timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. 
+ + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/sparkJobDefinitions/{spark_job_definition_id}/updateDefinition" # noqa + url = f"{url}?" + if update_metadata is not None: + url = f"{url}updateMetadata={update_metadata}&" + url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + with open(spark_job_definition_path.rstrip("/") + "/SparkJobDefinitionV1.json", "r") as f: + SparkJobDefinitionV1 = base64.b64encode(f.read().encode()).decode() + + with open(spark_job_definition_path.rstrip("/") + "/.platform", "r") as f: + platform = base64.b64encode(f.read().encode()).decode() + + data = {} + custom_payload = { + "definition": { + "parts": [ + { + "path": "SparkJobDefinitionV1.json", + "payload": SparkJobDefinitionV1, + "payloadType": "InlineBase64", + }, + {"path": ".platform", "payload": platform, "payloadType": "InlineBase64"}, + ] + } + } + data = {**data, **custom_payload} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case 202: + if await_lro is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, retry_after=retry_after, timeout=timeout + ) + return response + case _: + return response + + +def spark_job_definition_run_background_job( + workspace_id: str, + 
spark_job_definition_id: str, + job_type: str, + await_lro: bool = None, + timeout: int = 60 * 5, + preview: bool = True, +) -> requests.Response: + """ + Run on-demand spark job instance. + + Args: + workspace_id (str): The id of the workspace to create a job for. + spark_job_definition_id (str): The id of the spark job definition to create a job for. + job_type (str): The type of the job to create. Must be \"sparkJob\". + await_lro (bool | None): Whether to await the long running operation. + timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/sparkJobDefinitions/{spark_job_definition_id}/jobs/instances" # noqa + url = f"{url}?" + if job_type is not None: + url = f"{url}jobType={job_type}&" + url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case 202: + if await_lro is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, retry_after=retry_after, timeout=timeout + ) + return response + case _: + 
return response diff --git a/src/msfabricutils/rest_api/sql_endpoint.py b/src/msfabricutils/rest_api/sql_endpoint.py new file mode 100644 index 0000000..b7e30ca --- /dev/null +++ b/src/msfabricutils/rest_api/sql_endpoint.py @@ -0,0 +1,56 @@ +import json + +import requests +import typer + +from msfabricutils import get_fabric_bearer_token +from msfabricutils.common.remove_none import remove_none +from msfabricutils.common.shorten_dict_values import shorten_dict_values + + +def sql_endpoint_list( + workspace_id: str, + continuation_token: str = None, + preview: bool = True, +) -> requests.Response: + """ + List SQL endpoints for a workspace. + + Args: + workspace_id (str): The id of the workspace to list SQL endpoints for. + continuation_token (str | None): A token for retrieving the next page of results. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/sqlEndpoints" # noqa + url = f"{url}?" 
+ if continuation_token is not None: + url = f"{url}continuationToken={continuation_token}&" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response diff --git a/src/msfabricutils/rest_api/warehouse.py b/src/msfabricutils/rest_api/warehouse.py new file mode 100644 index 0000000..8fbdaa0 --- /dev/null +++ b/src/msfabricutils/rest_api/warehouse.py @@ -0,0 +1,261 @@ +import json + +import requests +import typer + +from msfabricutils import get_fabric_bearer_token +from msfabricutils.common.remove_none import remove_none +from msfabricutils.common.shorten_dict_values import shorten_dict_values +from msfabricutils.core.operations import wait_for_long_running_operation + + +def warehouse_create( + workspace_id: str, + display_name: str, + description: str = None, + await_lro: bool = None, + timeout: int = 60 * 5, + preview: bool = True, +) -> requests.Response: + """ + Create a warehouse. + + Args: + workspace_id (str): The id of the workspace to create the warehouse in. + display_name (str): The display name of the warehouse. + description (str | None): The description of the warehouse. + await_lro (bool | None): Whether to await the long running operation. + timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes. + preview (bool): Whether to preview the request. 
You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/warehouses" # noqa + url = f"{url}?" + url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + data["displayName"] = display_name + data["description"] = description + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case 202: + if await_lro is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, retry_after=retry_after, timeout=timeout + ) + return response + case _: + return response + + +def warehouse_get( + workspace_id: str, + warehouse_id: str, + preview: bool = True, +) -> requests.Response: + """ + Get a warehouse. + + Args: + workspace_id (str): The id of the workspace to get the warehouse from. + warehouse_id (str): The id of the warehouse to get. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/warehouses/{warehouse_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def warehouse_list( + workspace_id: str, + continuation_token: str = None, + preview: bool = True, +) -> requests.Response: + """ + List warehouses for a workspace. + + Args: + workspace_id (str): The id of the workspace to list warehouses for. + continuation_token (str | None): A token for retrieving the next page of results. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/warehouses" # noqa + url = f"{url}?" 
+ if continuation_token is not None: + url = f"{url}continuationToken={continuation_token}&" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def warehouse_update( + workspace_id: str, + display_name: str = None, + description: str = None, + preview: bool = True, +) -> requests.Response: + """ + Update a warehouse. + + Args: + workspace_id (str): The id of the workspace to update the warehouse in. + display_name (str | None): The display name of the warehouse. + description (str | None): The description of the warehouse. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/warehouses/{warehouse_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "patch" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + data["displayName"] = display_name + data["description"] = description + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def warehouse_delete( + workspace_id: str, + warehouse_id: str, + preview: bool = True, +) -> requests.Response: + """ + Delete a warehouse. + + Args: + workspace_id (str): The id of the workspace to delete the warehouse from. + warehouse_id (str): The id of the warehouse to delete. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/warehouses/{warehouse_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "delete" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response diff --git a/src/msfabricutils/rest_api/workspace.py b/src/msfabricutils/rest_api/workspace.py new file mode 100644 index 0000000..e31d488 --- /dev/null +++ b/src/msfabricutils/rest_api/workspace.py @@ -0,0 +1,679 @@ +import json + +import requests +import typer + +from msfabricutils import get_fabric_bearer_token +from msfabricutils.common.remove_none import remove_none +from msfabricutils.common.shorten_dict_values import shorten_dict_values +from msfabricutils.core.operations import wait_for_long_running_operation + + +def workspace_create( + display_name: str, + description: str = None, + capacity_id: str = None, + preview: bool = True, +) -> requests.Response: + """ + Create a workspace. + + Args: + display_name (str): The display name of the workspace. + description (str | None): The description of the workspace. + capacity_id (str | None): The capacity id to assign the workspace to. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + data["displayName"] = display_name + data["description"] = description + data["capacityId"] = capacity_id + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def workspace_get( + workspace_id: str, + preview: bool = True, +) -> requests.Response: + """ + Get a workspace. + + Args: + workspace_id (str): The id of the workspace to get. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def workspace_list( + continuation_token: str = None, + roles: str = None, + preview: bool = True, +) -> requests.Response: + """ + List workspaces. + + Args: + continuation_token (str | None): A token for retrieving the next page of results. + roles (str | None): A list of roles. Separate values using a comma. If not provided, all workspaces are returned. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces" # noqa + url = f"{url}?" 
+ if continuation_token is not None: + url = f"{url}continuationToken={continuation_token}&" + if roles is not None: + url = f"{url}roles={roles}&" + url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def workspace_update( + workspace_id: str, + display_name: str = None, + description: str = None, + preview: bool = True, +) -> requests.Response: + """ + Update a workspace. + + Args: + workspace_id (str): The id of the workspace to update. + display_name (str | None): The display name of the workspace. + description (str | None): The description of the workspace. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "patch" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + data["displayName"] = display_name + data["description"] = description + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def workspace_delete( + workspace_id: str, + preview: bool = True, +) -> requests.Response: + """ + Delete a workspace. + + Args: + workspace_id (str): The id of the workspace to delete. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "delete" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def workspace_assign_to_capacity( + workspace_id: str, + capacity_id: str, + preview: bool = True, +) -> requests.Response: + """ + Assign a workspace to a capacity. + + Args: + workspace_id (str): The id of the workspace to assign to a capacity. + capacity_id (str): The id of the capacity to assign the workspace to. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/assignToCapacity" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + data["capacityId"] = capacity_id + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def workspace_unassign_from_capacity( + workspace_id: str, + preview: bool = True, +) -> requests.Response: + """ + Unassign a workspace from a capacity. + + Args: + workspace_id (str): The id of the workspace to unassign from a capacity. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/unassignFromCapacity" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def workspace_add_role_assignment( + workspace_id: str, + role: str, + principal_id: str, + principal_type: str, + preview: bool = True, +) -> requests.Response: + """ + Add a role assignment to a workspace. + + Args: + workspace_id (str): The id of the workspace to add a role assignment to. + role (str): The role to add to the workspace. + principal_id (str): The id of the principal. + principal_type (str): The type of the principal. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/roleAssignments" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + data["role"] = role + custom_payload = { + "principal": { + "id": principal_id, + "type": principal_type, + } + } + data = {**data, **custom_payload} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def workspace_get_role_assignment( + workspace_id: str, + role_assignment_id: str, + preview: bool = True, +) -> requests.Response: + """ + Get a role assignment for a workspace. + + Args: + workspace_id (str): The id of the workspace to get a role assignment for. + role_assignment_id (str): The id of the role assignment to get. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/roleAssignments/{role_assignment_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def workspace_list_role_assignments( + workspace_id: str, + preview: bool = True, +) -> requests.Response: + """ + List role assignments for a workspace. + + Args: + workspace_id (str): The id of the workspace to list role assignments for. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/roleAssignments" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "get" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def workspace_update_role_assignment( + workspace_id: str, + role_assignment_id: str, + preview: bool = True, +) -> requests.Response: + """ + Update a role assignment for a workspace. + + Args: + workspace_id (str): The workspace ID. + role_assignment_id (str): The workspace role assignment ID. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/roleAssignments/{role_assignment_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "patch" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def workspace_delete_role_assignment( + workspace_id: str, + role_assignment_id: str, + preview: bool = True, +) -> requests.Response: + """ + Delete a role assignment from a workspace. + + Args: + workspace_id (str): The id of the workspace to add a role assignment to. + role_assignment_id (str): The id of the role assignment to delete. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/roleAssignments/{role_assignment_id}" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "delete" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def workspace_deprovision_identity( + workspace_id: str, + preview: bool = True, +) -> requests.Response: + """ + Deprovision an identity from a workspace. + + Args: + workspace_id (str): The id of the workspace to deprovision an identity from. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/deprovisionIdentity" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case _: + return response + + +def workspace_provision_identity( + workspace_id: str, + await_lro: bool = None, + timeout: int = 60 * 5, + preview: bool = True, +) -> requests.Response: + """ + Provision an identity to a workspace. + + Args: + workspace_id (str): The ID of the workspace. + await_lro (bool | None): Whether to await the long running operation. + timeout (int): Timeout for the long running operation (seconds). Defaults to 5 minutes. + preview (bool): Whether to preview the request. You will be asked to confirm the request before it is executed. Defaults to True. + + Returns: + The response from the request. + """ + + url = f"https://api.fabric.microsoft.com/v1/workspaces/{workspace_id}/provisionIdentity" # noqa + url = f"{url}?" 
+ url = url.rstrip("&?") + + method = "post" + token = get_fabric_bearer_token() + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {token}"} + + data = {} + + data = remove_none(data) + + if preview: + typer.echo(f"Method:\n{method.upper()}\n") + typer.echo(f"URL:\n{url}\n") + typer.echo(f"Data:\n{json.dumps(shorten_dict_values(data, 35), indent=2)}\n") + typer.echo(f"Headers:\n{json.dumps(shorten_dict_values(headers, 35), indent=2)}\n") + typer.confirm("Do you want to run the command?", abort=True) + + response = requests.request(method=method, url=url, json=data, headers=headers) + # response.raise_for_status() + + match response.status_code: + case 200 | 201: + return response + case 202: + if await_lro is True: + operation_id = response.headers["x-ms-operation-id"] + retry_after = response.headers["Retry-After"] + return wait_for_long_running_operation( + operation_id=operation_id, retry_after=retry_after, timeout=timeout + ) + return response + case _: + return response