This is an automated email from the ASF dual-hosted git repository.

guanmingchiu pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/mahout.git


The following commit(s) were added to refs/heads/main by this push:
     new 64c437b57 Re-enable ruff on ipynb (#1149)
64c437b57 is described below

commit 64c437b572acef4846aa075d038f6170e919f3b3
Author: Tim Hsiung <[email protected]>
AuthorDate: Sun Mar 8 23:30:08 2026 +0800

    Re-enable ruff on ipynb (#1149)
---
 examples/qdp/simple.ipynb                          | 311 ++++++++++---------
 pyproject.toml                                     |   1 -
 .../benchmark/notebooks/mahout_benchmark.ipynb     | 333 +++++++++++----------
 3 files changed, 330 insertions(+), 315 deletions(-)

diff --git a/examples/qdp/simple.ipynb b/examples/qdp/simple.ipynb
index fa9146d42..fb8b98c52 100644
--- a/examples/qdp/simple.ipynb
+++ b/examples/qdp/simple.ipynb
@@ -1,154 +1,169 @@
 {
-  "cells": [
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "colab": {
-          "base_uri": "https://localhost:8080/"
-        },
-        "collapsed": true,
-        "id": "y5xLkFQ4sLOV",
-        "outputId": "b3b21a29-a232-4cf0-ef94-4b2be060f48b"
-      },
-      "outputs": [],
-      "source": [
-        "%pip install qumat[qdp]"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "colab": {
-          "base_uri": "https://localhost:8080/"
-        },
-        "id": "ZvmpEUJFscx-",
-        "outputId": "0c70d8eb-c7b4-4a87-914f-95c01d3fb26c"
-      },
-      "outputs": [],
-      "source": [
-        "\"\"\"\n",
-        "QDP + QML: Full GPU Pipeline (float64)\n",
-        "CPU → GPU (QDP batch encode) → GPU (real projection) → GPU (QML 
training)\n",
-        "\"\"\"\n",
-        "\n",
-        "import torch\n",
-        "import torch.nn as nn\n",
-        "import torch.optim as optim\n",
-        "from qumat.qdp import QdpEngine\n",
-        "\n",
-        "# ─────────────────────────────────────────────\n",
-        "# 1. Setup\n",
-        "# ─────────────────────────────────────────────\n",
-        "DEVICE_ID = 0\n",
-        "TORCH_DEVICE = torch.device(\"cuda\", DEVICE_ID)\n",
-        "NUM_QUBITS = 2\n",
-        "EPOCHS = 60\n",
-        "LR = 0.01\n",
-        "\n",
-        "engine = QdpEngine(DEVICE_ID)\n",
-        "\n",
-        "# ─────────────────────────────────────────────\n",
-        "# 2. Raw Data on CPU — float64\n",
-        "# ─────────────────────────────────────────────\n",
-        "raw = torch.tensor([\n",
-        "    [0.5, 0.5, 0.5, 0.5],\n",
-        "    [0.7, 0.1, 0.5, 0.3],\n",
-        "    [0.1, 0.8, 0.4, 0.4],\n",
-        "    [0.6, 0.2, 0.6, 0.4],\n",
-        "], dtype=torch.float64)   # ← float64\n",
-        "\n",
-        "labels = torch.tensor([0, 1, 0, 1], dtype=torch.float64, 
device=TORCH_DEVICE)\n",
-        "\n",
-        "# ─────────────────────────────────────────────\n",
-        "# 3. CPU → GPU: QDP Batch Encode\n",
-        "# ─────────────────────────────────────────────\n",
-        "print(\"CPU → GPU: Batch encoding with QDP...\")\n",
-        "cuda_batch = raw.cuda()\n",
-        "\n",
-        "qtensor = engine.encode(cuda_batch, num_qubits=NUM_QUBITS, 
encoding_method=\"amplitude\")\n",
-        "\n",
-        "# DLPack → complex128 CUDA tensor (two float64s per element)\n",
-        "X_complex = torch.from_dlpack(qtensor)\n",
-        "print(f\"Raw encoded: shape={X_complex.shape}, 
dtype={X_complex.dtype}, device={X_complex.device}\")\n",
-        "\n",
-        "# Concatenate real + imag → float64 [N, 8], stays on GPU\n",
-        "X_quantum = torch.cat([X_complex.real, X_complex.imag], 
dim=-1).double()\n",
-        "print(f\"Real features: shape={X_quantum.shape}, 
dtype={X_quantum.dtype}, device={X_quantum.device}\")\n",
-        "\n",
-        "# ─────────────────────────────────────────────\n",
-        "# 4. QML Model on GPU — double precision\n",
-        "# ─────────────────────────────────────────────\n",
-        "class VariationalLayer(nn.Module):\n",
-        "    def __init__(self, dim):\n",
-        "        super().__init__()\n",
-        "        self.theta = nn.Parameter(torch.randn(dim, 
dtype=torch.float64))\n",
-        "\n",
-        "    def forward(self, x):\n",
-        "        return x * torch.cos(self.theta) + torch.roll(x, 1, dims=-1) 
* torch.sin(self.theta)\n",
-        "\n",
-        "class QMLClassifier(nn.Module):\n",
-        "    def __init__(self, num_qubits):\n",
-        "        super().__init__()\n",
-        "        dim = 2 * (2 ** num_qubits)   # real + imag\n",
-        "        self.layer1 = VariationalLayer(dim)\n",
-        "        self.layer2 = VariationalLayer(dim)\n",
-        "        self.readout = nn.Linear(dim, 1, dtype=torch.float64)\n",
-        "\n",
-        "    def forward(self, x):\n",
-        "        x = torch.tanh(self.layer1(x))\n",
-        "        x = self.layer2(x)\n",
-        "        return torch.sigmoid(self.readout(x)).squeeze(-1)\n",
-        "\n",
-        "model = QMLClassifier(NUM_QUBITS).to(TORCH_DEVICE)\n",
-        "optimizer = optim.Adam(model.parameters(), lr=LR)\n",
-        "loss_fn = nn.BCELoss()\n",
-        "\n",
-        "# ─────────────────────────────────────────────\n",
-        "# 5. GPU Training\n",
-        "# ─────────────────────────────────────────────\n",
-        "print(\"\\nGPU → Training QML model...\")\n",
-        "for epoch in range(1, EPOCHS + 1):\n",
-        "    model.train()\n",
-        "    optimizer.zero_grad()\n",
-        "    preds = model(X_quantum)\n",
-        "    loss = loss_fn(preds, labels)\n",
-        "    loss.backward()\n",
-        "    optimizer.step()\n",
-        "\n",
-        "    if epoch % 10 == 0:\n",
-        "        with torch.no_grad():\n",
-        "            acc = ((preds > 0.5).double() == 
labels).double().mean().item()\n",
-        "        print(f\"Epoch {epoch:3d} | Loss: {loss.item():.6f} | 
Accuracy: {acc:.2f}\")\n",
-        "\n",
-        "# ─────────────────────────────────────────────\n",
-        "# 6. Inference\n",
-        "# ─────────────────────────────────────────────\n",
-        "model.eval()\n",
-        "with torch.no_grad():\n",
-        "    predicted = (model(X_quantum) > 0.5).int()\n",
-        "\n",
-        "print(\"\\n─── Results ───\")\n",
-        "for i, (pred, true) in enumerate(zip(predicted.cpu().tolist(), 
labels.int().cpu().tolist())):\n",
-        "    print(f\"Sample {i}: Predicted={pred}  True={true}  {'✓' if pred 
== true else '✗'}\")"
-      ]
-    }
-  ],
-  "metadata": {
-    "accelerator": "GPU",
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
     "colab": {
-      "gpuType": "T4",
-      "provenance": []
+     "base_uri": "https://localhost:8080/"
     },
-    "kernelspec": {
-      "display_name": "Python 3",
-      "name": "python3"
+    "collapsed": true,
+    "id": "y5xLkFQ4sLOV",
+    "outputId": "b3b21a29-a232-4cf0-ef94-4b2be060f48b"
+   },
+   "outputs": [],
+   "source": [
+    "%pip install qumat[qdp]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
     },
-    "language_info": {
-      "name": "python"
-    }
+    "id": "ZvmpEUJFscx-",
+    "outputId": "0c70d8eb-c7b4-4a87-914f-95c01d3fb26c"
+   },
+   "outputs": [],
+   "source": [
+    "\"\"\"\n",
+    "QDP + QML: Full GPU Pipeline (float64)\n",
+    "CPU → GPU (QDP batch encode) → GPU (real projection) → GPU (QML 
training)\n",
+    "\"\"\"\n",
+    "\n",
+    "import torch\n",
+    "import torch.nn as nn\n",
+    "import torch.optim as optim\n",
+    "\n",
+    "from qumat.qdp import QdpEngine\n",
+    "\n",
+    "# ─────────────────────────────────────────────\n",
+    "# 1. Setup\n",
+    "# ─────────────────────────────────────────────\n",
+    "DEVICE_ID = 0\n",
+    "TORCH_DEVICE = torch.device(\"cuda\", DEVICE_ID)\n",
+    "NUM_QUBITS = 2\n",
+    "EPOCHS = 60\n",
+    "LR = 0.01\n",
+    "\n",
+    "engine = QdpEngine(DEVICE_ID)\n",
+    "\n",
+    "# ─────────────────────────────────────────────\n",
+    "# 2. Raw Data on CPU — float64\n",
+    "# ─────────────────────────────────────────────\n",
+    "raw = torch.tensor(\n",
+    "    [\n",
+    "        [0.5, 0.5, 0.5, 0.5],\n",
+    "        [0.7, 0.1, 0.5, 0.3],\n",
+    "        [0.1, 0.8, 0.4, 0.4],\n",
+    "        [0.6, 0.2, 0.6, 0.4],\n",
+    "    ],\n",
+    "    dtype=torch.float64,\n",
+    ")  # ← float64\n",
+    "\n",
+    "labels = torch.tensor([0, 1, 0, 1], dtype=torch.float64, 
device=TORCH_DEVICE)\n",
+    "\n",
+    "# ─────────────────────────────────────────────\n",
+    "# 3. CPU → GPU: QDP Batch Encode\n",
+    "# ─────────────────────────────────────────────\n",
+    "print(\"CPU → GPU: Batch encoding with QDP...\")\n",
+    "cuda_batch = raw.cuda()\n",
+    "\n",
+    "qtensor = engine.encode(cuda_batch, num_qubits=NUM_QUBITS, 
encoding_method=\"amplitude\")\n",
+    "\n",
+    "# DLPack → complex128 CUDA tensor (two float64s per element)\n",
+    "X_complex = torch.from_dlpack(qtensor)\n",
+    "print(\n",
+    "    f\"Raw encoded: shape={X_complex.shape}, dtype={X_complex.dtype}, 
device={X_complex.device}\"\n",
+    ")\n",
+    "\n",
+    "# Concatenate real + imag → float64 [N, 8], stays on GPU\n",
+    "X_quantum = torch.cat([X_complex.real, X_complex.imag], 
dim=-1).double()\n",
+    "print(\n",
+    "    f\"Real features: shape={X_quantum.shape}, dtype={X_quantum.dtype}, 
device={X_quantum.device}\"\n",
+    ")\n",
+    "\n",
+    "\n",
+    "# ─────────────────────────────────────────────\n",
+    "# 4. QML Model on GPU — double precision\n",
+    "# ─────────────────────────────────────────────\n",
+    "class VariationalLayer(nn.Module):\n",
+    "    def __init__(self, dim):\n",
+    "        super().__init__()\n",
+    "        self.theta = nn.Parameter(torch.randn(dim, 
dtype=torch.float64))\n",
+    "\n",
+    "    def forward(self, x):\n",
+    "        return x * torch.cos(self.theta) + torch.roll(x, 1, dims=-1) * 
torch.sin(\n",
+    "            self.theta\n",
+    "        )\n",
+    "\n",
+    "\n",
+    "class QMLClassifier(nn.Module):\n",
+    "    def __init__(self, num_qubits):\n",
+    "        super().__init__()\n",
+    "        dim = 2 * (2**num_qubits)  # real + imag\n",
+    "        self.layer1 = VariationalLayer(dim)\n",
+    "        self.layer2 = VariationalLayer(dim)\n",
+    "        self.readout = nn.Linear(dim, 1, dtype=torch.float64)\n",
+    "\n",
+    "    def forward(self, x):\n",
+    "        x = torch.tanh(self.layer1(x))\n",
+    "        x = self.layer2(x)\n",
+    "        return torch.sigmoid(self.readout(x)).squeeze(-1)\n",
+    "\n",
+    "\n",
+    "model = QMLClassifier(NUM_QUBITS).to(TORCH_DEVICE)\n",
+    "optimizer = optim.Adam(model.parameters(), lr=LR)\n",
+    "loss_fn = nn.BCELoss()\n",
+    "\n",
+    "# ─────────────────────────────────────────────\n",
+    "# 5. GPU Training\n",
+    "# ─────────────────────────────────────────────\n",
+    "print(\"\\nGPU → Training QML model...\")\n",
+    "for epoch in range(1, EPOCHS + 1):\n",
+    "    model.train()\n",
+    "    optimizer.zero_grad()\n",
+    "    preds = model(X_quantum)\n",
+    "    loss = loss_fn(preds, labels)\n",
+    "    loss.backward()\n",
+    "    optimizer.step()\n",
+    "\n",
+    "    if epoch % 10 == 0:\n",
+    "        with torch.no_grad():\n",
+    "            acc = ((preds > 0.5).double() == 
labels).double().mean().item()\n",
+    "        print(f\"Epoch {epoch:3d} | Loss: {loss.item():.6f} | Accuracy: 
{acc:.2f}\")\n",
+    "\n",
+    "# ─────────────────────────────────────────────\n",
+    "# 6. Inference\n",
+    "# ─────────────────────────────────────────────\n",
+    "model.eval()\n",
+    "with torch.no_grad():\n",
+    "    predicted = (model(X_quantum) > 0.5).int()\n",
+    "\n",
+    "print(\"\\n─── Results ───\")\n",
+    "for i, (pred, true) in enumerate(\n",
+    "    zip(predicted.cpu().tolist(), labels.int().cpu().tolist())\n",
+    "):\n",
+    "    print(f\"Sample {i}: Predicted={pred}  True={true}  {'✓' if pred == 
true else '✗'}\")"
+   ]
+  }
+ ],
+ "metadata": {
+  "accelerator": "GPU",
+  "colab": {
+   "gpuType": "T4",
+   "provenance": []
+  },
+  "kernelspec": {
+   "display_name": "Python 3",
+   "name": "python3"
   },
-  "nbformat": 4,
-  "nbformat_minor": 0
+  "language_info": {
+   "name": "python"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
 }
diff --git a/pyproject.toml b/pyproject.toml
index 8e6dd3c81..6a8ccc0f9 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -98,7 +98,6 @@ exclude = [
 allowed-unresolved-imports = ["_qdp", "_qdp.*", "api", "api.*"]
 
 [tool.ruff]
-extend-exclude = ["**/*.ipynb"]
 target-version = "py310"
 
 [tool.ruff.lint]
diff --git a/qdp/qdp-python/benchmark/notebooks/mahout_benchmark.ipynb 
b/qdp/qdp-python/benchmark/notebooks/mahout_benchmark.ipynb
index b1583a500..35091ff8d 100644
--- a/qdp/qdp-python/benchmark/notebooks/mahout_benchmark.ipynb
+++ b/qdp/qdp-python/benchmark/notebooks/mahout_benchmark.ipynb
@@ -1,177 +1,178 @@
 {
-  "cells": [
-    {
-      "cell_type": "markdown",
-      "metadata": {
-        "id": "pjstUzDHQHad"
-      },
-      "source": [
-        "## Install environments"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "colab": {
-          "base_uri": "https://localhost:8080/"
-        },
-        "collapsed": true,
-        "id": "-hkLubLFXs_8",
-        "outputId": "7bdf179b-71ed-455b-ef8e-17969acf1db5"
-      },
-      "outputs": [],
-      "source": [
-        "!sudo apt-get update -y > /dev/null\n",
-        "!sudo apt-get install python3.11 python3.11-dev python3.11-distutils 
libpython3.11-dev > /dev/null\n",
-        "!sudo apt-get install python3.11-venv binfmt-support > /dev/null\n",
-        "!sudo apt-get install python3-pip > /dev/null\n",
-        "!python3 -m pip install --upgrade pip > /dev/null\n",
-        "!python3 -m pip install ipykernel"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "colab": {
-          "base_uri": "https://localhost:8080/"
-        },
-        "collapsed": true,
-        "id": "_HEpQ4F3C4gV",
-        "outputId": "b45d4dac-f093-4370-f9e9-8b9adc6972c7"
-      },
-      "outputs": [],
-      "source": [
-        "# 1. Install Rust Toolchain\n",
-        "!curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- 
-y\n",
-        "import os\n",
-        "os.environ['PATH'] += \":/root/.cargo/bin\"\n",
-        "\n",
-        "# 2. Verify Installation\n",
-        "!rustc --version\n",
-        "!cargo --version"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "colab": {
-          "base_uri": "https://localhost:8080/"
-        },
-        "collapsed": true,
-        "id": "ljkluVL5ES4S",
-        "outputId": "8c719c4d-cdcc-4474-f4cf-e1b0b41f63bc"
-      },
-      "outputs": [],
-      "source": [
-        "!curl -LsSf https://astral.sh/uv/install.sh | sh"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "colab": {
-          "base_uri": "https://localhost:8080/"
-        },
-        "id": "9cgMNKOoEgYm",
-        "outputId": "c2c1c8d4-ac57-415a-d4ae-94dfb422b619"
-      },
-      "outputs": [],
-      "source": [
-        "!nvcc --version"
-      ]
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "pjstUzDHQHad"
+   },
+   "source": [
+    "## Install environments"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
     },
-    {
-      "cell_type": "markdown",
-      "metadata": {
-        "id": "rOja7HAaQL1h"
-      },
-      "source": [
-        "## Install Mahout"
-      ]
+    "collapsed": true,
+    "id": "-hkLubLFXs_8",
+    "outputId": "7bdf179b-71ed-455b-ef8e-17969acf1db5"
+   },
+   "outputs": [],
+   "source": [
+    "!sudo apt-get update -y > /dev/null\n",
+    "!sudo apt-get install python3.11 python3.11-dev python3.11-distutils 
libpython3.11-dev > /dev/null\n",
+    "!sudo apt-get install python3.11-venv binfmt-support > /dev/null\n",
+    "!sudo apt-get install python3-pip > /dev/null\n",
+    "!python3 -m pip install --upgrade pip > /dev/null\n",
+    "!python3 -m pip install ipykernel"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
     },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "colab": {
-          "base_uri": "https://localhost:8080/"
-        },
-        "collapsed": true,
-        "id": "u7Skxs7lDBlq",
-        "outputId": "9b594df0-a8b2-4c6c-8c88-2a76f16c9c1d"
-      },
-      "outputs": [],
-      "source": [
-        "# 1. Clone the repository\n",
-        "!git clone https://github.com/apache/mahout.git\n",
-        "\n",
-        "# 2. Install Python Dependencies\n",
-        "# We use the requirements file provided in the benchmark folder\n",
-        "%cd /content/mahout/qdp/qdp-python\n",
-        "!uv venv -p python3.11\n",
-        "\n",
-        "!uv sync --group benchmark"
-      ]
+    "collapsed": true,
+    "id": "_HEpQ4F3C4gV",
+    "outputId": "b45d4dac-f093-4370-f9e9-8b9adc6972c7"
+   },
+   "outputs": [],
+   "source": [
+    "# 1. Install Rust Toolchain\n",
+    "!curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- 
-y\n",
+    "import os\n",
+    "\n",
+    "os.environ[\"PATH\"] += \":/root/.cargo/bin\"\n",
+    "\n",
+    "# 2. Verify Installation\n",
+    "!rustc --version\n",
+    "!cargo --version"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
     },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "colab": {
-          "base_uri": "https://localhost:8080/"
-        },
-        "collapsed": true,
-        "id": "qqmfUHGsGm8m",
-        "outputId": "10b2598d-0d0c-47d0-fda8-b5e825c64e77"
-      },
-      "outputs": [],
-      "source": [
-        "!uv pip install matplotlib-inline --python .venv/bin/python"
-      ]
+    "collapsed": true,
+    "id": "ljkluVL5ES4S",
+    "outputId": "8c719c4d-cdcc-4474-f4cf-e1b0b41f63bc"
+   },
+   "outputs": [],
+   "source": [
+    "!curl -LsSf https://astral.sh/uv/install.sh | sh"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
     },
-    {
-      "cell_type": "markdown",
-      "metadata": {
-        "id": "hj7sU3yJQeMj"
-      },
-      "source": [
-        "## Run Benchmarks"
-      ]
+    "id": "9cgMNKOoEgYm",
+    "outputId": "c2c1c8d4-ac57-415a-d4ae-94dfb422b619"
+   },
+   "outputs": [],
+   "source": [
+    "!nvcc --version"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "rOja7HAaQL1h"
+   },
+   "source": [
+    "## Install Mahout"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
     },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "colab": {
-          "base_uri": "https://localhost:8080/"
-        },
-        "id": "iuP5BdI3E-oR",
-        "outputId": "632019c3-da2e-4184-87ca-412b86555232"
-      },
-      "outputs": [],
-      "source": [
-        "!./.venv/bin/python 
/content/mahout/qdp/qdp-python/benchmark/benchmark_e2e.py --frameworks all 
--qubits 18 --samples 500"
-      ]
-    }
-  ],
-  "metadata": {
-    "accelerator": "GPU",
+    "collapsed": true,
+    "id": "u7Skxs7lDBlq",
+    "outputId": "9b594df0-a8b2-4c6c-8c88-2a76f16c9c1d"
+   },
+   "outputs": [],
+   "source": [
+    "# 1. Clone the repository\n",
+    "!git clone https://github.com/apache/mahout.git\n",
+    "\n",
+    "# 2. Install Python Dependencies\n",
+    "# We use the requirements file provided in the benchmark folder\n",
+    "%cd /content/mahout/qdp/qdp-python\n",
+    "!uv venv -p python3.11\n",
+    "\n",
+    "!uv sync --group benchmark"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
     "colab": {
-      "gpuType": "T4",
-      "provenance": []
+     "base_uri": "https://localhost:8080/"
     },
-    "kernelspec": {
-      "display_name": "Python 3",
-      "name": "python3"
+    "collapsed": true,
+    "id": "qqmfUHGsGm8m",
+    "outputId": "10b2598d-0d0c-47d0-fda8-b5e825c64e77"
+   },
+   "outputs": [],
+   "source": [
+    "!uv pip install matplotlib-inline --python .venv/bin/python"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "hj7sU3yJQeMj"
+   },
+   "source": [
+    "## Run Benchmarks"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
     },
-    "language_info": {
-      "name": "python"
-    }
+    "id": "iuP5BdI3E-oR",
+    "outputId": "632019c3-da2e-4184-87ca-412b86555232"
+   },
+   "outputs": [],
+   "source": [
+    "!./.venv/bin/python 
/content/mahout/qdp/qdp-python/benchmark/benchmark_e2e.py --frameworks all 
--qubits 18 --samples 500"
+   ]
+  }
+ ],
+ "metadata": {
+  "accelerator": "GPU",
+  "colab": {
+   "gpuType": "T4",
+   "provenance": []
+  },
+  "kernelspec": {
+   "display_name": "Python 3",
+   "name": "python3"
   },
-  "nbformat": 4,
-  "nbformat_minor": 0
+  "language_info": {
+   "name": "python"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
 }

Reply via email to