Commit

Docs preview for PR #1654.
cuda-quantum-bot committed May 13, 2024
1 parent 8edfc14 commit 7a90d90
Showing 94 changed files with 1,971 additions and 1,129 deletions.
Binary file modified pr-1654/_images/examples_python_tutorials_vqe_11_0.png
@@ -30,7 +30,7 @@
"q1 : ─────┤ x ├\n",
" ╰───╯\n",
"\n",
"{ 00:483 11:517 }\n",
"{ 00:505 11:495 }\n",
"\n"
]
}
@@ -99,9 +99,9 @@
"name": "stdout",
"output_type": "stream",
"text": [
"{ 00:480 11:520 }\n",
"{ 00:493 11:507 }\n",
"\n",
"{ 00:487 11:513 }\n",
"{ 00:509 11:491 }\n",
"\n"
]
}
@@ -186,7 +186,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"<H> = -1.0000000000000002\n"
"<H> = 0.0\n"
]
}
],
@@ -199,7 +199,18 @@
"# Define the simulation target.\n",
"cudaq.set_target(\"qpp-cpu\")\n",
"\n",
"# Using the same quantum kernel function as we did with `sample`.\n",
"# Define a quantum kernel function.\n",
"\n",
"\n",
"@cudaq.kernel\n",
"def kernel(qubit_count: int):\n",
" qvector = cudaq.qvector(qubit_count)\n",
"\n",
" # 2-qubit GHZ state.\n",
" h(qvector[0])\n",
" for i in range(1, qubit_count):\n",
" x.ctrl(qvector[0], qvector[i])\n",
"\n",
"\n",
"# Define a Hamiltonian in terms of Pauli Spin operators.\n",
"hamiltonian = spin.z(0) + spin.y(1) + spin.x(0) * spin.z(0)\n",
@@ -237,7 +248,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
"version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]"
},
"orig_nbformat": 4,
"vscode": {
488 changes: 262 additions & 226 deletions pr-1654/_sources/examples/python/tutorials/hybrid_qnns.ipynb.txt

Large diffs are not rendered by default.

@@ -88,7 +88,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"{ 00:518 11:482 }\n"
"{ 00:475 11:525 }\n"
]
}
],
@@ -117,7 +117,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"{ 0000000000000000000000000:477 1111111111111111111111111:523 }\n"
"{ 0000000000000000000000000:510 1111111111111111111111111:490 }\n"
]
}
],
@@ -155,7 +155,7 @@
"The `nvidia-mqpu` backend allows for future workflows made possible via GPU simulation today. \n",
"\n",
"\n",
"\\\n",
"\n",
"### Asynchronous data collection via batching Hamiltonian terms\n",
"\n",
"Expectation value computations of multi-term hamiltonians can be asynchronously processed via the `mqpu` platform.\n"
@@ -169,38 +169,70 @@
"<img src=\"images/hsplit.png\" alt=\"Alt Text\" width=\"500\" height=\"200\">\n"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"For workflows involving multiple GPUs, save the code below in a `filename.py` file and execute via: `mpirun -np n python3 filename.py` where `n` is an integer specifying the number of GPUs you have access to.\n"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"mpi is initialized? True\n",
"rank 0 num_ranks 1\n"
]
}
],
"source": [
"import cudaq\n",
"from cudaq import spin\n",
"\n",
"cudaq.set_target(\"nvidia-mqpu\")\n",
"\n",
"cudaq.mpi.initialize()\n",
"num_ranks = cudaq.mpi.num_ranks()\n",
"rank = cudaq.mpi.rank()\n",
"\n",
"print('mpi is initialized? ', cudaq.mpi.is_initialized())\n",
"print('rank', rank, 'num_ranks', num_ranks)\n",
"\n",
"qubit_count = 15\n",
"term_count = 100000\n",
"\n",
"kernel = cudaq.make_kernel()\n",
"\n",
"qubits = kernel.qalloc(qubit_count)\n",
"\n",
"kernel.h(qubits[0])\n",
"\n",
"for i in range(1, qubit_count):\n",
" kernel.cx(qubits[0], qubits[i])\n",
"\n",
"# We create a random hamiltonian with 10e5 terms\n",
"# We create a random hamiltonian\n",
"hamiltonian = cudaq.SpinOperator.random(qubit_count, term_count)\n",
"\n",
"# The observe calls allows us to calculate the expectation value of the Hamiltonian, batches the terms, and distributes them over the multiple QPU's/GPUs.\n",
"# The observe calls allows us to calculate the expectation value of the Hamiltonian with respect to a specified kernel.\n",
"\n",
"# Single node, single GPU.\n",
"result = cudaq.observe(kernel, hamiltonian)\n",
"result.expectation()\n",
"\n",
"# If we have multiple GPUs/ QPUs available, we can parallelize the workflow with the addition of an argument in the observe call.\n",
"\n",
"# expectation = cudaq.observe(kernel, hamiltonian) # Single node, single GPU.\n",
"# Single node, multi-GPU.\n",
"result = cudaq.observe(kernel, hamiltonian, execution=cudaq.parallel.thread)\n",
"result.expectation()\n",
"\n",
"expectation = cudaq.observe(\n",
" kernel, hamiltonian,\n",
" execution=cudaq.parallel.thread) # Single node, multi-GPU.\n",
"# Multi-node, multi-GPU.\n",
"result = cudaq.observe(kernel, hamiltonian, execution=cudaq.parallel.mpi)\n",
"result.expectation()\n",
"\n",
"# expectation = cudaq.observe(kernel, hamiltonian, execution= cudaq.parallel.mpi) # Multi-node, multi-GPU."
"cudaq.mpi.finalize()"
]
},
{
@@ -258,7 +290,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"29.7 s ± 548 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
"31.7 s ± 990 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
]
}
],
@@ -301,7 +333,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"939 ms ± 3.37 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
"85.3 ms ± 2.36 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)\n"
]
}
],
@@ -335,7 +367,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]"
"version": "3.10.12"
},
"orig_nbformat": 4,
"vscode": {
@@ -192,7 +192,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
"version": "3.6.15 (default, Sep 23 2021, 15:41:43) [GCC]"
},
"orig_nbformat": 4,
"vscode": {
@@ -253,7 +253,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
"version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]"
},
"orig_nbformat": 4,
"vscode": {
55 changes: 18 additions & 37 deletions pr-1654/_sources/examples/python/tutorials/vqe.ipynb.txt

Large diffs are not rendered by default.

9 changes: 5 additions & 4 deletions pr-1654/_sources/specification/cudaq/examples.rst.txt
@@ -37,7 +37,7 @@ Hello World - Simple Bell State
import cudaq
@cudaq.kernel()
def bell(num_iters : int):
def bell(num_iters : int) -> int:
    q = cudaq.qvector(2)
    nCorrect = 0
    for i in range(num_iters):
@@ -49,9 +49,10 @@ Hello World - Simple Bell State
        reset(q)
    return nCorrect
print('N Correct = {}'. bell(100))
assert bell(100) == 100
counts = bell(100)
print(f'N Correct = {counts}')
assert counts == 100
GHZ State Preparation and Sampling
----------------------------------
1 change: 1 addition & 0 deletions pr-1654/api/api.html
@@ -158,6 +158,7 @@
<li class="toctree-l3"><a class="reference internal" href="../examples/python/tutorials/multi_gpu_workflows.html#Acceleration-via-NVIDIA-GPUs">Acceleration via NVIDIA GPUs</a></li>
<li class="toctree-l3"><a class="reference internal" href="../examples/python/tutorials/multi_gpu_workflows.html#Multiple-NVIDIA-GPUs">Multiple NVIDIA GPUs</a></li>
<li class="toctree-l3"><a class="reference internal" href="../examples/python/tutorials/multi_gpu_workflows.html#Multiple-QPU's">Multiple QPU’s</a></li>
<li class="toctree-l3"><a class="reference internal" href="../examples/python/tutorials/multi_gpu_workflows.html#Asynchronous-data-collection-via-batching-Hamiltonian-terms">Asynchronous data collection via batching Hamiltonian terms</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../examples/python/tutorials/multiple_qubits.html">Multiple Qubits</a><ul>
1 change: 1 addition & 0 deletions pr-1654/api/default_ops.html
@@ -158,6 +158,7 @@
<li class="toctree-l3"><a class="reference internal" href="../examples/python/tutorials/multi_gpu_workflows.html#Acceleration-via-NVIDIA-GPUs">Acceleration via NVIDIA GPUs</a></li>
<li class="toctree-l3"><a class="reference internal" href="../examples/python/tutorials/multi_gpu_workflows.html#Multiple-NVIDIA-GPUs">Multiple NVIDIA GPUs</a></li>
<li class="toctree-l3"><a class="reference internal" href="../examples/python/tutorials/multi_gpu_workflows.html#Multiple-QPU's">Multiple QPU’s</a></li>
<li class="toctree-l3"><a class="reference internal" href="../examples/python/tutorials/multi_gpu_workflows.html#Asynchronous-data-collection-via-batching-Hamiltonian-terms">Asynchronous data collection via batching Hamiltonian terms</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../examples/python/tutorials/multiple_qubits.html">Multiple Qubits</a><ul>
1 change: 1 addition & 0 deletions pr-1654/api/languages/cpp_api.html
@@ -158,6 +158,7 @@
<li class="toctree-l3"><a class="reference internal" href="../../examples/python/tutorials/multi_gpu_workflows.html#Acceleration-via-NVIDIA-GPUs">Acceleration via NVIDIA GPUs</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../examples/python/tutorials/multi_gpu_workflows.html#Multiple-NVIDIA-GPUs">Multiple NVIDIA GPUs</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../examples/python/tutorials/multi_gpu_workflows.html#Multiple-QPU's">Multiple QPU’s</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../examples/python/tutorials/multi_gpu_workflows.html#Asynchronous-data-collection-via-batching-Hamiltonian-terms">Asynchronous data collection via batching Hamiltonian terms</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../../examples/python/tutorials/multiple_qubits.html">Multiple Qubits</a><ul>
3 changes: 2 additions & 1 deletion pr-1654/api/languages/python_api.html
@@ -158,6 +158,7 @@
<li class="toctree-l3"><a class="reference internal" href="../../examples/python/tutorials/multi_gpu_workflows.html#Acceleration-via-NVIDIA-GPUs">Acceleration via NVIDIA GPUs</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../examples/python/tutorials/multi_gpu_workflows.html#Multiple-NVIDIA-GPUs">Multiple NVIDIA GPUs</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../examples/python/tutorials/multi_gpu_workflows.html#Multiple-QPU's">Multiple QPU’s</a></li>
<li class="toctree-l3"><a class="reference internal" href="../../examples/python/tutorials/multi_gpu_workflows.html#Asynchronous-data-collection-via-batching-Hamiltonian-terms">Asynchronous data collection via batching Hamiltonian terms</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../../examples/python/tutorials/multiple_qubits.html">Multiple Qubits</a><ul>
@@ -1697,7 +1698,7 @@ <h2>Data Types<a class="headerlink" href="#data-types" title="Permalink to this
<em class="property"><span class="pre">static</span><span class="w"> </span></em><span class="sig-name descname"><span class="pre">random</span></span><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="headerlink" href="#cudaq.SpinOperator.random" title="Permalink to this definition"></a></dt>
<dd><dl class="py function">
<dt class="sig sig-object py">
<span class="sig-name descname"><span class="pre">random</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">qubit_count</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.12)"><span class="pre">int</span></a></span></em>, <em class="sig-param"><span class="n"><span class="pre">term_count</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.12)"><span class="pre">int</span></a></span></em>, <em class="sig-param"><span class="n"><span class="pre">seed</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.12)"><span class="pre">int</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">4063654714</span></span></em><span class="sig-paren">)</span> <span class="sig-return"><span class="sig-return-icon">&#x2192;</span> <span class="sig-return-typehint"><a class="reference internal" href="#cudaq.SpinOperator" title="cudaq.mlir._mlir_libs._quakeDialects.cudaq_runtime.SpinOperator"><span class="pre">cudaq.mlir._mlir_libs._quakeDialects.cudaq_runtime.SpinOperator</span></a></span></span></dt>
<span class="sig-name descname"><span class="pre">random</span></span><span class="sig-paren">(</span><em class="sig-param"><span class="n"><span class="pre">qubit_count</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.12)"><span class="pre">int</span></a></span></em>, <em class="sig-param"><span class="n"><span class="pre">term_count</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.12)"><span class="pre">int</span></a></span></em>, <em class="sig-param"><span class="n"><span class="pre">seed</span></span><span class="p"><span class="pre">:</span></span><span class="w"> </span><span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.12)"><span class="pre">int</span></a></span><span class="w"> </span><span class="o"><span class="pre">=</span></span><span class="w"> </span><span class="default_value"><span class="pre">2797434907</span></span></em><span class="sig-paren">)</span> <span class="sig-return"><span class="sig-return-icon">&#x2192;</span> <span class="sig-return-typehint"><a class="reference internal" href="#cudaq.SpinOperator" title="cudaq.mlir._mlir_libs._quakeDialects.cudaq_runtime.SpinOperator"><span class="pre">cudaq.mlir._mlir_libs._quakeDialects.cudaq_runtime.SpinOperator</span></a></span></span></dt>
<dd></dd></dl>

<p>Return a random <a class="reference internal" href="#cudaq.SpinOperator" title="cudaq.SpinOperator"><code class="xref py py-class docutils literal notranslate"><span class="pre">SpinOperator</span></code></a> on the given number of qubits (<code class="code docutils literal notranslate"><span class="pre">qubit_count</span></code>) and composed of the given number of terms (<code class="code docutils literal notranslate"><span class="pre">term_count</span></code>). An optional seed value may also be provided.</p>
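A minimal usage sketch based on the signature above (the qubit and term counts are illustrative; the optional seed argument may be added for reproducibility):

import cudaq

# Build a random spin operator on 4 qubits composed of 8 Pauli terms.
hamiltonian = cudaq.SpinOperator.random(4, 8)
print(hamiltonian)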
1 change: 1 addition & 0 deletions pr-1654/examples/python/tutorials/cost_minimization.html
@@ -161,6 +161,7 @@
<li class="toctree-l3"><a class="reference internal" href="multi_gpu_workflows.html#Acceleration-via-NVIDIA-GPUs">Acceleration via NVIDIA GPUs</a></li>
<li class="toctree-l3"><a class="reference internal" href="multi_gpu_workflows.html#Multiple-NVIDIA-GPUs">Multiple NVIDIA GPUs</a></li>
<li class="toctree-l3"><a class="reference internal" href="multi_gpu_workflows.html#Multiple-QPU's">Multiple QPU’s</a></li>
<li class="toctree-l3"><a class="reference internal" href="multi_gpu_workflows.html#Asynchronous-data-collection-via-batching-Hamiltonian-terms">Asynchronous data collection via batching Hamiltonian terms</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="multiple_qubits.html">Multiple Qubits</a><ul>
5 changes: 3 additions & 2 deletions pr-1654/examples/python/tutorials/deutschs_algorithm.html
@@ -161,6 +161,7 @@
<li class="toctree-l3"><a class="reference internal" href="multi_gpu_workflows.html#Acceleration-via-NVIDIA-GPUs">Acceleration via NVIDIA GPUs</a></li>
<li class="toctree-l3"><a class="reference internal" href="multi_gpu_workflows.html#Multiple-NVIDIA-GPUs">Multiple NVIDIA GPUs</a></li>
<li class="toctree-l3"><a class="reference internal" href="multi_gpu_workflows.html#Multiple-QPU's">Multiple QPU’s</a></li>
<li class="toctree-l3"><a class="reference internal" href="multi_gpu_workflows.html#Asynchronous-data-collection-via-batching-Hamiltonian-terms">Asynchronous data collection via batching Hamiltonian terms</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="multiple_qubits.html">Multiple Qubits</a><ul>
@@ -592,7 +593,7 @@ <h2>XOR <span class="math notranslate nohighlight">\(\oplus\)</span><a class="he
</section>
<section id="Quantum-oracles">
<h2>Quantum oracles<a class="headerlink" href="#Quantum-oracles" title="Permalink to this heading"></a></h2>
<p><img alt="8d66451319a14358900207388c165710" class="no-scaled-link" src="../../../_images/oracle.png" style="width: 300px; height: 150px;" /></p>
<p><img alt="c34d1785b6f845a2b860eca27b62bc70" class="no-scaled-link" src="../../../_images/oracle.png" style="width: 300px; height: 150px;" /></p>
<p>Suppose we have <span class="math notranslate nohighlight">\(f(x): \{0,1\} \longrightarrow \{0,1\}\)</span>. We can compute this function on a quantum computer using oracles which we treat as black box functions that yield the output with an appropriate sequence of logic gates.</p>
<p>Above you see an oracle represented as <span class="math notranslate nohighlight">\(U_f\)</span> which allows us to transform the state <span class="math notranslate nohighlight">\(\ket{x}\ket{y}\)</span> into:</p>
<div class="math notranslate nohighlight">
@@ -640,7 +641,7 @@ <h2>Quantum parallelism<a class="headerlink" href="#Quantum-parallelism" title="
<h2>Deutsch’s Algorithm:<a class="headerlink" href="#Deutschs'-Algorithm:" title="Permalink to this heading"></a></h2>
<p>Our aim is to find out whether <span class="math notranslate nohighlight">\(f: \{0,1\} \longrightarrow \{0,1\}\)</span> is a constant or a balanced function. If constant, <span class="math notranslate nohighlight">\(f(0) = f(1)\)</span>, and if balanced, <span class="math notranslate nohighlight">\(f(0) \neq f(1)\)</span>.</p>
<p>We step through the circuit diagram below and follow the math after the application of each gate.</p>
<p><img alt="35276c8b962e4e3baf106588a2b6bef3" class="no-scaled-link" src="../../../_images/deutsch.png" style="width: 500px; height: 210px;" /></p>
<p><img alt="2a396c35f7024cfdb634718283ad8aaa" class="no-scaled-link" src="../../../_images/deutsch.png" style="width: 500px; height: 210px;" /></p>
<div class="math notranslate nohighlight">
\[\ket{\psi_0} = \ket{01}
\tag{1}\]</div>