Coverage for tests/test_expressiblity.py: 100%
34 statements
from qml_essentials.model import Model
from qml_essentials.expressibility import Expressibility

import pennylane.numpy as np
import logging
import math
import pytest

logger = logging.getLogger(__name__)

@pytest.mark.unittest
def test_divergence() -> None:
    test_cases = [
        {
            "n_qubits": 2,
            "n_bins": 10,
            "result": 0.000,
        },
    ]

    for test_case in test_cases:
        _, y_haar_a = Expressibility.haar_integral(
            n_qubits=test_case["n_qubits"],
            n_bins=test_case["n_bins"],
            cache=True,
        )

        # Also exercise the cache functionality here
        _, y_haar_b = Expressibility.haar_integral(
            n_qubits=test_case["n_qubits"],
            n_bins=test_case["n_bins"],
            cache=False,
        )

        # Calculate the mean (over all inputs, if required)
        kl_dist = Expressibility.kullback_leibler_divergence(y_haar_a, y_haar_b).mean()

        assert math.isclose(
            kl_dist, test_case["result"], abs_tol=1e-3
        ), "Distance between two identical Haar measures is not zero."

@pytest.mark.unittest
@pytest.mark.expensive
def test_expressibility() -> None:
    test_cases = [
        {
            "circuit_type": "Circuit_1",
            "n_qubits": 3,
            "n_layers": 1,
            "n_bins": 10,
            "n_samples": 400,
            "n_input_samples": 10,
            "result": 2.905,
        },
        {
            "circuit_type": "Circuit_9",
            "n_qubits": 3,
            "n_layers": 1,
            "n_bins": 10,
            "n_samples": 400,
            "n_input_samples": 10,
            "result": 6.670,
        },
    ]
    for test_case in test_cases:
        model = Model(
            n_qubits=test_case["n_qubits"],
            n_layers=test_case["n_layers"],
            circuit_type=test_case["circuit_type"],
        )

        _, _, z = Expressibility.state_fidelities(
            seed=1000,
            n_bins=test_case["n_bins"],
            n_samples=test_case["n_samples"],
            n_input_samples=test_case["n_input_samples"],
            input_domain=[0, 2 * np.pi],
            model=model,
        )

        _, y_haar = Expressibility.haar_integral(
            n_qubits=test_case["n_qubits"],
            n_bins=test_case["n_bins"],
        )

        # Calculate the mean (over all inputs, if required)
        kl_dist = Expressibility.kullback_leibler_divergence(z, y_haar).mean()

        assert math.isclose(kl_dist, test_case["result"], abs_tol=1e-3), (
            f"Expressibility is not {test_case['result']} "
            f"for circuit ansatz {test_case['circuit_type']}. "
            f"Was {kl_dist} instead."
        )
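
# The Haar reference above has a closed form: for N = 2**n_qubits, the state
# fidelity density under Haar-random states is
# P_Haar(F) = (N - 1) * (1 - F)**(N - 2) (Sim et al., 2019). A minimal sketch
# of binning that density over [0, 1]; that haar_integral normalises exactly
# this way is an assumption:
def _haar_fidelity_histogram_sketch(n_qubits: int, n_bins: int) -> np.ndarray:
    dim = 2**n_qubits
    edges = np.linspace(0.0, 1.0, n_bins + 1)
    # Bin mass via the CDF of P_Haar, which is 1 - (1 - F)**(N - 1)
    cdf = 1.0 - (1.0 - edges) ** (dim - 1)
    return cdf[1:] - cdf[:-1]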

@pytest.mark.unittest
@pytest.mark.expensive
def test_scaling() -> None:
    model = Model(
        n_qubits=2,
        n_layers=1,
        circuit_type="Circuit_1",
    )

    _, _, z = Expressibility.state_fidelities(
        seed=1000,
        n_bins=4,
        n_samples=10,
        n_input_samples=0,
        input_domain=[0, 2 * np.pi],
        model=model,
        scale=True,
    )

    # With scale=True the histogram grows to n_bins * n_qubits bins (4 * 2 = 8)
    assert z.shape == (8,)

    _, y = Expressibility.haar_integral(
        n_qubits=model.n_qubits,
        n_bins=4,
        cache=False,
        scale=True,
    )

    assert y.shape == (8,)

    # Both histograms share the same binning, so the KL divergence is defined
    _ = Expressibility.kullback_leibler_divergence(z, y)
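
# Taken together, the three calls exercised above compose into a single
# expressibility score per model. A convenience wrapper sketch (the name and
# defaults are ours, not part of the qml_essentials API):
def _expressibility_kl_sketch(
    model: Model, n_bins: int = 10, n_samples: int = 400, n_input_samples: int = 10
) -> float:
    _, _, z = Expressibility.state_fidelities(
        seed=1000,
        n_bins=n_bins,
        n_samples=n_samples,
        n_input_samples=n_input_samples,
        input_domain=[0, 2 * np.pi],
        model=model,
    )
    _, y_haar = Expressibility.haar_integral(n_qubits=model.n_qubits, n_bins=n_bins)
    # Mean over input samples, matching the tests above
    return float(Expressibility.kullback_leibler_divergence(z, y_haar).mean())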