Coverage for tests/test_expressiblity.py: 95%
40 statements
coverage.py v7.6.10, created at 2025-01-23 11:23 +0000
from qml_essentials.model import Model
from qml_essentials.expressibility import Expressibility

import pennylane.numpy as np
import logging
import math
import pytest

logger = logging.getLogger(__name__)


@pytest.mark.unittest
def test_divergence() -> None:
    test_cases = [
        {
            "n_qubits": 2,
            "n_bins": 10,
            "result": 0.000,
        },
    ]

    for test_case in test_cases:
        _, y_haar_a = Expressibility.haar_integral(
            n_qubits=test_case["n_qubits"],
            n_bins=test_case["n_bins"],
            cache=True,
        )

        # This also tests the cache functionality
        _, y_haar_b = Expressibility.haar_integral(
            n_qubits=test_case["n_qubits"],
            n_bins=test_case["n_bins"],
            cache=False,
        )

        # Calculate the mean (over all inputs, if required)
        kl_dist = Expressibility.kullback_leibler_divergence(y_haar_a, y_haar_b).mean()

        assert math.isclose(
            kl_dist, test_case["result"], abs_tol=1e-3
        ), "KL divergence between two identical Haar histograms should be zero."


@pytest.mark.unittest
@pytest.mark.expensive
def test_expressibility() -> None:
    # Results taken from: https://doi.org/10.1002/qute.201900070
    # circuits = [9, 1, 2, 16, 3, 18, 10, 12, 15, 17, 4, 11, 7, 8, 19, 5, 13, 14, 6]
    # results-n_layers-1 = [0.6773, 0.2999, 0.2860, 0.2602, 0.2396, 0.2340, 0.2286,
    #     0.1984, 0.1892, 0.1359, 0.1343, 0.1312, 0.0977, 0.0858, 0.0809, 0.0602,
    #     0.0516, 0.0144, 0.0043]
    # results-n_layers-3 = [0.0322, 0.2079, 0.0084, 0.0375, 0.0403, 0.0221, 0.1297,
    #     0.0089, 0.1152, 0.0180, 0.0107, 0.0038, 0.0162, 0.0122, 0.0040, 0.0030,
    #     0.0049, 0.0035, 0.0039]

    # Circuits [5, 7, 8, 11, 12, 13, 14] are not included in the test
    # cases because they are not implemented in ansaetze.py.

    # Circuit 10 is excluded because it cannot be implemented with the
    # current setup.
    test_cases = [
        {
            "circuit_type": "Circuit_9",
            "n_qubits": 4,
            "n_layers": 1,
            "result": 0.6773,
        },
        # {
        #     "circuit_type": "Circuit_9",
        #     "n_qubits": 4,
        #     "n_layers": 3,
        #     "result": 0.0322,
        # },
        {
            "circuit_type": "Circuit_1",
            "n_qubits": 4,
            "n_layers": 1,
            "result": 0.2999,
        },
        # {
        #     "circuit_type": "Circuit_1",
        #     "n_qubits": 4,
        #     "n_layers": 3,
        #     "result": 0.2079,
        # },
        {
            "circuit_type": "Circuit_2",
            "n_qubits": 4,
            "n_layers": 1,
            "result": 0.2860,
        },
        # {
        #     "circuit_type": "Circuit_2",
        #     "n_qubits": 4,
        #     "n_layers": 3,
        #     "result": 0.0084,
        # },
        {
            "circuit_type": "Circuit_16",
            "n_qubits": 4,
            "n_layers": 1,
            "result": 0.2602,
        },
        # {
        #     "circuit_type": "Circuit_16",
        #     "n_qubits": 4,
        #     "n_layers": 3,
        #     "result": 0.0375,
        # },
        {
            "circuit_type": "Circuit_3",
            "n_qubits": 4,
            "n_layers": 1,
            "result": 0.2396,
        },
        # {
        #     "circuit_type": "Circuit_3",
        #     "n_qubits": 4,
        #     "n_layers": 3,
        #     "result": 0.0403,
        # },
        {
            "circuit_type": "Circuit_18",
            "n_qubits": 4,
            "n_layers": 1,
            "result": 0.2340,
        },
        # {
        #     "circuit_type": "Circuit_18",
        #     "n_qubits": 4,
        #     "n_layers": 3,
        #     "result": 0.0221,
        # },
        # {
        #     "circuit_type": "Circuit_10",
        #     "n_qubits": 4,
        #     "n_layers": 1,
        #     "result": 0.2286,
        # },
        # {
        #     "circuit_type": "Circuit_10",
        #     "n_qubits": 4,
        #     "n_layers": 3,
        #     "result": 0.1297,
        # },
        {
            "circuit_type": "Circuit_15",
            "n_qubits": 4,
            "n_layers": 1,
            "result": 0.1892,
        },
        # {
        #     "circuit_type": "Circuit_15",
        #     "n_qubits": 4,
        #     "n_layers": 3,
        #     "result": 0.1152,
        # },
        {
            "circuit_type": "Circuit_17",
            "n_qubits": 4,
            "n_layers": 1,
            "result": 0.1359,
        },
        # {
        #     "circuit_type": "Circuit_17",
        #     "n_qubits": 4,
        #     "n_layers": 3,
        #     "result": 0.0180,
        # },
        {
            "circuit_type": "Circuit_4",
            "n_qubits": 4,
            "n_layers": 1,
            "result": 0.1343,
        },
        # {
        #     "circuit_type": "Circuit_4",
        #     "n_qubits": 4,
        #     "n_layers": 3,
        #     "result": 0.0107,
        # },
        {
            "circuit_type": "Circuit_19",
            "n_qubits": 4,
            "n_layers": 1,
            "result": 0.0809,
        },
        # {
        #     "circuit_type": "Circuit_19",
        #     "n_qubits": 4,
        #     "n_layers": 3,
        #     "result": 0.0040,
        # },
        {
            "circuit_type": "Circuit_6",
            "n_qubits": 4,
            "n_layers": 1,
            "result": 0.0043,
        },
        # {
        #     "circuit_type": "Circuit_6",
        #     "n_qubits": 4,
        #     "n_layers": 3,
        #     "result": 0.0039,
        # },
    ]

    tolerance = 0.35  # FIXME: reduce when reason for discrepancy is found
    for test_case in test_cases:
        model = Model(
            n_qubits=test_case["n_qubits"],
            n_layers=test_case["n_layers"],
            circuit_type=test_case["circuit_type"],
            initialization_domain=[0, 2 * np.pi],
            data_reupload=False,
        )

        _, _, z = Expressibility.state_fidelities(
            seed=1000,
            n_bins=75,
            n_samples=5000,
            model=model,
            scale=False,
        )

        _, y_haar = Expressibility.haar_integral(
            n_qubits=test_case["n_qubits"],
            n_bins=75,
            cache=False,
            scale=False,
        )

        # Calculate the mean (over all inputs, if required)
        kl_dist = Expressibility.kullback_leibler_divergence(z, y_haar).mean()

        difference = abs(kl_dist - test_case["result"])
        if math.isclose(difference, 0.0, abs_tol=1e-10):
            error = 0.0
        else:
            error = difference / test_case["result"]

        assert error < tolerance, (
            f"Expressibility of circuit {test_case['circuit_type']} is not "
            f"{test_case['result']} but {kl_dist} instead. "
            f"Deviation {error * 100:.1f}% > {tolerance * 100:.1f}%"
        )
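

# Background sketch (not exercised by the suite): the reference values
# above compare a model's fidelity histogram against the analytic Haar
# fidelity distribution
#     P_Haar(F) = (N - 1) * (1 - F) ** (N - 2),  with N = 2 ** n_qubits,
# from https://doi.org/10.1002/qute.201900070. The helper below is a
# hypothetical illustration of how that density can be binned; the
# library's haar_integral may discretize it differently.
def _haar_histogram_sketch(n_qubits: int, n_bins: int) -> np.ndarray:
    dim = 2**n_qubits
    edges = np.linspace(0.0, 1.0, n_bins + 1)
    # Integrate the density over each bin via the closed-form CDF:
    # CDF(F) = 1 - (1 - F) ** (N - 1).
    cdf = 1 - (1 - edges) ** (dim - 1)
    return cdf[1:] - cdf[:-1]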


@pytest.mark.unittest
@pytest.mark.expensive
def test_scaling() -> None:
    model = Model(
        n_qubits=2,
        n_layers=1,
        circuit_type="Circuit_1",
    )

    _, _, z = Expressibility.state_fidelities(
        seed=1000,
        n_bins=4,
        n_samples=10,
        n_input_samples=0,
        input_domain=[0, 2 * np.pi],
        model=model,
        scale=True,
    )
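
    # With scale=True the returned histograms appear to have
    # n_qubits * n_bins = 2 * 4 = 8 entries; this relation is inferred
    # from the shape assertions in this test, not from documented API
    # behaviour.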
    assert z.shape == (8,)

    _, y = Expressibility.haar_integral(
        n_qubits=model.n_qubits,
        n_bins=4,
        cache=False,
        scale=True,
    )

    assert y.shape == (8,)

    # _ = Expressibility.kullback_leibler_divergence(z, y)


if __name__ == "__main__":
    test_expressibility()