From e32b8b241f2abaa6648a20d69203dd239ee3c8aa Mon Sep 17 00:00:00 2001 From: Joel Pasvolsky Date: Thu, 21 Nov 2024 13:20:49 -0800 Subject: [PATCH 001/170] Add simulated annealing solver (dev code) --- app.py | 51 +++++++++++++++++++++++++++++++++++++-------------- helpers/qa.py | 38 ++++++++++++++++++++++++-------------- 2 files changed, 61 insertions(+), 28 deletions(-) diff --git a/app.py b/app.py index 7184674..a411f08 100644 --- a/app.py +++ b/app.py @@ -24,6 +24,7 @@ from dwave.cloud import Client from dwave.embedding import embed_bqm, is_valid_embedding from dwave.system import DWaveSampler +from dwave.samplers import SimulatedAnnealingSampler from helpers.kz_calcs import * from helpers.layouts_cards import * @@ -35,6 +36,7 @@ app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP]) # Initialize: available QPUs, initial progress-bar status + try: client = Client.from_config(client='qpu') qpus = {qpu.name: qpu for qpu in client.get_solvers(fast_anneal_time_range__covers=[0.005, 0.1])} @@ -45,6 +47,11 @@ qpus = {} client = None init_job_status = 'NO SOLVER' +if os.getenv('ZNE') == "YES": + qpus['simulated_annealing_solver'] = SimulatedAnnealingSampler() + init_job_status = 'READY' + if not client: + client = 'dummy' # Dashboard-organization section app.layout = dbc.Container([ @@ -173,11 +180,19 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached): """Cache embeddings for the selected QPU.""" trigger_id = dash.callback_context.triggered[0]['prop_id'].split('.')[0] - + if trigger_id == 'qpu_selection': + if qpu_name == 'simulated_annealing_solver': + filename = [file for file in os.listdir('helpers') if + '.json' in file and 'emb_' in file][0] + with open(f'helpers/{filename}', 'r') as fp: + embeddings_cached = json.load(fp) + embeddings_cached = json_to_dict(embeddings_cached) + return embeddings_cached, list() + embeddings_cached = {} # Wipe out previous QPU's embeddings - + for filename in [file for file in os.listdir('helpers') if '.json' in file and 'emb_' in file]: @@ -308,19 +323,25 @@ def submit_job(job_submit_time, qpu_name, spins, J, ta_ns, embeddings_cached): bqm = create_bqm(num_spins=spins, coupling_strength=J) - embeddings_cached = json_to_dict(embeddings_cached) - embedding = embeddings_cached[spins] + if qpu_name == 'simulated_annealing_solver': + sampleset = qpus['simulated_annealing_solver'].sample(bqm) + return json.dumps(sampleset.to_serializable()) + + else: + + embeddings_cached = json_to_dict(embeddings_cached) + embedding = embeddings_cached[spins] - bqm_embedded = embed_bqm(bqm, embedding, DWaveSampler(solver=solver.name).adjacency) + bqm_embedded = embed_bqm(bqm, embedding, DWaveSampler(solver=solver.name).adjacency) - computation = solver.sample_bqm( - bqm=bqm_embedded, - fast_anneal=True, - annealing_time=0.001*ta_ns, # SAPI anneal time units is microseconds - auto_scale=False, - answer_mode='raw', # Easier than accounting for num_occurrences - num_reads=100, - label=f'Examples - Kibble-Zurek Simulation, submitted: {job_submit_time}',) + computation = solver.sample_bqm( + bqm=bqm_embedded, + fast_anneal=True, + annealing_time=0.001*ta_ns, # SAPI anneal time units is microseconds + auto_scale=False, + answer_mode='raw', # Easier than accounting for num_occurrences + num_reads=100, + label=f'Examples - Kibble-Zurek Simulation, submitted: {job_submit_time}',) return computation.wait_id() @@ -355,9 +376,11 @@ def simulate(dummy1, dummy2, job_id, job_submit_state, job_submit_time, \ if trigger_id == 'btn_simulate': - if spins in 
cached_embedding_lengths: + if spins in cached_embedding_lengths or qpu_name == 'simulated_annealing_solver': submit_time = datetime.datetime.now().strftime('%c') + if qpu_name == 'simulated_annealing_solver': # Hack to fix switch from SA to QPU + submit_time = 'SA' job_submit_state = 'SUBMITTED' embedding = dash.no_update diff --git a/helpers/qa.py b/helpers/qa.py index 6714c8e..2b875c0 100644 --- a/helpers/qa.py +++ b/helpers/qa.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +import json + import dimod from dwave.cloud.api import exceptions, Problems from dwave.embedding import unembed_sampleset @@ -77,22 +79,26 @@ def get_job_status(client, job_id, job_submit_time): Embedding, as a dict of format ``{spin: [qubit]}``. """ - p = Problems.from_config(client.config) + if '"type": "SampleSet"' in job_id and job_submit_time == 'SA': + return 'COMPLETED' + + else: + p = Problems.from_config(client.config) - try: + try: - status = p.get_problem_status(job_id) - label_time = dict(status)['label'].split('submitted: ')[1] + status = p.get_problem_status(job_id) + label_time = dict(status)['label'].split('submitted: ')[1] - if label_time == job_submit_time: + if label_time == job_submit_time: - return status.status.value + return status.status.value + + return None - return None - - except exceptions.ResourceNotFoundError: + except exceptions.ResourceNotFoundError: - return None + return None def get_samples(client, job_id, num_spins, J, embedding): """Retrieve an unembedded sample set for a given job ID. @@ -114,11 +120,15 @@ def get_samples(client, job_id, num_spins, J, embedding): Unembedded dimod sample set. """ - sampleset = client.retrieve_answer(job_id).sampleset + if '"type": "SampleSet"' in job_id: + return dimod.SampleSet.from_serializable(json.loads(job_id)) + + else: + sampleset = client.retrieve_answer(job_id).sampleset - bqm = create_bqm(num_spins=num_spins, coupling_strength=J) - - return unembed_sampleset(sampleset, embedding, bqm) + bqm = create_bqm(num_spins=num_spins, coupling_strength=J) + + return unembed_sampleset(sampleset, embedding, bqm) def json_to_dict(emb_json): """Retrieve an unembedded sampleset for a given job ID. 
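A minimal local sketch of the fallback path the patch above introduces: when the ZNE environment variable is set, submit_job samples the BQM with SimulatedAnnealingSampler and returns the JSON-serialized sample set as the "job_id", which get_job_status and get_samples then recognize by the '"type": "SampleSet"' substring. The ring construction below is an assumption standing in for helpers.qa.create_bqm, and the num_reads value is arbitrary; this is illustrative only, not part of the patch.

    import json

    import dimod
    from dwave.samplers import SimulatedAnnealingSampler

    num_spins, J = 512, -1.4  # ring length and coupling strength (assumed values)
    bqm = dimod.BinaryQuadraticModel.from_ising(
        {}, {(i, (i + 1) % num_spins): J for i in range(num_spins)})

    # Sample locally and serialize, as submit_job does for the SA solver
    sampleset = SimulatedAnnealingSampler().sample(bqm, num_reads=10)
    job_id = json.dumps(sampleset.to_serializable())

    # get_job_status/get_samples detect this kind of "job_id" by its payload
    assert '"type": "SampleSet"' in job_id
    recovered = dimod.SampleSet.from_serializable(json.loads(job_id))
    print(recovered.first.energy)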
From ae4c0e200fb12b5db82c2cfa71117a398530eb15 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 21 Nov 2024 16:34:25 -0800 Subject: [PATCH 002/170] Added MockKZSampler along with the changes to mock annealing time --- MockKibbleZurekSampler.py | 84 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 84 insertions(+) create mode 100644 MockKibbleZurekSampler.py diff --git a/MockKibbleZurekSampler.py b/MockKibbleZurekSampler.py new file mode 100644 index 0000000..42214c6 --- /dev/null +++ b/MockKibbleZurekSampler.py @@ -0,0 +1,84 @@ +from dimod import SampleSet +from dwave.samplers import SimulatedAnnealingSampler +from dwave.system.temperatures import fluxbias_to_h +from dwave.system.testing import MockDWaveSampler + +class MockKibbleZurekSampler(MockDWaveSampler): + def __init__( + self, + nodelist=None, + edgelist=None, + properties=None, + broken_nodes=None, + broken_edges=None, + topology_type='pegasus', + topology_shape=[16], + parameter_warnings=True, + substitute_sampler=None, + substitute_kwargs=None, + exact_solver_cutoff=0, + ): + if substitute_sampler is None: + substitute_sampler = SimulatedAnnealingSampler() + if substitute_kwargs is None: + substitute_kwargs = {'beta_range': [0, 3], + 'beta_schedule_type': 'linear', + 'num_sweeps': 100, + 'randomize_order': True, + 'proposal_acceptance_criteria': 'Gibbs'} + super().__init__( + nodelist=nodelist, + edgelist=edgelist, + properties=properties, + broken_nodes=broken_nodes, + broken_edges=broken_edges, + topology_type=topology_type, + topology_shape=topology_shape, + parameter_warnings=parameter_warnings, + substitute_sampler=substitute_sampler, + substitute_kwargs=substitute_kwargs, + exact_solver_cutoff=exact_solver_cutoff, + ) + + self.sampler_type = 'mock' + def sample(self, bqm, **kwargs): + + # Extract flux biases from kwargs (if provided) + flux_biases = kwargs.get('flux_biases', {}) + if flux_biases: + # Remove flux_biases from kwargs to avoid passing it to substitute_sampler + kwargs = kwargs.copy() + del kwargs['flux_biases'] + + annealing_time = kwargs.pop('annealing_time', None) + if annealing_time is not None: + try: + num_sweeps = int(annealing_time * 1000) + except (TypeError, ValueError): + num_sweeps = 1000 + kwargs['annealing_time'] = num_sweeps + else: + # Default number of sweeps if annealing_time is not provided + kwargs['annealing_time'] = kwargs.get('num_sweeps', 1000) + + # Adjust the BQM to include flux biases + bqm_effective = bqm.change_vartype('SPIN', inplace=False) + + # flux_to_h_factor = fluxbias_to_h() + # for v in bqm_effective.variables: + # bias = bqm_effective.get_linear(v) + # bqm_effective.set_linear(v, bias + flux_to_h_factor * flux_biases[v]) + + ss = super().sample(bqm=bqm_effective, **kwargs) + + ss.change_vartype(bqm.vartype) + + ss = SampleSet.from_samples_bqm(ss, bqm) + + return ss + + def get_sampler(self): + """ + Return the sampler instance. 
+ """ + return self From 53854c781e53a8196a7004b1e68301c8bf73b176 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 21 Nov 2024 16:34:55 -0800 Subject: [PATCH 003/170] Changed SA Sampler to MockKZSampler --- app.py | 48 ++++++++++++++++++++++++++++++++++-------------- 1 file changed, 34 insertions(+), 14 deletions(-) diff --git a/app.py b/app.py index a411f08..0b82a17 100644 --- a/app.py +++ b/app.py @@ -24,6 +24,7 @@ from dwave.cloud import Client from dwave.embedding import embed_bqm, is_valid_embedding from dwave.system import DWaveSampler +from MockKibbleZurekSampler import MockKibbleZurekSampler from dwave.samplers import SimulatedAnnealingSampler from helpers.kz_calcs import * @@ -33,6 +34,9 @@ from helpers.qa import * from helpers.tooltips import tool_tips +import networkx as nx +from minorminer.subgraph import find_subgraph + app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP]) # Initialize: available QPUs, initial progress-bar status @@ -48,7 +52,7 @@ client = None init_job_status = 'NO SOLVER' if os.getenv('ZNE') == "YES": - qpus['simulated_annealing_solver'] = SimulatedAnnealingSampler() + qpus['mock_dwave_solver'] = MockKibbleZurekSampler(topology_type='pegasus', topology_shape=[16]) # Change sampler to mock init_job_status = 'READY' if not client: client = 'dummy' @@ -175,21 +179,29 @@ def set_schedule(qpu_name): Output('embedding_is_cached', 'value'), Input('qpu_selection', 'value'), Input('embeddings_found', 'data'), - State('embeddings_cached', 'data'),) -def cache_embeddings(qpu_name, embeddings_found, embeddings_cached): + State('embeddings_cached', 'data'), + State('spins', 'value')) +def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): """Cache embeddings for the selected QPU.""" trigger_id = dash.callback_context.triggered[0]['prop_id'].split('.')[0] if trigger_id == 'qpu_selection': - if qpu_name == 'simulated_annealing_solver': - filename = [file for file in os.listdir('helpers') if - '.json' in file and 'emb_' in file][0] - with open(f'helpers/{filename}', 'r') as fp: - embeddings_cached = json.load(fp) - embeddings_cached = json_to_dict(embeddings_cached) - return embeddings_cached, list() + if qpu_name == 'mock_dwave_solver': + embeddings_cached = {} + L = spins + edges = [(i, (i + 1)%L) for i in range(L)] + emb = find_subgraph(target=qpus['mock_dwave_solver'].to_networkx_graph(), source=nx.from_edgelist(edges)) + emb = {u: [v] for u, v in emb.items()} # Wrap target nodes in lists + embeddings_cached[spins] = emb # Store embedding in cache + return embeddings_cached, [spins] + # filename = [file for file in os.listdir('helpers') if + # '.json' in file and 'emb_' in file][0] + # with open(f'helpers/{filename}', 'r') as fp: + # embeddings_cached = json.load(fp) + # embeddings_cached = json_to_dict(embeddings_cached) + # return embeddings_cached, list() embeddings_cached = {} # Wipe out previous QPU's embeddings @@ -323,8 +335,16 @@ def submit_job(job_submit_time, qpu_name, spins, J, ta_ns, embeddings_cached): bqm = create_bqm(num_spins=spins, coupling_strength=J) - if qpu_name == 'simulated_annealing_solver': - sampleset = qpus['simulated_annealing_solver'].sample(bqm) + if qpu_name == 'mock_dwave_solver': + embedding = embeddings_cached + emb = find_subgraph( + target=qpus['mock_dwave_solver'].to_networkx_graph(), + source=dimod.to_networkx_graph(bqm)) + emb = {u: [v] for u, v in emb.items()} + bqm_embedded = embed_bqm(bqm, emb, MockKibbleZurekSampler(topology_type='pegasus', topology_shape=[16]).adjacency) + # Calculate 
annealing_time in microseconds as per your setup + annealing_time = ta_ns / 1000 # ta_ns is in nanoseconds + sampleset = qpus['mock_dwave_solver'].sample(bqm_embedded, annealing_time=annealing_time) return json.dumps(sampleset.to_serializable()) else: @@ -376,10 +396,10 @@ def simulate(dummy1, dummy2, job_id, job_submit_state, job_submit_time, \ if trigger_id == 'btn_simulate': - if spins in cached_embedding_lengths or qpu_name == 'simulated_annealing_solver': + if spins in cached_embedding_lengths or qpu_name == 'mock_dwave_solver': submit_time = datetime.datetime.now().strftime('%c') - if qpu_name == 'simulated_annealing_solver': # Hack to fix switch from SA to QPU + if qpu_name == 'mock_dwave_solver': # Hack to fix switch from SA to QPU submit_time = 'SA' job_submit_state = 'SUBMITTED' embedding = dash.no_update From e22841a5833bdd004ebe94ab8451fd8db5b26bee Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 21 Nov 2024 17:58:04 -0800 Subject: [PATCH 004/170] Added a new radio button for coupling plot --- helpers/layouts_components.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/helpers/layouts_components.py b/helpers/layouts_components.py index 1080130..fa9e2b9 100644 --- a/helpers/layouts_components.py +++ b/helpers/layouts_components.py @@ -14,11 +14,11 @@ import dash_bootstrap_components as dbc from dash.dcc import Checklist, Dropdown, Input, Link, RadioItems, Slider -from dash import html +from dash import html, dcc __all__ = ['config_anneal_duration', 'config_kz_graph', 'config_spins', 'config_coupling_strength', 'config_qpu_selection', 'dbc_modal', 'embeddings', - 'job_bar_display', 'ring_lengths', 'tooltips_activate', ] + 'job_bar_display', 'ring_lengths', 'tooltips_activate'] ring_lengths = [512, 1024, 2048] @@ -49,7 +49,12 @@ 'label': 'Schedule', 'value': 'schedule', 'disabled': False - }, + }, + { + 'label': 'Coupling Strength', + 'value': 'coupling', + 'disabled': False + } ], value='both', inputStyle={'margin-right': '10px', 'margin-bottom': '5px'}, @@ -191,4 +196,5 @@ def dbc_modal(name): inputStyle={'margin-right': '10px', 'margin-bottom': '10px'}, labelStyle={'color': 'white', 'font-size': 12, 'display': 'inline-block', 'marginLeft': 20}, inline=True, # Currently requires above 'inline-block' -) \ No newline at end of file +) + From eb64aa5fe5cb995a91991b63d7ef4efef1ae9d10 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 21 Nov 2024 18:19:33 -0800 Subject: [PATCH 005/170] Added plots for couplig strength --- app.py | 3 +++ helpers/plots.py | 10 ++++++++++ 2 files changed, 13 insertions(+) diff --git a/app.py b/app.py index 0b82a17..739e98c 100644 --- a/app.py +++ b/app.py @@ -36,6 +36,8 @@ import networkx as nx from minorminer.subgraph import find_subgraph +from plotly.subplots import make_subplots +import plotly.graph_objects as go app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP]) @@ -262,6 +264,7 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ if trigger_id in ['kz_graph_display', 'coupling_strength', 'quench_schedule_filename'] : + fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J, schedule_filename) return fig diff --git a/helpers/plots.py b/helpers/plots.py index a38f9bf..2d29072 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -134,6 +134,9 @@ def plot_kink_densities_bg(display, time_range, coupling_strength, schedule_name type='linear', ) + x_axis3 = dict( + title='Coupling Strength' + ) if display == 'kink_density': fig_layout = go.Layout( @@ 
-151,7 +154,14 @@ def plot_kink_densities_bg(display, time_range, coupling_strength, schedule_name ) fig_data = [energy_transverse, energy_problem] + elif display == 'coupling': + + fig_layout = go.Layout( + xaxis=x_axis3, + yaxis=y_axis1, + ) + fig_data = [predicted_plus, predicted_minus] else: # Display both plots together x_axis2.update({'overlaying': 'x1'}) From f1077ebbc4807444ffb47894fd8a0521dc97ba8b Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 21 Nov 2024 19:06:11 -0800 Subject: [PATCH 006/170] Added plots for coupling points --- MockKibbleZurekSampler.py | 3 ++- app.py | 2 +- helpers/plots.py | 24 ++++++++++++++++++++++-- 3 files changed, 25 insertions(+), 4 deletions(-) diff --git a/MockKibbleZurekSampler.py b/MockKibbleZurekSampler.py index 42214c6..7db26ac 100644 --- a/MockKibbleZurekSampler.py +++ b/MockKibbleZurekSampler.py @@ -39,8 +39,9 @@ def __init__( substitute_kwargs=substitute_kwargs, exact_solver_cutoff=exact_solver_cutoff, ) - self.sampler_type = 'mock' + self.mocked_parameters.add('annealing_time') + def sample(self, bqm, **kwargs): # Extract flux biases from kwargs (if provided) diff --git a/app.py b/app.py index 739e98c..f4b52ec 100644 --- a/app.py +++ b/app.py @@ -278,7 +278,7 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ sampleset_unembedded = get_samples(client, job_id, spins, J, embeddings_cached[spins]) _, kink_density = kink_stats(sampleset_unembedded, J) - fig = plot_kink_density(kz_graph_display, figure, kink_density, ta) + fig = plot_kink_density(kz_graph_display, figure, kink_density, ta, J) return fig else: diff --git a/helpers/plots.py b/helpers/plots.py index 2d29072..d97c24b 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -135,7 +135,8 @@ def plot_kink_densities_bg(display, time_range, coupling_strength, schedule_name ) x_axis3 = dict( - title='Coupling Strength' + title='Coupling Strength', + type='log', ) if display == 'kink_density': @@ -162,6 +163,8 @@ def plot_kink_densities_bg(display, time_range, coupling_strength, schedule_name ) fig_data = [predicted_plus, predicted_minus] + + else: # Display both plots together x_axis2.update({'overlaying': 'x1'}) @@ -216,7 +219,7 @@ def plot_kink_densities_bg(display, time_range, coupling_strength, schedule_name return fig -def plot_kink_density(display, fig_dict, kink_density, anneal_time): +def plot_kink_density(display, fig_dict, kink_density, anneal_time, J): """Add kink density from QPU samples to plot. 
Args: @@ -240,6 +243,23 @@ def plot_kink_density(display, fig_dict, kink_density, anneal_time): fig_dict ) + if display == 'coupling': + + fig.add_trace( + go.Scatter( + x=[J], + y=[kink_density], + xaxis='x3', + yaxis='y1', + showlegend=False, + marker=dict(size=10, + color='black', + symbol='x', + ) + ) + ) + return fig + fig.add_trace( go.Scatter( x=[anneal_time], From 6366ee82ca87068a586b3330036846698cf092a1 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 22 Nov 2024 12:04:54 -0800 Subject: [PATCH 007/170] Saved changes to MockKZSampler --- MockKibbleZurekSampler.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/MockKibbleZurekSampler.py b/MockKibbleZurekSampler.py index 7db26ac..d62c654 100644 --- a/MockKibbleZurekSampler.py +++ b/MockKibbleZurekSampler.py @@ -40,8 +40,7 @@ def __init__( exact_solver_cutoff=exact_solver_cutoff, ) self.sampler_type = 'mock' - self.mocked_parameters.add('annealing_time') - + def sample(self, bqm, **kwargs): # Extract flux biases from kwargs (if provided) From a0723dd21aef173acf75c71e2b5891b9ae2ff926 Mon Sep 17 00:00:00 2001 From: Jack Raymond <10591246+jackraymond@users.noreply.github.com> Date: Fri, 22 Nov 2024 12:48:14 -0800 Subject: [PATCH 008/170] implement unembed bugfix for samplesets --- helpers/qa.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/helpers/qa.py b/helpers/qa.py index 2b875c0..42dcd0f 100644 --- a/helpers/qa.py +++ b/helpers/qa.py @@ -120,15 +120,14 @@ def get_samples(client, job_id, num_spins, J, embedding): Unembedded dimod sample set. """ - if '"type": "SampleSet"' in job_id: - return dimod.SampleSet.from_serializable(json.loads(job_id)) + bqm = create_bqm(num_spins=num_spins, coupling_strength=J) + if '"type": "SampleSet"' in job_id: # See modifications to submit_job + sampleset = dimod.SampleSet.from_serializable(json.loads(job_id)) else: sampleset = client.retrieve_answer(job_id).sampleset - - bqm = create_bqm(num_spins=num_spins, coupling_strength=J) - return unembed_sampleset(sampleset, embedding, bqm) + return unembed_sampleset(sampleset, embedding, bqm) def json_to_dict(emb_json): """Retrieve an unembedded sampleset for a given job ID. 
@@ -144,4 +143,3 @@ def json_to_dict(emb_json): return {int(key): {int(node): qubits for node, qubits in emb.items()} for key, emb in emb_json.items()} - \ No newline at end of file From a9d3631d03cbf12c371f6118e3642399cd5b9357 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 22 Nov 2024 14:25:15 -0800 Subject: [PATCH 009/170] Added global variable for a constant J value --- app.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/app.py b/app.py index f4b52ec..29c1e7e 100644 --- a/app.py +++ b/app.py @@ -41,8 +41,10 @@ app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP]) -# Initialize: available QPUs, initial progress-bar status +# global variable for a default J value +J_baseline = -1.8 +# Initialize: available QPUs, initial progress-bar status try: client = Client.from_config(client='qpu') qpus = {qpu.name: qpu for qpu in client.get_solvers(fast_anneal_time_range__covers=[0.005, 0.1])} From 3b773a17e1ddd67dfc6b5f4fc77b9413014ea78c Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 22 Nov 2024 14:28:50 -0800 Subject: [PATCH 010/170] Change the plot kink density function to use baseline J for background plot, and disable the function triger when changing inputs on couplings --- app.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/app.py b/app.py index 29c1e7e..9f9bd58 100644 --- a/app.py +++ b/app.py @@ -247,7 +247,7 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): @app.callback( Output('sample_vs_theory', 'figure'), Input('kz_graph_display', 'value'), - Input('coupling_strength', 'value'), + State('coupling_strength', 'value'), # previously input Input('quench_schedule_filename', 'children'), Input('job_submit_state', 'children'), State('job_id', 'children'), @@ -266,8 +266,8 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ if trigger_id in ['kz_graph_display', 'coupling_strength', 'quench_schedule_filename'] : - - fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J, schedule_filename) + # Use global J + fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J_baseline, schedule_filename) return fig @@ -286,7 +286,8 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ else: return dash.no_update - fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J, schedule_filename) + # use global J value + fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J_baseline, schedule_filename) return fig @app.callback( From 7aaf17c7bae41f1dd4c97a4a0b38848092c6c2b1 Mon Sep 17 00:00:00 2001 From: Jack Raymond <10591246+jackraymond@users.noreply.github.com> Date: Fri, 22 Nov 2024 14:53:08 -0800 Subject: [PATCH 011/170] Correct bug in setting of num_sweeps --- MockKibbleZurekSampler.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/MockKibbleZurekSampler.py b/MockKibbleZurekSampler.py index d62c654..20ee603 100644 --- a/MockKibbleZurekSampler.py +++ b/MockKibbleZurekSampler.py @@ -52,14 +52,11 @@ def sample(self, bqm, **kwargs): annealing_time = kwargs.pop('annealing_time', None) if annealing_time is not None: - try: - num_sweeps = int(annealing_time * 1000) - except (TypeError, ValueError): - num_sweeps = 1000 - kwargs['annealing_time'] = num_sweeps + num_sweeps = int(annealing_time * 1000) + kwargs['num_sweeps'] = num_sweeps else: # Default number of sweeps if annealing_time is not provided - kwargs['annealing_time'] = kwargs.get('num_sweeps', 1000) + kwargs['num_sweeps'] = 
kwargs.get('num_sweeps', 1000) # Adjust the BQM to include flux biases bqm_effective = bqm.change_vartype('SPIN', inplace=False) From 668f6b0bd61ec8c6ecc826a97232db4c30454172 Mon Sep 17 00:00:00 2001 From: Jack Raymond <10591246+jackraymond@users.noreply.github.com> Date: Fri, 22 Nov 2024 15:00:11 -0800 Subject: [PATCH 012/170] Remove redundant kwargs update --- MockKibbleZurekSampler.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/MockKibbleZurekSampler.py b/MockKibbleZurekSampler.py index 20ee603..9c62c60 100644 --- a/MockKibbleZurekSampler.py +++ b/MockKibbleZurekSampler.py @@ -54,9 +54,6 @@ def sample(self, bqm, **kwargs): if annealing_time is not None: num_sweeps = int(annealing_time * 1000) kwargs['num_sweeps'] = num_sweeps - else: - # Default number of sweeps if annealing_time is not provided - kwargs['num_sweeps'] = kwargs.get('num_sweeps', 1000) # Adjust the BQM to include flux biases bqm_effective = bqm.change_vartype('SPIN', inplace=False) From beb71a61589265e984431b323a4966374cd63bcb Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 22 Nov 2024 15:18:06 -0800 Subject: [PATCH 013/170] Updated readme document for environment variable config --- README.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/README.md b/README.md index ac7c3c7..7249794 100644 --- a/README.md +++ b/README.md @@ -118,6 +118,17 @@ Your development environment should be configured to You can see information about supported IDEs and authorizing access to your Leap account [here](https://docs.dwavesys.com/docs/latest/doc_leap_dev_env.html). +The default configuration uses `DWaveSampler` with specific models accessed through the Leap API. To run experiments using `MockDKibbleZurekSampler` locally, set the environment variable in your terminal before running the application. + +**Windows terminal**: +``` +set ZNE=YES +``` +**Unix terminal**: +``` +export ZNE=YES +``` + To run the demo: ```bash From b232d22d0f0c283d381f11458bd7c56bbf05fc2a Mon Sep 17 00:00:00 2001 From: Jack Raymond <10591246+jackraymond@users.noreply.github.com> Date: Fri, 22 Nov 2024 15:25:10 -0800 Subject: [PATCH 014/170] Refactor MockKibbleZurekSampler, add kwargs.copy() --- MockKibbleZurekSampler.py | 31 +++++++++++++------------------ 1 file changed, 13 insertions(+), 18 deletions(-) diff --git a/MockKibbleZurekSampler.py b/MockKibbleZurekSampler.py index 9c62c60..22c097b 100644 --- a/MockKibbleZurekSampler.py +++ b/MockKibbleZurekSampler.py @@ -42,28 +42,23 @@ def __init__( self.sampler_type = 'mock' def sample(self, bqm, **kwargs): + _kwargs = kwargs.copy() # We will modify arguments + _bqm = bqm.change_vartype('SPIN', inplace=False) # We will modify the bqm + + # Extract annealing_time from kwargs (if provided) + annealing_time = _kwargs.pop('annealing_time', 20) # 20us default. 
+ _kwargs['num_sweeps'] = int(annealing_time * 1000) # 1000 sweeps per microsecond # Extract flux biases from kwargs (if provided) - flux_biases = kwargs.get('flux_biases', {}) - if flux_biases: - # Remove flux_biases from kwargs to avoid passing it to substitute_sampler - kwargs = kwargs.copy() - del kwargs['flux_biases'] - - annealing_time = kwargs.pop('annealing_time', None) - if annealing_time is not None: - num_sweeps = int(annealing_time * 1000) - kwargs['num_sweeps'] = num_sweeps - - # Adjust the BQM to include flux biases - bqm_effective = bqm.change_vartype('SPIN', inplace=False) - + # flux_biases = kwargs.pop('flux_biases', {}) # flux_to_h_factor = fluxbias_to_h() - # for v in bqm_effective.variables: - # bias = bqm_effective.get_linear(v) - # bqm_effective.set_linear(v, bias + flux_to_h_factor * flux_biases[v]) + # for v in _bqm.variables: + # bias = _bqm.get_linear(v) + # _bqm.set_linear(v, bias + flux_to_h_factor * flux_biases[v]) - ss = super().sample(bqm=bqm_effective, **kwargs) + # TO DO: corrupt bqm with noise proportional to annealing_time + + ss = super().sample(bqm=_bqm, **kwargs) ss.change_vartype(bqm.vartype) From 21535c2f0a89d598cc32eba865a1c5287ea4034d Mon Sep 17 00:00:00 2001 From: Jack Raymond <10591246+jackraymond@users.noreply.github.com> Date: Fri, 22 Nov 2024 22:25:30 -0800 Subject: [PATCH 015/170] Correct substitute_sampler errors --- MockKibbleZurekSampler.py | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/MockKibbleZurekSampler.py b/MockKibbleZurekSampler.py index 22c097b..433e05f 100644 --- a/MockKibbleZurekSampler.py +++ b/MockKibbleZurekSampler.py @@ -3,6 +3,7 @@ from dwave.system.temperatures import fluxbias_to_h from dwave.system.testing import MockDWaveSampler + class MockKibbleZurekSampler(MockDWaveSampler): def __init__( self, @@ -19,13 +20,13 @@ def __init__( exact_solver_cutoff=0, ): if substitute_sampler is None: - substitute_sampler = SimulatedAnnealingSampler() + self.substitute_sampler = SimulatedAnnealingSampler() if substitute_kwargs is None: - substitute_kwargs = {'beta_range': [0, 3], - 'beta_schedule_type': 'linear', - 'num_sweeps': 100, - 'randomize_order': True, - 'proposal_acceptance_criteria': 'Gibbs'} + self.substitute_kwargs = {'beta_range': [0.01, 100], + 'beta_schedule_type': 'geometric', + 'num_sweeps': 1, + 'randomize_order': True, + 'proposal_acceptance_criteria': 'Gibbs'} super().__init__( nodelist=nodelist, edgelist=edgelist, @@ -40,15 +41,15 @@ def __init__( exact_solver_cutoff=exact_solver_cutoff, ) self.sampler_type = 'mock' + self.mocked_parameters.add('annealing_time') + self.parameters.update({'num_sweeps': []}) def sample(self, bqm, **kwargs): - _kwargs = kwargs.copy() # We will modify arguments - _bqm = bqm.change_vartype('SPIN', inplace=False) # We will modify the bqm + _bqm = bqm.change_vartype('SPIN', inplace=False) # Extract annealing_time from kwargs (if provided) - annealing_time = _kwargs.pop('annealing_time', 20) # 20us default. - _kwargs['num_sweeps'] = int(annealing_time * 1000) # 1000 sweeps per microsecond - + annealing_time = kwargs.pop('annealing_time', 20) # 20us default. 
+ num_sweeps = int(annealing_time * 1000//5) # 1000 sweeps per microsecond # Extract flux biases from kwargs (if provided) # flux_biases = kwargs.pop('flux_biases', {}) # flux_to_h_factor = fluxbias_to_h() @@ -58,7 +59,7 @@ def sample(self, bqm, **kwargs): # TO DO: corrupt bqm with noise proportional to annealing_time - ss = super().sample(bqm=_bqm, **kwargs) + ss = super().sample(bqm=_bqm, num_sweeps=num_sweeps, **kwargs) ss.change_vartype(bqm.vartype) From 8ba97f2a774780334237542e4444b1501d6379c4 Mon Sep 17 00:00:00 2001 From: Jack Raymond <10591246+jackraymond@users.noreply.github.com> Date: Fri, 22 Nov 2024 22:51:57 -0800 Subject: [PATCH 016/170] Remove num_sweeps from substitute_kwargs so that sample() responds to annealing_time parameter (bug in dwave-sampler?). --- MockKibbleZurekSampler.py | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/MockKibbleZurekSampler.py b/MockKibbleZurekSampler.py index 433e05f..9511ee7 100644 --- a/MockKibbleZurekSampler.py +++ b/MockKibbleZurekSampler.py @@ -20,13 +20,12 @@ def __init__( exact_solver_cutoff=0, ): if substitute_sampler is None: - self.substitute_sampler = SimulatedAnnealingSampler() + substitute_sampler = SimulatedAnnealingSampler() if substitute_kwargs is None: - self.substitute_kwargs = {'beta_range': [0.01, 100], - 'beta_schedule_type': 'geometric', - 'num_sweeps': 1, - 'randomize_order': True, - 'proposal_acceptance_criteria': 'Gibbs'} + substitute_kwargs = {'beta_range': [100, 100], # Quench + 'beta_schedule_type': 'geometric', + 'randomize_order': True, + 'proposal_acceptance_criteria': 'Gibbs'} super().__init__( nodelist=nodelist, edgelist=edgelist, @@ -42,26 +41,26 @@ def __init__( ) self.sampler_type = 'mock' self.mocked_parameters.add('annealing_time') + self.mocked_parameters.add('num_sweeps') self.parameters.update({'num_sweeps': []}) def sample(self, bqm, **kwargs): + # TO DO: corrupt bqm with noise proportional to annealing_time _bqm = bqm.change_vartype('SPIN', inplace=False) - + # Extract annealing_time from kwargs (if provided) annealing_time = kwargs.pop('annealing_time', 20) # 20us default. - num_sweeps = int(annealing_time * 1000//5) # 1000 sweeps per microsecond + num_sweeps = int(annealing_time * 1000) # 1000 sweeps per microsecond # Extract flux biases from kwargs (if provided) # flux_biases = kwargs.pop('flux_biases', {}) # flux_to_h_factor = fluxbias_to_h() # for v in _bqm.variables: # bias = _bqm.get_linear(v) # _bqm.set_linear(v, bias + flux_to_h_factor * flux_biases[v]) - - # TO DO: corrupt bqm with noise proportional to annealing_time ss = super().sample(bqm=_bqm, num_sweeps=num_sweeps, **kwargs) - ss.change_vartype(bqm.vartype) + ss.change_vartype(bqm.vartype) # Not required (but safe) this case ... ss = SampleSet.from_samples_bqm(ss, bqm) From 31a9937154df917c00505d07f785233abd364965 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Sun, 24 Nov 2024 23:32:02 -0800 Subject: [PATCH 017/170] Incomplete testing scripts for MockKZSampler --- tests/test_mock_kz_sampler.py | 104 ++++++++++++++++++++++++++++++++++ 1 file changed, 104 insertions(+) create mode 100644 tests/test_mock_kz_sampler.py diff --git a/tests/test_mock_kz_sampler.py b/tests/test_mock_kz_sampler.py new file mode 100644 index 0000000..5c6aa66 --- /dev/null +++ b/tests/test_mock_kz_sampler.py @@ -0,0 +1,104 @@ +# Copyright 2024 D-Wave Systems Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +import sys +import os + +import dimod +from dimod.testing import * +from unittest.mock import patch +from dimod import SampleSet + +sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/..") + +from dimod import BinaryQuadraticModel +from dwave.samplers import SimulatedAnnealingSampler +from dwave.system.testing import MockDWaveSampler +from MockKibbleZurekSampler import MockKibbleZurekSampler + +@pytest.fixture +def default_sampler(): + nodelist = ['a', 'b'] + edgelist = [('a', 'b')] + return MockKibbleZurekSampler(nodelist=nodelist, edgelist=edgelist) + +@pytest.fixture +def custom_sampler(): + custom_nodelist = [0, 1, 2] + custom_edgelist = [(0, 1), (1, 2)] + substitute_sampler = SimulatedAnnealingSampler() + substitute_kwargs = { + 'beta_range': [1, 2], + 'num_sweeps': 200 + } + return MockKibbleZurekSampler( + nodelist=custom_nodelist, + edgelist=custom_edgelist, + topology_type='chimera', + topology_shape=[4, 4, 4], + substitute_sampler=substitute_sampler, + substitute_kwargs=substitute_kwargs + ) + +@pytest.fixture +def sample_bqm(): + return BinaryQuadraticModel({'a': 1.0, 'b': -1.0}, {('a', 'b'): 0.5}, 0.0, 'BINARY') + + + +def test_initialization(default_sampler, custom_sampler): + assert default_sampler.topology_type == 'pegasus' + assert default_sampler.topology_shape == [16] + assert isinstance(default_sampler.substitute_sampler, SimulatedAnnealingSampler) + assert default_sampler.substitute_kwargs['beta_range'] == [0, 3] + assert default_sampler.substitute_kwargs['beta_schedule_type'] == 'linear' + assert default_sampler.substitute_kwargs['num_sweeps'] == 100 + assert default_sampler.substitute_kwargs['randomize_order'] is True + assert default_sampler.substitute_kwargs['proposal_acceptance_criteria'] == 'Gibbs' + assert default_sampler.sampler_type == 'mock' + + assert custom_sampler.topology_type == 'chimera' + assert custom_sampler.topology_shape == [4, 4, 4] + assert custom_sampler.nodelist == [0, 1, 2] + assert custom_sampler.edgelist == [(0, 1), (1, 2)] + + assert isinstance(custom_sampler.substitute_sampler, SimulatedAnnealingSampler) + assert custom_sampler.substitute_kwargs['beta_range'] == [1, 2] + assert custom_sampler.substitute_kwargs['num_sweeps'] == 200 + + + +def test_sample_with_default_annealing_time(default_sampler, sample_bqm): + sampleset = default_sampler.sample(sample_bqm) + + # default anneal _time should be 20 + expected_num_sweeps = int(20 * 1000) + assert default_sampler.== expected_num_sweeps + +def test_sample_with_custom_annealing_time(default_sampler, sample_bqm): + pass + + +def test_sample_preserves_vartype(default_sampler, sample_bqm): + pass + + +def test_bqm_vartype_conversion(default_sampler, sample_bqm): + pass + +def test_substitute_sampler_call_parameters(default_sampler, sample_bqm): + pass + + From d01cd392269317656e92e657aabe31cd8b13d2a3 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Mon, 25 Nov 2024 15:04:40 -0800 Subject: [PATCH 018/170] Changed coupling ranges --- helpers/layouts_components.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/helpers/layouts_components.py 
b/helpers/layouts_components.py index fa9e2b9..beeb48d 100644 --- a/helpers/layouts_components.py +++ b/helpers/layouts_components.py @@ -98,6 +98,8 @@ value=-1.4, marks=j_marks, step=None, + min=-1.8, + max=-0.6 ) ]), ), From 1290970b2de501ea92ebd868c35c66c5eb46fa16 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Mon, 25 Nov 2024 15:10:10 -0800 Subject: [PATCH 019/170] Update plot label to Noise level lambda --- helpers/layouts_components.py | 2 +- helpers/plots.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/helpers/layouts_components.py b/helpers/layouts_components.py index beeb48d..0e28cc4 100644 --- a/helpers/layouts_components.py +++ b/helpers/layouts_components.py @@ -51,7 +51,7 @@ 'disabled': False }, { - 'label': 'Coupling Strength', + 'label': 'Noise level (lambda)', 'value': 'coupling', 'disabled': False } diff --git a/helpers/plots.py b/helpers/plots.py index d97c24b..6e27ff6 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -135,7 +135,7 @@ def plot_kink_densities_bg(display, time_range, coupling_strength, schedule_name ) x_axis3 = dict( - title='Coupling Strength', + title='lambda', type='log', ) if display == 'kink_density': From 23d1fd72847e30d291ae538c1de8041b0520841a Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Mon, 25 Nov 2024 15:31:56 -0800 Subject: [PATCH 020/170] Adjust coupling plot background and added kappa calculation --- helpers/plots.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/helpers/plots.py b/helpers/plots.py index 6e27ff6..57708dd 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -162,9 +162,7 @@ def plot_kink_densities_bg(display, time_range, coupling_strength, schedule_name yaxis=y_axis1, ) - fig_data = [predicted_plus, predicted_minus] - - + fig_data = [] else: # Display both plots together x_axis2.update({'overlaying': 'x1'}) @@ -189,7 +187,7 @@ def plot_kink_densities_bg(display, time_range, coupling_strength, schedule_name margin=dict(b=5,l=5,r=20,t=10) ) - if display != 'schedule': + if display != 'schedule' and display != 'coupling': fig.add_annotation( xref='x', @@ -244,10 +242,11 @@ def plot_kink_density(display, fig_dict, kink_density, anneal_time, J): ) if display == 'coupling': - + + kappa = -1.8/J fig.add_trace( go.Scatter( - x=[J], + x=[kappa], y=[kink_density], xaxis='x3', yaxis='y1', From 2adfb4635dcf72ca3ed56df532e0adc1dcd37199 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Mon, 25 Nov 2024 15:41:48 -0800 Subject: [PATCH 021/170] Update plot label --- helpers/plots.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/helpers/plots.py b/helpers/plots.py index 57708dd..0b997c0 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -16,6 +16,7 @@ import numpy as np import pandas as pd import plotly.graph_objects as go +from numpy.polynomial.polynomial import Polynomial from helpers.kz_calcs import theoretical_kink_density @@ -135,7 +136,7 @@ def plot_kink_densities_bg(display, time_range, coupling_strength, schedule_name ) x_axis3 = dict( - title='lambda', + title='kappa', type='log', ) if display == 'kink_density': @@ -233,7 +234,6 @@ def plot_kink_density(display, fig_dict, kink_density, anneal_time, J): Returns: Updated Plotly figure with a marker at (anneal time, kink-density). 
""" - if display == 'schedule': return no_update From 79d4101b0b5ae094ebc0603374e76b3259b89015 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Mon, 25 Nov 2024 16:27:53 -0800 Subject: [PATCH 022/170] Added dcc storage for kappa and polynomial regression calculation --- app.py | 68 ++++++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 64 insertions(+), 4 deletions(-) diff --git a/app.py b/app.py index 9f9bd58..60681c2 100644 --- a/app.py +++ b/app.py @@ -20,6 +20,10 @@ import numpy as np import os +from dash import dcc +from collections import defaultdict +from numpy.polynomial.polynomial import Polynomial + import dimod from dwave.cloud import Client from dwave.embedding import embed_bqm, is_valid_embedding @@ -95,6 +99,8 @@ style={'minWidth': "60rem"}, ), ]), + # store coupling data points + dcc.Store(id='coupling_data', data={}), ], fluid=True, ) @@ -246,6 +252,7 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): @app.callback( Output('sample_vs_theory', 'figure'), + Output('coupling_data', 'data'), # store data using dcc Input('kz_graph_display', 'value'), State('coupling_strength', 'value'), # previously input Input('quench_schedule_filename', 'children'), @@ -256,10 +263,12 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): State('anneal_duration', 'value'), State('spins', 'value'), State('embeddings_cached', 'data'), - State('sample_vs_theory', 'figure'),) + State('sample_vs_theory', 'figure'), + State('coupling_data', 'data'), # access previously stored data + ) def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ job_submit_state, job_id, ta_min, ta_max, ta, \ - spins, embeddings_cached, figure): + spins, embeddings_cached, figure, coupling_data): """Generate graphics for kink density based on theory and QPU samples.""" trigger_id = dash.callback_context.triggered[0]['prop_id'].split('.')[0] @@ -269,7 +278,11 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ # Use global J fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J_baseline, schedule_filename) - return fig + # reset couplingd ata storage if other plot are displayed + if kz_graph_display != 'coupling': + coupling_data = {} + + return fig, coupling_data if trigger_id == 'job_submit_state': @@ -281,7 +294,54 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ _, kink_density = kink_stats(sampleset_unembedded, J) fig = plot_kink_density(kz_graph_display, figure, kink_density, ta, J) - return fig + + if kz_graph_display == 'coupling': + # Calculate kappa + kappa = -1.8 / J + + # Initialize the list for this anneal_time if not present + ta_str = str(ta) + if ta_str not in coupling_data: + coupling_data[ta_str] = [] + + # Append the new data point + coupling_data[ta_str].append({'kappa': kappa, 'kink_density': kink_density}) + + # Check if more than two data points exist for this anneal_time + if len(coupling_data[ta_str]) > 2: + # Perform a polynomial fit (e.g., linear) + data_points = coupling_data[ta_str] + x = np.array([point['kappa'] for point in data_points]) + y = np.array([point['kink_density'] for point in data_points]) + + # Ensure there are enough unique x values for fitting + if len(np.unique(x)) > 1: + # Fit a 1st degree polynomial (linear fit) + coeffs = Polynomial.fit(x, y, deg=1).convert().coef + p = Polynomial(coeffs) + + # Generate fit curve points + x_fit = np.linspace(min(x), max(x), 100) + y_fit = p(x_fit) + + # Remove existing fitting curve 
traces to prevent duplication + fig.data = [trace for trace in fig.data if trace.name != 'Fitting Curve'] + + # Add the new fitting curve + fit_trace = go.Scatter( + x=x_fit, + y=y_fit, + mode='lines', + name='Fitting Curve', + line=dict(color='green', dash='dash'), + showlegend=True, + xaxis='x3', + yaxis='y1', + ) + + fig.add_trace(fit_trace) + + return fig, coupling_data else: return dash.no_update From 28e97e32194d0ac46440ab0b3badd87a8b086c4e Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Mon, 25 Nov 2024 16:28:24 -0800 Subject: [PATCH 023/170] Added plots for linear regression calculations --- helpers/plots.py | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/helpers/plots.py b/helpers/plots.py index 0b997c0..f114102 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -22,7 +22,7 @@ __all__ = ['plot_kink_densities_bg', 'plot_kink_density', 'plot_spin_orientation', ] -def plot_kink_densities_bg(display, time_range, coupling_strength, schedule_name): +def plot_kink_densities_bg(display, time_range, coupling_strength, schedule_name, fitting_curve=None): """ Plot background of theoretical kink-density and QPU energy scales. @@ -164,6 +164,16 @@ def plot_kink_densities_bg(display, time_range, coupling_strength, schedule_name ) fig_data = [] + if fitting_curve: + fit_trace = go.Scatter( + x=fitting_curve['x'], + y=fitting_curve['y'], + mode='lines', + name='Fitting Curve', + line=dict(color='green', dash='dash'), + showlegend=True + ) + fig_data.append(fit_trace) else: # Display both plots together x_axis2.update({'overlaying': 'x1'}) @@ -218,7 +228,7 @@ def plot_kink_densities_bg(display, time_range, coupling_strength, schedule_name return fig -def plot_kink_density(display, fig_dict, kink_density, anneal_time, J): +def plot_kink_density(display, fig_dict, kink_density, anneal_time, J, fitting_curve=None): """Add kink density from QPU samples to plot. Args: @@ -257,6 +267,17 @@ def plot_kink_density(display, fig_dict, kink_density, anneal_time, J): ) ) ) + if fitting_curve: + fit_trace = go.Scatter( + x=fitting_curve['x'], + y=fitting_curve['y'], + mode='lines', + name='Fitting Curve', + line=dict(color='green', dash='dash'), + showlegend=True + ) + fig.add_trace(fit_trace) + return fig fig.add_trace( From 5c293a3091a0f49124d9373ffced26866e635302 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Mon, 25 Nov 2024 17:55:17 -0800 Subject: [PATCH 024/170] Removed redundant plotting functions --- helpers/plots.py | 25 ++----------------------- 1 file changed, 2 insertions(+), 23 deletions(-) diff --git a/helpers/plots.py b/helpers/plots.py index f114102..0b997c0 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -22,7 +22,7 @@ __all__ = ['plot_kink_densities_bg', 'plot_kink_density', 'plot_spin_orientation', ] -def plot_kink_densities_bg(display, time_range, coupling_strength, schedule_name, fitting_curve=None): +def plot_kink_densities_bg(display, time_range, coupling_strength, schedule_name): """ Plot background of theoretical kink-density and QPU energy scales. 
@@ -164,16 +164,6 @@ def plot_kink_densities_bg(display, time_range, coupling_strength, schedule_name ) fig_data = [] - if fitting_curve: - fit_trace = go.Scatter( - x=fitting_curve['x'], - y=fitting_curve['y'], - mode='lines', - name='Fitting Curve', - line=dict(color='green', dash='dash'), - showlegend=True - ) - fig_data.append(fit_trace) else: # Display both plots together x_axis2.update({'overlaying': 'x1'}) @@ -228,7 +218,7 @@ def plot_kink_densities_bg(display, time_range, coupling_strength, schedule_name return fig -def plot_kink_density(display, fig_dict, kink_density, anneal_time, J, fitting_curve=None): +def plot_kink_density(display, fig_dict, kink_density, anneal_time, J): """Add kink density from QPU samples to plot. Args: @@ -267,17 +257,6 @@ def plot_kink_density(display, fig_dict, kink_density, anneal_time, J, fitting_c ) ) ) - if fitting_curve: - fit_trace = go.Scatter( - x=fitting_curve['x'], - y=fitting_curve['y'], - mode='lines', - name='Fitting Curve', - line=dict(color='green', dash='dash'), - showlegend=True - ) - fig.add_trace(fit_trace) - return fig fig.add_trace( From 47c0632ca284479eeb78501c6c668e412a62ee91 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Mon, 25 Nov 2024 17:56:03 -0800 Subject: [PATCH 025/170] Added storage for zne_estimation and kink_density_data along with zne calculation for kink_density plot (still need fixing) --- app.py | 71 ++++++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 67 insertions(+), 4 deletions(-) diff --git a/app.py b/app.py index 60681c2..6db9811 100644 --- a/app.py +++ b/app.py @@ -101,6 +101,9 @@ ]), # store coupling data points dcc.Store(id='coupling_data', data={}), + dcc.Store(id='kink_density_data', data={}), + # store zero noise extrapolation + dcc.Store(id='zne_estimates', data={}), ], fluid=True, ) @@ -253,6 +256,8 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): @app.callback( Output('sample_vs_theory', 'figure'), Output('coupling_data', 'data'), # store data using dcc + Output('zne_estimates', 'data'), # update zne_estimates + Output('kink_density_data', 'data'), # update kink density data Input('kz_graph_display', 'value'), State('coupling_strength', 'value'), # previously input Input('quench_schedule_filename', 'children'), @@ -265,10 +270,12 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): State('embeddings_cached', 'data'), State('sample_vs_theory', 'figure'), State('coupling_data', 'data'), # access previously stored data + State('zne_estimates', 'data'), # Access ZNE estimates + State('kink_density_data', 'data'), ) def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ job_submit_state, job_id, ta_min, ta_max, ta, \ - spins, embeddings_cached, figure, coupling_data): + spins, embeddings_cached, figure, coupling_data, zne_estimates, kink_density_data): """Generate graphics for kink density based on theory and QPU samples.""" trigger_id = dash.callback_context.triggered[0]['prop_id'].split('.')[0] @@ -281,8 +288,9 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ # reset couplingd ata storage if other plot are displayed if kz_graph_display != 'coupling': coupling_data = {} + zne_estimates = {} - return fig, coupling_data + return fig, coupling_data, zne_estimates, kink_density_data if trigger_id == 'job_submit_state': @@ -320,6 +328,9 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ coeffs = Polynomial.fit(x, y, deg=1).convert().coef p = 
Polynomial(coeffs) + a = p(0) # p(kappa=0) = a + b*0 = a + zne_estimates[ta_str] = a + # Generate fit curve points x_fit = np.linspace(min(x), max(x), 100) y_fit = p(x_fit) @@ -341,14 +352,66 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ fig.add_trace(fit_trace) - return fig, coupling_data + # Add the ZNE point at kappa=0 + zne_trace = go.Scatter( + x=[0], + y=[a], + mode='markers', + name='ZNE Estimate', + marker=dict(size=12, color='purple', symbol='diamond'), + showlegend=False, + xaxis='x3', + yaxis='y1', + ) + + fig.add_trace(zne_trace) + + elif kz_graph_display == 'kink_density': + # Initialize the list for this anneal_time if not present + ta_str = str(ta) + if ta_str not in kink_density_data: + kink_density_data[ta_str] = [] + + # Append the new data point + kink_density_data[ta_str].append({'ta': ta, 'kink_density': kink_density}) + ta_str = str(ta) + # Check if more than two data points exist for this anneal_time + if len(kink_density_data[ta_str]) > 2: + # Perform a polynomial fit (e.g., linear) + data_points = kink_density_data[ta_str] + x = np.array([point['ta'] for point in data_points]) + y = np.array([point['kink_density'] for point in data_points]) + coeffs = Polynomial.fit(x, y, deg=1).convert().coef + p = Polynomial(coeffs) + + a = p(0) # p(kappa=0) = a + b*0 = a + zne_estimates[ta_str] = a + # Generate fit curve points + x_fit = np.linspace(min(x), max(x), 100) + y_fit = p(x_fit) + + # Add the ZNE point at kappa=0 + zne_trace = go.Scatter( + x=[0], + y=[a], + mode='markers', + name='ZNE Estimate', + marker=dict(size=12, color='purple', symbol='diamond'), + xaxis='x1', + yaxis='y1', + showlegend=False, + ) + + fig.add_trace(zne_trace) + + return fig, coupling_data, zne_estimates, kink_density_data else: return dash.no_update # use global J value fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J_baseline, schedule_filename) - return fig + return fig, coupling_data, zne_estimates, kink_density @app.callback( Output('spin_orientation', 'figure'), From 69ce89d2f298d959d330f9642971833936c7d6eb Mon Sep 17 00:00:00 2001 From: Jack Raymond <10591246+jackraymond@users.noreply.github.com> Date: Tue, 26 Nov 2024 08:51:09 -0800 Subject: [PATCH 026/170] Modified MockKibbleZurekSampler to be sensitive to temperature at |J|=1 and kink density ~ 0.04 --- MockKibbleZurekSampler.py | 50 +++++++++++++++++++-------------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/MockKibbleZurekSampler.py b/MockKibbleZurekSampler.py index 9511ee7..05cab89 100644 --- a/MockKibbleZurekSampler.py +++ b/MockKibbleZurekSampler.py @@ -1,3 +1,5 @@ +import numpy as np + from dimod import SampleSet from dwave.samplers import SimulatedAnnealingSampler from dwave.system.temperatures import fluxbias_to_h @@ -5,39 +7,37 @@ class MockKibbleZurekSampler(MockDWaveSampler): + """ Perform a quench (fixed beta = 1/temperature) evolution. + + The MockSampler is configured to use standard Markov Chain Monte Carlo + with Gibbs acceptance criteria from a random initial condition. + Defects diffuse (power law 1/2) and eliminate, but are also + created by thermal excitations. We will seek to take a limit of high + coupling strength where thermal excitations are removed, leaving only the + diffusion. 
+ """ + def __init__( self, - nodelist=None, - edgelist=None, - properties=None, - broken_nodes=None, - broken_edges=None, topology_type='pegasus', topology_shape=[16], - parameter_warnings=True, - substitute_sampler=None, - substitute_kwargs=None, - exact_solver_cutoff=0, + kink_density_limit_absJ1=0.04 ): - if substitute_sampler is None: - substitute_sampler = SimulatedAnnealingSampler() - if substitute_kwargs is None: - substitute_kwargs = {'beta_range': [100, 100], # Quench - 'beta_schedule_type': 'geometric', - 'randomize_order': True, - 'proposal_acceptance_criteria': 'Gibbs'} + substitute_sampler = SimulatedAnnealingSampler() + # At equilibrium = (t^{L-1} + t)/(1 + t^L), t = -tanh(beta J) + # At large time (equilibrium) for long chains + # lessthansimilarto t, + # At J=-1 we want a kink density to bottom out. Therefore: + beta = np.atanh(1 - 2*kink_density_limit_absJ1) + substitute_kwargs = {'beta_range': [beta, beta], # Quench + 'randomize_order': True, + 'num_reads': 1000, + 'proposal_acceptance_criteria': 'Gibbs'} super().__init__( - nodelist=nodelist, - edgelist=edgelist, - properties=properties, - broken_nodes=broken_nodes, - broken_edges=broken_edges, topology_type=topology_type, topology_shape=topology_shape, - parameter_warnings=parameter_warnings, substitute_sampler=substitute_sampler, substitute_kwargs=substitute_kwargs, - exact_solver_cutoff=exact_solver_cutoff, ) self.sampler_type = 'mock' self.mocked_parameters.add('annealing_time') @@ -47,10 +47,10 @@ def __init__( def sample(self, bqm, **kwargs): # TO DO: corrupt bqm with noise proportional to annealing_time _bqm = bqm.change_vartype('SPIN', inplace=False) - + # Extract annealing_time from kwargs (if provided) annealing_time = kwargs.pop('annealing_time', 20) # 20us default. - num_sweeps = int(annealing_time * 1000) # 1000 sweeps per microsecond + num_sweeps = int(annealing_time * 3000) # 3000 sweeps per microsecond # Extract flux biases from kwargs (if provided) # flux_biases = kwargs.pop('flux_biases', {}) # flux_to_h_factor = fluxbias_to_h() From 897c3c519f71504e7310ef0d8d167f59dbb62240 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 26 Nov 2024 11:28:31 -0800 Subject: [PATCH 027/170] Changed quench time input box to drop down menue --- helpers/layouts_components.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/helpers/layouts_components.py b/helpers/layouts_components.py index 0e28cc4..d53818d 100644 --- a/helpers/layouts_components.py +++ b/helpers/layouts_components.py @@ -22,13 +22,18 @@ ring_lengths = [512, 1024, 2048] -config_anneal_duration = Input( +config_anneal_duration = dcc.Dropdown( id='anneal_duration', - type='number', - min=5, - max=100, - step=1, - value=7, + options=[ + {'label': '5 µs', 'value': 5}, + {'label': '10 µs', 'value': 10}, + {'label': '20 µs', 'value': 20}, + {'label': '40 µs', 'value': 40}, + {'label': '80 µs', 'value': 80}, + {'label': '160 µs', 'value': 160}, + {'label': '320 µs', 'value': 320}, + ], + value=5, # default value style={'max-width': '95%'} ) From b71e54f37b05676594ac08ae4e1360464b43d377 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 26 Nov 2024 11:28:55 -0800 Subject: [PATCH 028/170] Adjust plotting function to work with new quenchtime dropdown menue --- app.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/app.py b/app.py index 6db9811..a072ca8 100644 --- a/app.py +++ b/app.py @@ -263,8 +263,8 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): 
Input('quench_schedule_filename', 'children'), Input('job_submit_state', 'children'), State('job_id', 'children'), - State('anneal_duration', 'min'), - State('anneal_duration', 'max'), + # State('anneal_duration', 'min'), + # State('anneal_duration', 'max'), State('anneal_duration', 'value'), State('spins', 'value'), State('embeddings_cached', 'data'), @@ -274,7 +274,7 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): State('kink_density_data', 'data'), ) def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ - job_submit_state, job_id, ta_min, ta_max, ta, \ + job_submit_state, job_id, ta, \ spins, embeddings_cached, figure, coupling_data, zne_estimates, kink_density_data): """Generate graphics for kink density based on theory and QPU samples.""" @@ -282,6 +282,9 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ if trigger_id in ['kz_graph_display', 'coupling_strength', 'quench_schedule_filename'] : + ta_min = 2 + ta_max = 350 + # Use global J fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J_baseline, schedule_filename) From 4d4165af2025d1107f137be89b578d4e6ee1f803 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 26 Nov 2024 11:58:21 -0800 Subject: [PATCH 029/170] Attempted to default qpu dropdown to mock_dwave_sampler, encountered error when sampling --- app.py | 1 + helpers/layouts_components.py | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/app.py b/app.py index a072ca8..f4d201e 100644 --- a/app.py +++ b/app.py @@ -65,6 +65,7 @@ if not client: client = 'dummy' + # Dashboard-organization section app.layout = dbc.Container([ dbc.Row([ # Top: logo diff --git a/helpers/layouts_components.py b/helpers/layouts_components.py index d53818d..87e0c49 100644 --- a/helpers/layouts_components.py +++ b/helpers/layouts_components.py @@ -110,11 +110,14 @@ ), ]) -def config_qpu_selection(solvers): + +def config_qpu_selection(solvers, default='mock_dwave_solver'): + default = 'mock_dwave_solver' if 'mock_dwave_solver' in solvers else None return Dropdown( id='qpu_selection', options=[{'label': qpu_name, 'value': qpu_name} for qpu_name in solvers], - placeholder='Select a quantum computer' + placeholder='Select a quantum computer', + #value=default ) job_bar_display = { From 6014611e8c14f1ff179531f3e039b8aaf534baab Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 26 Nov 2024 14:16:17 -0800 Subject: [PATCH 030/170] Removed previous zero noise point when generating new ones --- app.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/app.py b/app.py index f4d201e..d4d5316 100644 --- a/app.py +++ b/app.py @@ -341,6 +341,8 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ # Remove existing fitting curve traces to prevent duplication fig.data = [trace for trace in fig.data if trace.name != 'Fitting Curve'] + # Remove existing ZNE Estimate traces to prevent duplication + fig.data = [trace for trace in fig.data if trace.name != 'ZNE Estimate'] # Add the new fitting curve fit_trace = go.Scatter( From f8812bc2adf23c068a758ecc0fe549b71986d56e Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 26 Nov 2024 14:58:57 -0800 Subject: [PATCH 031/170] Fixed the bug of changing anneal duration produce same plot --- app.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app.py b/app.py index d4d5316..b4f8129 100644 --- a/app.py +++ b/app.py @@ -263,7 +263,7 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): 
State('coupling_strength', 'value'), # previously input Input('quench_schedule_filename', 'children'), Input('job_submit_state', 'children'), - State('job_id', 'children'), + Input('job_id', 'children'), # State('anneal_duration', 'min'), # State('anneal_duration', 'max'), State('anneal_duration', 'value'), From 738c00b278c8096badb46f3898bb417bb4a00e4b Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 26 Nov 2024 15:15:52 -0800 Subject: [PATCH 032/170] Retrieve previously stored coupling strength data point along with ZNE point --- app.py | 39 +++++++++++++++++++++++++++++++++++---- 1 file changed, 35 insertions(+), 4 deletions(-) diff --git a/app.py b/app.py index b4f8129..4815ae5 100644 --- a/app.py +++ b/app.py @@ -290,10 +290,41 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J_baseline, schedule_filename) # reset couplingd ata storage if other plot are displayed - if kz_graph_display != 'coupling': - coupling_data = {} - zne_estimates = {} - + # if kz_graph_display != 'coupling': + # coupling_data = {} + # zne_estimates = {} + # Add stored data points back to the figure + if kz_graph_display != 'schedule': + if kz_graph_display == 'coupling': + # Plot data points from 'coupling_data' + for ta_str, data_points in coupling_data.items(): + for point in data_points: + kappa = point['kappa'] + kink_density = point['kink_density'] + fig.add_trace( + go.Scatter( + x=[kappa], + y=[kink_density], + xaxis='x3', + yaxis='y1', + showlegend=False, + marker=dict(size=10, color='black', symbol='x') + ) + ) + # Plot ZNE estimates + for ta_str, a in zne_estimates.items(): + fig.add_trace( + go.Scatter( + x=[0], + y=[a], + mode='markers', + name='ZNE Estimate', + marker=dict(size=12, color='purple', symbol='diamond'), + showlegend=False, + xaxis='x3', + yaxis='y1', + ) + ) return fig, coupling_data, zne_estimates, kink_density_data if trigger_id == 'job_submit_state': From 0771a663ef0bfd7cab0f6424f4d3b21a938b3b68 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 26 Nov 2024 15:54:17 -0800 Subject: [PATCH 033/170] Added consistent loading data between kink_density and coupling strength plot --- app.py | 103 ++++++++++++++++++++++++++++++++++----------------------- 1 file changed, 62 insertions(+), 41 deletions(-) diff --git a/app.py b/app.py index 4815ae5..0061674 100644 --- a/app.py +++ b/app.py @@ -102,7 +102,6 @@ ]), # store coupling data points dcc.Store(id='coupling_data', data={}), - dcc.Store(id='kink_density_data', data={}), # store zero noise extrapolation dcc.Store(id='zne_estimates', data={}), ], @@ -258,7 +257,6 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): Output('sample_vs_theory', 'figure'), Output('coupling_data', 'data'), # store data using dcc Output('zne_estimates', 'data'), # update zne_estimates - Output('kink_density_data', 'data'), # update kink density data Input('kz_graph_display', 'value'), State('coupling_strength', 'value'), # previously input Input('quench_schedule_filename', 'children'), @@ -272,11 +270,10 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): State('sample_vs_theory', 'figure'), State('coupling_data', 'data'), # access previously stored data State('zne_estimates', 'data'), # Access ZNE estimates - State('kink_density_data', 'data'), ) def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ job_submit_state, job_id, ta, \ - spins, embeddings_cached, figure, coupling_data, 
zne_estimates, kink_density_data): + spins, embeddings_cached, figure, coupling_data, zne_estimates): """Generate graphics for kink density based on theory and QPU samples.""" trigger_id = dash.callback_context.triggered[0]['prop_id'].split('.')[0] @@ -325,7 +322,36 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ yaxis='y1', ) ) - return fig, coupling_data, zne_estimates, kink_density_data + + if kz_graph_display == 'kink_density': + for ta_str, data_points in coupling_data.items(): + for point in data_points: + kink_density = point['kink_density'] + fig.add_trace( + go.Scatter( + x=[ta_str], + y=[kink_density], + xaxis='x1', + yaxis='y1', + showlegend=False, + marker=dict(size=10, color='black', symbol='x') + ) + ) + # Plot ZNE estimates + for ta_str, a in zne_estimates.items(): + fig.add_trace( + go.Scatter( + x=[ta_str], + y=[a], + mode='markers', + name='ZNE Estimate', + marker=dict(size=12, color='purple', symbol='diamond'), + showlegend=False, + xaxis='x1', + yaxis='y1', + ) + ) + return fig, coupling_data, zne_estimates if trigger_id == 'job_submit_state': @@ -349,7 +375,6 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ # Append the new data point coupling_data[ta_str].append({'kappa': kappa, 'kink_density': kink_density}) - # Check if more than two data points exist for this anneal_time if len(coupling_data[ta_str]) > 2: # Perform a polynomial fit (e.g., linear) @@ -403,52 +428,48 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ fig.add_trace(zne_trace) - elif kz_graph_display == 'kink_density': - # Initialize the list for this anneal_time if not present - ta_str = str(ta) - if ta_str not in kink_density_data: - kink_density_data[ta_str] = [] + # elif kz_graph_display == 'kink_density': + # # Initialize the list for this anneal_time if not present + # ta_str = str(ta) - # Append the new data point - kink_density_data[ta_str].append({'ta': ta, 'kink_density': kink_density}) - ta_str = str(ta) - # Check if more than two data points exist for this anneal_time - if len(kink_density_data[ta_str]) > 2: - # Perform a polynomial fit (e.g., linear) - data_points = kink_density_data[ta_str] - x = np.array([point['ta'] for point in data_points]) - y = np.array([point['kink_density'] for point in data_points]) - coeffs = Polynomial.fit(x, y, deg=1).convert().coef - p = Polynomial(coeffs) + + # # Check if more than two data points exist for this anneal_time + # if len(kink_density_data[ta_str]) > 2: + # # Perform a polynomial fit (e.g., linear) + # data_points = kink_density_data[ta_str] + # x = np.array([point['ta'] for point in data_points]) + # y = np.array([point['kink_density'] for point in data_points]) + # coeffs = Polynomial.fit(x, y, deg=1).convert().coef + # p = Polynomial(coeffs) - a = p(0) # p(kappa=0) = a + b*0 = a - zne_estimates[ta_str] = a - # Generate fit curve points - x_fit = np.linspace(min(x), max(x), 100) - y_fit = p(x_fit) + # a = p(0) # p(kappa=0) = a + b*0 = a + # zne_estimates[ta_str] = a + # # Generate fit curve points + # x_fit = np.linspace(min(x), max(x), 100) + # y_fit = p(x_fit) - # Add the ZNE point at kappa=0 - zne_trace = go.Scatter( - x=[0], - y=[a], - mode='markers', - name='ZNE Estimate', - marker=dict(size=12, color='purple', symbol='diamond'), - xaxis='x1', - yaxis='y1', - showlegend=False, - ) + # # Add the ZNE point at kappa=0 + # zne_trace = go.Scatter( + # x=[0], + # y=[a], + # mode='markers', + # name='ZNE Estimate', + # marker=dict(size=12, color='purple', 
symbol='diamond'), + # xaxis='x1', + # yaxis='y1', + # showlegend=False, + # ) - fig.add_trace(zne_trace) + # fig.add_trace(zne_trace) - return fig, coupling_data, zne_estimates, kink_density_data + return fig, coupling_data, zne_estimates else: return dash.no_update # use global J value fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J_baseline, schedule_filename) - return fig, coupling_data, zne_estimates, kink_density + return fig, coupling_data, zne_estimates @app.callback( Output('spin_orientation', 'figure'), From 96e1bdbf764b6f931f1ea31b97c83da797b6b82d Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Wed, 27 Nov 2024 11:27:51 -0800 Subject: [PATCH 034/170] Changed dropdown menue to nanoseconds --- helpers/layouts_components.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/helpers/layouts_components.py b/helpers/layouts_components.py index 87e0c49..f8a4c24 100644 --- a/helpers/layouts_components.py +++ b/helpers/layouts_components.py @@ -25,13 +25,13 @@ config_anneal_duration = dcc.Dropdown( id='anneal_duration', options=[ - {'label': '5 µs', 'value': 5}, - {'label': '10 µs', 'value': 10}, - {'label': '20 µs', 'value': 20}, - {'label': '40 µs', 'value': 40}, - {'label': '80 µs', 'value': 80}, - {'label': '160 µs', 'value': 160}, - {'label': '320 µs', 'value': 320}, + {'label': '5 ns', 'value': 5}, + {'label': '10 ns', 'value': 10}, + {'label': '20 ns', 'value': 20}, + {'label': '40 ns', 'value': 40}, + {'label': '80 ns', 'value': 80}, + {'label': '160 ns', 'value': 160}, + {'label': '320 ns', 'value': 320}, ], value=5, # default value style={'max-width': '95%'} From 78bc36d251b46f720fde528aa4c38b78b646f920 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Wed, 27 Nov 2024 18:24:54 -0800 Subject: [PATCH 035/170] Defined global color theme, refactored ploting data code into backgrounds --- app.py | 82 +++++++++--------------------------------------- helpers/plots.py | 77 ++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 84 insertions(+), 75 deletions(-) diff --git a/app.py b/app.py index 0061674..bbb1a3b 100644 --- a/app.py +++ b/app.py @@ -23,6 +23,7 @@ from dash import dcc from collections import defaultdict from numpy.polynomial.polynomial import Polynomial +import plotly.express as px import dimod from dwave.cloud import Client @@ -48,6 +49,16 @@ # global variable for a default J value J_baseline = -1.8 +color_theme = { + 5.: '#1B5E20', # Dark Green + 10.: '#0D47A1', # Dark Blue + 20.: '#B71C1C', # Dark Red + 40.: '#004D40', # Teal Green + 80.: '#283593', # Indigo + 160.: '#880E4F', # Maroon + 320.: '#2E7D32', # Forest Green +} + # Initialize: available QPUs, initial progress-bar status try: client = Client.from_config(client='qpu') @@ -284,73 +295,8 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ ta_max = 350 # Use global J - fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J_baseline, schedule_filename) - - # reset couplingd ata storage if other plot are displayed - # if kz_graph_display != 'coupling': - # coupling_data = {} - # zne_estimates = {} - # Add stored data points back to the figure - if kz_graph_display != 'schedule': - if kz_graph_display == 'coupling': - # Plot data points from 'coupling_data' - for ta_str, data_points in coupling_data.items(): - for point in data_points: - kappa = point['kappa'] - kink_density = point['kink_density'] - fig.add_trace( - go.Scatter( - x=[kappa], - y=[kink_density], - xaxis='x3', - yaxis='y1', - showlegend=False, - 
marker=dict(size=10, color='black', symbol='x') - ) - ) - # Plot ZNE estimates - for ta_str, a in zne_estimates.items(): - fig.add_trace( - go.Scatter( - x=[0], - y=[a], - mode='markers', - name='ZNE Estimate', - marker=dict(size=12, color='purple', symbol='diamond'), - showlegend=False, - xaxis='x3', - yaxis='y1', - ) - ) - - if kz_graph_display == 'kink_density': - for ta_str, data_points in coupling_data.items(): - for point in data_points: - kink_density = point['kink_density'] - fig.add_trace( - go.Scatter( - x=[ta_str], - y=[kink_density], - xaxis='x1', - yaxis='y1', - showlegend=False, - marker=dict(size=10, color='black', symbol='x') - ) - ) - # Plot ZNE estimates - for ta_str, a in zne_estimates.items(): - fig.add_trace( - go.Scatter( - x=[ta_str], - y=[a], - mode='markers', - name='ZNE Estimate', - marker=dict(size=12, color='purple', symbol='diamond'), - showlegend=False, - xaxis='x1', - yaxis='y1', - ) - ) + fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J_baseline, schedule_filename, coupling_data, zne_estimates, color_theme) + return fig, coupling_data, zne_estimates if trigger_id == 'job_submit_state': @@ -392,7 +338,7 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ zne_estimates[ta_str] = a # Generate fit curve points - x_fit = np.linspace(min(x), max(x), 100) + x_fit = np.linspace(0, max(x), 100) y_fit = p(x_fit) # Remove existing fitting curve traces to prevent duplication diff --git a/helpers/plots.py b/helpers/plots.py index 0b997c0..3929f95 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -16,13 +16,12 @@ import numpy as np import pandas as pd import plotly.graph_objects as go -from numpy.polynomial.polynomial import Polynomial from helpers.kz_calcs import theoretical_kink_density -__all__ = ['plot_kink_densities_bg', 'plot_kink_density', 'plot_spin_orientation', ] - -def plot_kink_densities_bg(display, time_range, coupling_strength, schedule_name): +__all__ = ['plot_kink_densities_bg', 'plot_kink_density', 'plot_spin_orientation'] + +def plot_kink_densities_bg(display, time_range, coupling_strength, schedule_name, coupling_data, zne_estimates, color_theme): """ Plot background of theoretical kink-density and QPU energy scales. 
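+
+    ``coupling_data`` and ``zne_estimates`` are the cached per-anneal-time
+    sample points and zero-noise extrapolations that get re-drawn onto the
+    background; ``color_theme`` maps each anneal time to a marker color.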
@@ -137,16 +136,47 @@ def plot_kink_densities_bg(display, time_range, coupling_strength, schedule_name x_axis3 = dict( title='kappa', - type='log', ) if display == 'kink_density': - fig_layout = go.Layout( xaxis=x_axis1, yaxis=y_axis1, ) + fig_data = [] + for ta_str, data_points in coupling_data.items(): + ta_value = float(ta_str) + color = color_theme[ta_value] + for point in data_points: + kink_density = point['kink_density'] + fig_data.append( + go.Scatter( + x=[ta_str], + y=[kink_density], + xaxis='x1', + yaxis='y1', + showlegend=False, + marker=dict(size=10, color=color, symbol='x') + + ) + ) + # Plot ZNE estimates + for ta_str, a in zne_estimates.items(): + fig_data.append( + go.Scatter( + x=[ta_str], + y=[a], + mode='markers', + name='ZNE Estimate', + marker=dict(size=12, color='purple', symbol='diamond'), + showlegend=False, + xaxis='x1', + yaxis='y1', + + ) + ) + - fig_data = [predicted_plus, predicted_minus] + fig_data.extend([predicted_plus, predicted_minus]) elif display == 'schedule': @@ -164,6 +194,39 @@ def plot_kink_densities_bg(display, time_range, coupling_strength, schedule_name ) fig_data = [] + + # Plot data points from 'coupling_data' + for ta_str, data_points in coupling_data.items(): + ta_value = float(ta_str) + color = color_theme[ta_value] + for point in data_points: + kappa = point['kappa'] + kink_density = point['kink_density'] + fig_data.append( + go.Scatter( + x=[kappa], + y=[kink_density], + xaxis='x3', + yaxis='y1', + showlegend=False, + marker=dict(size=10, color=color, symbol='x') + ) + ) + # Plot ZNE estimates + for ta_str, a in zne_estimates.items(): + fig_data.append( + go.Scatter( + x=[0], + y=[a], + mode='markers', + name='ZNE Estimate', + marker=dict(size=12, color='purple', symbol='diamond'), + showlegend=False, + xaxis='x3', + yaxis='y1', + ) + ) + else: # Display both plots together x_axis2.update({'overlaying': 'x1'}) From 5a2f29d3df1b8270bdd46a910d630b17ba5ca12d Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Wed, 27 Nov 2024 19:02:28 -0800 Subject: [PATCH 036/170] Removed unused comment --- app.py | 40 ---------------------------------------- 1 file changed, 40 deletions(-) diff --git a/app.py b/app.py index bbb1a3b..75df4d3 100644 --- a/app.py +++ b/app.py @@ -220,12 +220,6 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): emb = {u: [v] for u, v in emb.items()} # Wrap target nodes in lists embeddings_cached[spins] = emb # Store embedding in cache return embeddings_cached, [spins] - # filename = [file for file in os.listdir('helpers') if - # '.json' in file and 'emb_' in file][0] - # with open(f'helpers/{filename}', 'r') as fp: - # embeddings_cached = json.load(fp) - # embeddings_cached = json_to_dict(embeddings_cached) - # return embeddings_cached, list() embeddings_cached = {} # Wipe out previous QPU's embeddings @@ -373,40 +367,6 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ ) fig.add_trace(zne_trace) - - # elif kz_graph_display == 'kink_density': - # # Initialize the list for this anneal_time if not present - # ta_str = str(ta) - - - # # Check if more than two data points exist for this anneal_time - # if len(kink_density_data[ta_str]) > 2: - # # Perform a polynomial fit (e.g., linear) - # data_points = kink_density_data[ta_str] - # x = np.array([point['ta'] for point in data_points]) - # y = np.array([point['kink_density'] for point in data_points]) - # coeffs = Polynomial.fit(x, y, deg=1).convert().coef - # p = Polynomial(coeffs) - - # a = p(0) # p(kappa=0) = a + b*0 = a - # 
zne_estimates[ta_str] = a - # # Generate fit curve points - # x_fit = np.linspace(min(x), max(x), 100) - # y_fit = p(x_fit) - - # # Add the ZNE point at kappa=0 - # zne_trace = go.Scatter( - # x=[0], - # y=[a], - # mode='markers', - # name='ZNE Estimate', - # marker=dict(size=12, color='purple', symbol='diamond'), - # xaxis='x1', - # yaxis='y1', - # showlegend=False, - # ) - - # fig.add_trace(zne_trace) return fig, coupling_data, zne_estimates From 59ac362d70407f5223b0760ce4c48a4f218f5dab Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Wed, 27 Nov 2024 19:45:03 -0800 Subject: [PATCH 037/170] refactord add zne code --- app.py | 53 +++++++++++++++++++++++++++-------------------------- 1 file changed, 27 insertions(+), 26 deletions(-) diff --git a/app.py b/app.py index 75df4d3..2fd6b85 100644 --- a/app.py +++ b/app.py @@ -312,7 +312,7 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ ta_str = str(ta) if ta_str not in coupling_data: coupling_data[ta_str] = [] - + # Append the new data point coupling_data[ta_str].append({'kappa': kappa, 'kink_density': kink_density}) # Check if more than two data points exist for this anneal_time @@ -341,33 +341,34 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ fig.data = [trace for trace in fig.data if trace.name != 'ZNE Estimate'] # Add the new fitting curve - fit_trace = go.Scatter( - x=x_fit, - y=y_fit, - mode='lines', - name='Fitting Curve', - line=dict(color='green', dash='dash'), - showlegend=True, - xaxis='x3', - yaxis='y1', - ) - - fig.add_trace(fit_trace) - - # Add the ZNE point at kappa=0 - zne_trace = go.Scatter( - x=[0], - y=[a], - mode='markers', - name='ZNE Estimate', - marker=dict(size=12, color='purple', symbol='diamond'), - showlegend=False, - xaxis='x3', - yaxis='y1', + fig.add_trace( + go.Scatter( + x=x_fit, + y=y_fit, + mode='lines', + name='Fitting Curve', + line=dict(color='green', dash='dash'), + showlegend=True, + xaxis='x3', + yaxis='y1', + ) ) - fig.add_trace(zne_trace) - + for ta_str, a in zne_estimates.items(): + fig.add_trace( + # Add the ZNE point at kappa=0 + go.Scatter( + x=[0], + y=[a], + mode='markers', + name='ZNE Estimate', + marker=dict(size=12, color='purple', symbol='diamond'), + showlegend=False, + xaxis='x3', + yaxis='y1', + ) + ) + return fig, coupling_data, zne_estimates else: From fe666e73a91c3473a65818f20488e1a463b73433 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Wed, 27 Nov 2024 19:47:46 -0800 Subject: [PATCH 038/170] minor fix to kink density plots --- helpers/plots.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/helpers/plots.py b/helpers/plots.py index 3929f95..ee21eef 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -142,7 +142,7 @@ def plot_kink_densities_bg(display, time_range, coupling_strength, schedule_name xaxis=x_axis1, yaxis=y_axis1, ) - fig_data = [] + fig_data = [predicted_plus, predicted_minus] for ta_str, data_points in coupling_data.items(): ta_value = float(ta_str) color = color_theme[ta_value] @@ -174,9 +174,6 @@ def plot_kink_densities_bg(display, time_range, coupling_strength, schedule_name ) ) - - - fig_data.extend([predicted_plus, predicted_minus]) elif display == 'schedule': From eea7bc37547807b746429569dfb5178269b2cdbc Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Wed, 27 Nov 2024 20:02:55 -0800 Subject: [PATCH 039/170] dynamically update point color when plotting --- app.py | 2 +- helpers/plots.py | 9 ++++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/app.py b/app.py 
index 2fd6b85..1c224ac 100644 --- a/app.py +++ b/app.py @@ -302,7 +302,7 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ sampleset_unembedded = get_samples(client, job_id, spins, J, embeddings_cached[spins]) _, kink_density = kink_stats(sampleset_unembedded, J) - fig = plot_kink_density(kz_graph_display, figure, kink_density, ta, J) + fig = plot_kink_density(kz_graph_display, figure, kink_density, ta, J, color_theme) if kz_graph_display == 'coupling': # Calculate kappa diff --git a/helpers/plots.py b/helpers/plots.py index ee21eef..fd4a52e 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -278,7 +278,7 @@ def plot_kink_densities_bg(display, time_range, coupling_strength, schedule_name return fig -def plot_kink_density(display, fig_dict, kink_density, anneal_time, J): +def plot_kink_density(display, fig_dict, kink_density, anneal_time, J, color_theme): """Add kink density from QPU samples to plot. Args: @@ -301,6 +301,9 @@ def plot_kink_density(display, fig_dict, kink_density, anneal_time, J): fig_dict ) + ta_value = float(anneal_time) + color = color_theme[ta_value] + if display == 'coupling': kappa = -1.8/J @@ -312,7 +315,7 @@ def plot_kink_density(display, fig_dict, kink_density, anneal_time, J): yaxis='y1', showlegend=False, marker=dict(size=10, - color='black', + color=color, symbol='x', ) ) @@ -327,7 +330,7 @@ def plot_kink_density(display, fig_dict, kink_density, anneal_time, J): yaxis='y1', showlegend=False, marker=dict(size=10, - color='black', + color=color, symbol='x', ) ) From 3199bb38b9777198542ca27ec4c250a86cf407e1 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Wed, 27 Nov 2024 20:17:26 -0800 Subject: [PATCH 040/170] Changed anneal_time from state to input on plotting function for immediate access --- app.py | 12 ++++++------ helpers/plots.py | 1 + 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/app.py b/app.py index 1c224ac..b031433 100644 --- a/app.py +++ b/app.py @@ -269,7 +269,7 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): Input('job_id', 'children'), # State('anneal_duration', 'min'), # State('anneal_duration', 'max'), - State('anneal_duration', 'value'), + Input('anneal_duration', 'value'), State('spins', 'value'), State('embeddings_cached', 'data'), State('sample_vs_theory', 'figure'), @@ -282,13 +282,11 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ """Generate graphics for kink density based on theory and QPU samples.""" trigger_id = dash.callback_context.triggered[0]['prop_id'].split('.')[0] + ta_min = 2 + ta_max = 350 if trigger_id in ['kz_graph_display', 'coupling_strength', 'quench_schedule_filename'] : - - ta_min = 2 - ta_max = 350 - # Use global J fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J_baseline, schedule_filename, coupling_data, zne_estimates, color_theme) return fig, coupling_data, zne_estimates @@ -310,6 +308,7 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ # Initialize the list for this anneal_time if not present ta_str = str(ta) + print(ta_str) if ta_str not in coupling_data: coupling_data[ta_str] = [] @@ -355,6 +354,7 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ ) for ta_str, a in zne_estimates.items(): + #print(f'anneal time : {ta_str}, a: {a}') fig.add_trace( # Add the ZNE point at kappa=0 go.Scatter( @@ -375,7 +375,7 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ return dash.no_update # use global J value - fig = 
plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J_baseline, schedule_filename) + fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J_baseline, schedule_filename, coupling_data, zne_estimates, color_theme) return fig, coupling_data, zne_estimates @app.callback( diff --git a/helpers/plots.py b/helpers/plots.py index fd4a52e..05b46fc 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -161,6 +161,7 @@ def plot_kink_densities_bg(display, time_range, coupling_strength, schedule_name ) # Plot ZNE estimates for ta_str, a in zne_estimates.items(): + print(f'anneal time : {ta_str}, a: {a}') fig_data.append( go.Scatter( x=[ta_str], From 30d7b7cb5e44e4de742259ea656dd6eaf7d5ec0f Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Wed, 27 Nov 2024 20:20:43 -0800 Subject: [PATCH 041/170] Revert "Changed anneal_time from state to input on plotting function for immediate access" This reverts commit 3199bb38b9777198542ca27ec4c250a86cf407e1. --- app.py | 12 ++++++------ helpers/plots.py | 1 - 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/app.py b/app.py index b031433..1c224ac 100644 --- a/app.py +++ b/app.py @@ -269,7 +269,7 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): Input('job_id', 'children'), # State('anneal_duration', 'min'), # State('anneal_duration', 'max'), - Input('anneal_duration', 'value'), + State('anneal_duration', 'value'), State('spins', 'value'), State('embeddings_cached', 'data'), State('sample_vs_theory', 'figure'), @@ -282,11 +282,13 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ """Generate graphics for kink density based on theory and QPU samples.""" trigger_id = dash.callback_context.triggered[0]['prop_id'].split('.')[0] - ta_min = 2 - ta_max = 350 if trigger_id in ['kz_graph_display', 'coupling_strength', 'quench_schedule_filename'] : + + ta_min = 2 + ta_max = 350 + # Use global J fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J_baseline, schedule_filename, coupling_data, zne_estimates, color_theme) return fig, coupling_data, zne_estimates @@ -308,7 +310,6 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ # Initialize the list for this anneal_time if not present ta_str = str(ta) - print(ta_str) if ta_str not in coupling_data: coupling_data[ta_str] = [] @@ -354,7 +355,6 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ ) for ta_str, a in zne_estimates.items(): - #print(f'anneal time : {ta_str}, a: {a}') fig.add_trace( # Add the ZNE point at kappa=0 go.Scatter( @@ -375,7 +375,7 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ return dash.no_update # use global J value - fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J_baseline, schedule_filename, coupling_data, zne_estimates, color_theme) + fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J_baseline, schedule_filename) return fig, coupling_data, zne_estimates @app.callback( diff --git a/helpers/plots.py b/helpers/plots.py index 05b46fc..fd4a52e 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -161,7 +161,6 @@ def plot_kink_densities_bg(display, time_range, coupling_strength, schedule_name ) # Plot ZNE estimates for ta_str, a in zne_estimates.items(): - print(f'anneal time : {ta_str}, a: {a}') fig_data.append( go.Scatter( x=[ta_str], From 3a2ef86e88aa988949dda24ed50e107d15d74cb4 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Wed, 27 Nov 2024 20:25:20 -0800 Subject: [PATCH 042/170] save 
changes in tests --- tests/test_mock_kz_sampler.py | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/tests/test_mock_kz_sampler.py b/tests/test_mock_kz_sampler.py index 5c6aa66..15aea74 100644 --- a/tests/test_mock_kz_sampler.py +++ b/tests/test_mock_kz_sampler.py @@ -59,15 +59,15 @@ def sample_bqm(): def test_initialization(default_sampler, custom_sampler): - assert default_sampler.topology_type == 'pegasus' - assert default_sampler.topology_shape == [16] - assert isinstance(default_sampler.substitute_sampler, SimulatedAnnealingSampler) - assert default_sampler.substitute_kwargs['beta_range'] == [0, 3] - assert default_sampler.substitute_kwargs['beta_schedule_type'] == 'linear' - assert default_sampler.substitute_kwargs['num_sweeps'] == 100 - assert default_sampler.substitute_kwargs['randomize_order'] is True - assert default_sampler.substitute_kwargs['proposal_acceptance_criteria'] == 'Gibbs' - assert default_sampler.sampler_type == 'mock' + # #assert default_sampler.topology_type == 'pegasus' + # #assert default_sampler.topology_shape == [16] + # assert isinstance(default_sampler.substitute_sampler, SimulatedAnnealingSampler) + # #assert default_sampler.substitute_kwargs['beta_range'] == [0, 3] + # assert default_sampler.substitute_kwargs['beta_schedule_type'] == 'linear' + # assert default_sampler.substitute_kwargs['num_sweeps'] == 100 + # assert default_sampler.substitute_kwargs['randomize_order'] is True + # assert default_sampler.substitute_kwargs['proposal_acceptance_criteria'] == 'Gibbs' + # assert default_sampler.sampler_type == 'mock' assert custom_sampler.topology_type == 'chimera' assert custom_sampler.topology_shape == [4, 4, 4] @@ -80,12 +80,12 @@ def test_initialization(default_sampler, custom_sampler): -def test_sample_with_default_annealing_time(default_sampler, sample_bqm): - sampleset = default_sampler.sample(sample_bqm) +# def test_sample_with_default_annealing_time(default_sampler, sample_bqm): +# sampleset = default_sampler.sample(sample_bqm) - # default anneal _time should be 20 - expected_num_sweeps = int(20 * 1000) - assert default_sampler.== expected_num_sweeps +# # default anneal _time should be 20 +# expected_num_sweeps = int(20 * 1000) +# assert default_sampler.kwargs['num_sweeps']== expected_num_sweeps def test_sample_with_custom_annealing_time(default_sampler, sample_bqm): pass From 2f78c95013b9960fd92afe3061e701958c89d3e6 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Wed, 27 Nov 2024 20:55:20 -0800 Subject: [PATCH 043/170] minor fixes to state and input along with parameter adjustments --- app.py | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/app.py b/app.py index 1c224ac..9478262 100644 --- a/app.py +++ b/app.py @@ -50,13 +50,13 @@ J_baseline = -1.8 color_theme = { - 5.: '#1B5E20', # Dark Green - 10.: '#0D47A1', # Dark Blue - 20.: '#B71C1C', # Dark Red - 40.: '#004D40', # Teal Green - 80.: '#283593', # Indigo - 160.: '#880E4F', # Maroon - 320.: '#2E7D32', # Forest Green + 5: '#1F77B4', # Dark Blue + 10: '#FF7F0E', # Dark Orange + 20: '#2CA02C', # Dark Green + 40: '#D62728', # Dark Red + 80: '#9467BD', # Dark Purple + 160: '#8C564B', # Brown + 320: '#E377C2', # Dark Pink } # Initialize: available QPUs, initial progress-bar status @@ -269,7 +269,7 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): Input('job_id', 'children'), # State('anneal_duration', 'min'), # State('anneal_duration', 'max'), - State('anneal_duration', 'value'), + 
Input('anneal_duration', 'value'), State('spins', 'value'), State('embeddings_cached', 'data'), State('sample_vs_theory', 'figure'), @@ -282,13 +282,11 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ """Generate graphics for kink density based on theory and QPU samples.""" trigger_id = dash.callback_context.triggered[0]['prop_id'].split('.')[0] + ta_min = 2 + ta_max = 350 if trigger_id in ['kz_graph_display', 'coupling_strength', 'quench_schedule_filename'] : - - ta_min = 2 - ta_max = 350 - # Use global J fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J_baseline, schedule_filename, coupling_data, zne_estimates, color_theme) return fig, coupling_data, zne_estimates @@ -355,6 +353,7 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ ) for ta_str, a in zne_estimates.items(): + #print(f'anneal itme: {ta_str}, a: {a}') fig.add_trace( # Add the ZNE point at kappa=0 go.Scatter( @@ -375,7 +374,7 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ return dash.no_update # use global J value - fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J_baseline, schedule_filename) + fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J_baseline, schedule_filename, coupling_data, zne_estimates, color_theme) return fig, coupling_data, zne_estimates @app.callback( From 6316334716b9beadc0af7efd21622573f8aec9cb Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Wed, 27 Nov 2024 22:32:24 -0800 Subject: [PATCH 044/170] Plots different color for different coupling strength on kink vs anneal_time plot --- app.py | 19 ++++++++++++++----- helpers/plots.py | 19 ++++++++++--------- 2 files changed, 24 insertions(+), 14 deletions(-) diff --git a/app.py b/app.py index 9478262..becb4ab 100644 --- a/app.py +++ b/app.py @@ -49,7 +49,7 @@ # global variable for a default J value J_baseline = -1.8 -color_theme = { +ta_color_theme = { 5: '#1F77B4', # Dark Blue 10: '#FF7F0E', # Dark Orange 20: '#2CA02C', # Dark Green @@ -58,6 +58,15 @@ 160: '#8C564B', # Brown 320: '#E377C2', # Dark Pink } +coupling_color_theme = { + -1.8: '#1F77B4', # Dark Blue + -1.6: '#FF7F0E', # Dark Orange + -1.4: '#E377C2', # Dark Pink + -1.2: '#2CA02C', # Dark Green + -1: '#D62728', # Dark Red + -0.8: '#9467BD', # Dark Purple + -0.6: '#8C564B', # Brown +} # Initialize: available QPUs, initial progress-bar status try: @@ -287,7 +296,7 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ if trigger_id in ['kz_graph_display', 'coupling_strength', 'quench_schedule_filename'] : - fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J_baseline, schedule_filename, coupling_data, zne_estimates, color_theme) + fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J_baseline, schedule_filename, coupling_data, zne_estimates, ta_color_theme, coupling_color_theme) return fig, coupling_data, zne_estimates @@ -300,7 +309,7 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ sampleset_unembedded = get_samples(client, job_id, spins, J, embeddings_cached[spins]) _, kink_density = kink_stats(sampleset_unembedded, J) - fig = plot_kink_density(kz_graph_display, figure, kink_density, ta, J, color_theme) + fig = plot_kink_density(kz_graph_display, figure, kink_density, ta, J, ta_color_theme, coupling_color_theme) if kz_graph_display == 'coupling': # Calculate kappa @@ -312,7 +321,7 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ 
coupling_data[ta_str] = [] # Append the new data point - coupling_data[ta_str].append({'kappa': kappa, 'kink_density': kink_density}) + coupling_data[ta_str].append({'kappa': kappa, 'kink_density': kink_density, 'coupling_strength':J}) # Check if more than two data points exist for this anneal_time if len(coupling_data[ta_str]) > 2: # Perform a polynomial fit (e.g., linear) @@ -374,7 +383,7 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ return dash.no_update # use global J value - fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J_baseline, schedule_filename, coupling_data, zne_estimates, color_theme) + fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J_baseline, schedule_filename, coupling_data, zne_estimates, ta_color_theme, coupling_color_theme) return fig, coupling_data, zne_estimates @app.callback( diff --git a/helpers/plots.py b/helpers/plots.py index fd4a52e..b354241 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -21,7 +21,7 @@ __all__ = ['plot_kink_densities_bg', 'plot_kink_density', 'plot_spin_orientation'] -def plot_kink_densities_bg(display, time_range, coupling_strength, schedule_name, coupling_data, zne_estimates, color_theme): +def plot_kink_densities_bg(display, time_range, J_base, schedule_name, coupling_data, zne_estimates, ta_color_theme, coupling_color_theme): """ Plot background of theoretical kink-density and QPU energy scales. @@ -51,7 +51,7 @@ def plot_kink_densities_bg(display, time_range, coupling_strength, schedule_name A_joule = A_ghz/1.5092E24 B_joule = B_ghz/1.5092E24 - n = theoretical_kink_density(time_range, coupling_strength, schedule, schedule_name) + n = theoretical_kink_density(time_range, J_base, schedule, schedule_name) predicted_plus = go.Scatter( x=np.asarray(time_range), @@ -98,7 +98,7 @@ def plot_kink_densities_bg(display, time_range, coupling_strength, schedule_name energy_problem = go.Scatter( x=s, - y=abs(coupling_strength) * B_joule, + y=abs(J_base) * B_joule, mode='lines', name='B(s)', xaxis=x_axis, @@ -144,9 +144,8 @@ def plot_kink_densities_bg(display, time_range, coupling_strength, schedule_name ) fig_data = [predicted_plus, predicted_minus] for ta_str, data_points in coupling_data.items(): - ta_value = float(ta_str) - color = color_theme[ta_value] for point in data_points: + color = coupling_color_theme[point['coupling_strength']] kink_density = point['kink_density'] fig_data.append( go.Scatter( @@ -195,7 +194,7 @@ def plot_kink_densities_bg(display, time_range, coupling_strength, schedule_name # Plot data points from 'coupling_data' for ta_str, data_points in coupling_data.items(): ta_value = float(ta_str) - color = color_theme[ta_value] + color = ta_color_theme[ta_value] for point in data_points: kappa = point['kappa'] kink_density = point['kink_density'] @@ -278,7 +277,7 @@ def plot_kink_densities_bg(display, time_range, coupling_strength, schedule_name return fig -def plot_kink_density(display, fig_dict, kink_density, anneal_time, J, color_theme): +def plot_kink_density(display, fig_dict, kink_density, anneal_time, J, ta_color_theme, coupling_color_theme): """Add kink density from QPU samples to plot. 
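+
+    Markers are colored by anneal time on the coupling-strength view and by
+    coupling strength on the other views, via the two theme dicts.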
Args: @@ -302,10 +301,10 @@ def plot_kink_density(display, fig_dict, kink_density, anneal_time, J, color_the ) ta_value = float(anneal_time) - color = color_theme[ta_value] + if display == 'coupling': - + color = ta_color_theme[ta_value] kappa = -1.8/J fig.add_trace( go.Scatter( @@ -322,6 +321,8 @@ def plot_kink_density(display, fig_dict, kink_density, anneal_time, J, color_the ) return fig + + color = coupling_color_theme[J] fig.add_trace( go.Scatter( x=[anneal_time], From d726cc5f67d9feea9dfce4e2297944ccb3efe9a6 Mon Sep 17 00:00:00 2001 From: Jack Raymond <10591246+jackraymond@users.noreply.github.com> Date: Thu, 28 Nov 2024 01:05:02 -0800 Subject: [PATCH 045/170] Add suitable fitting functions for ZNE on QPU and in the Mocking regime --- app.py | 53 ++++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 46 insertions(+), 7 deletions(-) diff --git a/app.py b/app.py index becb4ab..cf7aae0 100644 --- a/app.py +++ b/app.py @@ -18,7 +18,9 @@ import datetime import json import numpy as np +import scipy import os +import warnings from dash import dcc from collections import defaultdict @@ -267,6 +269,39 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): return embeddings_cached, list(embeddings_cached.keys()) +def fitted_function(xdata, ydata, method=('polynomial', 1)): + """ """ + if type(method) is tuple and method[0] == 'polynomial': + coeffs = Polynomial.fit(xdata, ydata, deg=method[1]).convert().coef + def y_func_x(x): + return np.polyval(coeffs, x) + elif method == 'pure_quadratic': + # y = a + b x**2 + coeffs = Polynomial.fit(xdata**2, ydata, deg=1).convert().coef + def y_func_x(x): + return np.polyval(coeffs, x**2) + elif method == 'mixture_of_exponentials': + # The no thermal noise case has two sources. + # Kink-probability(T=0, t) ~ A t^{-1/2} ~ (1 - tanh(beta_eff))/2 + # Kink-probability(T, Inf) ~ (1 - tanh(beta J))/2 + # Kink-probability(T, t) ~ ? mixture of exponents + # Two independent sources: Const1 + Const2 exp(Const3*x) + # This type of function is quite difficult to fit. 
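+        # A sensible initial guess helps: the code below pins the zero-noise
+        # intercept to the smallest observed density and picks a rate so the
+        # model reaches the largest observed density near the largest kappa.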
+ def mixture_of_exponentials(x, p_0, p_1, p_2): + return p_2/2*(1 + p_1*np.exp(np.exp(p_0)*x)) + # Take p_1 = 1; p_2 = min(x); take max(y) occurs at max(x) + maxy = np.max(ydata) + maxx = np.max(xdata) + miny = np.min(ydata) + p0 = [np.log(np.log(2*maxy/miny - 1)/(maxx-1)), 1, miny] + p, _ = scipy.optimize.curve_fit( + f=mixture_of_exponentials, xdata=xdata, ydata=ydata, p0=p0) + def y_func_x(x): + return mixture_of_exponentials(x, *p) + else: + raise ValueError('Unknown method') + return y_func_x + @app.callback( Output('sample_vs_theory', 'figure'), Output('coupling_data', 'data'), # store data using dcc @@ -325,6 +360,7 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ # Check if more than two data points exist for this anneal_time if len(coupling_data[ta_str]) > 2: # Perform a polynomial fit (e.g., linear) + data_points = coupling_data[ta_str] x = np.array([point['kappa'] for point in data_points]) y = np.array([point['kink_density'] for point in data_points]) @@ -332,15 +368,18 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ # Ensure there are enough unique x values for fitting if len(np.unique(x)) > 1: # Fit a 1st degree polynomial (linear fit) - coeffs = Polynomial.fit(x, y, deg=1).convert().coef - p = Polynomial(coeffs) - - a = p(0) # p(kappa=0) = a + b*0 = a - zne_estimates[ta_str] = a - + if client is None or True: + warnings.warn('WIP: Execute for mock_sampler only, somethings wrong...') + y_func_x = fitted_function(x, y, method='mixture_of_exponentials') + else: + warnings.warn('WIP: Execute for QPU only') + # Pure quadratic # y = a + b x^2 + y_func_x = fitted_function(x, y, method='pure_quadratic') + + zne_estimates[ta_str] = y_func_x(0) # Generate fit curve points x_fit = np.linspace(0, max(x), 100) - y_fit = p(x_fit) + y_fit = y_func_x(x_fit) # Remove existing fitting curve traces to prevent duplication fig.data = [trace for trace in fig.data if trace.name != 'Fitting Curve'] From 9e6bee6e3a5d6c4401cb901ceab627597de18587 Mon Sep 17 00:00:00 2001 From: Jack Raymond <10591246+jackraymond@users.noreply.github.com> Date: Thu, 28 Nov 2024 06:32:13 -0800 Subject: [PATCH 046/170] Improve non-polynomial function of mock fit --- app.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/app.py b/app.py index cf7aae0..0f603c2 100644 --- a/app.py +++ b/app.py @@ -288,14 +288,21 @@ def y_func_x(x): # Two independent sources: Const1 + Const2 exp(Const3*x) # This type of function is quite difficult to fit. def mixture_of_exponentials(x, p_0, p_1, p_2): - return p_2/2*(1 + p_1*np.exp(np.exp(p_0)*x)) + # Strictly positive form. + # To do: Change to force saturation. 
Large x should go sigmoidally + # towards 0.5 + return np.exp(p_2)/2*(1 + np.exp(p_1 + np.exp(p_0)*x)) # Take p_1 = 1; p_2 = min(x); take max(y) occurs at max(x) maxy = np.max(ydata) maxx = np.max(xdata) miny = np.min(ydata) - p0 = [np.log(np.log(2*maxy/miny - 1)/(maxx-1)), 1, miny] - p, _ = scipy.optimize.curve_fit( - f=mixture_of_exponentials, xdata=xdata, ydata=ydata, p0=p0) + p0 = [np.log(np.log(2*maxy/miny - 1)/(maxx-1)), 0, np.log(miny)] + try: + p, _ = scipy.optimize.curve_fit( + f=mixture_of_exponentials, xdata=xdata, ydata=ydata, p0=p0) + except: + warnings.warn('Should modify to check exception is no solution') + p = p0 def y_func_x(x): return mixture_of_exponentials(x, *p) else: From d7de5dc9419d7ebc021f5c2534e86fb34c6aeea6 Mon Sep 17 00:00:00 2001 From: Jack Raymond <10591246+jackraymond@users.noreply.github.com> Date: Thu, 28 Nov 2024 07:14:08 -0800 Subject: [PATCH 047/170] Add 4-parameter function; requires testing/debugging --- app.py | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/app.py b/app.py index 0f603c2..d388695 100644 --- a/app.py +++ b/app.py @@ -305,6 +305,36 @@ def mixture_of_exponentials(x, p_0, p_1, p_2): p = p0 def y_func_x(x): return mixture_of_exponentials(x, *p) + elif method == 'sigmoidal_crossover': + # Kink-probability(T, t) ~ sigmoidal crossover. + # Better? Requires atleast 4 points! Not tested. + # Sigmoidal cross-over between two positive limits. + # This type of function is quite difficult to fit. + def sigmoidal_crossover(x, p_0, p_1, p_2, p_3): + # Strictly positive form. + # To do: Change to force saturation. Large x should go sigmoidally + # towards 0.5 + return np.exp(p_3)*(1 + np.exp(p_2)*np.tanh(np.exp(p_1)*(x - np.exp(p_0)))) + # Small lp1 << lp0, and lp0= (maxx-minxx)/2; We can linearize: + # lp3*(1 + lp2( lp1 x - lp0)) = lp0*lp2*lp3 + lp1*lp2*lp3 x # WIP + # lp2 = lp3: equal parts constant and crossover + # x=0 -> miny therefore lp0*lp2*lp3 = miny + # x=maxx -> maxy therefore (maxy - miny)/maxx = lp1*lp2*lp3 + maxy = np.max(ydata) + maxx = np.max(xdata) + miny = np.min(ydata) + lp0 = (maxx+1)/2 + lp1 = lp0/10 # Should really choose rate 1/10 to satisfy final condition. + lp2lp3 = miny/lp0 + p0 = (np.log(lp0), np.log(lp1), np.log(np.sqrt(lp2lp3)), np.log(np.sqrt(lp2lp3))) + try: + p, _ = scipy.optimize.curve_fit( + f=sigmoidal_crossover, xdata=xdata, ydata=ydata, p0=p0) + except: + warnings.warn('Should modify to check exception is no solution') + p = p0 + def y_func_x(x): + return sigmoidal_crossover(x, *p) else: raise ValueError('Unknown method') return y_func_x From eccab020dc7da9273d226157c5ea0de5b71c0ce4 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 28 Nov 2024 11:56:41 -0800 Subject: [PATCH 048/170] Added lambda function and exported to global scope --- helpers/qa.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/helpers/qa.py b/helpers/qa.py index 42dcd0f..75c7ac7 100644 --- a/helpers/qa.py +++ b/helpers/qa.py @@ -19,9 +19,13 @@ from dwave.embedding import unembed_sampleset import minorminer -__all__ = ['create_bqm', 'find_one_to_one_embedding', 'get_job_status', 'get_samples', +__all__ = ['lmbda', 'create_bqm', 'find_one_to_one_embedding', 'get_job_status', 'get_samples', 'json_to_dict', ] + +def lmbda(coupling_strength): + return -1.8/coupling_strength + def create_bqm(num_spins=512, coupling_strength=-1.4): """ Create a binary quadratic model (BQM) representing a magnetic 1D ring. 
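The ``lmbda`` helper added above is reused by the next few commits, both as the noise
scale kappa for the zero-noise extrapolation and as a multiplier on the quench duration
submitted to the solver. A minimal sketch of that rescaling (illustrative only, not part
of any patch), assuming the -1.8 baseline from app.py and the 5 ns dropdown default:

J_baseline = -1.8

def lmbda(coupling_strength):
    return J_baseline / coupling_strength

for J in (-1.8, -1.4, -1.0, -0.6):
    kappa = lmbda(J)                  # noise scale >= 1; x-coordinate of the ZNE fit
    ta_ns = 5                         # nominal quench duration selected in the dropdown
    stretched_ta = lmbda(J) * ta_ns   # value passed as annealing_time in the next commit
    print(f"J = {J:+.1f}  kappa = {kappa:.2f}  annealing_time = {stretched_ta:.1f}")
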
From eb6d08dd30c35cb9c2228736411f9a830786c0f8 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 28 Nov 2024 11:57:16 -0800 Subject: [PATCH 049/170] Changed anneal time in sample bqm to lambda(couping_strength)*anneal time --- app.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app.py b/app.py index d388695..b07aaca 100644 --- a/app.py +++ b/app.py @@ -535,7 +535,7 @@ def submit_job(job_submit_time, qpu_name, spins, J, ta_ns, embeddings_cached): computation = solver.sample_bqm( bqm=bqm_embedded, fast_anneal=True, - annealing_time=0.001*ta_ns, # SAPI anneal time units is microseconds + annealing_time=lmbda(J)*ta_ns, # Changed to lambda calculations auto_scale=False, answer_mode='raw', # Easier than accounting for num_occurrences num_reads=100, From 8509e9eb09b5f9154f2b0d72d75b34fb1ca81ca9 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 28 Nov 2024 11:58:00 -0800 Subject: [PATCH 050/170] removed unused import --- app.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/app.py b/app.py index b07aaca..208b8a5 100644 --- a/app.py +++ b/app.py @@ -23,16 +23,13 @@ import warnings from dash import dcc -from collections import defaultdict from numpy.polynomial.polynomial import Polynomial -import plotly.express as px import dimod from dwave.cloud import Client from dwave.embedding import embed_bqm, is_valid_embedding from dwave.system import DWaveSampler from MockKibbleZurekSampler import MockKibbleZurekSampler -from dwave.samplers import SimulatedAnnealingSampler from helpers.kz_calcs import * from helpers.layouts_cards import * @@ -43,7 +40,6 @@ import networkx as nx from minorminer.subgraph import find_subgraph -from plotly.subplots import make_subplots import plotly.graph_objects as go app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP]) From 0c5fb97e6713f11e6b715e1b863192aad434affe Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 28 Nov 2024 12:18:31 -0800 Subject: [PATCH 051/170] minor fixes on kappa --- app.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app.py b/app.py index 208b8a5..38eb21a 100644 --- a/app.py +++ b/app.py @@ -381,7 +381,7 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ if kz_graph_display == 'coupling': # Calculate kappa - kappa = -1.8 / J + kappa = lmbda(J) # Initialize the list for this anneal_time if not present ta_str = str(ta) From 1df819f61538c33a81ef22f806e0d6a5e83209e9 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 28 Nov 2024 12:24:22 -0800 Subject: [PATCH 052/170] Save data point to dictionary for all plots --- app.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/app.py b/app.py index 38eb21a..cee43e7 100644 --- a/app.py +++ b/app.py @@ -379,17 +379,16 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ fig = plot_kink_density(kz_graph_display, figure, kink_density, ta, J, ta_color_theme, coupling_color_theme) + # Calculate kappa + kappa = lmbda(J) + # Initialize the list for this anneal_time if not present + ta_str = str(ta) + if ta_str not in coupling_data: + coupling_data[ta_str] = [] + # Append the new data point + coupling_data[ta_str].append({'kappa': kappa, 'kink_density': kink_density, 'coupling_strength':J}) + if kz_graph_display == 'coupling': - # Calculate kappa - kappa = lmbda(J) - - # Initialize the list for this anneal_time if not present - ta_str = str(ta) - if ta_str not in coupling_data: - coupling_data[ta_str] = [] - - # Append the new data point - 
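+        # (each point records kappa, the measured kink density, and the coupling strength J)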
coupling_data[ta_str].append({'kappa': kappa, 'kink_density': kink_density, 'coupling_strength':J}) # Check if more than two data points exist for this anneal_time if len(coupling_data[ta_str]) > 2: # Perform a polynomial fit (e.g., linear) From 6e673b6663d58d6a54441b3ec48700e154db699f Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 28 Nov 2024 12:24:58 -0800 Subject: [PATCH 053/170] Changed the color points on main plot back to black --- helpers/plots.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/helpers/plots.py b/helpers/plots.py index b354241..a84c433 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -321,8 +321,11 @@ def plot_kink_density(display, fig_dict, kink_density, anneal_time, J, ta_color_ ) return fig + if display == 'kink_density': + color = coupling_color_theme[J] + else: + color = 'black' - color = coupling_color_theme[J] fig.add_trace( go.Scatter( x=[anneal_time], From f9c0dabf844ba3cb82300d1895807838ff9ffe33 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 28 Nov 2024 12:31:01 -0800 Subject: [PATCH 054/170] refactored fitting function to qa script --- app.py | 72 +-------------------------------------------------- helpers/qa.py | 72 ++++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 72 insertions(+), 72 deletions(-) diff --git a/app.py b/app.py index cee43e7..78f7068 100644 --- a/app.py +++ b/app.py @@ -264,76 +264,6 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): return dash.no_update, dash.no_update return embeddings_cached, list(embeddings_cached.keys()) - -def fitted_function(xdata, ydata, method=('polynomial', 1)): - """ """ - if type(method) is tuple and method[0] == 'polynomial': - coeffs = Polynomial.fit(xdata, ydata, deg=method[1]).convert().coef - def y_func_x(x): - return np.polyval(coeffs, x) - elif method == 'pure_quadratic': - # y = a + b x**2 - coeffs = Polynomial.fit(xdata**2, ydata, deg=1).convert().coef - def y_func_x(x): - return np.polyval(coeffs, x**2) - elif method == 'mixture_of_exponentials': - # The no thermal noise case has two sources. - # Kink-probability(T=0, t) ~ A t^{-1/2} ~ (1 - tanh(beta_eff))/2 - # Kink-probability(T, Inf) ~ (1 - tanh(beta J))/2 - # Kink-probability(T, t) ~ ? mixture of exponents - # Two independent sources: Const1 + Const2 exp(Const3*x) - # This type of function is quite difficult to fit. - def mixture_of_exponentials(x, p_0, p_1, p_2): - # Strictly positive form. - # To do: Change to force saturation. Large x should go sigmoidally - # towards 0.5 - return np.exp(p_2)/2*(1 + np.exp(p_1 + np.exp(p_0)*x)) - # Take p_1 = 1; p_2 = min(x); take max(y) occurs at max(x) - maxy = np.max(ydata) - maxx = np.max(xdata) - miny = np.min(ydata) - p0 = [np.log(np.log(2*maxy/miny - 1)/(maxx-1)), 0, np.log(miny)] - try: - p, _ = scipy.optimize.curve_fit( - f=mixture_of_exponentials, xdata=xdata, ydata=ydata, p0=p0) - except: - warnings.warn('Should modify to check exception is no solution') - p = p0 - def y_func_x(x): - return mixture_of_exponentials(x, *p) - elif method == 'sigmoidal_crossover': - # Kink-probability(T, t) ~ sigmoidal crossover. - # Better? Requires atleast 4 points! Not tested. - # Sigmoidal cross-over between two positive limits. - # This type of function is quite difficult to fit. - def sigmoidal_crossover(x, p_0, p_1, p_2, p_3): - # Strictly positive form. - # To do: Change to force saturation. 
Large x should go sigmoidally - # towards 0.5 - return np.exp(p_3)*(1 + np.exp(p_2)*np.tanh(np.exp(p_1)*(x - np.exp(p_0)))) - # Small lp1 << lp0, and lp0= (maxx-minxx)/2; We can linearize: - # lp3*(1 + lp2( lp1 x - lp0)) = lp0*lp2*lp3 + lp1*lp2*lp3 x # WIP - # lp2 = lp3: equal parts constant and crossover - # x=0 -> miny therefore lp0*lp2*lp3 = miny - # x=maxx -> maxy therefore (maxy - miny)/maxx = lp1*lp2*lp3 - maxy = np.max(ydata) - maxx = np.max(xdata) - miny = np.min(ydata) - lp0 = (maxx+1)/2 - lp1 = lp0/10 # Should really choose rate 1/10 to satisfy final condition. - lp2lp3 = miny/lp0 - p0 = (np.log(lp0), np.log(lp1), np.log(np.sqrt(lp2lp3)), np.log(np.sqrt(lp2lp3))) - try: - p, _ = scipy.optimize.curve_fit( - f=sigmoidal_crossover, xdata=xdata, ydata=ydata, p0=p0) - except: - warnings.warn('Should modify to check exception is no solution') - p = p0 - def y_func_x(x): - return sigmoidal_crossover(x, *p) - else: - raise ValueError('Unknown method') - return y_func_x @app.callback( Output('sample_vs_theory', 'figure'), @@ -387,7 +317,7 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ coupling_data[ta_str] = [] # Append the new data point coupling_data[ta_str].append({'kappa': kappa, 'kink_density': kink_density, 'coupling_strength':J}) - + if kz_graph_display == 'coupling': # Check if more than two data points exist for this anneal_time if len(coupling_data[ta_str]) > 2: diff --git a/helpers/qa.py b/helpers/qa.py index 75c7ac7..e4e883f 100644 --- a/helpers/qa.py +++ b/helpers/qa.py @@ -20,7 +20,7 @@ import minorminer __all__ = ['lmbda', 'create_bqm', 'find_one_to_one_embedding', 'get_job_status', 'get_samples', - 'json_to_dict', ] + 'json_to_dict', 'fitted_function'] def lmbda(coupling_strength): @@ -147,3 +147,73 @@ def json_to_dict(emb_json): return {int(key): {int(node): qubits for node, qubits in emb.items()} for key, emb in emb_json.items()} + +def fitted_function(xdata, ydata, method=('polynomial', 1)): + """ """ + if type(method) is tuple and method[0] == 'polynomial': + coeffs = Polynomial.fit(xdata, ydata, deg=method[1]).convert().coef + def y_func_x(x): + return np.polyval(coeffs, x) + elif method == 'pure_quadratic': + # y = a + b x**2 + coeffs = Polynomial.fit(xdata**2, ydata, deg=1).convert().coef + def y_func_x(x): + return np.polyval(coeffs, x**2) + elif method == 'mixture_of_exponentials': + # The no thermal noise case has two sources. + # Kink-probability(T=0, t) ~ A t^{-1/2} ~ (1 - tanh(beta_eff))/2 + # Kink-probability(T, Inf) ~ (1 - tanh(beta J))/2 + # Kink-probability(T, t) ~ ? mixture of exponents + # Two independent sources: Const1 + Const2 exp(Const3*x) + # This type of function is quite difficult to fit. + def mixture_of_exponentials(x, p_0, p_1, p_2): + # Strictly positive form. + # To do: Change to force saturation. Large x should go sigmoidally + # towards 0.5 + return np.exp(p_2)/2*(1 + np.exp(p_1 + np.exp(p_0)*x)) + # Take p_1 = 1; p_2 = min(x); take max(y) occurs at max(x) + maxy = np.max(ydata) + maxx = np.max(xdata) + miny = np.min(ydata) + p0 = [np.log(np.log(2*maxy/miny - 1)/(maxx-1)), 0, np.log(miny)] + try: + p, _ = scipy.optimize.curve_fit( + f=mixture_of_exponentials, xdata=xdata, ydata=ydata, p0=p0) + except: + warnings.warn('Should modify to check exception is no solution') + p = p0 + def y_func_x(x): + return mixture_of_exponentials(x, *p) + elif method == 'sigmoidal_crossover': + # Kink-probability(T, t) ~ sigmoidal crossover. + # Better? Requires atleast 4 points! Not tested. 
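+        # (All four parameters enter through exp(), so the fitted scale,
+        # step height, rate, and crossover location stay positive.)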
+ # Sigmoidal cross-over between two positive limits. + # This type of function is quite difficult to fit. + def sigmoidal_crossover(x, p_0, p_1, p_2, p_3): + # Strictly positive form. + # To do: Change to force saturation. Large x should go sigmoidally + # towards 0.5 + return np.exp(p_3)*(1 + np.exp(p_2)*np.tanh(np.exp(p_1)*(x - np.exp(p_0)))) + # Small lp1 << lp0, and lp0= (maxx-minxx)/2; We can linearize: + # lp3*(1 + lp2( lp1 x - lp0)) = lp0*lp2*lp3 + lp1*lp2*lp3 x # WIP + # lp2 = lp3: equal parts constant and crossover + # x=0 -> miny therefore lp0*lp2*lp3 = miny + # x=maxx -> maxy therefore (maxy - miny)/maxx = lp1*lp2*lp3 + maxy = np.max(ydata) + maxx = np.max(xdata) + miny = np.min(ydata) + lp0 = (maxx+1)/2 + lp1 = lp0/10 # Should really choose rate 1/10 to satisfy final condition. + lp2lp3 = miny/lp0 + p0 = (np.log(lp0), np.log(lp1), np.log(np.sqrt(lp2lp3)), np.log(np.sqrt(lp2lp3))) + try: + p, _ = scipy.optimize.curve_fit( + f=sigmoidal_crossover, xdata=xdata, ydata=ydata, p0=p0) + except: + warnings.warn('Should modify to check exception is no solution') + p = p0 + def y_func_x(x): + return sigmoidal_crossover(x, *p) + else: + raise ValueError('Unknown method') + return y_func_x \ No newline at end of file From e4ac451076fead60974f1b53825f6bd4d8d9d7ce Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 28 Nov 2024 12:44:25 -0800 Subject: [PATCH 055/170] Fixed the branches for polynomial calculation between client and mock sampler --- app.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/app.py b/app.py index 78f7068..617542c 100644 --- a/app.py +++ b/app.py @@ -18,12 +18,10 @@ import datetime import json import numpy as np -import scipy import os import warnings from dash import dcc -from numpy.polynomial.polynomial import Polynomial import dimod from dwave.cloud import Client @@ -269,13 +267,12 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): Output('sample_vs_theory', 'figure'), Output('coupling_data', 'data'), # store data using dcc Output('zne_estimates', 'data'), # update zne_estimates + Input('qpu_selection', 'value'), Input('kz_graph_display', 'value'), State('coupling_strength', 'value'), # previously input Input('quench_schedule_filename', 'children'), Input('job_submit_state', 'children'), Input('job_id', 'children'), - # State('anneal_duration', 'min'), - # State('anneal_duration', 'max'), Input('anneal_duration', 'value'), State('spins', 'value'), State('embeddings_cached', 'data'), @@ -283,7 +280,7 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): State('coupling_data', 'data'), # access previously stored data State('zne_estimates', 'data'), # Access ZNE estimates ) -def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ +def display_graphics_kink_density(qpu_name, kz_graph_display, J, schedule_filename, \ job_submit_state, job_id, ta, \ spins, embeddings_cached, figure, coupling_data, zne_estimates): """Generate graphics for kink density based on theory and QPU samples.""" @@ -330,8 +327,8 @@ def display_graphics_kink_density(kz_graph_display, J, schedule_filename, \ # Ensure there are enough unique x values for fitting if len(np.unique(x)) > 1: # Fit a 1st degree polynomial (linear fit) - if client is None or True: - warnings.warn('WIP: Execute for mock_sampler only, somethings wrong...') + if qpu_name == 'mock_dwave_solver': + warnings.warn('WIP: Execute for mock_sampler only') y_func_x = fitted_function(x, y, method='mixture_of_exponentials') else: 
warnings.warn('WIP: Execute for QPU only') From 67c95231f4ab303e12afb773a8dd2fd7f99445f6 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 28 Nov 2024 13:24:14 -0800 Subject: [PATCH 056/170] Plot stored data in main display's background --- helpers/plots.py | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/helpers/plots.py b/helpers/plots.py index a84c433..3532152 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -236,7 +236,36 @@ def plot_kink_densities_bg(display, time_range, J_base, schedule_name, coupling_ ) fig_data = [predicted_plus, predicted_minus, energy_transverse, energy_problem] - + for ta_str, data_points in coupling_data.items(): + for point in data_points: + color = 'black' + kink_density = point['kink_density'] + fig_data.append( + go.Scatter( + x=[ta_str], + y=[kink_density], + xaxis='x1', + yaxis='y1', + showlegend=False, + marker=dict(size=10, color=color, symbol='x') + + ) + ) + # Plot ZNE estimates + for ta_str, a in zne_estimates.items(): + fig_data.append( + go.Scatter( + x=[ta_str], + y=[a], + mode='markers', + name='ZNE Estimate', + marker=dict(size=12, color='purple', symbol='diamond'), + showlegend=False, + xaxis='x1', + yaxis='y1', + + ) + ) fig=go.Figure( data=fig_data, layout=fig_layout From 86a81261cf5d35931635d76da37283137604840b Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 28 Nov 2024 15:30:53 -0800 Subject: [PATCH 057/170] Update coupling plot to a linear axis --- helpers/plots.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/helpers/plots.py b/helpers/plots.py index 3532152..6afa289 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -136,6 +136,8 @@ def plot_kink_densities_bg(display, time_range, J_base, schedule_name, coupling_ x_axis3 = dict( title='kappa', + type='linear', + range=[-3, 1.5] ) if display == 'kink_density': fig_layout = go.Layout( From 5a4983e2dee1a6ce026c826875f7aa89d43628d2 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 28 Nov 2024 15:39:45 -0800 Subject: [PATCH 058/170] Added reset button in layout cards --- helpers/layouts_cards.py | 30 ++++++++++++++++++++++++------ 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/helpers/layouts_cards.py b/helpers/layouts_cards.py index afe00cf..d4d0278 100644 --- a/helpers/layouts_cards.py +++ b/helpers/layouts_cards.py @@ -108,12 +108,30 @@ def control_card( 'Simulation', style=control_header_style ), - dbc.Button( - 'Run', - id='btn_simulate', - color='primary', - className='me-1', - style={'marginTop':'5px'} + dbc.Row( + [ + dbc.Col( + dbc.Button( + 'Run', + id='btn_simulate', + color='primary', + className='me-2', # Adds spacing between buttons + style={'marginTop': '10px'} # Adds some vertical spacing + ), + width='auto' + ), + dbc.Col( + dbc.Button( + 'Reset', + id='btn_reset', + color='danger', + style={'marginTop': '10px'} + ), + width='auto' + ), + ], + justify='start', # Aligns buttons to the left + align='center' # Vertically centers buttons ), dbc.Progress( id='bar_job_status', From 5ef55487e752637d5b409506059e07fa8a7eef08 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 28 Nov 2024 16:12:56 -0800 Subject: [PATCH 059/170] Implemented the corresponding call backs for reset button --- app.py | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/app.py b/app.py index 617542c..2e4502d 100644 --- a/app.py +++ b/app.py @@ -218,6 +218,7 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): if trigger_id == 'qpu_selection': if qpu_name == 
'mock_dwave_solver': + embeddings_cached = {} L = spins edges = [(i, (i + 1)%L) for i in range(L)] @@ -230,7 +231,13 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): for filename in [file for file in os.listdir('helpers') if '.json' in file and 'emb_' in file]: - + + # if qpu_name == 'mock_dwave_solver' and 'Advantage_system6.4' in filename: + # with open(f'helpers/{filename}', 'r') as fp: + # embeddings_cached = json.load(fp) + # print(filename) + # embeddings_cached = json_to_dict(embeddings_cached) + if qpu_name.split('.')[0] in filename: with open(f'helpers/{filename}', 'r') as fp: @@ -268,6 +275,7 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): Output('coupling_data', 'data'), # store data using dcc Output('zne_estimates', 'data'), # update zne_estimates Input('qpu_selection', 'value'), + Input('btn_reset', 'n_clicks'), Input('kz_graph_display', 'value'), State('coupling_strength', 'value'), # previously input Input('quench_schedule_filename', 'children'), @@ -280,7 +288,7 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): State('coupling_data', 'data'), # access previously stored data State('zne_estimates', 'data'), # Access ZNE estimates ) -def display_graphics_kink_density(qpu_name, kz_graph_display, J, schedule_filename, \ +def display_graphics_kink_density(dummy, qpu_name, kz_graph_display, J, schedule_filename, \ job_submit_state, job_id, ta, \ spins, embeddings_cached, figure, coupling_data, zne_estimates): """Generate graphics for kink density based on theory and QPU samples.""" @@ -289,6 +297,14 @@ def display_graphics_kink_density(qpu_name, kz_graph_display, J, schedule_filena ta_min = 2 ta_max = 350 + if trigger_id == 'btn_reset': + coupling_data = {} + zne_estimates = {} + + fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J_baseline, schedule_filename, coupling_data, zne_estimates, ta_color_theme, coupling_color_theme) + + return fig, coupling_data, zne_estimates + if trigger_id in ['kz_graph_display', 'coupling_strength', 'quench_schedule_filename'] : fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J_baseline, schedule_filename, coupling_data, zne_estimates, ta_color_theme, coupling_color_theme) From c6bd17edd130cab76f4cee4e4ae6debd429d5e91 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 28 Nov 2024 16:18:39 -0800 Subject: [PATCH 060/170] Refactored the color dictionary to plots.py and removed them from plotting function argument --- app.py | 29 +++++------------------------ helpers/plots.py | 25 ++++++++++++++++++++++--- 2 files changed, 27 insertions(+), 27 deletions(-) diff --git a/app.py b/app.py index 2e4502d..0411d44 100644 --- a/app.py +++ b/app.py @@ -45,25 +45,6 @@ # global variable for a default J value J_baseline = -1.8 -ta_color_theme = { - 5: '#1F77B4', # Dark Blue - 10: '#FF7F0E', # Dark Orange - 20: '#2CA02C', # Dark Green - 40: '#D62728', # Dark Red - 80: '#9467BD', # Dark Purple - 160: '#8C564B', # Brown - 320: '#E377C2', # Dark Pink -} -coupling_color_theme = { - -1.8: '#1F77B4', # Dark Blue - -1.6: '#FF7F0E', # Dark Orange - -1.4: '#E377C2', # Dark Pink - -1.2: '#2CA02C', # Dark Green - -1: '#D62728', # Dark Red - -0.8: '#9467BD', # Dark Purple - -0.6: '#8C564B', # Brown -} - # Initialize: available QPUs, initial progress-bar status try: client = Client.from_config(client='qpu') @@ -274,8 +255,8 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): Output('sample_vs_theory', 'figure'), 
Output('coupling_data', 'data'), # store data using dcc Output('zne_estimates', 'data'), # update zne_estimates - Input('qpu_selection', 'value'), Input('btn_reset', 'n_clicks'), + Input('qpu_selection', 'value'), Input('kz_graph_display', 'value'), State('coupling_strength', 'value'), # previously input Input('quench_schedule_filename', 'children'), @@ -301,13 +282,13 @@ def display_graphics_kink_density(dummy, qpu_name, kz_graph_display, J, schedule coupling_data = {} zne_estimates = {} - fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J_baseline, schedule_filename, coupling_data, zne_estimates, ta_color_theme, coupling_color_theme) + fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J_baseline, schedule_filename, coupling_data, zne_estimates) return fig, coupling_data, zne_estimates if trigger_id in ['kz_graph_display', 'coupling_strength', 'quench_schedule_filename'] : - fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J_baseline, schedule_filename, coupling_data, zne_estimates, ta_color_theme, coupling_color_theme) + fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J_baseline, schedule_filename, coupling_data, zne_estimates) return fig, coupling_data, zne_estimates @@ -320,7 +301,7 @@ def display_graphics_kink_density(dummy, qpu_name, kz_graph_display, J, schedule sampleset_unembedded = get_samples(client, job_id, spins, J, embeddings_cached[spins]) _, kink_density = kink_stats(sampleset_unembedded, J) - fig = plot_kink_density(kz_graph_display, figure, kink_density, ta, J, ta_color_theme, coupling_color_theme) + fig = plot_kink_density(kz_graph_display, figure, kink_density, ta, J) # Calculate kappa kappa = lmbda(J) @@ -397,7 +378,7 @@ def display_graphics_kink_density(dummy, qpu_name, kz_graph_display, J, schedule return dash.no_update # use global J value - fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J_baseline, schedule_filename, coupling_data, zne_estimates, ta_color_theme, coupling_color_theme) + fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J_baseline, schedule_filename, coupling_data, zne_estimates) return fig, coupling_data, zne_estimates @app.callback( diff --git a/helpers/plots.py b/helpers/plots.py index 6afa289..fc0bce0 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -20,8 +20,27 @@ from helpers.kz_calcs import theoretical_kink_density __all__ = ['plot_kink_densities_bg', 'plot_kink_density', 'plot_spin_orientation'] - -def plot_kink_densities_bg(display, time_range, J_base, schedule_name, coupling_data, zne_estimates, ta_color_theme, coupling_color_theme): + +ta_color_theme = { + 5: '#1F77B4', # Dark Blue + 10: '#FF7F0E', # Dark Orange + 20: '#2CA02C', # Dark Green + 40: '#D62728', # Dark Red + 80: '#9467BD', # Dark Purple + 160: '#8C564B', # Brown + 320: '#E377C2', # Dark Pink +} +coupling_color_theme = { + -1.8: '#1F77B4', # Dark Blue + -1.6: '#FF7F0E', # Dark Orange + -1.4: '#E377C2', # Dark Pink + -1.2: '#2CA02C', # Dark Green + -1: '#D62728', # Dark Red + -0.8: '#9467BD', # Dark Purple + -0.6: '#8C564B', # Brown +} + +def plot_kink_densities_bg(display, time_range, J_base, schedule_name, coupling_data, zne_estimates): """ Plot background of theoretical kink-density and QPU energy scales. 
@@ -308,7 +327,7 @@ def plot_kink_densities_bg(display, time_range, J_base, schedule_name, coupling_ return fig -def plot_kink_density(display, fig_dict, kink_density, anneal_time, J, ta_color_theme, coupling_color_theme): +def plot_kink_density(display, fig_dict, kink_density, anneal_time, J): """Add kink density from QPU samples to plot. Args: From c29a9d57ffca16252ffe5d82c60323a4c2c3fe19 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 28 Nov 2024 22:41:17 -0800 Subject: [PATCH 061/170] Added documentation for ZNE demo --- README.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/README.md b/README.md index 7249794..af96dd3 100644 --- a/README.md +++ b/README.md @@ -201,6 +201,16 @@ Note that as you increase the anneal time, you move from the coherent regime and the returned samples are increasingly affected by thermalization, pushing the kink density away from the predicted value. +## Zero-Noise Extrapolation + +Another feature showcased in this demo is the result achieved in Quantum Error Mitigation. In this paper, we demonstrate a practical implementation of zero-noise extrapolation as a method of quantum error mitigation specifically used for quantum annealing. + + +For various coupling strengths at the same annealing time, we used a fitting function—quadratic for the Advantage solver and a multi-polynomial for the MockDwaveSampler—to calculate the theoretical zero-noise point. As the experiment runs for a longer time, we expect this zero-noise point to follow the same trend as the other data points. + +Experimental results + + ## Code Most the code related to configuring and analyzing the Ising problem is in the From 7339c7c1f977c78c98b5c3c6b1739574636c8b75 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 28 Nov 2024 23:00:06 -0800 Subject: [PATCH 062/170] Added labels in the coupling plot --- helpers/plots.py | 34 +++++++++++++++++++++++++--------- 1 file changed, 25 insertions(+), 9 deletions(-) diff --git a/helpers/plots.py b/helpers/plots.py index fc0bce0..027d98d 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -214,21 +214,37 @@ def plot_kink_densities_bg(display, time_range, J_base, schedule_name, coupling_ # Plot data points from 'coupling_data' for ta_str, data_points in coupling_data.items(): + label = False ta_value = float(ta_str) color = ta_color_theme[ta_value] for point in data_points: kappa = point['kappa'] kink_density = point['kink_density'] - fig_data.append( - go.Scatter( - x=[kappa], - y=[kink_density], - xaxis='x3', - yaxis='y1', - showlegend=False, - marker=dict(size=10, color=color, symbol='x') + if not label: + fig_data.append( + go.Scatter( + x=[kappa], + y=[kink_density], + xaxis='x3', + yaxis='y1', + mode='markers', + name=f"Anneal Time: {ta_value} ns", + showlegend=True, + marker=dict(size=10, color=color, symbol='x') + ) + ) + label = True + else: + fig_data.append( + go.Scatter( + x=[kappa], + y=[kink_density], + xaxis='x3', + yaxis='y1', + showlegend=False, + marker=dict(size=10, color=color, symbol='x') + ) ) - ) # Plot ZNE estimates for ta_str, a in zne_estimates.items(): fig_data.append( From a18480a0d0a00b56ce3fe037f1893b437d12595d Mon Sep 17 00:00:00 2001 From: Jack Raymond <10591246+jackraymond@users.noreply.github.com> Date: Fri, 29 Nov 2024 13:39:48 -0800 Subject: [PATCH 063/170] Correct to imports and t_a scaling --- app.py | 12 +++++------- helpers/qa.py | 24 ++++++++++++++++++++---- 2 files changed, 25 insertions(+), 11 deletions(-) diff --git a/app.py b/app.py index 0411d44..7dad7f7 100644 --- a/app.py +++ 
b/app.py @@ -15,14 +15,13 @@ import dash import dash_bootstrap_components as dbc from dash import html, Input, Output, State +from dash import dcc import datetime import json import numpy as np import os import warnings -from dash import dcc - import dimod from dwave.cloud import Client from dwave.embedding import embed_bqm, is_valid_embedding @@ -304,7 +303,7 @@ def display_graphics_kink_density(dummy, qpu_name, kz_graph_display, J, schedule fig = plot_kink_density(kz_graph_display, figure, kink_density, ta, J) # Calculate kappa - kappa = lmbda(J) + kappa = calc_kappa(J, J_baseline) # Initialize the list for this anneal_time if not present ta_str = str(ta) if ta_str not in coupling_data: @@ -325,11 +324,10 @@ def display_graphics_kink_density(dummy, qpu_name, kz_graph_display, J, schedule if len(np.unique(x)) > 1: # Fit a 1st degree polynomial (linear fit) if qpu_name == 'mock_dwave_solver': - warnings.warn('WIP: Execute for mock_sampler only') + # Fancy non-linear function y_func_x = fitted_function(x, y, method='mixture_of_exponentials') else: - warnings.warn('WIP: Execute for QPU only') - # Pure quadratic # y = a + b x^2 + # Pure quadratic (see paper) # y = a + b x^2 y_func_x = fitted_function(x, y, method='pure_quadratic') zne_estimates[ta_str] = y_func_x(0) @@ -454,7 +452,7 @@ def submit_job(job_submit_time, qpu_name, spins, J, ta_ns, embeddings_cached): computation = solver.sample_bqm( bqm=bqm_embedded, fast_anneal=True, - annealing_time=lmbda(J)*ta_ns, # Changed to lambda calculations + annealing_time=calc_lambda(J, J_baseline)*(ta_ns/1000), auto_scale=False, answer_mode='raw', # Easier than accounting for num_occurrences num_reads=100, diff --git a/helpers/qa.py b/helpers/qa.py index e4e883f..69f07f6 100644 --- a/helpers/qa.py +++ b/helpers/qa.py @@ -13,18 +13,34 @@ # limitations under the License. import json +import numpy as np +from numpy.polynomial.polynomial import Polynomial +import scipy import dimod from dwave.cloud.api import exceptions, Problems from dwave.embedding import unembed_sampleset import minorminer -__all__ = ['lmbda', 'create_bqm', 'find_one_to_one_embedding', 'get_job_status', 'get_samples', +__all__ = ['calc_lambda', 'calc_kappa', 'create_bqm', 'find_one_to_one_embedding', 'get_job_status', 'get_samples', 'json_to_dict', 'fitted_function'] -def lmbda(coupling_strength): - return -1.8/coupling_strength +def calc_kappa(coupling_strength, J_baseline=-1.8): + """Downgraded energy scale, see paper. + + """ + return abs(J_baseline/coupling_strength) + +def calc_lambda(coupling_strength, J_baseline=-1.8): + """Time rescaling factor (relative to J_baseline) + + lambda is approximately linear in kappa (see paper). 
+ kappa used as a placeholder (update later) + """ + kappa = calc_kappa(coupling_strength, J_baseline) + return kappa + def create_bqm(num_spins=512, coupling_strength=-1.4): """ @@ -216,4 +232,4 @@ def y_func_x(x): return sigmoidal_crossover(x, *p) else: raise ValueError('Unknown method') - return y_func_x \ No newline at end of file + return y_func_x From 30a14326cc12b2ed46a15d6bb64e59f7d881e942 Mon Sep 17 00:00:00 2001 From: Jack Raymond <10591246+jackraymond@users.noreply.github.com> Date: Fri, 29 Nov 2024 13:52:58 -0800 Subject: [PATCH 064/170] Correct quadratic fit: working well with QPU samplers --- helpers/qa.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/helpers/qa.py b/helpers/qa.py index 69f07f6..701801b 100644 --- a/helpers/qa.py +++ b/helpers/qa.py @@ -169,12 +169,12 @@ def fitted_function(xdata, ydata, method=('polynomial', 1)): if type(method) is tuple and method[0] == 'polynomial': coeffs = Polynomial.fit(xdata, ydata, deg=method[1]).convert().coef def y_func_x(x): - return np.polyval(coeffs, x) + return np.polynomial.polynomial.polyval(x, coeffs) elif method == 'pure_quadratic': # y = a + b x**2 coeffs = Polynomial.fit(xdata**2, ydata, deg=1).convert().coef def y_func_x(x): - return np.polyval(coeffs, x**2) + return np.polynomial.polynomial.polyval(x**2, coeffs) elif method == 'mixture_of_exponentials': # The no thermal noise case has two sources. # Kink-probability(T=0, t) ~ A t^{-1/2} ~ (1 - tanh(beta_eff))/2 From df36e5337b4a4586d5c4b8936204ad509d11159c Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Sun, 1 Dec 2024 12:40:00 -0800 Subject: [PATCH 065/170] Used black to reformat code --- helpers/layouts_cards.py | 408 +++++++++++++++---------------- helpers/layouts_components.py | 330 ++++++++++++++----------- helpers/plots.py | 440 +++++++++++++++++----------------- helpers/qa.py | 151 +++++++----- 4 files changed, 701 insertions(+), 628 deletions(-) diff --git a/helpers/layouts_cards.py b/helpers/layouts_cards.py index d4d0278..1b92256 100644 --- a/helpers/layouts_cards.py +++ b/helpers/layouts_cards.py @@ -1,4 +1,4 @@ -# Copyright 2024 D-Wave +# Copyright 2024 D-Wave # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,20 +13,21 @@ # limitations under the License. from dash import dcc, html -import dash_bootstrap_components as dbc +import dash_bootstrap_components as dbc import plotly.graph_objects as go from helpers.layouts_components import * -__all__ = ['control_card', 'graphs_card', ] +__all__ = [ + "control_card", + "graphs_card", +] -control_header_style = {'color': 'rgb(3, 184, 255)', 'marginTop': '10px'} +control_header_style = {"color": "rgb(3, 184, 255)", "marginTop": "10px"} -def control_card( - solvers={}, - init_job_status='READY'): +def control_card(solvers={}, init_job_status="READY"): """Lay out the configuration and job-submission card. Args: @@ -37,217 +38,220 @@ def control_card( Returns: - Dash card. + Dash card. 
""" - if init_job_status == 'NO SOLVER': - job_status_color = 'red' - else: - job_status_color = 'white' + if init_job_status == "NO SOLVER": + job_status_color = "red" + else: + job_status_color = "white" - return dbc.Card([ - dbc.Row( - [ - dbc.Col( + return dbc.Card( + [ + dbc.Row( [ - html.H4( - 'Coherent Annealing: KZ Simulation', - className='card-title', - style={'color': 'rgb(243, 120, 32)'} - ), - html.P( -""" + dbc.Col( + [ + html.H4( + "Coherent Annealing: KZ Simulation", + className="card-title", + style={"color": "rgb(243, 120, 32)"}, + ), + html.P( + """ Use a quantum computer to simulate the formation of topological defects in a 1D ring of spins undergoing a phase transition, described by the Kibble-Zurek mechanism. -""", - style={'color': 'white', 'fontSize': 14}), - html.H5( - 'Spins', - style=control_header_style - ), - html.Div([ - config_spins - ]), - html.H5( - 'Coupling Strength', - style=control_header_style - ), - html.Div([ - config_coupling_strength - ]), - html.H5( - 'Quench Duration [ns]', - style=control_header_style - ), - html.Div([ - config_anneal_duration - ]), - html.H5( - 'QPU', - style=control_header_style - ), - html.Div([ - config_qpu_selection(solvers), - ]), - html.P([ - 'Quench Schedule: ', - html.Span( - id='quench_schedule_filename', - children='', - style={'color': 'white', 'fontSize': 10} +""", + style={"color": "white", "fontSize": 14}, + ), + html.H5("Spins", style=control_header_style), + html.Div([config_spins]), + html.H5("Coupling Strength", style=control_header_style), + html.Div([config_coupling_strength]), + html.H5("Quench Duration [ns]", style=control_header_style), + html.Div([config_anneal_duration]), + html.H5("QPU", style=control_header_style), + html.Div( + [ + config_qpu_selection(solvers), + ] + ), + html.P( + [ + "Quench Schedule: ", + html.Span( + id="quench_schedule_filename", + children="", + style={"color": "white", "fontSize": 10}, + ), + ], + style={"color": "white", "marginTop": "10px"}, + ), + html.H5("Cached Embeddings", style=control_header_style), + embeddings, + html.H5("Simulation", style=control_header_style), + dbc.Row( + [ + dbc.Col( + dbc.Button( + "Run", + id="btn_simulate", + color="primary", + className="me-2", # Adds spacing between buttons + style={ + "marginTop": "10px" + }, # Adds some vertical spacing + ), + width="auto", + ), + dbc.Col( + dbc.Button( + "Reset", + id="btn_reset", + color="danger", + style={"marginTop": "10px"}, + ), + width="auto", + ), + ], + justify="start", # Aligns buttons to the left + align="center", # Vertically centers buttons + ), + dbc.Progress( + id="bar_job_status", + value=0, + color="link", + className="mb-3", + style={"width": "60%"}, + ), + html.P( + [ + "Status: ", + html.Span( + id="job_submit_state", + children=f"{init_job_status}", + style={ + "color": job_status_color, + "fontSize": 12, + "marginTop": "10px", + }, + ), + ], + style={"color": "white", "marginTop": "5px"}, + ), + html.P( + "Tooltips (hover over fields for descriptions)", + style={ + "color": "white", + "fontSize": 12, + "marginBottom": 5, + "marginTop": "10px", + }, + ), + tooltips_activate, + # Non-displayed section + dcc.Interval( + id="wd_job", + interval=None, + n_intervals=0, + disabled=True, + max_intervals=1, + ), + # Used for storing job status. Can probably be replaced with dcc.Store. 
+ html.P( + id="job_submit_time", + children="", + style=dict(display="none"), + ), + html.P( + id="job_id", children="", style=dict(display="none") + ), + dcc.Store( + id="embeddings_cached", + storage_type="memory", + data={}, + ), + dcc.Store( + id="embeddings_found", + storage_type="memory", + data={}, + ), + ] ), ], - style={'color': 'white', 'marginTop': '10px'} - ), - html.H5( - 'Cached Embeddings', - style=control_header_style - ), - embeddings, - html.H5( - 'Simulation', - style=control_header_style - ), - dbc.Row( - [ - dbc.Col( - dbc.Button( - 'Run', - id='btn_simulate', - color='primary', - className='me-2', # Adds spacing between buttons - style={'marginTop': '10px'} # Adds some vertical spacing - ), - width='auto' - ), - dbc.Col( - dbc.Button( - 'Reset', - id='btn_reset', - color='danger', - style={'marginTop': '10px'} - ), - width='auto' - ), - ], - justify='start', # Aligns buttons to the left - align='center' # Vertically centers buttons - ), - dbc.Progress( - id='bar_job_status', - value=0, - color='link', - className='mb-3', - style={'width': '60%'} - ), - html.P([ - 'Status: ', - html.Span( - id='job_submit_state', - children=f'{init_job_status}', - style={'color': job_status_color, 'fontSize': 12, 'marginTop': '10px'} - ), - ], - style={'color': 'white', 'marginTop': '5px'} - ), - html.P('Tooltips (hover over fields for descriptions)', - style={'color': 'white', 'fontSize': 12, 'marginBottom': 5, 'marginTop': '10px'} - ), - tooltips_activate, - # Non-displayed section - dcc.Interval( - id='wd_job', - interval=None, - n_intervals=0, - disabled=True, - max_intervals=1 - ), - # Used for storing job status. Can probably be replaced with dcc.Store. - html.P( - id='job_submit_time', - children='', - style = dict(display='none') - ), - html.P( - id='job_id', - children='', - style = dict(display='none') - ), - dcc.Store( - id='embeddings_cached', - storage_type='memory', - data={}, - ), - dcc.Store( - id='embeddings_found', - storage_type='memory', - data={}, - ), - ] + id="tour_settings_row", ), - ], - id='tour_settings_row' - ), - ], - body=True, - color='dark', - style={'height': "100%", "minHeight": "50rem"}, + ], + body=True, + color="dark", + style={"height": "100%", "minHeight": "50rem"}, ) -graphic_header_style = {'color': 'rgb(243, 120, 32)', 'margin': '15px 0px 0px 15px', 'backgroundColor': 'white'} + +graphic_header_style = { + "color": "rgb(243, 120, 32)", + "margin": "15px 0px 0px 15px", + "backgroundColor": "white", +} + def graphs_card(): - return dbc.Card([ - dbc.Row([ - dbc.Col([ - html.H5( - 'Spin States of Qubits in a 1D Ring', - style=graphic_header_style - ), - ] + return dbc.Card( + [ + dbc.Row( + [ + dbc.Col( + [ + html.H5( + "Spin States of Qubits in a 1D Ring", + style=graphic_header_style, + ), + ] + ), + ] ), - ] - ), - dbc.Row([ - dbc.Col([ - dcc.Graph( - id='spin_orientation', - figure=go.Figure(), - style={'height': '40vh', "minHeight": "20rem"}, - ), - ], - width=12, + dbc.Row( + [ + dbc.Col( + [ + dcc.Graph( + id="spin_orientation", + figure=go.Figure(), + style={"height": "40vh", "minHeight": "20rem"}, + ), + ], + width=12, + ), + ], ), - ], - ), - dbc.Row([ - dbc.Col([ - html.H5( - 'QPU Samples Vs. Kibble-Zurek Prediction', - style=graphic_header_style - ), - html.Div([ - config_kz_graph + dbc.Row( + [ + dbc.Col( + [ + html.H5( + "QPU Samples Vs. 
Kibble-Zurek Prediction", + style=graphic_header_style, + ), + html.Div([config_kz_graph]), + ] + ), ] - ), - ] ), - ] - ), - dbc.Row([ - dbc.Col([ - dcc.Graph( - id='sample_vs_theory', - figure=go.Figure(), - style={'height': '40vh', "minHeight": "20rem"}, - - ) - ], - width=12 + dbc.Row( + [ + dbc.Col( + [ + dcc.Graph( + id="sample_vs_theory", + figure=go.Figure(), + style={"height": "40vh", "minHeight": "20rem"}, + ) + ], + width=12, + ), + ] ), - ] - ), - ], - color='white', - style={'height': "100%", "minHeight": "50rem"}, + ], + color="white", + style={"height": "100%", "minHeight": "50rem"}, ) diff --git a/helpers/layouts_components.py b/helpers/layouts_components.py index f8a4c24..6ed6ac5 100644 --- a/helpers/layouts_components.py +++ b/helpers/layouts_components.py @@ -1,4 +1,4 @@ -# Copyright 2024 D-Wave +# Copyright 2024 D-Wave # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,199 +12,237 @@ # See the License for the specific language governing permissions and # limitations under the License. -import dash_bootstrap_components as dbc +import dash_bootstrap_components as dbc from dash.dcc import Checklist, Dropdown, Input, Link, RadioItems, Slider from dash import html, dcc -__all__ = ['config_anneal_duration', 'config_kz_graph', 'config_spins', - 'config_coupling_strength', 'config_qpu_selection', 'dbc_modal', 'embeddings', - 'job_bar_display', 'ring_lengths', 'tooltips_activate'] +__all__ = [ + "config_anneal_duration", + "config_kz_graph", + "config_spins", + "config_coupling_strength", + "config_qpu_selection", + "dbc_modal", + "embeddings", + "job_bar_display", + "ring_lengths", + "tooltips_activate", +] ring_lengths = [512, 1024, 2048] config_anneal_duration = dcc.Dropdown( - id='anneal_duration', + id="anneal_duration", options=[ - {'label': '5 ns', 'value': 5}, - {'label': '10 ns', 'value': 10}, - {'label': '20 ns', 'value': 20}, - {'label': '40 ns', 'value': 40}, - {'label': '80 ns', 'value': 80}, - {'label': '160 ns', 'value': 160}, - {'label': '320 ns', 'value': 320}, + {"label": "5 ns", "value": 5}, + {"label": "10 ns", "value": 10}, + {"label": "20 ns", "value": 20}, + {"label": "40 ns", "value": 40}, + {"label": "80 ns", "value": 80}, + {"label": "160 ns", "value": 160}, + {"label": "320 ns", "value": 320}, ], - value=5, # default value - style={'max-width': '95%'} + value=5, # default value + style={"max-width": "95%"}, ) config_kz_graph = RadioItems( - id='kz_graph_display', + id="kz_graph_display", options=[ - { - 'label': 'Both', - 'value': 'both', - 'disabled': False - }, - { - 'label': 'Kink density', - 'value': 'kink_density', - 'disabled': False - }, - { - 'label': 'Schedule', - 'value': 'schedule', - 'disabled': False - }, - { - 'label': 'Noise level (lambda)', - 'value': 'coupling', - 'disabled': False - } + {"label": "Both", "value": "both", "disabled": False}, + {"label": "Kink density", "value": "kink_density", "disabled": False}, + {"label": "Schedule", "value": "schedule", "disabled": False}, + {"label": "Noise level (lambda)", "value": "coupling", "disabled": False}, ], - value='both', - inputStyle={'margin-right': '10px', 'margin-bottom': '5px'}, - labelStyle={'color': 'rgb(3, 184, 255)', 'font-size': 12, 'display': 'inline-block', 'marginLeft': 20}, - inline=True, # Currently requires above 'inline-block' + value="both", + inputStyle={"margin-right": "10px", "margin-bottom": "5px"}, + labelStyle={ + "color": "rgb(3, 184, 255)", + "font-size": 12, + "display": 
"inline-block", + "marginLeft": 20, + }, + inline=True, # Currently requires above 'inline-block' ) config_spins = RadioItems( - id='spins', + id="spins", options=[ - { - 'label': f'{length}', - 'value': length, - 'disabled': False - } for length in ring_lengths + {"label": f"{length}", "value": length, "disabled": False} + for length in ring_lengths ], value=512, - inputStyle={'margin-right': '10px', 'margin-bottom': '10px'}, - labelStyle={'color': 'white', 'font-size': 12, 'display': 'inline-block', 'marginLeft': 20}, - inline=True, # Currently requires above 'inline-block' + inputStyle={"margin-right": "10px", "margin-bottom": "10px"}, + labelStyle={ + "color": "white", + "font-size": 12, + "display": "inline-block", + "marginLeft": 20, + }, + inline=True, # Currently requires above 'inline-block' ) -j_marks = {round(0.1*val, 1): {'label': f'{round(0.1*val, 1)}', 'style': {'color': 'blue'}} if - round(0.1*val, 0) != 0.1*val else - {'label': f'{round(0.1*val)}', 'style': {'color': 'blue'}} - for val in range(-18, 0, 2)} -j_marks.update({round(0.1*val, 1): {'label': f'{round(0.1*val, 1)}', 'style': {'color': 'red'}} if - round(0.1*val, 0) != 0.1*val else - {'label': f'{round(0.1*val)}', 'style': {'color': 'red'}} - for val in range(2, 10, 2)}) +j_marks = { + round(0.1 * val, 1): ( + {"label": f"{round(0.1*val, 1)}", "style": {"color": "blue"}} + if round(0.1 * val, 0) != 0.1 * val + else {"label": f"{round(0.1*val)}", "style": {"color": "blue"}} + ) + for val in range(-18, 0, 2) +} +j_marks.update( + { + round(0.1 * val, 1): ( + {"label": f"{round(0.1*val, 1)}", "style": {"color": "red"}} + if round(0.1 * val, 0) != 0.1 * val + else {"label": f"{round(0.1*val)}", "style": {"color": "red"}} + ) + for val in range(2, 10, 2) + } +) # Dash Slider has some issue with int values having a zero after the decimal point -j_marks[-2] = {'label': '-2', 'style': {'color': 'blue'}} +j_marks[-2] = {"label": "-2", "style": {"color": "blue"}} del j_marks[-1.0] -j_marks[-1] = {'label': '-1', 'style': {'color': 'blue'}} -j_marks[1] = {'label': '1', 'style': {'color': 'red'}} -config_coupling_strength = dbc.Row([ - dbc.Col( - html.Div([ - Slider( - id='coupling_strength', - value=-1.4, - marks=j_marks, - step=None, - min=-1.8, - max=-0.6 - ) - ]), - ), -]) +j_marks[-1] = {"label": "-1", "style": {"color": "blue"}} +j_marks[1] = {"label": "1", "style": {"color": "red"}} +config_coupling_strength = dbc.Row( + [ + dbc.Col( + html.Div( + [ + Slider( + id="coupling_strength", + value=-1.4, + marks=j_marks, + step=None, + min=-1.8, + max=-0.6, + ) + ] + ), + ), + ] +) -def config_qpu_selection(solvers, default='mock_dwave_solver'): - default = 'mock_dwave_solver' if 'mock_dwave_solver' in solvers else None +def config_qpu_selection(solvers, default="mock_dwave_solver"): + default = "mock_dwave_solver" if "mock_dwave_solver" in solvers else None return Dropdown( - id='qpu_selection', - options=[{'label': qpu_name, 'value': qpu_name} for qpu_name in solvers], - placeholder='Select a quantum computer', - #value=default + id="qpu_selection", + options=[{"label": qpu_name, "value": qpu_name} for qpu_name in solvers], + placeholder="Select a quantum computer", + # value=default ) + job_bar_display = { - 'READY': [0, 'link'], - 'EMBEDDING': [20, 'warning'], - 'NO SOLVER': [100, 'danger'], - 'SUBMITTED': [40, 'info'], - 'PENDING': [60, 'primary'], - 'IN_PROGRESS': [85 ,'dark'], - 'COMPLETED': [100, 'success'], - 'CANCELLED': [100, 'light'], - 'FAILED': [100, 'danger'], + "READY": [0, "link"], + "EMBEDDING": [20, 
"warning"], + "NO SOLVER": [100, "danger"], + "SUBMITTED": [40, "info"], + "PENDING": [60, "primary"], + "IN_PROGRESS": [85, "dark"], + "COMPLETED": [100, "success"], + "CANCELLED": [100, "light"], + "FAILED": [100, "danger"], } modal_texts = { - 'solver': ["Leap's Quantum Computers Inaccessible", - [ - html.Div([ - html.Div('Could not connect to a Leap quantum computer.'), - html.Div([ -""" + "solver": [ + "Leap's Quantum Computers Inaccessible", + [ + html.Div( + [ + html.Div("Could not connect to a Leap quantum computer."), + html.Div( + [ + """ If you are running locally, set environment variables or a dwave-cloud-client configuration file as described in the """, - Link(children=[html.Div(' Ocean')], - href='https://docs.ocean.dwavesys.com/en/stable/overview/sapi.html', - style={'display':'inline-block'}), - 'documentation.'], - style={'display':'inline-block'}), - html.Div(['If you are running in an online IDE, see the ', - Link(children=[html.Div('system documentation')], - href='https://docs.dwavesys.com/docs/latest/doc_leap_dev_env.html', - style={'display':'inline-block'}), - ' on supported IDEs.'], - style={'display':'inline-block'}),])] + Link( + children=[html.Div(" Ocean")], + href="https://docs.ocean.dwavesys.com/en/stable/overview/sapi.html", + style={"display": "inline-block"}, + ), + "documentation.", + ], + style={"display": "inline-block"}, + ), + html.Div( + [ + "If you are running in an online IDE, see the ", + Link( + children=[html.Div("system documentation")], + href="https://docs.dwavesys.com/docs/latest/doc_leap_dev_env.html", + style={"display": "inline-block"}, + ), + " on supported IDEs.", + ], + style={"display": "inline-block"}, + ), + ] + ) + ], ], } + def dbc_modal(name): - name = name.split('_')[1] + name = name.split("_")[1] return [ - html.Div([ - dbc.Modal([ - dbc.ModalHeader( - dbc.ModalTitle( - modal_texts[name][0] - ) - ), - dbc.ModalBody( - modal_texts[name][1] - ), - ], - id=f'{name}_modal', size='sm') - ]) - ] + html.Div( + [ + dbc.Modal( + [ + dbc.ModalHeader(dbc.ModalTitle(modal_texts[name][0])), + dbc.ModalBody(modal_texts[name][1]), + ], + id=f"{name}_modal", + size="sm", + ) + ] + ) + ] + embeddings = Checklist( - options=[{ - 'label': - html.Div([ - f'{length}'], - style={'color': 'white', 'font-size': 10, 'marginRight': 10} - ), - 'value': length, - 'disabled': True, - } for length in ring_lengths], - value=[], - id=f'embedding_is_cached', - style={'color': 'white'}, - inline=True + options=[ + { + "label": html.Div( + [f"{length}"], + style={"color": "white", "font-size": 10, "marginRight": 10}, + ), + "value": length, + "disabled": True, + } + for length in ring_lengths + ], + value=[], + id=f"embedding_is_cached", + style={"color": "white"}, + inline=True, ) -tooltips_activate = RadioItems( - id='tooltips_show', +tooltips_activate = RadioItems( + id="tooltips_show", options=[ { - 'label': 'On', - 'value': 'on', + "label": "On", + "value": "on", }, { - 'label': 'Off', - 'value': 'off', - } + "label": "Off", + "value": "off", + }, ], - value='on', - inputStyle={'margin-right': '10px', 'margin-bottom': '10px'}, - labelStyle={'color': 'white', 'font-size': 12, 'display': 'inline-block', 'marginLeft': 20}, - inline=True, # Currently requires above 'inline-block' + value="on", + inputStyle={"margin-right": "10px", "margin-bottom": "10px"}, + labelStyle={ + "color": "white", + "font-size": 12, + "display": "inline-block", + "marginLeft": 20, + }, + inline=True, # Currently requires above 'inline-block' ) - diff --git a/helpers/plots.py 
b/helpers/plots.py index 027d98d..daaff15 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -19,146 +19,147 @@ from helpers.kz_calcs import theoretical_kink_density -__all__ = ['plot_kink_densities_bg', 'plot_kink_density', 'plot_spin_orientation'] +__all__ = ["plot_kink_densities_bg", "plot_kink_density", "plot_spin_orientation"] ta_color_theme = { - 5: '#1F77B4', # Dark Blue - 10: '#FF7F0E', # Dark Orange - 20: '#2CA02C', # Dark Green - 40: '#D62728', # Dark Red - 80: '#9467BD', # Dark Purple - 160: '#8C564B', # Brown - 320: '#E377C2', # Dark Pink + 5: "#1F77B4", # Dark Blue + 10: "#FF7F0E", # Dark Orange + 20: "#2CA02C", # Dark Green + 40: "#D62728", # Dark Red + 80: "#9467BD", # Dark Purple + 160: "#8C564B", # Brown + 320: "#E377C2", # Dark Pink } coupling_color_theme = { - -1.8: '#1F77B4', # Dark Blue - -1.6: '#FF7F0E', # Dark Orange - -1.4: '#E377C2', # Dark Pink - -1.2: '#2CA02C', # Dark Green - -1: '#D62728', # Dark Red - -0.8: '#9467BD', # Dark Purple - -0.6: '#8C564B', # Brown + -1.8: "#1F77B4", # Dark Blue + -1.6: "#FF7F0E", # Dark Orange + -1.4: "#E377C2", # Dark Pink + -1.2: "#2CA02C", # Dark Green + -1: "#D62728", # Dark Red + -0.8: "#9467BD", # Dark Purple + -0.6: "#8C564B", # Brown } -def plot_kink_densities_bg(display, time_range, J_base, schedule_name, coupling_data, zne_estimates): + +def plot_kink_densities_bg( + display, time_range, J_base, schedule_name, coupling_data, zne_estimates +): """ - Plot background of theoretical kink-density and QPU energy scales. + Plot background of theoretical kink-density and QPU energy scales. Args: - display: Displays plots of type "both", "kink_density", or "schedule". + display: Displays plots of type "both", "kink_density", or "schedule". time_range: Maximum and minimum quench times, as a list. coupling_strength: Coupling strength between spins in ring. schedule_name: Filename of anneal schedule. - + Returns: Plotly figure of predicted kink densities and/or QPU energy scales. 
""" if schedule_name: - schedule = pd.read_csv(f'helpers/{schedule_name}') + schedule = pd.read_csv(f"helpers/{schedule_name}") else: - schedule = pd.read_csv('helpers/FALLBACK_SCHEDULE.csv') + schedule = pd.read_csv("helpers/FALLBACK_SCHEDULE.csv") - A_ghz = schedule['A(s) (GHz)'] - B_ghz = schedule['B(s) (GHz)'] - s = schedule['s'] + A_ghz = schedule["A(s) (GHz)"] + B_ghz = schedule["B(s) (GHz)"] + s = schedule["s"] # Display in Joule - A_joule = A_ghz/1.5092E24 - B_joule = B_ghz/1.5092E24 + A_joule = A_ghz / 1.5092e24 + B_joule = B_ghz / 1.5092e24 n = theoretical_kink_density(time_range, J_base, schedule, schedule_name) - + predicted_plus = go.Scatter( - x=np.asarray(time_range), + x=np.asarray(time_range), y=np.asarray(1.1 * n), - mode='lines', - name='Predicted (±10%)', - xaxis='x1', - yaxis='y1', - line_color='black', + mode="lines", + name="Predicted (±10%)", + xaxis="x1", + yaxis="y1", + line_color="black", line_width=1, ) - + predicted_minus = go.Scatter( - x=np.asarray(time_range), + x=np.asarray(time_range), y=np.asarray(0.90 * n), - mode='lines', - xaxis='x1', - yaxis='y1', - line_color='black', + mode="lines", + xaxis="x1", + yaxis="y1", + line_color="black", line_width=1, - fill='tonexty', - fillcolor='white', + fill="tonexty", + fillcolor="white", showlegend=False, ) - - x_axis = 'x2' - y_axis = 'y2' + + x_axis = "x2" + y_axis = "y2" opacity = 0.15 - if display == 'schedule': - x_axis = 'x1' - y_axis = 'y1' + if display == "schedule": + x_axis = "x1" + y_axis = "y1" opacity = 1 energy_transverse = go.Scatter( - x=s, - y=A_joule, - mode='lines', - name='A(s)', + x=s, + y=A_joule, + mode="lines", + name="A(s)", xaxis=x_axis, yaxis=y_axis, - line_color='blue', + line_color="blue", opacity=opacity, ) energy_problem = go.Scatter( - x=s, - y=abs(J_base) * B_joule, - mode='lines', - name='B(s)', + x=s, + y=abs(J_base) * B_joule, + mode="lines", + name="B(s)", xaxis=x_axis, yaxis=y_axis, - line_color='red', + line_color="red", opacity=opacity, ) x_axis1 = dict( - title='Quench Duration [ns]', - type='log', - range=[np.log10(time_range[0] - 1), np.log10(time_range[1] + 10)], + title="Quench Duration [ns]", + type="log", + range=[np.log10(time_range[0] - 1), np.log10(time_range[1] + 10)], ) y_axis1 = dict( - title='Kink Density', - type='log', + title="Kink Density", + type="log", ) x_axis2 = dict( title={ - 'text': 'Normalized Fast-Anneal Fraction, s', - 'standoff': 0, - }, - side='top' if display != 'schedule' else 'bottom', - type='log' if display != 'schedule' else 'linear', - range=[-1, 0] if display != 'schedule' else [0, 1], # Minimal s=0.1 for log seems reasonable + "text": "Normalized Fast-Anneal Fraction, s", + "standoff": 0, + }, + side="top" if display != "schedule" else "bottom", + type="log" if display != "schedule" else "linear", + range=( + [-1, 0] if display != "schedule" else [0, 1] + ), # Minimal s=0.1 for log seems reasonable ) - + y_axis2 = dict( - title='Energy [Joule]', - side='right' if display != 'schedule' else 'left', - type='linear', + title="Energy [Joule]", + side="right" if display != "schedule" else "left", + type="linear", ) - x_axis3 = dict( - title='kappa', - type='linear', - range=[-3, 1.5] - ) - if display == 'kink_density': + x_axis3 = dict(title="kappa", type="linear", range=[-3, 1.5]) + if display == "kink_density": fig_layout = go.Layout( xaxis=x_axis1, yaxis=y_axis1, @@ -166,36 +167,34 @@ def plot_kink_densities_bg(display, time_range, J_base, schedule_name, coupling_ fig_data = [predicted_plus, predicted_minus] for ta_str, data_points in 
coupling_data.items(): for point in data_points: - color = coupling_color_theme[point['coupling_strength']] - kink_density = point['kink_density'] + color = coupling_color_theme[point["coupling_strength"]] + kink_density = point["kink_density"] fig_data.append( go.Scatter( - x=[ta_str], - y=[kink_density], - xaxis='x1', - yaxis='y1', - showlegend=False, - marker=dict(size=10, color=color, symbol='x') - + x=[ta_str], + y=[kink_density], + xaxis="x1", + yaxis="y1", + showlegend=False, + marker=dict(size=10, color=color, symbol="x"), ) ) # Plot ZNE estimates for ta_str, a in zne_estimates.items(): fig_data.append( go.Scatter( - x=[ta_str], - y=[a], - mode='markers', - name='ZNE Estimate', - marker=dict(size=12, color='purple', symbol='diamond'), - showlegend=False, - xaxis='x1', - yaxis='y1', - + x=[ta_str], + y=[a], + mode="markers", + name="ZNE Estimate", + marker=dict(size=12, color="purple", symbol="diamond"), + showlegend=False, + xaxis="x1", + yaxis="y1", ) ) - elif display == 'schedule': + elif display == "schedule": fig_layout = go.Layout( xaxis=x_axis2, @@ -203,7 +202,7 @@ def plot_kink_densities_bg(display, time_range, J_base, schedule_name, coupling_ ) fig_data = [energy_transverse, energy_problem] - elif display == 'coupling': + elif display == "coupling": fig_layout = go.Layout( xaxis=x_axis3, @@ -218,19 +217,19 @@ def plot_kink_densities_bg(display, time_range, J_base, schedule_name, coupling_ ta_value = float(ta_str) color = ta_color_theme[ta_value] for point in data_points: - kappa = point['kappa'] - kink_density = point['kink_density'] + kappa = point["kappa"] + kink_density = point["kink_density"] if not label: fig_data.append( go.Scatter( x=[kappa], y=[kink_density], - xaxis='x3', - yaxis='y1', - mode='markers', + xaxis="x3", + yaxis="y1", + mode="markers", name=f"Anneal Time: {ta_value} ns", showlegend=True, - marker=dict(size=10, color=color, symbol='x') + marker=dict(size=10, color=color, symbol="x"), ) ) label = True @@ -239,10 +238,10 @@ def plot_kink_densities_bg(display, time_range, J_base, schedule_name, coupling_ go.Scatter( x=[kappa], y=[kink_density], - xaxis='x3', - yaxis='y1', + xaxis="x3", + yaxis="y1", showlegend=False, - marker=dict(size=10, color=color, symbol='x') + marker=dict(size=10, color=color, symbol="x"), ) ) # Plot ZNE estimates @@ -251,19 +250,19 @@ def plot_kink_densities_bg(display, time_range, J_base, schedule_name, coupling_ go.Scatter( x=[0], y=[a], - mode='markers', - name='ZNE Estimate', - marker=dict(size=12, color='purple', symbol='diamond'), + mode="markers", + name="ZNE Estimate", + marker=dict(size=12, color="purple", symbol="diamond"), showlegend=False, - xaxis='x3', - yaxis='y1', + xaxis="x3", + yaxis="y1", ) ) - else: # Display both plots together + else: # Display both plots together - x_axis2.update({'overlaying': 'x1'}) - y_axis2.update({'overlaying': 'y1'}) + x_axis2.update({"overlaying": "x1"}) + y_axis2.update({"overlaying": "y1"}) fig_layout = go.Layout( xaxis=x_axis1, @@ -275,141 +274,134 @@ def plot_kink_densities_bg(display, time_range, J_base, schedule_name, coupling_ fig_data = [predicted_plus, predicted_minus, energy_transverse, energy_problem] for ta_str, data_points in coupling_data.items(): for point in data_points: - color = 'black' - kink_density = point['kink_density'] + color = "black" + kink_density = point["kink_density"] fig_data.append( go.Scatter( - x=[ta_str], - y=[kink_density], - xaxis='x1', - yaxis='y1', - showlegend=False, - marker=dict(size=10, color=color, symbol='x') - + x=[ta_str], + 
y=[kink_density], + xaxis="x1", + yaxis="y1", + showlegend=False, + marker=dict(size=10, color=color, symbol="x"), ) ) # Plot ZNE estimates for ta_str, a in zne_estimates.items(): fig_data.append( go.Scatter( - x=[ta_str], - y=[a], - mode='markers', - name='ZNE Estimate', - marker=dict(size=12, color='purple', symbol='diamond'), - showlegend=False, - xaxis='x1', - yaxis='y1', - + x=[ta_str], + y=[a], + mode="markers", + name="ZNE Estimate", + marker=dict(size=12, color="purple", symbol="diamond"), + showlegend=False, + xaxis="x1", + yaxis="y1", ) ) - fig=go.Figure( - data=fig_data, - layout=fig_layout - ) + fig = go.Figure(data=fig_data, layout=fig_layout) - fig.update_layout( - legend=dict(x=0.7, y=0.9), - margin=dict(b=5,l=5,r=20,t=10) - ) + fig.update_layout(legend=dict(x=0.7, y=0.9), margin=dict(b=5, l=5, r=20, t=10)) - if display != 'schedule' and display != 'coupling': + if display != "schedule" and display != "coupling": fig.add_annotation( - xref='x', - yref='y', - x=np.log10(0.25*(time_range[1])), - y=np.log10(1.0*n.min()), - text='Coherent', - axref='x', - ayref='y', - ax=np.log10(0.50*(time_range[1])), - ay=np.log10(1.0*n.min()), + xref="x", + yref="y", + x=np.log10(0.25 * (time_range[1])), + y=np.log10(1.0 * n.min()), + text="Coherent", + axref="x", + ayref="y", + ax=np.log10(0.50 * (time_range[1])), + ay=np.log10(1.0 * n.min()), arrowhead=5, ) - + fig.add_annotation( - xref='x', - yref='y', - x=np.log10(0.5*(time_range[1])), - y=np.log10(1.2*n.min()), - text='Thermalized', - axref='x', - ayref='y', - ax=np.log10(0.3*(time_range[1])), - ay=np.log10(1.2*n.min()), + xref="x", + yref="y", + x=np.log10(0.5 * (time_range[1])), + y=np.log10(1.2 * n.min()), + text="Thermalized", + axref="x", + ayref="y", + ax=np.log10(0.3 * (time_range[1])), + ay=np.log10(1.2 * n.min()), arrowhead=5, ) return fig + def plot_kink_density(display, fig_dict, kink_density, anneal_time, J): """Add kink density from QPU samples to plot. Args: - display: Displays plots of type "both", "kink_density", or "schedule". + display: Displays plots of type "both", "kink_density", or "schedule". fig_dict: Existing background Plotly figure, as a dict. kink_density: Calculated kink density derived from QPU sample set. anneal_time: Anneal time used for the kink density. - + Returns: Updated Plotly figure with a marker at (anneal time, kink-density). """ - if display == 'schedule': + if display == "schedule": return no_update - - fig=go.Figure( - fig_dict - ) + + fig = go.Figure(fig_dict) ta_value = float(anneal_time) - - - if display == 'coupling': + + if display == "coupling": color = ta_color_theme[ta_value] - kappa = -1.8/J + kappa = -1.8 / J fig.add_trace( go.Scatter( - x=[kappa], - y=[kink_density], - xaxis='x3', - yaxis='y1', + x=[kappa], + y=[kink_density], + xaxis="x3", + yaxis="y1", showlegend=False, - marker=dict(size=10, - color=color, - symbol='x', - ) + marker=dict( + size=10, + color=color, + symbol="x", + ), ) ) return fig - - if display == 'kink_density': + + if display == "kink_density": color = coupling_color_theme[J] else: - color = 'black' + color = "black" fig.add_trace( go.Scatter( - x=[anneal_time], - y=[kink_density], - xaxis='x1', - yaxis='y1', + x=[anneal_time], + y=[kink_density], + xaxis="x1", + yaxis="y1", showlegend=False, - marker=dict(size=10, - color=color, - symbol='x', - ) + marker=dict( + size=10, + color=color, + symbol="x", + ), ) ) return fig + def plot_spin_orientation(num_spins=512, sample=None): - """Plot the ring of spins. + """Plot the ring of spins. 
Args: num_spins: Number of spins in the ring. @@ -419,8 +411,8 @@ def plot_spin_orientation(num_spins=512, sample=None): Returns: Plotly figure of orientation for all spins in the ring. """ - - cone_size = 0.5 # Based on how it looks + + cone_size = 0.5 # Based on how it looks z = np.linspace(0, 10, num_spins) x, y = z * np.cos(5 * z), z * np.sin(5 * z) @@ -436,71 +428,71 @@ def plot_spin_orientation(num_spins=512, sample=None): cones_blue = ~cones_red num_cones_red = np.count_nonzero(cones_red) num_cones_blue = num_spins - num_cones_red - + spins_up = go.Cone( - x = x[cones_red], - y = y[cones_red], - z = z[cones_red], - u=num_cones_red*[0], - v=num_cones_red*[0], - w=num_cones_red*[1], + x=x[cones_red], + y=y[cones_red], + z=z[cones_red], + u=num_cones_red * [0], + v=num_cones_red * [0], + w=num_cones_red * [1], showlegend=False, showscale=False, - colorscale=[[0, 'red'], [1, 'red']], + colorscale=[[0, "red"], [1, "red"]], hoverinfo=None, - sizemode='raw', - sizeref=cone_size + sizemode="raw", + sizeref=cone_size, ) spins_down = go.Cone( x=x[cones_blue], y=y[cones_blue], z=z[cones_blue], - u=num_cones_blue*[0], - v=num_cones_blue*[0], - w=num_cones_blue*[-1], + u=num_cones_blue * [0], + v=num_cones_blue * [0], + w=num_cones_blue * [-1], showlegend=False, showscale=False, - colorscale=[[0, 'blue'], [1, 'blue']], + colorscale=[[0, "blue"], [1, "blue"]], hoverinfo=None, - sizemode='raw', - sizeref=cone_size + sizemode="raw", + sizeref=cone_size, ) fig = go.Figure( data=[spins_up, spins_down], layout=go.Layout( showlegend=False, - margin=dict(b=0,l=0,r=0,t=40), + margin=dict(b=0, l=0, r=0, t=40), scene=dict( xaxis=dict( - showticklabels=False, + showticklabels=False, visible=False, ), yaxis=dict( - showticklabels=False, + showticklabels=False, visible=False, ), zaxis=dict( - showticklabels=False, + showticklabels=False, visible=False, ), - camera_eye=dict(x=0.15, y=1.25, z=0.15) - ) - ) + camera_eye=dict(x=0.15, y=1.25, z=0.15), + ), + ), ) fig.add_layout_image( dict( - source='assets/spin_states.png', - xref='paper', - yref='paper', - x=0.95, + source="assets/spin_states.png", + xref="paper", + yref="paper", + x=0.95, y=0.05, - sizex=0.4, + sizex=0.4, sizey=0.4, - xanchor='right', - yanchor='bottom', + xanchor="right", + yanchor="bottom", ) ) diff --git a/helpers/qa.py b/helpers/qa.py index e4e883f..b42127e 100644 --- a/helpers/qa.py +++ b/helpers/qa.py @@ -12,23 +12,36 @@ # See the License for the specific language governing permissions and # limitations under the License. -import json +import json import dimod from dwave.cloud.api import exceptions, Problems from dwave.embedding import unembed_sampleset import minorminer -__all__ = ['lmbda', 'create_bqm', 'find_one_to_one_embedding', 'get_job_status', 'get_samples', - 'json_to_dict', 'fitted_function'] +import numpy as np +from numpy.polynomial.polynomial import Polynomial +import warnings +import scipy + +__all__ = [ + "lmbda", + "create_bqm", + "find_one_to_one_embedding", + "get_job_status", + "get_samples", + "json_to_dict", + "fitted_function", +] def lmbda(coupling_strength): - return -1.8/coupling_strength + return -1.8 / coupling_strength + def create_bqm(num_spins=512, coupling_strength=-1.4): """ - Create a binary quadratic model (BQM) representing a magnetic 1D ring. + Create a binary quadratic model (BQM) representing a magnetic 1D ring. Args: num_spins: Number of spins in the ring. @@ -36,46 +49,48 @@ def create_bqm(num_spins=512, coupling_strength=-1.4): coupling_strength: Coupling strength between spins in the ring. 
Returns: - dimod BQM. + dimod BQM. """ - bqm = dimod.BinaryQuadraticModel(vartype='SPIN') + bqm = dimod.BinaryQuadraticModel(vartype="SPIN") for spin in range(num_spins): bqm.add_quadratic(spin, (spin + 1) % num_spins, coupling_strength) - + return bqm + def find_one_to_one_embedding(spins, sampler_edgelist): """ - Find an embedding with chains of length one for the ring of spins. + Find an embedding with chains of length one for the ring of spins. Args: - spins: Number of spins. + spins: Number of spins. - sampler_edgelist: Edges (couplers) of the QPU. + sampler_edgelist: Edges (couplers) of the QPU. Returns: - Embedding, as a dict of format {spin: [qubit]}. + Embedding, as a dict of format {spin: [qubit]}. """ bqm = create_bqm(spins) - for _ in range(5): # 4 out of 5 times will find an embedding + for _ in range(5): # 4 out of 5 times will find an embedding - embedding = minorminer.find_embedding(bqm.quadratic, sampler_edgelist) + embedding = minorminer.find_embedding(bqm.quadratic, sampler_edgelist) if max(len(val) for val in embedding.values()) == 1: return embedding - + return {} + def get_job_status(client, job_id, job_submit_time): """Return status of a submitted job. Args: - client: dwave-cloud-client Client instance. + client: dwave-cloud-client Client instance. - job_id: Identification string of the job. + job_id: Identification string of the job. job_submit_time: Clock time of submission for identification. @@ -83,8 +98,8 @@ def get_job_status(client, job_id, job_submit_time): Embedding, as a dict of format ``{spin: [qubit]}``. """ - if '"type": "SampleSet"' in job_id and job_submit_time == 'SA': - return 'COMPLETED' + if '"type": "SampleSet"' in job_id and job_submit_time == "SA": + return "COMPLETED" else: p = Problems.from_config(client.config) @@ -92,74 +107,83 @@ def get_job_status(client, job_id, job_submit_time): try: status = p.get_problem_status(job_id) - label_time = dict(status)['label'].split('submitted: ')[1] + label_time = dict(status)["label"].split("submitted: ")[1] if label_time == job_submit_time: return status.status.value - + return None - + except exceptions.ResourceNotFoundError: return None + def get_samples(client, job_id, num_spins, J, embedding): - """Retrieve an unembedded sample set for a given job ID. + """Retrieve an unembedded sample set for a given job ID. Args: - client: dwave-cloud-client Client instance. + client: dwave-cloud-client Client instance. - job_id: Identification string of the job. + job_id: Identification string of the job. num_spins: Number of spins in the ring. coupling_strength: Coupling strength between spins in the ring. - qpu_name: Name of the quantum computer the job was submitted to. + qpu_name: Name of the quantum computer the job was submitted to. - embedding: Embedding used for the job. + embedding: Embedding used for the job. Returns: - Unembedded dimod sample set. + Unembedded dimod sample set. """ - + bqm = create_bqm(num_spins=num_spins, coupling_strength=J) if '"type": "SampleSet"' in job_id: # See modifications to submit_job sampleset = dimod.SampleSet.from_serializable(json.loads(job_id)) else: sampleset = client.retrieve_answer(job_id).sampleset - - return unembed_sampleset(sampleset, embedding, bqm) + + return unembed_sampleset(sampleset, embedding, bqm) + def json_to_dict(emb_json): - """Retrieve an unembedded sampleset for a given job ID. + """Retrieve an unembedded sampleset for a given job ID. 
Args: - emb_json: JSON-formatted dict of embeddings, as - {'spins': {'node1': [qubit1], 'node2': [qubit2], ...}, ...}. - + emb_json: JSON-formatted dict of embeddings, as + {'spins': {'node1': [qubit1], 'node2': [qubit2], ...}, ...}. + Returns: Embedding in standard dict format. """ - return {int(key): {int(node): qubits for node, qubits in emb.items()} - for key, emb in emb_json.items()} + return { + int(key): {int(node): qubits for node, qubits in emb.items()} + for key, emb in emb_json.items() + } + -def fitted_function(xdata, ydata, method=('polynomial', 1)): +def fitted_function(xdata, ydata, method=("polynomial", 1)): """ """ - if type(method) is tuple and method[0] == 'polynomial': + if type(method) is tuple and method[0] == "polynomial": coeffs = Polynomial.fit(xdata, ydata, deg=method[1]).convert().coef + def y_func_x(x): return np.polyval(coeffs, x) - elif method == 'pure_quadratic': + + elif method == "pure_quadratic": # y = a + b x**2 coeffs = Polynomial.fit(xdata**2, ydata, deg=1).convert().coef + def y_func_x(x): return np.polyval(coeffs, x**2) - elif method == 'mixture_of_exponentials': + + elif method == "mixture_of_exponentials": # The no thermal noise case has two sources. # Kink-probability(T=0, t) ~ A t^{-1/2} ~ (1 - tanh(beta_eff))/2 # Kink-probability(T, Inf) ~ (1 - tanh(beta J))/2 @@ -170,22 +194,26 @@ def mixture_of_exponentials(x, p_0, p_1, p_2): # Strictly positive form. # To do: Change to force saturation. Large x should go sigmoidally # towards 0.5 - return np.exp(p_2)/2*(1 + np.exp(p_1 + np.exp(p_0)*x)) + return np.exp(p_2) / 2 * (1 + np.exp(p_1 + np.exp(p_0) * x)) + # Take p_1 = 1; p_2 = min(x); take max(y) occurs at max(x) maxy = np.max(ydata) maxx = np.max(xdata) miny = np.min(ydata) - p0 = [np.log(np.log(2*maxy/miny - 1)/(maxx-1)), 0, np.log(miny)] + p0 = [np.log(np.log(2 * maxy / miny - 1) / (maxx - 1)), 0, np.log(miny)] try: p, _ = scipy.optimize.curve_fit( - f=mixture_of_exponentials, xdata=xdata, ydata=ydata, p0=p0) + f=mixture_of_exponentials, xdata=xdata, ydata=ydata, p0=p0 + ) except: - warnings.warn('Should modify to check exception is no solution') + warnings.warn("Should modify to check exception is no solution") p = p0 + def y_func_x(x): return mixture_of_exponentials(x, *p) - elif method == 'sigmoidal_crossover': - # Kink-probability(T, t) ~ sigmoidal crossover. + + elif method == "sigmoidal_crossover": + # Kink-probability(T, t) ~ sigmoidal crossover. # Better? Requires atleast 4 points! Not tested. # Sigmoidal cross-over between two positive limits. # This type of function is quite difficult to fit. @@ -193,7 +221,10 @@ def sigmoidal_crossover(x, p_0, p_1, p_2, p_3): # Strictly positive form. # To do: Change to force saturation. Large x should go sigmoidally # towards 0.5 - return np.exp(p_3)*(1 + np.exp(p_2)*np.tanh(np.exp(p_1)*(x - np.exp(p_0)))) + return np.exp(p_3) * ( + 1 + np.exp(p_2) * np.tanh(np.exp(p_1) * (x - np.exp(p_0))) + ) + # Small lp1 << lp0, and lp0= (maxx-minxx)/2; We can linearize: # lp3*(1 + lp2( lp1 x - lp0)) = lp0*lp2*lp3 + lp1*lp2*lp3 x # WIP # lp2 = lp3: equal parts constant and crossover @@ -202,18 +233,26 @@ def sigmoidal_crossover(x, p_0, p_1, p_2, p_3): maxy = np.max(ydata) maxx = np.max(xdata) miny = np.min(ydata) - lp0 = (maxx+1)/2 - lp1 = lp0/10 # Should really choose rate 1/10 to satisfy final condition. - lp2lp3 = miny/lp0 - p0 = (np.log(lp0), np.log(lp1), np.log(np.sqrt(lp2lp3)), np.log(np.sqrt(lp2lp3))) + lp0 = (maxx + 1) / 2 + lp1 = lp0 / 10 # Should really choose rate 1/10 to satisfy final condition. 
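        # Heuristic starting guess (a rough sketch, not a fitted value): lp2lp3 below
        # estimates the product exp(p_2) * exp(p_3); p0 then splits it evenly between
        # p_2 and p_3 via the two np.log(np.sqrt(lp2lp3)) entries, matching the
        # "lp2 = lp3: equal parts constant and crossover" note above.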
+ lp2lp3 = miny / lp0 + p0 = ( + np.log(lp0), + np.log(lp1), + np.log(np.sqrt(lp2lp3)), + np.log(np.sqrt(lp2lp3)), + ) try: p, _ = scipy.optimize.curve_fit( - f=sigmoidal_crossover, xdata=xdata, ydata=ydata, p0=p0) + f=sigmoidal_crossover, xdata=xdata, ydata=ydata, p0=p0 + ) except: - warnings.warn('Should modify to check exception is no solution') + warnings.warn("Should modify to check exception is no solution") p = p0 + def y_func_x(x): return sigmoidal_crossover(x, *p) + else: - raise ValueError('Unknown method') - return y_func_x \ No newline at end of file + raise ValueError("Unknown method") + return y_func_x From 757d87db09f57312c17924e4fdbb1f921eab06f0 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Sun, 1 Dec 2024 12:40:32 -0800 Subject: [PATCH 066/170] Used black to reformat code --- MockKibbleZurekSampler.py | 46 +-- app.py | 706 +++++++++++++++++++++++--------------- 2 files changed, 455 insertions(+), 297 deletions(-) diff --git a/MockKibbleZurekSampler.py b/MockKibbleZurekSampler.py index 05cab89..e4fd1af 100644 --- a/MockKibbleZurekSampler.py +++ b/MockKibbleZurekSampler.py @@ -7,49 +7,51 @@ class MockKibbleZurekSampler(MockDWaveSampler): - """ Perform a quench (fixed beta = 1/temperature) evolution. - + """Perform a quench (fixed beta = 1/temperature) evolution. + The MockSampler is configured to use standard Markov Chain Monte Carlo with Gibbs acceptance criteria from a random initial condition. Defects diffuse (power law 1/2) and eliminate, but are also created by thermal excitations. We will seek to take a limit of high - coupling strength where thermal excitations are removed, leaving only the + coupling strength where thermal excitations are removed, leaving only the diffusion. """ - + def __init__( self, - topology_type='pegasus', + topology_type="pegasus", topology_shape=[16], - kink_density_limit_absJ1=0.04 + kink_density_limit_absJ1=0.04, ): substitute_sampler = SimulatedAnnealingSampler() # At equilibrium = (t^{L-1} + t)/(1 + t^L), t = -tanh(beta J) # At large time (equilibrium) for long chains - # lessthansimilarto t, + # lessthansimilarto t, # At J=-1 we want a kink density to bottom out. Therefore: - beta = np.atanh(1 - 2*kink_density_limit_absJ1) - substitute_kwargs = {'beta_range': [beta, beta], # Quench - 'randomize_order': True, - 'num_reads': 1000, - 'proposal_acceptance_criteria': 'Gibbs'} + beta = np.atanh(1 - 2 * kink_density_limit_absJ1) + substitute_kwargs = { + "beta_range": [beta, beta], # Quench + "randomize_order": True, + "num_reads": 1000, + "proposal_acceptance_criteria": "Gibbs", + } super().__init__( topology_type=topology_type, topology_shape=topology_shape, substitute_sampler=substitute_sampler, substitute_kwargs=substitute_kwargs, ) - self.sampler_type = 'mock' - self.mocked_parameters.add('annealing_time') - self.mocked_parameters.add('num_sweeps') - self.parameters.update({'num_sweeps': []}) - + self.sampler_type = "mock" + self.mocked_parameters.add("annealing_time") + self.mocked_parameters.add("num_sweeps") + self.parameters.update({"num_sweeps": []}) + def sample(self, bqm, **kwargs): # TO DO: corrupt bqm with noise proportional to annealing_time - _bqm = bqm.change_vartype('SPIN', inplace=False) - + _bqm = bqm.change_vartype("SPIN", inplace=False) + # Extract annealing_time from kwargs (if provided) - annealing_time = kwargs.pop('annealing_time', 20) # 20us default. + annealing_time = kwargs.pop("annealing_time", 20) # 20us default. 
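        # Illustrative arithmetic only: with the 20 us default above, the line below
        # gives num_sweeps = int(20 * 3000) = 60000 Monte Carlo sweeps.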
num_sweeps = int(annealing_time * 3000) # 3000 sweeps per microsecond # Extract flux biases from kwargs (if provided) # flux_biases = kwargs.pop('flux_biases', {}) @@ -57,10 +59,10 @@ def sample(self, bqm, **kwargs): # for v in _bqm.variables: # bias = _bqm.get_linear(v) # _bqm.set_linear(v, bias + flux_to_h_factor * flux_biases[v]) - + ss = super().sample(bqm=_bqm, num_sweeps=num_sweeps, **kwargs) - ss.change_vartype(bqm.vartype) # Not required (but safe) this case ... + ss.change_vartype(bqm.vartype) # Not required (but safe) this case ... ss = SampleSet.from_samples_bqm(ss, bqm) diff --git a/app.py b/app.py index 0411d44..30bfb99 100644 --- a/app.py +++ b/app.py @@ -45,191 +45,220 @@ # global variable for a default J value J_baseline = -1.8 -# Initialize: available QPUs, initial progress-bar status +# Initialize: available QPUs, initial progress-bar status try: - client = Client.from_config(client='qpu') - qpus = {qpu.name: qpu for qpu in client.get_solvers(fast_anneal_time_range__covers=[0.005, 0.1])} + client = Client.from_config(client="qpu") + qpus = { + qpu.name: qpu + for qpu in client.get_solvers(fast_anneal_time_range__covers=[0.005, 0.1]) + } if len(qpus) < 1: - raise Exception - init_job_status = 'READY' + raise Exception + init_job_status = "READY" except Exception: qpus = {} client = None - init_job_status = 'NO SOLVER' -if os.getenv('ZNE') == "YES": - qpus['mock_dwave_solver'] = MockKibbleZurekSampler(topology_type='pegasus', topology_shape=[16]) # Change sampler to mock - init_job_status = 'READY' + init_job_status = "NO SOLVER" +if os.getenv("ZNE") == "YES": + qpus["mock_dwave_solver"] = MockKibbleZurekSampler( + topology_type="pegasus", topology_shape=[16] + ) # Change sampler to mock + init_job_status = "READY" if not client: - client = 'dummy' + client = "dummy" # Dashboard-organization section -app.layout = dbc.Container([ - dbc.Row([ # Top: logo - dbc.Col([ - html.Img( - src='assets/dwave_logo.png', - height='25px', - style={'textAlign': 'left', 'margin': '10px 0px 15px 0px'} - ) - ], - width=3, - ) - ]), - dbc.Row([ - dbc.Col( # Left: control panel - [ - control_card( - solvers=qpus, - init_job_status=init_job_status - ), - *dbc_modal('modal_solver'), - *[dbc.Tooltip( - message, target=target, id=f'tooltip_{target}', style = dict()) - for target, message in tool_tips.items()] - ], - width=4, - style={'minWidth': "30rem"}, +app.layout = dbc.Container( + [ + dbc.Row( + [ # Top: logo + dbc.Col( + [ + html.Img( + src="assets/dwave_logo.png", + height="25px", + style={"textAlign": "left", "margin": "10px 0px 15px 0px"}, + ) + ], + width=3, + ) + ] ), - dbc.Col( # Right: display area - graphs_card(), - width=8, - style={'minWidth': "60rem"}, + dbc.Row( + [ + dbc.Col( # Left: control panel + [ + control_card(solvers=qpus, init_job_status=init_job_status), + *dbc_modal("modal_solver"), + *[ + dbc.Tooltip( + message, + target=target, + id=f"tooltip_{target}", + style=dict(), + ) + for target, message in tool_tips.items() + ], + ], + width=4, + style={"minWidth": "30rem"}, + ), + dbc.Col( # Right: display area + graphs_card(), + width=8, + style={"minWidth": "60rem"}, + ), + ] ), - ]), - # store coupling data points - dcc.Store(id='coupling_data', data={}), - # store zero noise extrapolation - dcc.Store(id='zne_estimates', data={}), -], + # store coupling data points + dcc.Store(id="coupling_data", data={}), + # store zero noise extrapolation + dcc.Store(id="zne_estimates", data={}), + ], fluid=True, ) server = app.server -app.config['suppress_callback_exceptions'] = True 
+app.config["suppress_callback_exceptions"] = True # Callbacks Section + @app.callback( - Output('solver_modal', 'is_open'), - Input('btn_simulate', 'n_clicks'),) + Output("solver_modal", "is_open"), + Input("btn_simulate", "n_clicks"), +) def alert_no_solver(dummy): """Notify if no quantum computer is accessible.""" - trigger_id = dash.callback_context.triggered[0]['prop_id'].split('.')[0] + trigger_id = dash.callback_context.triggered[0]["prop_id"].split(".")[0] - if trigger_id == 'btn_simulate': + if trigger_id == "btn_simulate": if not client: return True return False + @app.callback( - Output('anneal_duration', 'disabled'), - Output('coupling_strength', 'disabled'), - Output('spins', 'options'), - Output('qpu_selection', 'disabled'), - Input('job_submit_state', 'children'), - State('spins', 'options')) -def disable_buttons(job_submit_state, spins_options): + Output("anneal_duration", "disabled"), + Output("coupling_strength", "disabled"), + Output("spins", "options"), + Output("qpu_selection", "disabled"), + Input("job_submit_state", "children"), + State("spins", "options"), +) +def disable_buttons(job_submit_state, spins_options): """Disable user input during job submissions.""" - trigger_id = dash.callback_context.triggered[0]['prop_id'].split('.')[0] + trigger_id = dash.callback_context.triggered[0]["prop_id"].split(".")[0] - if trigger_id !='job_submit_state': + if trigger_id != "job_submit_state": return dash.no_update, dash.no_update, dash.no_update, dash.no_update - if job_submit_state in ['EMBEDDING', 'SUBMITTED', 'PENDING', 'IN_PROGRESS']: - - for inx, _ in enumerate(spins_options): - - spins_options[inx]['disabled'] = True - - return True, True, spins_options, True + if job_submit_state in ["EMBEDDING", "SUBMITTED", "PENDING", "IN_PROGRESS"]: - elif job_submit_state in ['COMPLETED', 'CANCELLED', 'FAILED']: + for inx, _ in enumerate(spins_options): + + spins_options[inx]["disabled"] = True + + return True, True, spins_options, True + + elif job_submit_state in ["COMPLETED", "CANCELLED", "FAILED"]: + + for inx, _ in enumerate(spins_options): + spins_options[inx]["disabled"] = False - for inx, _ in enumerate(spins_options): - spins_options[inx]['disabled'] = False - return False, False, spins_options, False else: return dash.no_update, dash.no_update, dash.no_update, dash.no_update + @app.callback( - Output('quench_schedule_filename', 'children'), - Output('quench_schedule_filename', 'style'), - Input('qpu_selection', 'value'),) + Output("quench_schedule_filename", "children"), + Output("quench_schedule_filename", "style"), + Input("qpu_selection", "value"), +) def set_schedule(qpu_name): """Set the schedule for the selected QPU.""" - trigger_id = dash.callback_context.triggered[0]['prop_id'].split('.')[0] + trigger_id = dash.callback_context.triggered[0]["prop_id"].split(".")[0] + + schedule_filename = "FALLBACK_SCHEDULE.csv" + schedule_filename_style = {"color": "red", "fontSize": 12} + + if trigger_id == "qpu_selection": - schedule_filename = 'FALLBACK_SCHEDULE.csv' - schedule_filename_style = {'color': 'red', 'fontSize': 12} - - if trigger_id == 'qpu_selection': + for filename in [ + file for file in os.listdir("helpers") if "schedule.csv" in file.lower() + ]: + + if qpu_name.split(".")[0] in filename: # Accepts & reddens older versions - for filename in [file for file in os.listdir('helpers') if - 'schedule.csv' in file.lower()]: - - if qpu_name.split('.')[0] in filename: # Accepts & reddens older versions - schedule_filename = filename if qpu_name in filename: - 
schedule_filename_style = {'color': 'white', 'fontSize': 12} + schedule_filename_style = {"color": "white", "fontSize": 12} return schedule_filename, schedule_filename_style + @app.callback( - Output('embeddings_cached', 'data'), - Output('embedding_is_cached', 'value'), - Input('qpu_selection', 'value'), - Input('embeddings_found', 'data'), - State('embeddings_cached', 'data'), - State('spins', 'value')) + Output("embeddings_cached", "data"), + Output("embedding_is_cached", "value"), + Input("qpu_selection", "value"), + Input("embeddings_found", "data"), + State("embeddings_cached", "data"), + State("spins", "value"), +) def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): """Cache embeddings for the selected QPU.""" - trigger_id = dash.callback_context.triggered[0]['prop_id'].split('.')[0] + trigger_id = dash.callback_context.triggered[0]["prop_id"].split(".")[0] + + if trigger_id == "qpu_selection": - if trigger_id == 'qpu_selection': + if qpu_name == "mock_dwave_solver": - if qpu_name == 'mock_dwave_solver': - embeddings_cached = {} L = spins - edges = [(i, (i + 1)%L) for i in range(L)] - emb = find_subgraph(target=qpus['mock_dwave_solver'].to_networkx_graph(), source=nx.from_edgelist(edges)) + edges = [(i, (i + 1) % L) for i in range(L)] + emb = find_subgraph( + target=qpus["mock_dwave_solver"].to_networkx_graph(), + source=nx.from_edgelist(edges), + ) emb = {u: [v] for u, v in emb.items()} # Wrap target nodes in lists embeddings_cached[spins] = emb # Store embedding in cache return embeddings_cached, [spins] embeddings_cached = {} # Wipe out previous QPU's embeddings - for filename in [file for file in os.listdir('helpers') if - '.json' in file and 'emb_' in file]: - + for filename in [ + file for file in os.listdir("helpers") if ".json" in file and "emb_" in file + ]: + # if qpu_name == 'mock_dwave_solver' and 'Advantage_system6.4' in filename: # with open(f'helpers/{filename}', 'r') as fp: # embeddings_cached = json.load(fp) # print(filename) # embeddings_cached = json_to_dict(embeddings_cached) - - if qpu_name.split('.')[0] in filename: - with open(f'helpers/{filename}', 'r') as fp: + if qpu_name.split(".")[0] in filename: + + with open(f"helpers/{filename}", "r") as fp: embeddings_cached = json.load(fp) embeddings_cached = json_to_dict(embeddings_cached) - + # Validate that loaded embeddings' edges are still available on the selected QPU for length in list(embeddings_cached.keys()): - - source_graph = dimod.to_networkx_graph(create_bqm(num_spins=length)).edges + + source_graph = dimod.to_networkx_graph( + create_bqm(num_spins=length) + ).edges target_graph = qpus[qpu_name].edges emb = embeddings_cached[length] @@ -237,9 +266,11 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): del embeddings_cached[length] - if trigger_id == 'embeddings_found': + if trigger_id == "embeddings_found": - if not isinstance(embeddings_found, str): # embeddings_found != 'needed' or 'not found' + if not isinstance( + embeddings_found, str + ): # embeddings_found != 'needed' or 'not found' embeddings_cached = json_to_dict(embeddings_cached) embeddings_found = json_to_dict(embeddings_found) @@ -250,57 +281,90 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): return dash.no_update, dash.no_update return embeddings_cached, list(embeddings_cached.keys()) - + + @app.callback( - Output('sample_vs_theory', 'figure'), - Output('coupling_data', 'data'), # store data using dcc - Output('zne_estimates', 'data'), # update zne_estimates 
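    # Data-flow sketch (summarizing the callback body below): coupling_data maps each
    # anneal time to a list of {kappa, kink_density, coupling_strength} points, with
    # kappa = abs(J_baseline / J) (see calc_kappa); zne_estimates maps each anneal
    # time to the fitted kink density extrapolated to kappa = 0.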
- Input('btn_reset', 'n_clicks'), - Input('qpu_selection', 'value'), - Input('kz_graph_display', 'value'), - State('coupling_strength', 'value'), # previously input - Input('quench_schedule_filename', 'children'), - Input('job_submit_state', 'children'), - Input('job_id', 'children'), - Input('anneal_duration', 'value'), - State('spins', 'value'), - State('embeddings_cached', 'data'), - State('sample_vs_theory', 'figure'), - State('coupling_data', 'data'), # access previously stored data - State('zne_estimates', 'data'), # Access ZNE estimates - ) -def display_graphics_kink_density(dummy, qpu_name, kz_graph_display, J, schedule_filename, \ - job_submit_state, job_id, ta, \ - spins, embeddings_cached, figure, coupling_data, zne_estimates): + Output("sample_vs_theory", "figure"), + Output("coupling_data", "data"), # store data using dcc + Output("zne_estimates", "data"), # update zne_estimates + Input("btn_reset", "n_clicks"), + Input("qpu_selection", "value"), + Input("kz_graph_display", "value"), + State("coupling_strength", "value"), # previously input + Input("quench_schedule_filename", "children"), + Input("job_submit_state", "children"), + Input("job_id", "children"), + Input("anneal_duration", "value"), + State("spins", "value"), + State("embeddings_cached", "data"), + State("sample_vs_theory", "figure"), + State("coupling_data", "data"), # access previously stored data + State("zne_estimates", "data"), # Access ZNE estimates +) +def display_graphics_kink_density( + dummy, + qpu_name, + kz_graph_display, + J, + schedule_filename, + job_submit_state, + job_id, + ta, + spins, + embeddings_cached, + figure, + coupling_data, + zne_estimates, +): """Generate graphics for kink density based on theory and QPU samples.""" - trigger_id = dash.callback_context.triggered[0]['prop_id'].split('.')[0] + trigger_id = dash.callback_context.triggered[0]["prop_id"].split(".")[0] ta_min = 2 ta_max = 350 - if trigger_id == 'btn_reset': + if trigger_id == "btn_reset": coupling_data = {} zne_estimates = {} - fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J_baseline, schedule_filename, coupling_data, zne_estimates) + fig = plot_kink_densities_bg( + kz_graph_display, + [ta_min, ta_max], + J_baseline, + schedule_filename, + coupling_data, + zne_estimates, + ) return fig, coupling_data, zne_estimates - - if trigger_id in ['kz_graph_display', 'coupling_strength', 'quench_schedule_filename'] : - fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J_baseline, schedule_filename, coupling_data, zne_estimates) + if trigger_id in [ + "kz_graph_display", + "coupling_strength", + "quench_schedule_filename", + ]: + + fig = plot_kink_densities_bg( + kz_graph_display, + [ta_min, ta_max], + J_baseline, + schedule_filename, + coupling_data, + zne_estimates, + ) return fig, coupling_data, zne_estimates - - if trigger_id == 'job_submit_state': - if job_submit_state == 'COMPLETED': + if trigger_id == "job_submit_state": + + if job_submit_state == "COMPLETED": embeddings_cached = embeddings_cached = json_to_dict(embeddings_cached) - sampleset_unembedded = get_samples(client, job_id, spins, J, embeddings_cached[spins]) + sampleset_unembedded = get_samples( + client, job_id, spins, J, embeddings_cached[spins] + ) _, kink_density = kink_stats(sampleset_unembedded, J) - + fig = plot_kink_density(kz_graph_display, figure, kink_density, ta, J) # Calculate kappa @@ -310,102 +374,123 @@ def display_graphics_kink_density(dummy, qpu_name, kz_graph_display, J, schedule if ta_str not in coupling_data: 
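                # First sample at this anneal time: start a fresh list of
                # (kappa, kink_density) points for the later kappa -> 0 extrapolation.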
coupling_data[ta_str] = [] # Append the new data point - coupling_data[ta_str].append({'kappa': kappa, 'kink_density': kink_density, 'coupling_strength':J}) + coupling_data[ta_str].append( + {"kappa": kappa, "kink_density": kink_density, "coupling_strength": J} + ) - if kz_graph_display == 'coupling': + if kz_graph_display == "coupling": # Check if more than two data points exist for this anneal_time if len(coupling_data[ta_str]) > 2: # Perform a polynomial fit (e.g., linear) data_points = coupling_data[ta_str] - x = np.array([point['kappa'] for point in data_points]) - y = np.array([point['kink_density'] for point in data_points]) - + x = np.array([point["kappa"] for point in data_points]) + y = np.array([point["kink_density"] for point in data_points]) + # Ensure there are enough unique x values for fitting if len(np.unique(x)) > 1: # Fit a 1st degree polynomial (linear fit) - if qpu_name == 'mock_dwave_solver': - warnings.warn('WIP: Execute for mock_sampler only') - y_func_x = fitted_function(x, y, method='mixture_of_exponentials') + if qpu_name == "mock_dwave_solver": + warnings.warn("WIP: Execute for mock_sampler only") + y_func_x = fitted_function( + x, y, method="mixture_of_exponentials" + ) else: - warnings.warn('WIP: Execute for QPU only') + warnings.warn("WIP: Execute for QPU only") # Pure quadratic # y = a + b x^2 - y_func_x = fitted_function(x, y, method='pure_quadratic') + y_func_x = fitted_function(x, y, method="pure_quadratic") zne_estimates[ta_str] = y_func_x(0) # Generate fit curve points x_fit = np.linspace(0, max(x), 100) y_fit = y_func_x(x_fit) - + # Remove existing fitting curve traces to prevent duplication - fig.data = [trace for trace in fig.data if trace.name != 'Fitting Curve'] + fig.data = [ + trace for trace in fig.data if trace.name != "Fitting Curve" + ] # Remove existing ZNE Estimate traces to prevent duplication - fig.data = [trace for trace in fig.data if trace.name != 'ZNE Estimate'] - + fig.data = [ + trace for trace in fig.data if trace.name != "ZNE Estimate" + ] + # Add the new fitting curve fig.add_trace( - go.Scatter( + go.Scatter( x=x_fit, y=y_fit, - mode='lines', - name='Fitting Curve', - line=dict(color='green', dash='dash'), + mode="lines", + name="Fitting Curve", + line=dict(color="green", dash="dash"), showlegend=True, - xaxis='x3', - yaxis='y1', + xaxis="x3", + yaxis="y1", ) ) - + for ta_str, a in zne_estimates.items(): - #print(f'anneal itme: {ta_str}, a: {a}') + # print(f'anneal itme: {ta_str}, a: {a}') fig.add_trace( # Add the ZNE point at kappa=0 go.Scatter( x=[0], y=[a], - mode='markers', - name='ZNE Estimate', - marker=dict(size=12, color='purple', symbol='diamond'), + mode="markers", + name="ZNE Estimate", + marker=dict( + size=12, color="purple", symbol="diamond" + ), showlegend=False, - xaxis='x3', - yaxis='y1', + xaxis="x3", + yaxis="y1", ) ) return fig, coupling_data, zne_estimates - + else: return dash.no_update - + # use global J value - fig = plot_kink_densities_bg(kz_graph_display, [ta_min, ta_max], J_baseline, schedule_filename, coupling_data, zne_estimates) + fig = plot_kink_densities_bg( + kz_graph_display, + [ta_min, ta_max], + J_baseline, + schedule_filename, + coupling_data, + zne_estimates, + ) return fig, coupling_data, zne_estimates + @app.callback( - Output('spin_orientation', 'figure'), - Input('spins', 'value'), - Input('job_submit_state', 'children'), - State('job_id', 'children'), - State('coupling_strength', 'value'), - State('embeddings_cached', 'data'),) + Output("spin_orientation", "figure"), + Input("spins", 
"value"), + Input("job_submit_state", "children"), + State("job_id", "children"), + State("coupling_strength", "value"), + State("embeddings_cached", "data"), +) def display_graphics_spin_ring(spins, job_submit_state, job_id, J, embeddings_cached): """Generate graphics for spin-ring display.""" - trigger_id = dash.callback_context.triggered[0]['prop_id'].split('.')[0] - - if trigger_id == 'job_submit_state': - - if job_submit_state == 'COMPLETED': + trigger_id = dash.callback_context.triggered[0]["prop_id"].split(".")[0] + + if trigger_id == "job_submit_state": + + if job_submit_state == "COMPLETED": embeddings_cached = embeddings_cached = json_to_dict(embeddings_cached) - sampleset_unembedded = get_samples(client, job_id, spins, J, embeddings_cached[spins]) + sampleset_unembedded = get_samples( + client, job_id, spins, J, embeddings_cached[spins] + ) kinks_per_sample, kink_density = kink_stats(sampleset_unembedded, J) best_indx = np.abs(kinks_per_sample - kink_density).argmin() best_sample = sampleset_unembedded.record.sample[best_indx] fig = plot_spin_orientation(num_spins=spins, sample=best_sample) return fig - + else: return dash.no_update @@ -413,35 +498,46 @@ def display_graphics_spin_ring(spins, job_submit_state, job_id, J, embeddings_ca fig = plot_spin_orientation(num_spins=spins, sample=None) return fig + @app.callback( - Output('job_id', 'children'), - Input('job_submit_time', 'children'), - State('qpu_selection', 'value'), - State('spins', 'value'), - State('coupling_strength', 'value'), - State('anneal_duration', 'value'), - State('embeddings_cached', 'data'),) + Output("job_id", "children"), + Input("job_submit_time", "children"), + State("qpu_selection", "value"), + State("spins", "value"), + State("coupling_strength", "value"), + State("anneal_duration", "value"), + State("embeddings_cached", "data"), +) def submit_job(job_submit_time, qpu_name, spins, J, ta_ns, embeddings_cached): """Submit job and provide job ID.""" - trigger_id = dash.callback_context.triggered[0]['prop_id'].split('.')[0] + trigger_id = dash.callback_context.triggered[0]["prop_id"].split(".")[0] - if trigger_id =='job_submit_time': + if trigger_id == "job_submit_time": solver = qpus[qpu_name] bqm = create_bqm(num_spins=spins, coupling_strength=J) - if qpu_name == 'mock_dwave_solver': + if qpu_name == "mock_dwave_solver": embedding = embeddings_cached emb = find_subgraph( - target=qpus['mock_dwave_solver'].to_networkx_graph(), - source=dimod.to_networkx_graph(bqm)) + target=qpus["mock_dwave_solver"].to_networkx_graph(), + source=dimod.to_networkx_graph(bqm), + ) emb = {u: [v] for u, v in emb.items()} - bqm_embedded = embed_bqm(bqm, emb, MockKibbleZurekSampler(topology_type='pegasus', topology_shape=[16]).adjacency) + bqm_embedded = embed_bqm( + bqm, + emb, + MockKibbleZurekSampler( + topology_type="pegasus", topology_shape=[16] + ).adjacency, + ) # Calculate annealing_time in microseconds as per your setup annealing_time = ta_ns / 1000 # ta_ns is in nanoseconds - sampleset = qpus['mock_dwave_solver'].sample(bqm_embedded, annealing_time=annealing_time) + sampleset = qpus["mock_dwave_solver"].sample( + bqm_embedded, annealing_time=annealing_time + ) return json.dumps(sampleset.to_serializable()) else: @@ -449,146 +545,206 @@ def submit_job(job_submit_time, qpu_name, spins, J, ta_ns, embeddings_cached): embeddings_cached = json_to_dict(embeddings_cached) embedding = embeddings_cached[spins] - bqm_embedded = embed_bqm(bqm, embedding, DWaveSampler(solver=solver.name).adjacency) + bqm_embedded = embed_bqm( + 
bqm, embedding, DWaveSampler(solver=solver.name).adjacency + ) computation = solver.sample_bqm( bqm=bqm_embedded, fast_anneal=True, - annealing_time=lmbda(J)*ta_ns, # Changed to lambda calculations - auto_scale=False, - answer_mode='raw', # Easier than accounting for num_occurrences - num_reads=100, - label=f'Examples - Kibble-Zurek Simulation, submitted: {job_submit_time}',) + annealing_time=lmbda(J) * ta_ns, # Changed to lambda calculations + auto_scale=False, + answer_mode="raw", # Easier than accounting for num_occurrences + num_reads=100, + label=f"Examples - Kibble-Zurek Simulation, submitted: {job_submit_time}", + ) return computation.wait_id() return dash.no_update + @app.callback( - Output('btn_simulate', 'disabled'), - Output('wd_job', 'disabled'), - Output('wd_job', 'interval'), - Output('wd_job', 'n_intervals'), - Output('job_submit_state', 'children'), - Output('job_submit_time', 'children'), - Output('embeddings_found', 'data'), - Input('btn_simulate', 'n_clicks'), - Input('wd_job', 'n_intervals'), - State('job_id', 'children'), - State('job_submit_state', 'children'), - State('job_submit_time', 'children'), - State('embedding_is_cached', 'value'), - State('spins', 'value'), - State('qpu_selection', 'value'), - State('embeddings_found', 'data'),) -def simulate(dummy1, dummy2, job_id, job_submit_state, job_submit_time, \ - cached_embedding_lengths, spins, qpu_name, embeddings_found): + Output("btn_simulate", "disabled"), + Output("wd_job", "disabled"), + Output("wd_job", "interval"), + Output("wd_job", "n_intervals"), + Output("job_submit_state", "children"), + Output("job_submit_time", "children"), + Output("embeddings_found", "data"), + Input("btn_simulate", "n_clicks"), + Input("wd_job", "n_intervals"), + State("job_id", "children"), + State("job_submit_state", "children"), + State("job_submit_time", "children"), + State("embedding_is_cached", "value"), + State("spins", "value"), + State("qpu_selection", "value"), + State("embeddings_found", "data"), +) +def simulate( + dummy1, + dummy2, + job_id, + job_submit_state, + job_submit_time, + cached_embedding_lengths, + spins, + qpu_name, + embeddings_found, +): """Manage simulation: embedding, job submission.""" - trigger_id = dash.callback_context.triggered[0]['prop_id'].split('.')[0] - - if not any(trigger_id == input for input in ['btn_simulate', 'wd_job']): - return dash.no_update, dash.no_update, dash.no_update, \ - dash.no_update, dash.no_update, dash.no_update, dash.no_update + trigger_id = dash.callback_context.triggered[0]["prop_id"].split(".")[0] + + if not any(trigger_id == input for input in ["btn_simulate", "wd_job"]): + return ( + dash.no_update, + dash.no_update, + dash.no_update, + dash.no_update, + dash.no_update, + dash.no_update, + dash.no_update, + ) - if trigger_id == 'btn_simulate': + if trigger_id == "btn_simulate": - if spins in cached_embedding_lengths or qpu_name == 'mock_dwave_solver': + if spins in cached_embedding_lengths or qpu_name == "mock_dwave_solver": - submit_time = datetime.datetime.now().strftime('%c') - if qpu_name == 'mock_dwave_solver': # Hack to fix switch from SA to QPU - submit_time = 'SA' - job_submit_state = 'SUBMITTED' + submit_time = datetime.datetime.now().strftime("%c") + if qpu_name == "mock_dwave_solver": # Hack to fix switch from SA to QPU + submit_time = "SA" + job_submit_state = "SUBMITTED" embedding = dash.no_update else: submit_time = dash.no_update - job_submit_state = 'EMBEDDING' - embedding = 'needed' + job_submit_state = "EMBEDDING" + embedding = "needed" disable_btn 
= True disable_watchdog = False - return disable_btn, disable_watchdog, 0.5*1000, 0, job_submit_state, submit_time, embedding - - if job_submit_state == 'EMBEDDING': + return ( + disable_btn, + disable_watchdog, + 0.5 * 1000, + 0, + job_submit_state, + submit_time, + embedding, + ) + + if job_submit_state == "EMBEDDING": submit_time = dash.no_update embedding = dash.no_update - if embeddings_found == 'needed': + if embeddings_found == "needed": try: embedding = find_one_to_one_embedding(spins, qpus[qpu_name].edges) if embedding: - job_submit_state = 'EMBEDDING' # Stay another WD to allow caching the embedding + job_submit_state = ( + "EMBEDDING" # Stay another WD to allow caching the embedding + ) embedding = {spins: embedding} else: - job_submit_state = 'FAILED' - embedding = 'not found' + job_submit_state = "FAILED" + embedding = "not found" except Exception: - job_submit_state = 'FAILED' - embedding = 'not found' + job_submit_state = "FAILED" + embedding = "not found" - else: # Found embedding last WD, so is cached, so now can submit job - - submit_time = datetime.datetime.now().strftime('%c') - job_submit_state = 'SUBMITTED' + else: # Found embedding last WD, so is cached, so now can submit job - return True, False, 0.2*1000, 0, job_submit_state, submit_time, embedding + submit_time = datetime.datetime.now().strftime("%c") + job_submit_state = "SUBMITTED" - if any(job_submit_state == status for status in - ['SUBMITTED', 'PENDING', 'IN_PROGRESS']): + return True, False, 0.2 * 1000, 0, job_submit_state, submit_time, embedding + + if any( + job_submit_state == status for status in ["SUBMITTED", "PENDING", "IN_PROGRESS"] + ): job_submit_state = get_job_status(client, job_id, job_submit_time) if not job_submit_state: - job_submit_state = 'SUBMITTED' - wd_time = 0.2*1000 + job_submit_state = "SUBMITTED" + wd_time = 0.2 * 1000 else: - wd_time = 1*1000 + wd_time = 1 * 1000 return True, False, wd_time, 0, job_submit_state, dash.no_update, dash.no_update - if any(job_submit_state == status for status in ['COMPLETED', 'CANCELLED', 'FAILED']): + if any( + job_submit_state == status for status in ["COMPLETED", "CANCELLED", "FAILED"] + ): disable_btn = False disable_watchdog = True - return disable_btn, disable_watchdog, 0.1*1000, 0, dash.no_update, dash.no_update, dash.no_update + return ( + disable_btn, + disable_watchdog, + 0.1 * 1000, + 0, + dash.no_update, + dash.no_update, + dash.no_update, + ) + + else: # Exception state: should only ever happen in testing + return False, True, 0, 0, "ERROR", dash.no_update, dash.no_update - else: # Exception state: should only ever happen in testing - return False, True, 0, 0, 'ERROR', dash.no_update, dash.no_update @app.callback( - Output('bar_job_status', 'value'), - Output('bar_job_status', 'color'), - Input('job_submit_state', 'children'),) + Output("bar_job_status", "value"), + Output("bar_job_status", "color"), + Input("job_submit_state", "children"), +) def set_progress_bar(job_submit_state): """Update progress bar for job submissions.""" - trigger_id = dash.callback_context.triggered[0]['prop_id'].split('.')[0] + trigger_id = dash.callback_context.triggered[0]["prop_id"].split(".")[0] + + if trigger_id == "job_submit_state": + + return ( + job_bar_display[job_submit_state][0], + job_bar_display[job_submit_state][1], + ) + + return job_bar_display["READY"][0], job_bar_display["READY"][1] - if trigger_id == 'job_submit_state': - - return job_bar_display[job_submit_state][0], job_bar_display[job_submit_state][1] - - return job_bar_display['READY'][0], 
job_bar_display['READY'][1] @app.callback( - *[Output(f'tooltip_{target}', component_property='style') for target in tool_tips.keys()], - Input('tooltips_show', 'value'),) + *[ + Output(f"tooltip_{target}", component_property="style") + for target in tool_tips.keys() + ], + Input("tooltips_show", "value"), +) def activate_tooltips(tooltips_show): """Activate or hide tooltips.""" trigger = dash.callback_context.triggered - trigger_id = trigger[0]['prop_id'].split('.')[0] - - if trigger_id == 'tooltips_show': - if tooltips_show == 'off': - return dict(display='none'), dict(display='none'), dict(display='none'), \ -dict(display='none'), dict(display='none'), dict(display='none'), \ -dict(display='none'), dict(display='none'), dict(display='none'), + trigger_id = trigger[0]["prop_id"].split(".")[0] + + if trigger_id == "tooltips_show": + if tooltips_show == "off": + return ( + dict(display="none"), + dict(display="none"), + dict(display="none"), + dict(display="none"), + dict(display="none"), + dict(display="none"), + dict(display="none"), + dict(display="none"), + dict(display="none"), + ) return dict(), dict(), dict(), dict(), dict(), dict(), dict(), dict(), dict() From 28508e3a47cc373ca7c9c498e87d69ceb2885b9b Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Sun, 1 Dec 2024 12:43:24 -0800 Subject: [PATCH 067/170] Used black to auto format files --- app.py | 19 +++++++++++-------- helpers/qa.py | 25 +++++++++++++++++-------- 2 files changed, 28 insertions(+), 16 deletions(-) diff --git a/app.py b/app.py index 75bf0e8..a9c709b 100644 --- a/app.py +++ b/app.py @@ -389,12 +389,14 @@ def display_graphics_kink_density( # Ensure there are enough unique x values for fitting if len(np.unique(x)) > 1: # Fit a 1st degree polynomial (linear fit) - if qpu_name == 'mock_dwave_solver': + if qpu_name == "mock_dwave_solver": # Fancy non-linear function - y_func_x = fitted_function(x, y, method='mixture_of_exponentials') + y_func_x = fitted_function( + x, y, method="mixture_of_exponentials" + ) else: # Pure quadratic (see paper) # y = a + b x^2 - y_func_x = fitted_function(x, y, method='pure_quadratic') + y_func_x = fitted_function(x, y, method="pure_quadratic") zne_estimates[ta_str] = y_func_x(0) # Generate fit curve points @@ -548,11 +550,12 @@ def submit_job(job_submit_time, qpu_name, spins, J, ta_ns, embeddings_cached): computation = solver.sample_bqm( bqm=bqm_embedded, fast_anneal=True, - annealing_time=calc_lambda(J, J_baseline)*(ta_ns/1000), - auto_scale=False, - answer_mode='raw', # Easier than accounting for num_occurrences - num_reads=100, - label=f'Examples - Kibble-Zurek Simulation, submitted: {job_submit_time}',) + annealing_time=calc_lambda(J, J_baseline) * (ta_ns / 1000), + auto_scale=False, + answer_mode="raw", # Easier than accounting for num_occurrences + num_reads=100, + label=f"Examples - Kibble-Zurek Simulation, submitted: {job_submit_time}", + ) return computation.wait_id() diff --git a/helpers/qa.py b/helpers/qa.py index b43b25a..0ee52c6 100644 --- a/helpers/qa.py +++ b/helpers/qa.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import json +import json import numpy as np from numpy.polynomial.polynomial import Polynomial import scipy @@ -22,15 +22,22 @@ from dwave.embedding import unembed_sampleset import minorminer -__all__ = ['calc_lambda', 'calc_kappa', 'create_bqm', 'find_one_to_one_embedding', 'get_job_status', 'get_samples', - 'json_to_dict', 'fitted_function'] +__all__ = [ + "calc_lambda", + "calc_kappa", + "create_bqm", + "find_one_to_one_embedding", + "get_job_status", + "get_samples", + "json_to_dict", + "fitted_function", +] def calc_kappa(coupling_strength, J_baseline=-1.8): - """Downgraded energy scale, see paper. + """Downgraded energy scale, see paper.""" + return abs(J_baseline / coupling_strength) - """ - return abs(J_baseline/coupling_strength) def calc_lambda(coupling_strength, J_baseline=-1.8): """Time rescaling factor (relative to J_baseline) @@ -178,13 +185,15 @@ def fitted_function(xdata, ydata, method=("polynomial", 1)): def y_func_x(x): return np.polynomial.polynomial.polyval(x, coeffs) - elif method == 'pure_quadratic': + + elif method == "pure_quadratic": # y = a + b x**2 coeffs = Polynomial.fit(xdata**2, ydata, deg=1).convert().coef def y_func_x(x): return np.polynomial.polynomial.polyval(x**2, coeffs) - elif method == 'mixture_of_exponentials': + + elif method == "mixture_of_exponentials": # The no thermal noise case has two sources. # Kink-probability(T=0, t) ~ A t^{-1/2} ~ (1 - tanh(beta_eff))/2 # Kink-probability(T, Inf) ~ (1 - tanh(beta J))/2 From 57ea824d7593a28386d4acba892aa12c7ef38f47 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Mon, 2 Dec 2024 15:22:39 -0800 Subject: [PATCH 068/170] Added plot from ZNE paper --- assets/ZNE_fig2.png | Bin 0 -> 132353 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 assets/ZNE_fig2.png diff --git a/assets/ZNE_fig2.png b/assets/ZNE_fig2.png new file mode 100644 index 0000000000000000000000000000000000000000..a35303107682bdfd3ba73b17c4f5240519d9d019 GIT binary patch literal 132353 zcmdqJWn7fo`#p?EN{58x(4ErVog&@c9Yd#-Al+R8k`mI5(x9X?NOyPmzehdie9!s4 ze%?HO?$L4Pjvd#&Vy(3&LRnD?<=Km8P*6}PGScFzP*6`@p`f6X5#WGNp4iNX0)OjT ziHRx8h>4LZJ3E+L*_uH?(ZspM3dlVbK@VM?knYvg6qm+e$wE-+#{XEuT@Wh#^-S&F zvfB)cSyV82C<0+;zudMCeLv`@@aVWKIs=S|$o-ScK%1B4wGrv!PKK#rDION)@Y@70 zd6;jZ5(nSeQEZXx%=2OVB!EUR!KM?XDsxttnK140I6yf(qZ*l@ z;XHHPj+T5Th2i-Fd(bddG*MF`ijC>${?3o6!W_!Q1V)2M;hhV!rE5t+G|7`d*cHwm zh57rY4?k``v`koMJY`~Wq!Ks5I#z-ju>dXUfznfie^#B-tl`u3@7?$f{CLnlK{6nP~k zBLn>V%EZ~s%-+S)!8OOr<`rb^V>2eOog?HDD1I<6@Yc@E z)rb^qXKU}m3l<>zbp|i+9`Z3W8R@T6Tx|r%G!>Ld#T=Z?NI98Um{`aJpOKQ1@;jTF z^QwwV{<$3Zp8%PqtE(d~GqZ<>2a^XolY_GbGb;}d4>JoJGaDNta0a7`r@gBYn9<&a z{P#`%x{tV-i;1(9qpOvJJt^eAM#c_qt^#CakO%$!=XX2Jz*he}$=>Bpw}1{ZL%v~V zWny9e``*B%{E$z1m94;Lwp!v=cEIoe&k*F~WaIyJ{{Q*rpC|s$m74!t$<6)pf3N(X zZ~lAbYZo(TF$X)~k*yQ|M$y37xFVhdj3Bn@q3tmeF}`T;4^;azvoQwnYiFQ zFy+EfGU6g?;3xZ8@IKhDaX&kVVq!{fhb*<6qvFKotHNMZ(?1J;DIvc99f{L6TvG*I z0{c9H42M@b4mIyeahp;Y;lO+AO~bIgxttfLH&e&RaJ%Q3`KA!4#p2NS3QkX*>Ln)W zlm8qYN|9N^Je=TlxPr zef^ryBN<#N)z#He0wo)k*seHuvrhG@MY7soS8*BH*x2~=$*Lm$HA!sIQ0)%5G|HqC z@5Tyg(n7Q0|2y-6lNhhl47;(Y51?f0Zd(*C@^;NVDztMbqWNwd0!1?^0=7J)|jbQ>hd%_-tHACX7L_+ z99M6Q7s=IFek0SWHVh=>b(Ma>WzPT^-5^*bukUgcAdpBhlOCO1GD92+0jG2zG;E>Y z!>#j#0+)=MT4JAXQ{0~}*6;!yDEA8k`$2|jr5FZy2J_B%(c4Tx-?^P<*`AyD_`I$q zFtD&Z_1<1jRi-O+xbTJiT3@rY}vS+cbEwpuS?sA1&OU+ z%ouPvG*SFnsT~xZv2q=)cKlY5Zuif@Y>s456Y;vDgQxuNPo#L<_fr{k8|v?|X_ho>AP%E;|Nd<4MutOs9I=2Gx1=b7c@o|0^&g+#*0^jbM!b9HZtKzj zEfR`809dY!ynNL9KtkWQi=q_$as5wWBz~Ihx4ZOGeCp{ht;pWuGAEA--pL;~#7h1# z&mECm=Z!($Op*y`ww~?0A>>_tDu_jynn1&&5hpgeY{`m?hb;JBEh=hg=Zhl>crXeI 
z4L)Scs4^O3m3D|0?wf!yh_hBD5^%J(lBGdJogjY7h`DUu=Ju~eCH_#3CxwOv-3t}O z3%vpAmZ}-JKH&-!l|0q<<&`_{+clC{ERY-)OXR?KzimFF3S5UAl9%UjHnFAM8?T-l zSD9nIWsHOoFi_{XO;(I!#8}S|*Yp|dO7?CN3pf`(^348LIgpjxOquzGWteOsjRMXH z0lMwzL8JIr&?hvY;#EH!i61*kLEx!D6b+h6BjMJWuBPVn95{stwGxtW;8Ue7=EGwo zN2aL$l2zs;#gLaz{#a8_q3lD2oPK`|#`~`X7PC!*bXhI572k*##jRX^{H;wvzP@#0 zPGBynJy@z?0`Bh#TDLrav6E^U4IIcwvgOjk&YFq`OT(h9rC3V2QWSCZ#~lz5|o~dJ$&RtKJ@(YRIJtdadAc{l(1HRSuB8cn)F6iw^ zYi)O4BeNfY<6be3)n-L>xQ5A~0~G2$s}CUNCd}KatPq}#^7>f|ujCa@m#@?enjnk_ zX~+?YdYpMzTf5*_Sbo4mzHZ#PzxNN?zcA%h?X(7Hve?*-RmVybX`97Md4-;OW~d_5}SA^Yzb4j_J)`u}RuRWHz} z4K_zf8YLB^DN;$#vc1Rzla_CGIWc2S2~CvY7ZZj7ECRZYW?3xeirIj-;faA?1gE-y zsmSRPh(2IC_R3PA{reX#o6mWAiQu~V<}jK4fS^HQxdBuD^Fo6YRX&fefD@W-S7--Q z&LJ>+B^u?@M%jC2#nDeL5Cfc+9yo|vMh2<0Y}K4Ku6%sQ5)_m(7m>CH-VMbfMJ%u5 zH12oHJP9><<~t4>Og&7si_|gc9GRVXnfVpWDKEEoK~wd;Pd|yOUiRdWd=3fO(;a){ z(k|%{iNaJomf4X*R-K5oV`SX&gn82EUPdD2?9~n^52Z`v>0d5V%;m|PT&&ep+&oJS zFy$>3i>3wYk zyfzU&>*qb6*4h1*yE=UVr$xZN?|i~Ffg850)^IE2(@x#^@8UZ6aBNNxgjLzzMN|mBqA*b@7iR}Tn1pwet6Z6uPrXeTH*LX}xTZn`YjkKePOOw2{w2SMejc!XVjOF3|wrPNHo)54JKyuu9g)Q50Rq}1gYhuhNXx3{X zXW(M-ATS{4kwm!3o)1K8opo?ZI+(6^6VXOl4{=r|X%%Rvk6q4e;ty;ux-HNf#ysw|lIJ3_>d?tD+xlxmfL28f7lg!F?RhUDs7xA~6MuO((8E7i zPorg-%^kmZJ)MmzN+*V@qd@WyXfmOOMh&|l3e7Kd>pHDr8LzB_g2&$rtG?(<#5 zTgl3h)!O5oM432OY&}_JZHJ32ofs%qSo`0r@)n*O$W^;AOi!T~5~hi%^icD7Xk-Y;$Kvfz z(Uo*D}kMC1t{c$cjdM|%?-SOcw7T;Py(`u9{L>tnQy_XQ| z{!HUOi`DV}$E;8`wen(vm|kNVh0gwa*qG%vqUWUu+Dv&K+xhYZF5~^}!xzKV3ge~R zhCEIWZmHJ#p_S}K8jg{LUBqVRd0SHNVNR5c;ByUDI`${`7=mdj`GCvbqCa7+x|_Ul zEp~(OwD$6dKxG2~o?=pO8U3Ut1@h&?!(g`1pDLFV^|3S#Bur_puBQ`s>-9p$@ABq` z-GzS-G3IbVxHY2FGvgWXQkM|EMaRajq9mOm2xM7FI*A&8zqNHn4DQm46c6SvlsK7a zE}kVuqW$jwZ137j&Ng-!!O7Sc~GH>^1Udg`+{aBzGY?=%2g#la7?4 z!s3)QL4WzmbDyqAgTR^-zdHy2W5Wr*jDxUiVO`>57IZKpMNWLp!^vECxdaA~DV(V9 zd1qg!o%6c%QxGvR<8XZ*`x|jV=yeYfN)Dho3KIn03^-k9sWO(qgT>?8ARga4O(zKZ>lS|7Pn zT9P}msZ&-S)l~we1`eU>LJLR8?zmnXM9A*Hp#@fk5pBHQ+WiYmdF4#g$pMEEvc(Ql zK2%B^MU$D!9vTEQ`lT-o$Uyf6{(DA55L)lZh=vaa)HLvm&IzICA&K+h6bL}B97y?A z0l$5OLjxT(MN*>2Tv(|Wrr-US#vmgsquN3`e;QAS`AmFpYAfQuTF87)@vj` zq@o7Db?aSCzRI#%ZHb1t%0KS<2tR^jZ!XFr=?U|qZafpU@#uyfE5Q3>?X3V!=KmTK zS7`gOwJQ}E2*~tS6Sz>7&N*DqST5I)wzuc|;Lp{yqP)sID()KSU2ZK5QTeJ3BREOyo~Q$y{a+DhD@%DrV(n7=Z67@CTBr zGN^J3FfxMuPGq35tuPXK1r{or0P#E3uXjt(+xu&?nl_7o0~&4$se_BX6L!1a$C?&v zL3PR+GXe;wxbn=^vOcp1)Q;$aWuLAJ5#Oy5C$kfVJb_d08Afxl7RSY~h1de})`4;_ zZwo3_P`SkAFDevwm&I#$XB?IPO2{Z8h}@Teg4gBoSmQb0M<223<0N?GR)uronfvj{ zpF4-c;8h!ee5*nw2<#YPSy(>*IEvBaS?toMTKQKROM|}LEH|rY$(j!3aiFh89Y9dJ zCXtpaOZrXlaOSk?{T)IXi&b#<^1v_{o{0(CMEAdNE#(EpP1)gf%4Bis z$hRmdG2nJ|Ou!yKe=Dlu32lrEE{q4wETGnqZl8b}$DJ3ntxzd|Hne-AB5Z=>xc#x? 
zQTE>rL5KkX3gKg9qV3o9MB0_u0+95&q*)x_ zA_(@Hc48D9CH`90o6~xYsY+xo&}=j^7z#)7ME4n>f=9%P=qcZ@ojJuZ8-uF^6Lh_> zVj3qqU&=(mAfMCb_~`X#5*yUztXstZ6@4f4PsWW%@w?KbVkS#+;PZP?zh19tKNhp+ z%&V*y8J~shRNg4b(Qj*+?z;1PVFP|;eqV8K-A}c3zgTD5%1?%#L^T)qlX7)3-J=Vq z2b$IuJvec_&J0G=57KN@sL8zB%`~0sL_QGi1*x<#CNVc*%;_-)XVbb%TBELCiLPN3 zOwKWeEG^w(#}bM3gq8CrQ`m4ZGP8H2pD8 zFnPW|3(J!TL;S&*fs&sT8Y%d4|M#*F?O~_&qA-(9jmk3VY@Aoj5`KG{p%^Chah`fJ z5J%EB*#Ri>oTGll3|EJ<4>q?y+o;Z4-U(0Bo4%YRE-U6CHXFG*^z44U?Op}`8DSXV z+dSmxu0jlVlp_KH5vYiJeeFOJ8j$fndzF5&E;xYoNJVyhX?aRD(~zi24IHo09AgJ7 zx_8^fgkvM2LoXF7(*WZWonIfzS9S--B13Ec4JCs8pDD24OODY~TuDjEc61ETG#yI3 zxnZprIV(OPyPw@1R5|WJ;&X0>7SO$dYc;!;OD^XPt+VC)ML9KAmeWL--3yeG8m`6X zG#X7#<-PhpFMz(=i$x>bP3^X~{a)sBf#5?IFBGDkfPI?`i?!G_W||EGEWIj}_url- zg9JjSGUys^azv^>FKykAcJl@TqF?_TN{ER-FsZm4c?qE+`=&6%-hzznFD>Z5{{67= z*wNRF_cz|qX8RkED7x0fPnh9+?RG8hT&eQdRwsqu&f% z_Mc-WgHMlN3w&gy)5pJp!)-w9Kk2>b*^-i}Hn)b9$G<0?j@R`+C@8!kbu}+G5|<1H zYdE{H|8WPaiuVc+$|GanPIdp^R+D9mSzcr+hxjsgx#V#xktG$xTZUHL(v z`97)r<6j%t%RPbraMTQ@Dx9c~G<;gRH*DwoJzQYan*jcj5Bf&LBO=POStFhqbBR&I z>=WsPZ5K%ygv3L>;U=8GR)rVOR$!MIXM;1GI@6W-*>n1S7fjyh6qKC-u+<04QK%-_ zX+?_pn&r5S-~}64H6>gdFY>=MhX{JKOX!hYZd<_+X|*z>b}id7YEwh5U`5^?HxU2u z*JB6Q9A7)(*ZlLeE6bM)`)-cm zQky``<0KXZ?<@fGEW=`MP3oNAy9^7a@R2kco#m`riSw;TxRCRvM)^ejW!id~|TfD+J&F zLK^`A?9*z5fv(8(&T+IC7L*a3(_Al(QjQr=u>V+ZQOhuK83VMwbO~ch#(~BcU4Pu| zE+vX%tQZdikBTJx`H_Qw6W#s2Unh$}{H+kg{d`|OSgQu3m0II0L`2_Wg8?hYqvWXV zFes!tQJ^${JG$P1GO|K5@bbe8668#0MCN~;dw>`*6$e(4u?jUKL(CeP9$pbFr}ztv zxh~vyiVR+~x|UtU;lxHtgi<=tMgKPP(jcgWIyg(J|8jZ^G;|IfDkc{Vw5kvInAx{a zN4SJ&?}c)$CcxLb>!5C?#uV+f%@MSNkO$|(w-rAk90g*Hd9`(&`J2P5A!HztZeRxo zs|Z#a)`X(s$4KYlpbQz+inD8vq4V9b%}jUHP!|8N-#3HypxqV%C8r_c!!ii#Ug^(P z#7{7Vw5v4MA;gl?g*#-9BF^5A0~K?No0}46IWqkw6^Vz6S!##ZHsV;Un3E&Y$6ApC zA98rsLtj(c7zjIR93X(C1LNvYKc|cFG>$<)W}aYLaVY{AaH9EKv=TP1yYPv>ZSX+z zpZ~G)kQ!#=7*o{y5=qA~COc-}i5=!mfbF1T?ry^)?ni^zmwqc;7AenaQM%W6LkfBj zwn^;R8@?P8N0#~LXO=H?XPPbtTk}{CTFmE{BZD)7f~2kg28O=iNV&p@<7D*oP^lgo zZZkN11A&bmk2Nb%@E9)>a9l09~<3k9wE&Rcv zDmBs3rv1q{tJ8IvFi30_5#b^%OCA5h%H4sQV#?@u!>ZKgyc?)BNVK)J8`$iM8~I*~ zJ|Bb-&@8soqw@c5Vj@HklYzE>poo-x3;ug?WJn$Ze}JTJDXF6r4UQP?TW0NSTUbXv zKq3opS3YHy7^E*cI)!5Dy#Ipto>z>l* zZz+soc%lU=^{`WIqln^|bdFMiYGXsB#<}(8Sk+2pHXU z?Y4j%PtELI=T>CJJ;Kj;`UdVlm4Gw%HJVtl`M=j~16IPn0msdfklOypxH-4;H5>u? zC-9s7w+ID&yr@Y2yGhsHxj4K+wlrOqZ!AtK@P6J-%*7%i?pe}3({K8mK5GSm!@Ke! zMl;a-Aduh%s%_81Mx259_b%kqZ>0Z$>~Bm zxHd=946z})xWjJ-@LIEGTXy$i45T*oq#|m1z*?a$!uX$e-=5ANX)EB`i%o!YTxP_?_XbQZE9f zJZ#fah7o3g9s@H8Z{OTHD$j|5tO5zvKjNCT8gGBgCp{iNK3-R*{3Nsz4#A1!m=g6Mt$rKgXLNr6bbKJd2w!6G9!N=`A<#+~q>r}OoUs>tlu zI=M_OII>X~Jd(JHD6^GnQN(eo>S8%zZbR3vHu97Lu3{j9J`)3k;Ah9{j$#7qCnzS? zm8M?qb$1*9*e3(VA~>Ahk5#Ewo}SzQaGX04h4%odUa=OeVSxWfe8E1nh>T;5H&43? 
zC=N#w=&eSCRVIcvmUym%eL3dESS!yE#BcD_G+ZbxxcY~4y-<{>s$1K8XAs760)Que z%=_JmWW_o}i~=7bQCwvUA zJ|?byf&qfr+VxiMfT;n{aX|trJY+2`Eh~M%e^{OTq994XXWrU;b6!JWpge_cithj8 z>aD`+TDmSwGUKElfo;A>f%grS76v*%P_ohF{IMjrR1Ph#Mcf>Tpux?)dh-#A znr%_pkF#}L%IUe=*97Fn<28?qixnxM$?5uIO}h)Mt0q!Oe`N7yRhDXMOlueQ@|R-7 z*ipH}#~viEbdGDy?`m-k>rsXkKB4(ARfsfy6m(%P`6M|e#{-B~t*TCUxX)(~je4@TlpLeROm4A= zl$bhykfu>Yb7)l3>WuX4EWT90*JRrZg1z`a_s#DpmOy5CR)V3C4Q91!Mzf}MFoXrbgyqbV_2R%n&u$UKl?L-mFtD)W>7E-7-7|R)@zp7ms_p~LK84CGQLcu&v8W&uX zS8xXa7C-L*s8m#3QH?4AVUbZLTv3WkI^vyxf6DE>cXjd#N;(ArB9RBe*Vh+^(}tK+ zdUMQruhgVsMScAFHuVSoJ&D(LXDubDGKw zM3Q_!`@u{a+{VyIH`3bL_nAX#gkn>mzqS;WkBMTL-(TxMWFvW3I`{ppEx!a^8qep``XUk-1+?!ZG6 zfiwO%{?`Gp1ObVt`28Ui>+mPH5zBkQ0<$VuZI}#P+b!(gue=dB7dREaZ)p)-3^{KN z2?yA_EK!V^N_S~#xG1U+*r7I=n%&b!hkN6iEF6%w{~%gVwb$kC=^s=|6wT5irneaP z3{7cNz{A7R3lbfB~+>s-byrq0Ss7%K}IyL&T=wYfmxa8+r z??9XY;DA~R>3rBZxoT1bPJ;KNnHC$U3~1+%C~5&spBDlM^F;>CVL<=~?pNz~NA|Vt zW5iKORdGKW_Hnx&ivyFayluB*vdK7hlNt%kX}`ZEPoy&=^;1N@sXi4-Ak|)Z(kqO( z{PMbMiMwdq7cbYLx86Rz$FwcoCEYH@)(eR!I=>%;f(A$O#m;Qj`WvtQE^++RO3{U) zE)fg+XSwA23mQ{4%g5kRCTc3SJ()b1)mOeOT60AfSHF?cvQjc~%YGsswA%}0@C7&B z#`7T$#-G3psGb^H?Xc3cT5y|HX8Hz&RqZ zEad*R7^q19(dgg*P)og^XB0~;3B>Y!s7$GUTpkY!Q2`uEMqKXdYgUo*mCe z7NcgEMSit!5;=F&kiGBO|6W3YQs#Yk`|jnqvk2vDCzu7GUtB;jzH1KCG9e+bG)^Dj zZrP;6b^K2bs02T7eqSKOmdv3^ha5Gvf){lIVSDi#bqN$>ZJW(;o4MPpQqm=-M9h4B ziT3w_Fu?KiT>owIA*P=<>NqJ0Vt)JozUsR?SrP=ik0rNZ!&k5nm|);oR=^AvhqmjE z7{#gla-7}JV*~RESl@iPxa5~+)m=E#tXf zR@K*C#N!Yvc#}e}!D`nKu3uID);>rL0;^w#8w~`9TI(wEvHpF=|2*m}$#=;~@^@NA zuqyx$vc3AlQ5qR5CzZRo&u>i za?^jUiibs|A*APYYUi=|<@x4O=bITn*qHiu*w1m~s;ep2o^?*&I>sOpg9D2@zW?7Z zE|j$lm;T4N2gs5O=&F9 zogS8US$p|VlfFR?ijh0^h#-gbKDSQ$^n_m%b?mrC#q-}PcwY?4PEf<_mO{$P7*-uG zPJpF(A(AYdmWkwGA{&z1>A2_+Nc~Y@An##@A1+-9;)&5 zmT5wu==bBeDqigQAFSPpu`UXs+O60KPWVE>B%Tvlp5!bA-ryo&^k_Rfb5o!2s~8{Q zRXjN%2t5e3ceC{O$guyniTv}Y2oW}}H1ISKTU*``1h))e4{i6-opS(!plYKP0YLPL z0w5(cir9jmueSOlOA%d$@qt$`3l<8QqIN9R=nz{e3 zPDNt*3al2uADIn&+Jz=t18@XQNy#ut={BG-VTzaej{qHy4bE5-gM{nQv4ju8Pvb<5 zRIe}TF=gwvYTTNZ6M%l>c>~4~CxJwW=Xz84_BStDeq2^?M>>)w+9EE*)e6I{SHp61 zA)1dR2mXWVCViO_YH@$ZLh(vIt)HE!7l!?Hf<+*DMX}6m!=w-L_T4_?_g{5FR^o6h90|A*=`V<(IK|GSel(V@oiHlH0~!NMz{9stH2%kQv{0w76pL0lwUR8(uf-8lfs9bJ>=K+bHajh zRe!=RdWAKNf~D&EiVuDL(1*`9EF(TZIs0+#H-?J(Fq zu3hx)G}Sp$LzmPUX|VRHtYKv)(o;bn(+W2%zhg?NJ5W=+i*WyY0ph`Z?HrqrCv#zd ziBJ{NG#7TQ_hUd=21>obqIkK&e5Po_tFyOP6ab>4rgB(!jw|sGELg90f5}8wO=&|{ zYkpveGu||k89JN`ierLo^Q@-p^F4r5$#a9Hqd4L5?I3)@dBQpo-QSmso_=O9>|0nT zzbwM6#7C&v^@banLH?DBR+^she@D^#4hbJA*o@W!SoHMgE3{MrJ9^*;3CedFAd%1) zkSVqnO)|e9eBo@6{fzg;q7#UT_uHjE)H`Y6YC6m~O_GHhLRP<&z}~};VyZF?gNvjg zam`u5iycyLh0(7x*ha$pu>c2!Fs4mCfiL?{`9d)iVw}dPtFsx`{I_pGCfX8^#yAPT_6x} zwq^Qwu&<=~r|NRmVFocY3a^1Sj~pJuAe8b`U<&iq04k~C-<_lb0C||QZ2Hv6!#H>0 zLD+9ruMZ?7sbL`)ct)o=H>$@i$XCOn4mobXH8GXB@ z>lt!C^=I4(%{SG;7Me1O!W{y^P(t)eT=*YtEZ7(~FOD7o?yv4to`gxCTxO|Md!zMQ zyK21|X43uD-lqXAJ3fFCv+S}JX|ghw>Hgl0d3}Ad79)m)gq+2Hetu@MTw=TNw4amN zv-R8SxyVp|-g8~1mHA&WGmPpBOi|$ryj;hk8BHHCdUYjWV~L1}%$_A&x|9L0naBM` z@Wr}6a!I@*C|aZ0j$#4nYRSCvyUr`(M~yT7*Lwq-F>V^*&9r$S1_&5Gv|TCk*<89T zWq!^SMU&b`_)VI2N-^^J=YRGRmqieu`?l=I7CeiDV#FcnDPT?NW;`?TWMJU-v7lES z)qFgvLn=zPw?;Ixwr!qtJUH$acE1&}$qKvX0Si{ZBm4s6>UU`JClwCeN$4~dHU@j|a! 
z1tyJk2GE(07pk|)T7f59IEX)QYioN**noBE9{ywyX!QP-{@HT`jl{Dw=^ZlGjf{;; z7`K5T(PCw2&9lW?fSGM|2mBNp0AA>}P3%_6$47Nv5-f(T7z5T~t(^eh1RHBtv3Z6v zJw6^|Ha1}d z1LLj838AGfp9hA-mOwBUF+q?GdZ&;Zq1b#DeLi;+p0>~ip^4EvI{0m!uCtU=^frF3 zx}G^!t9A|E39t*vH(F_(C|0vG-f98dThqC+Pw$9|&KeE?;uC?x3;}>22Y|%P(^LN; z+*ZT6N*yAAot(QI+}yhN6DrMO@@XMUOnpVg5Z<2`=^{(S-mwTxX-e}I361vpv6x?Bm}DJ&hx1fj zpV{#11)d<93C{YGe<$1IVPMGG;;%@UC0X~hyM0Jgd)R7%X&ZHx&EPHOZ(ZbeJ}Pg- z67(Nm0djx`NiY=~HskZ&8N}=c&VRdqS|tru_gMn5aHr6MR%I(a42c(C1Z6$HcQ4R>TjXVFS;r#GICidTL zsb~}yBkZ|yG>X(7&_{hokibFV`bUTemQ{VheT+%lfJxj_wcePrKjPuTbbDqbA^0Cu zD_G_8{hyyoL6z7L9uXlolUrG{em!+7C}yHPyv%WBGl(@aGL(vqfy+!!xpO0Adcgd5 zZ)LkrCJknPir4v<7JmNp?f#!StO$#(D&00aS`+MtHqE~gP53Gkbl7-#pUccWgn259 zldu*Cx3;}+umQx*ApYF|e6i;15#czO!@*RaL7iV~+P?Nt={n~6{l%8m{ABn1w(7s% zDr=Lpli^KsSOKp_8ORR!QDP%$^8RoVV5>SmXqddmzEZ?>8Pw{2Xw|K8GzDO;+uF{b zGl(v}fbb(k_By|wU)XU`LdDjP>FoJ~r3!3*Tk_J*i7#{hsEKD0K*MN!I&-)ZvgQR# z5VRUf=~*ef!q_3WIqLdNiw%AEP@75l&OdQ%Hj|YeuAy(OMx?C@p;EexI= zt98U)udgx(USqJy^d!t|%N0~^tvOcMfzUi)FKxM|U+vT9GWPxT=&|`U-u!=?%}%hO zlao_)2Kf$0E&`q!>`LGUr8C^SWG9pbjJV=C^~(|9 z>FbcEJ5~00*tl!zoJsmKy+0|Y%{Vj!%m<+$;eE+l4ttTD z(geC|R!}Y^N9$ew*f+2C&Xhcfm=I+sqw1K?i+vZpi3S^-=C}Jak#W$N;2k4Z-^*YJ zNAQ6=Le>n*IwSd*NPpL$n!%QCJ5Zd-lWYXgVgxfASFaM!DG zp=pxD%sjQoYRAW3-|FzHI2Zk>!y)H`arWCrAh26^h_U#e!IzNQk^Y=@e5luP|MZFH zj^yH~ay>B#!T?EN-;ax#3zcX1^nkMo8brpEW$jLXoo?lq=sUy07>kC2l9j>6#nn7W zwTtmVv(8EE%O&T~NfYBLaMEnq_SI`Bd#kTu^T1F#0NAxTHG|90%`xo7rli>SMq3Jp zwKlCMCTl=iMGmaZGgT{OV*gWBm@s+NcSHzgX6CSo-#}A>DVZVjj8cxXWT^vho)L4L zD$es?&4mU6kjrrZ%FeLVE=|AfdMEe}l}@P&Ie@lBhNph6p_ir z#!Yc3I>YnHv*4rbGuCr9ZooqlPax1}Pm1i5^9q_FV?MY33vtu$4Q|rtZnK+(G$%ry zwmF#*s94iTA9yp*5&hep;#ca{X({bl)oh%Uu&{7+RB`%V90Uao_S9P7vQkX*w|~16 z?@uY`{eqtJo0NqJ__R&bP^#ql0D;g2SgMnG66mwodqP)NSN3=A;+c3=7Ap>-x0ZhK z#qN3|e|{8QQ<-Z)O4`b2UM2)8317w+bKR45g|QNb_M_k759mM&bdM3elIFlFd+!s@ z+}|S@J~j%vDRb9=3d!a(5Ep*P1UoLd4{|k(@I{rh^w3Z?*$CT$wlB>ecO%fgn0HsAw$uOvcjSf_uQRW-spU z7;R)e{&xfS9S#xtuO%fl=!~sY2J<=uH6QcG6mX`LfqKA;xG=D^ZnKViSY zPZM_l@|?{wF~6xj`_mgj!h96fOO$d-L_$D-9Ef*K0El@iYFfEDIp+XtkS1r4+zQAq zcX`-J{4W1gq>#&NEd&rPF!T&2V+IJ_5ou^>hEEdu>=)cdY!k7`j1_g6-raWp>Ucou z3ooS|es_9*P?(8vu=Q%yyn}Urf3FMBCk&2;k>OgTCC#?x60)|5|AA9F7019KrmT%2H z-?LA&Gu&(0lu#7YDoA?&J{*Mpp*HnnB2ZI&F5ZXdScK=!Hpy`P`eL)`A-Acn48*r2 zs{kG53Mc80Un~~nbv0khHUhSR(xdW@@+geSg}g zo)r?`*SITDNVshBd1Z2${KGpn#9*D>fe6*^cUDG`TfBaO2zvnr z*u#!Tb0o~nr~vBsdqqMv>h>A!mkQ0Sb#`wQ+8v&`fGRe09RbLW8JBe;sLCXg#)x3`-_SFT*Df|T|vO-oUw ze96aI+#`%~&J1>H=d)2wJ;S0<0(UEV@khJ->zb_(!x}$E_Jt-w*4dj5;2j8Ukt2kR zXMf);(4-M^fcMXkF(uh$SAGp0`uRC*!SzAWs&)5=@LCB>n_qYRfy6(4gMY6}n6SPh zDHGL=RIOAJ%?`llYjs$V9RCjE3xqGk|9ZYYn1&_{(|Sx=Ffv*^KS&B)U@~G?Paag? zyJ!{*;_jWybO_6p8g0m-v2D+Y)73ni?B`$u`@!qYc&L?0H7%FvDIxVHb~!FU*2##z z$}f8lwU1f7rA|S0mj-vUqtqB^H*j4ng(VQx&X+p$KQG$*2aDr_NMz5(W6}i!KtIeb z=emGa*m68snL}2?QbalZjKasHMiDy zhN4Yy$=_{o4V#besJg=3p@asEE5Tu=1=N z^GryMr--U5)MS|}oGvT!#2EZJt}zk|5!ky&s06;q;f;K*N~u{mP}hWJ)JdF`Yo)CJ zPqg^&QyXB`aoO)u*52qg$dOOh_A@un!52q|;xJM_#DgH5Nx1*}gPQSY2ybtt{$ zr+1(u6AmIY?%pmp^Fi=I($x@M9ovjwV{koPs=-mnjFG&$KY4Kc6v6gpiY}Rl8ZB!j z4uteR=P?6YyFUijmF!>i7!)2ye!9{`vQ7a!H!?@jm&1iA@zbQ$IOMZ3;hLwG{&Y=J zTGX4d7CNf`4iumeus~@lgCn4d+R3W|_zLuZ(IpoJ84eCy<1}eG6n-)AH{6L+>u_VQ zGca6%xaN6B5HO*IlVCT$6AISqZ?9-^OMFge(QJU(4{+wlzI*|%M0!J6$Oy6#$ueOz zDwiS-*D?n7Ud_*L&_|PbC^V(EVyq4XH(<}^D)wvUQ6~Bv$e&78x&=Bv>V#EqbqC1m z{P_kIg=XT8^6lhnVu&Q8*tw{vn26YWQ~zJ__MZ=&PSp$WG!=T^(@touQFRRJxnxO4 znCaTvAEq`k?uK5QQGemI*l&^jfcCu-^06-hb`I&#{AD2dSu)3EGV?Rj#m?D8? 
zJ(HS-v|r6ax>~im+VLnjmlfoScX(0VD7r$nhq;*Q4Ll+_j8WJ-6nVydZ=u=%y8kx- z6nYn`;CQHg!thOD&ynr4*LTxE>Q>tAw}9HF2iz;vS&N~4Yf*66%`o_1u9fP+ zclWD}r5yteGeli3g)-*YZedtvvmdl`!#y;c=2yChysk!`#W2E*h50UkkE^X=S?AS z9V`xVCwv_7!e#7&rz*B+L1>QH#b5I;3)Ja({F^$s`pS`}mV7!=qvQqK+uvUz21aRr z*8arclI_E%eBWvbu#eXl>}dQ16<`gGjcGRue0;zt=4SGx5_3|9xP;c}pcYa_K12hd zzr*pV`y=R0< zngGg>ur&5d=$3mz5Q52%Gr+%Ay0%~;A8n@x2v{JST2;Cvwx2PimKK*UhozOu(%|0T z(m8D*0Eie*hUcxC#bOO~$LnJ$+WkEclg)r=+bs3IVfcm%`Uupiy~HTtn(BbKxVUJ) z=m8QUC%|yY@O*%4`+ZN)J70(HI^qM<}#-z~Te-An+vn zayfT|{_bE90jc)m4F_}U~>C<@AUQ^*wv0bqVX%m8`9hJLdm%mEvg8)bHA)I z%HuEE9BUvSMMUYQKcM12ae&3?3rQ}LzKiwj>jqJLD!RJ4lOgZ8`}9Thg!KiwKkT{+ zW`J3bn14N;YQ$O1>NR(%al_$2(}vp#d;K0A3)|%&Khb7?g5=#NgpV`EAl-)v$~SN% z{8tGE5;lN}vhRGOr%Rx>ewV2zu7LU)GMlxssPsbW7UK$UDW z2dT=qg!#Xi&na*zfdfiFZobvr{zLUuD>t>H*kgFQI7at*l1SUTi zj8WMxG$e%Du3odY-SkA#+VDW)pVeB6s#~vh>tS%TlJ30f>IBK9^b>`ex?ZJ!8t`!b zV@Ab996P0?pg;Db`9~=94f6p*=c05tW*Bp2U!+ia0*Y8hPEM3tv(+Ro7lI0SvoK08 z_8E#XWfY+R2A@$U+uj?*RdCLDqaNgBwSD}o9!y`uu90xpZy@f_j$p4q^&btQ!P%au z%S8oqd>%)$!v2$Q!Gi{cC|4tzP5@VwFy<~iJ6{I&c$8KfgOOKhSsC38SzKnOZ4*8MpsOPkB+PZ9`xh0x07q73 zSfNHn4NP#E?LO)<{wsgQa|$I((JUFosIuhOiYu1x8b?QI9A5Wemt$Nh2 zTYhu?W`;+yKJat$xAxZ&t#YSUdRa2PKSWWD${^S0fT1yOSeLE=^(xt|6MdsWZcDdW zve)Ht{m`(_Hf&VF{5nMzh9`V}JS(&YfJu2DRW3=XQ_ z?p`wy!#v@An?Mu(fgx-s3#OK`;wP9 zZHoWA3&YsH?`^oga=S|fn!Vsoz-@~JQYum@*X?v|nD(75R!GXM`V_jz@Pqd>Sxl)U z))anV{ySA=K+6Zad51%$%m5{?NGXwsp4|)|vJucqY=Z9TZ)?3bqR&JAEIRbDc*(eP zV!6OL?^We0FgkPlO(0hU5w&$2iVuI$wHLIsqh6!8=!&1P0@u6vys;y-J(j!OPw0Pg zHpGA$e`7Bpnr|w+X&ZiwRF2x%EmL>p2q20)B>uh7Qe#4`dcS$LJ?y5^G7@+T z1O^8uN_I?1JOFP2Ln4K%gC#|6Q<8toHHjsZuT&`59+5`72Nz(haHa{SnQv!hVL>4v zKyvbCC|UNn`UCF|Bnx3VKGg8f17|_JlKs^#WSr=u1u2HPT;K71q^VVfgspv~9;vew z!&h~y(ygOFhhUBfCjm)OD?#A~=#Gpr%9=#bUvu%!`CY}nQOTjs0yz_6vqwn>mjdKN zZ7DA9s|Va?+x_xrkI*EQ+cEfW)MI&!8SiJTWDQ&q3r-UR#7H_xxzn;TVnd1e+%N#i zfO^6L@a!C_>Z`$blES=i7Lu?wA4h?vr5K#EZTm>~#cDoVgoDyY*f_3ijSeq9&eb3U zDb8A+DZqF*Q@uyjFlwvINrkgVDuBT^8T}F#09I+mT-=%_6^YBSKRuFgHnky1Tt`ho=zKAPFXe||_ zOQKGH3t;#KrR?!?IZe3`mHxrHC4}j|AW5f&kJI%z%`9FGkfo5vDTziM8z%P-$U{z={YdC=wJW&aK5tgvcPVb ziRL>DxH=@m-|kSQ%HOx@^+4_T5da1_5}EHcx1Y8gz2ngHapwzU@NNL_Cu{s9gEJK8 z5M_ssnqP*2&VuA7lVWP17JX~9ieYr2m0xAvc)N$GY`BP4V>pmSF*Ml*bUnk z@X0+JVd6`5KV!ovjPwUT-{apE>;zsM+e~B$HJvp4<&|r`dsst@x&gd1&Ex6jCP1P5 z?hsY4GtJgl1}Ndh-wuF=)(_&fPj_84&*)UL$}66LNN9#n7~Tk=wMwqut=j`-m#R&w z)VJoq^L+VielS;#01(j9 z?Pb?wOJa}4@HJf`Wn8emFCGI80f#wW19zDoZY2X14t7C1$lbRNEhm{s3#E=tk*e-FEST z_v%kIHF!tI!&SET+v@|zU8U z@4M9=;PFQR-V@28zm=NJXvaMnF)=x-f0vyB6UPi`Kj0>10tj};i?uL-RfhUkX;|nM z(9z}%wmGXkW{R3^=WAYy&pMBie03fq57&Zl^#D>lqFtG zRT`f)IDj#W+!#0WJLQ5Fxi!3to~X{Z4DaA??&{G>^GA+=ESFK|*MdtT(Z2un=SK)f z%2^dTDI0d0b_P}P;duVnZNH!5Xnox;Z5wRD!nNnm>_5otk5DDaWvd6S--hCOP=6aE z2TwLDR1F zk7hHLwj1P5TdUs`AUeQRPEBdgDcgM9@ z+yq;dAGl&DrOsKk77KPBJTw$bq4V2BSVt8i^9L1s;!4hL*U3YBI6Xj-l{fUmq(f*G zNay!dS6h9 zyKH&{MW`E9XaQ6-W({5j9)F0}Hb*W_X{^ifC}#=*PPgpNt^+UQ6z*r5gERU$s(Ohe^shk9)T|mv!!aX=tU+ zB#kd$W0Aj-8jK5J8YDJYGAhP8%oIjRI(Kf5{SC$;#iT;z7+GQ$y{2xc6fwa`og>$8 zEcB6=ls#5BH`d{ayv6Y8|}jK3b`$MQ|LZE|0I>=iIG32c^)PaNND<+P`uH#ScbuYeGOWOVNF0s1lc z^~JBZpFcvp-vtXEW5)J@%LGl;)M(Ag9+~+Y;0($zvekzGM*6e$3pfvRakzcTUK z?<;RO8WranktPrtN_Q?ae8Dp?w1sRIan!0dPUb6_t+ z&M~}sy*h&CXB&G8P9XZCZy>X+#Mte4fq688FHuvMh~}3~3wS=^z4;_YgvSP--C}w$ zWWw}5+5%MM3u*i+++Rt5nmmB_5Z4p7^`E zqlMjU^TmLsWWC4gy1UAq-R7l;PY-XNvL)1T{KR)l)13oaRtWsF20-$9A5(h2>&E@1 zK#xVB5o_i!EiDq$GHCqFKqZ?df0O^%W0in_&7iEJOEXq~<+0msm79j2 zEb&th0srp3o4UX7yWR#c9Q*&xpS{9u)vfJ|s@CI}Es?*!A!OccCM7a@eRg%RJOAC3 zBgGRz$o^qO$*FYN@kcbCF=syOy09A!?H?hgX5pZPn~<>@;`o?Qft#tjrl;u77Yo1d 
z4926vqd%qQENP&R>jG{t)SD-}I-)7;`bw+vG56OC>uE$80k=;FJh^dw44!*lhItP~ zf1=`{Sd<{8<=G&>->@67=qIIRI4b=o zuuD@GmXmcH#dd_sGOR8CS54XOuqG^`3FS^iUU_eze!k|m9GMTpRH7V2{Z$37GzQY; zSxxV~q=-o1O% zm)BwbVZGOOwD@}#Kv)_bRH69AZyRn1NZ!}i7Sdc)R8&0BKmjteTVHTJ!H*0*xtIi8M0<7`1@b4oH zTRn1_etX%m3wN_R)Qohf&WVW$f$UKpej&cLIQNw!C{v~3D_T!CN8lavZ{giqFc`Xt zF0g%K+|@2_G^0s`bt>hgWJnJPQ&|W|!FTuJ>sW{W!+A(W+{L1B%{t$;KMv2o1%%Oe z2w`)Gik{SE6U>rqtGlf~UxVe-{@Ah0`$Ky8Rj{I^w;Ds*pUaoU;1C+(_lr!lV7A;( z)KA-4-kNku=6^9@&Ko6wi~%$=^F z%kZ6~@}H@`Hd=N!7z%e4G-9%u@>hJT+B&1R3%b;1f6tK#K#*M2I#U`PoLJ|K z^!kQ$m~eaWEX-#;_T51-2Esjw((W#L1}muN80bH)OUXnEghL zcyF}X28Bk55&@2OCi`7=TxVoczrP|QQr}>@7)oytzB;lQ z+n4ewF_w(#5E5Rgqt-TZ7SJ;HX98w1*bBKwWTzmJw!uLW=iQS=1tGiYm9AI28vwBm zYC3`=|H3-t2qn3#_epiGtVb^0Wj(=;0^|IuGh)c>sV9EdP*D-@N%e1D(&pquQ^&K0_Fk^wS*its1V z2yL$(6@neYXW}t-YUcTvxL<853z5LM7$&#J7cBM4{GpZql|WpDJfw)uyW(KPnb_cu zl|!o;+vD;VXhLx1R09JqNVi-bdT1UrvEI{TUyQie?XCk0`pcVHQq1XrOdqZKOTX-r zDloB*H=oT8-VJs$d|pOZoLM!AqJ>7#H|01C`St)3*6(z5-dW*?2wJKprXmvIQG|xY zrsKv~c0b&*j9ju(z~YN+S!Y`m4#^9prbgfkBfZL&d~Berpz(b1JkQv@;x^^HVJ!Z% z_ca9f@#kBn2xUOLcIr+*L46l2A}oc0Eh`heS>&x&9nHFxkwO`a7BE1{>iL#+(FEmN z->OV#*-B{2A~-PlsldCDO*{ZSVPL1>T2Sf|43bv1(NJ&zV@t*oAJvcs%B!;xTK{?d ztgQGTt}N8Ve3bTJPWclT! z3Fyq;Nq@-|Z9OC2oj41*F$W_AxtBd|im(=aBn)=dBH!-pZGCNi+k~ zhGDTLg4!xZEh$N%C59#-P+a)<+bnEBf!mk~>8E^7?W|Egwe}5_aH&NqF%sC?B~>!S)05Pc#1|A-u2a3~_=Ps?OvGP{`&cLwpCm{nucg zQKe%jc;zxPkrI-c%#n?mXqL4REzn|LR;3WaX3RYxdN;3m%LR#j1zN;Wk=VXMLkre{ z2M{y;pekjBy$-A};ApzRqtbq!IasCOPThhfG58`z4J)igJ7Ng0=8%^5Ertj_(i|?tO zf%dxD!=}t`!H(i=F=CZwk~cFV#SUuiL7R_wlzKNMD{;7GU^RVz=W^`_zKu2=8k4^? z{Ebr?Vu9iFTt<~xKbEiN@p^Rl{Hoptm+8%i9?SpC0VCCs&TQjYl?b}1j3?Rm=XmU9ZmD~uL>NJ*?9t*V^I>(RF+IHk%${{5}+2EXpYDqJu@ym z0@2*pWeBikbl6!@N8q2Hixq@Wk?Iy2d9?*llBB#D8&EE$u=bY*(5mnlNuMYuN4b|9 zc*WeZ>fN){zzNL5F8&&Osp_TJ&BnyUKzVX24cNO;3RI~R{&*UyzZNubT?elFm}^3y z7>A=d86PalGhJ|9tS#fLshL~vw>xjY%}ffUB*AsBP=uKNpDy(4{ zF_Q6_utKwJ>YAQby3bq`@P0g?iX}9J`%RO{R4wq~<6-y%IpUbpwJIgy%RT+5$DzPE zuQY1oclZm8LNn;-(JozYYeUqZl8SR`n#>z;%N}o{ytlpS`%F35AK%*H-1&9KyN9G+ceOZ~y-C7iqeASmyLss)2*LPYdpj31 z%>0JoBMZ@T&Gh|Lmvao@7PR2cQW=GICSOFe>@dC;vY4im0Fq8+ALNf6w0s`=Z>tsf z1*y}DVJTqFW-XN9zf~^4ES>#pvT1nbwN%R5=$O8)l&BH4{U?shU_KOeXc z_(bMd;gZ=*e+W=$$=kx#l8yYk#H~P&ejV1;}S47L`psQ5GjCHXnX-lR=QZ)Z(Zbkyg&lj7#tr2@aqkb*riaq12CU|@c}A$ z1Od++8~+gy>>uLjf;)weK{&#ePnl6q(kwlbGPhLTAArU%D_ry)iOut8LicNIb@4pI)a$?06~9W6K!h4zhFcp?jnq>sEB^JhR?&O&i`PBjmNFM; zqx$e`Mo1oiwqe%rT269x7s_xhz@?S$gWQxs#m^h}8+&!%1*Q1Ch+=3`nsLM6qu-_y0ju7(-iCF7zNUP7A z?bk$a6A+&tZuN17%Z{|*X-li41U;R0K0UAb$6lQ0Hilzk<(t5Z)t#PO<0*XqDa$d9 z5-Ktu9+dN@h26u`ou;POlGW1HX|*K_y^wEQRIYz=kSp}9Ou)g!9*QP>M+hmu^dMJ% zo$xQDl`Y@KTs>a(Ub16#Q-OHoI*nkIpf-u`cPbqjvo=qe8yDJ%SY)q+Ou?fSEJiS# zDz&+mXhDu@2`+D?yuXx(Um_Jalv?tlJW$u$&{GRcW|O6rkJTcFMKoxTS02xB3yoR3 z(B>BpxE;(>;D$=;ej?ZX6dXYHmty>lksykgraz>BBs;LZMCb*%0=PkJUVf=}%oKDm zGpx2jIWTZbJ0%jD?ht;HkmzxoUkRSUsNhi=U0cH@8ufzP8i?qOVx=)_wcsFNA|oD~ z9;B`8UCn{GSE#cZYh*UMxW53mu0`#rbdd)J*&m9=qXfI2i&T^2(M5o z0gK1mNOF4i{==uEzj1LA;HyRa=<2lG@pERpo$T)MPjd({6ym5JmZmTYCWTRWi1LwH zY(Q~wE|~hkLHf|<5BvN&AhBb%^LakjeM2xf@1Hp=VP=M&|7DXB%{I?V44!WO8uL3J zM!gn>)BhD{w-%%fvK=p`ho!>>%RD-a`n+~RHf|Ib5(&zhecXL{lET2}F>-9h#0cTh zR16czr?TDanLa-Ywq}mb-aeKU>V68hVPRf5#E%jry8dlR+oHvVtKO(i81U1o`KW=! 
zzN?a2LcoeWC^hk;-pf)~d4Qtpc!odMFDubC5a9&1TjAG>I^Qmpt&5LGj^EQsf0>jG zlyZydMjf4|kvHlRFj;;RH_#%iwU`o)NWIis8=H-s7F@#)REQ?6A%`a32B&_cG+WUO z8NA$PBh1L@j|5MhaAKWn`MGTFiaN>ZOd)P5jV$kGX*4XWe5@T=l1AqALq9>GaWyx) zP)x+{U~0@7nMtnm?e$JTA;tzU4u;1h_qT0*LE3NnO3llw8SEC+|E{&F2NAm;yN=j)?Y@tA=~FXh$sHxNJlKQx_#Tb}RV$Ft3) zyOwLYW!r99%eIYW+qP|gv+Y`1w%wjegS8c5NG2o2#Fx|fWbaiG9Ece;x%O?KNP0;u|QoYX#C~2 zx|hssqiJrSrNB>|E=<--p*b@UJ-#Ivu0@VOLRaZP=hu=mMZ_TjjA}eXIg<uLBclVgG_VrM3tZO~rKRFr^*jxL&Zqm^MQeXXtIqGC=1)FPPRd71 zIM{#x=wAp`VnZ~kS(Y9^)za-o0aI3xRUBIo(g; z_{K-_MU)h%fJ9sotW!qjYu`vOe`9nI}UK3 zh}C3M<8_x=505xqu=)7- zOPrNG>UdtVSspQT(Txm3i6h6%vUbu4&%$4UU|#QWl9PT%&GjK^(9|k@cmdyGOl5z} z2u|I)b9EcrqvtLi3M{K*l2fEK+Y$;T)3`LZS5FaD@_WHxd;O*KnLsXMP|* zaK6}2;_fW-W&0b=lug2+RS28*)f)(e{IBHzl+ZcW>47hs3IwYFYJk5A{SPC#3cKD9 zwXNdT2`eVsP0p^HVS+SfqmXi4Th60@OZG1DoPy83$HRY35SKxRdR4I<$7A9!Xwco# zG~3x}!RT-@)L~_dBTQSsml{+5-3mpzu?mlxMk5p};u%{UU$m)4Bor>Ok`|8c^39Sf zp@+&aP&tyCn2*|pa0?gmvJ(qjiA{7Os!(-d({aUmIJ_38fNp{Nv{Wxlh3n(>NwjZN z==zF4#CeoTF+S^;BZPa%zG#H94W*6JOudekkGI$b(aKJ{3YNk)SH<$bVQ6M~71x)H)gypGaeE12u_EJ@*X zK3tGFG(@aJEZj zQTrkAVRi0O&*}N>`91QQnii$wcqu3_36`(Oemb(AZDXs;4)I*^OITd|-dk4pZN{1S z{R!zAp`)n<|Fx5EsQq~)YOL)@e?=1{>>v+v^?QX?PPH!1;A3r@KWz$YcZPc1Vn){c z+PqSA?Gem_5ibKp`$F!e7=PB55~IoP<2guTzeoZVLkRaIsIOW?h_w?Vq(>oktI^lR zzMnyBI^%4HBNwp;oaXrsPL&r}5e)I4MDVhaTD*vPN8iz519YnW(r6M&k$)(&6F{fg zf8d=|l`FAoh)yNJWiTh0&Y@(^bh?@?-cXCNzWB1`^aztF?u%K~SJ#A}4_8DBYHQCs zRTgR=STMNz`x1l{zKL>|>}n+FE^3T|?#b66A>0x^mZ5uFc4|gqi6#Ewl8a?<4gKZq zn$lZ51Xs*+k|7a+dkU8bi7VG3pn(Psd(SzR_ES?huv2O1})CY zr28ZOw;A=()g&lBaC8|SF^e>=pUKb9BkATV42!pDweW?vtcF7>w-IoBlG5fzHw9VcEnk-3e=tV<1+cmG7r{S|ZUxY9!@wPSTOCY3CEm zs~^oVWGLF=RRiOscD~CIl>b^koJfEt9AYht=dGBjWKe@%wBkvOiMQi=EyBe2!17@! zv9auetWqHNE3=5AmE3Ozv!qlsjlYzv@|#ZR)zBXu;=9enrY6f5vhpth<0#9EF0k1v z(d~*lXW*Ot0xJ9Q=&wh3(s>`;^VdB-W$nvKsu6oMtOo2;!vq5+lzx@N)UT-MpBS@j zA?h2?X;mw!$;lL5eFgJ)cz6Xh;s}wgH;f@&AM6(a(h1NSJ>KnT_EJat@z}DXd ziE%p3YL)mGIulk$7MEp*6u3=mNu4Tv_37(y9kRf4wOd+p;3XBtwwIXW(VyQY6XL4~ zt`r@L$AWN&x=cXLBYi-tke)P9<6Cj8{<`k@H8~HXdeAj=c|?o0zoGYnU8S@V1A%td zDEhmZ)#!KrPl#io=!UuVH7A>+y?g#384d;TSy?by(FwRBP4OwZ^`f$th2JjSWIlH3 zXruPrp>W=gVUwW+14Cj-;{;H;25`(ZA%YT!wQQo#B-NLoom8@j^L{PP6ux8X)AOQp zp)H?H9kauuqagS%T+$;gt`}NSAifTz5;Ab2b~Z*vg}8h|F2}*aY16X{V8B*blI(_S zyWLd)5XX~vPKtd8GOoZ>wN5DKN81GYc}SaiNIFS9B_*NB?v#|Hy>tV$eSG7EA6;HtNq3 zhp5M!Y9C%L9r-ix@N#4>%UC5VYpR!gxTSp=zvTxdyn>` zD&!xv6%UW~5WqEm0Y>Rw9oonCh)XK=H0re~JAv;Dqz`cdU!HkWQ_599=T(okWMO>{ zTM)NtK+oULHv*GA8_;a`^?!(g*F|-8k66h-F$rc;amUzayI+#P_G>|~&Uykzfh=8d4d@BE2&>2YeR;^#6T zNRt8U_f8Og_bKcV;9^W?Cg;o|-#O{_ zM4&CVtP}CeJTW)1%m;Ht`51SAW6W>7LiZ4zM#e>E>y-|s1Zl;gX^W}18Z9k;P045{aLS;8oD2r1EZc5Um8hPnQJ0{e+62m1L2=ig;%zn6K zcU_l5@`@9;1=W&=2i73|i0hLuKAj0GvK^!Ak@AswEL7?S*h)!mlUASChB&@2bOo-; zk9h~aMryTs(eRj*VPS-g$Em|j02ySeqq?I=(JP^nnD!4VO6tqNH|ND7_ zfzJI@`x!kPCOJJV3R2D^=ZE`kro=2w3e>rERaBqOUh0JXacQ$bTxmu~fj&^WAp6Xb z>DO{~VH0tHziqt~{|w0<-R^-6F&v3X3S?&O!4wHcio9HMcPTsi(XVbkQv5YA}Vc zH3b_PIWv;|U5E%zwl!d{6`J^^1r`174+i9!#kEXm#U^VKzwAQ-^3Qvo#n*&SGH{*= zXZdCK?-#JMcE!>0i>kN@=R$k!#;9e}3=s{;qoMO!A+QmVk#oQpJ$vzS;5>=X%#>l_ z6M=601egE>CJDf&WtQCNbYpC>-J->#BRDvNZc-I&{S*5mY55_g?F>;Ixqst|cSRBu zHz18FiJCP5S@rP(gfW$T`Wen;cnwc$3b|X&a$Yzt|E|y%S#o8vHA@MQ1GDV z8JkvwDoT|bUob`uL0)ajB^Yq4baptT3{JcGyz|7+ZPU!NEV;sHZPgG)^S8=|nAv~K zr&rJvHyIr9$7j%dVZ9?X^M@Or%&DgwmPMUEax(q_?IBDV;Bq~lE_FAkG)?2OxU@LT z=4PwvH3Bx1?Vr=5(iD}J-nGEZ&Kam?qB1gk5+^+vU0{tBP#J4MU9CxgCG8(ANC3`` z-DVN}rq#Qi{_|xW34aP~2I(HFuk>6!b^W+@$zdLUS%*AY0<*91I0s{wwba$}$CW!)& z7y}?CL|U+kh56@|jIwF1$pZ5^b{62NQ$tJP7DcJ2=_NLehidq{VO7W0ELBzs9! 
zL}Q2I>xSh)t|FaGjY)spoUX4a zG0v&U{lPk*k!=u0jLasM<7rh5EXA!i8^&AsE`=DY!DbSn9nD~y0a?W%*=&g*$ttnD z_+5(Ml_7dSi%Pmre1x15YPkubP7{J7p)Su{Lj}MsheM4bT~k+36?lnXr;ePlLVQ{F zWRwSCkt7!^m8i-`+NcnGWQucK{t2V6BKX*#GeOl_H8W(*22dyAl7#z_K<+)gk07z| zdSU{oJVu&vXD*y}Z$vDg6aTslPDK$(N4k>2jZ<3TRtK9{$i{z0Zaq-!~V&_;2Y8 z?WAM_*IQ4G;mlQ?OU2*dpFiH`5a{0Q@r1X+cn1%;rWh%)+`8SUgt@uoOL=>K5@Sf7 zk}xjuSH~ZqaOTfPV?r4o zkY`IsOsoeCeCI3M&!H!c*7-9Mt@a$NzqLD_WMN>U1i}O7Mk?A48;+iDR)QV6{ zMzah04Yx|LKb~-7z%Z8rwL_LL?3gA99Wea|_-oixvPaojO(~1xs5|z*OSh8vSq;nH zir5U33pv@sj+({qFD5Aht!p5w*!UhA zE#2#4)5J}pSyUM9H%SK5H{_qKa`Me%v{_K!u2V*ls$_Xq1@dEGI}qiTN1whuw{D`9 z&;@y$>4VID8~yWiV@^< zuT-hMdsIf1;|?(+@VD#O$fXY4b^UvukuEu<$j|KWl#+M=f(Wur73JqiKnm0`0s_L# zy^0+`iy?$8(Cgl#hIk--eYmplK%_T(16l>gi{+RAzo?&dD%#|fm=LF5r_DaU+%~+J z4+a^Zyk^P*7;KMfS4A`GPz);z+PQl5`}@YCR=X~QZppgZ{t;t!vlki5=?&hv_crys z*G`Ns&N;YpxF&YBAMh=w3*lw~Hlt^wBodE<6Fcz$Wj^&vaLi(aP5Lu?0J<)c)c$CN zdD1nE`$9cyjFr{&(sWqXX}tWvpW99W3Q6e;>xV`4o$Fpe4#*L^9I6;bL6Tyz`7~xb zu=o>@?|T;msJ$c}>5Pgxs-_*21_|KLhSO(2nl8IsZi{-9{=dp6zW}3+1@z0B`a~}C z?M_SvzgJr4A{V(~yVu4RBH{H0o6%=4RN4qaCKzQ;DjAUdVoaf1j?e>{iqerGH7!n5 zy2oIeW?xh~#o?RU+qKqNO=xaBGi%%z)`tN3Pu(5vtg3x#^?z4W)b4rwe#jITYw;FI zO#4C~oKs5UbvWD<5pEzoV}l-o9HY_q*L%!SE+-kx9a$bgUXm2Po z`}PD-nCV@Pvu=}IN`WtI+i|n~d^f|koLKpjF%A^5Z1+4Q4OtmdBe&e;bR;jdGl1Iq zei$kxjla&hl@LHGwdp)A;Kg7 zH^DeV@?ReO^&_R055Y#gsetJg2ka-*bX(7~@}a*JI*cwql*%U`a?jUhNM;dD=$Pt_ zZ+%rx)DO`Rmw>Z;SGoCtS_iKlB2Gf#V#~BNJ0G-EVo0bT zMiU0S5C+~58x;{*Ae3tLI}01G|NGHSx$L_fPr%;=I88T7Hy_PJjahxGR)5nYwd zts+8WsfM>%;mK-=k%^>mvnIyFlGA+0pZD709ZoqE>N##iB8) zJ+>{in`l0uJfNq3oXq%CVhJNM!`KjvU|g`-5Y+BoZ)y!I4XYjf%;I?9*a|vmX(R4o zH1gYTEAe9Go>GgIlM7Wf{~XYgAJ@$mF!m_;1~Pz1O=zA_>hdfvJ8R@l_4NjIzYciK zQ`n#V-Z)ikyyT?u+F3S&TAG%2EHeG5I(2t|KR!L?^vG}mnNte__#=k`W{ya39ygcb z6-3tUe&QmYqX>_=jfWqVy&Q8>9gYl77pu$|Akaf0k9zG3z~|U0FU=7KIIi;x3&Abh zp~SvrAogmzo)=T5GN&O|z}t1~dC3BbsRTwNG1bfv>dMN75rOtaiVeCQdlqZ0U-3YUNA5CW2zw}O$tc|`JQ&TRHd1J(4^;MS2%Md z-k81`Pa7Wg6nU9pR#e?|vHSQ3a6>9-rC}>&-1}FJmnV z@o-L+mo)5anhs9}l|zYI=PX7QG8vifz(s5--N&3=eLpgobUxi{NW2%+egyl-)W}Km z-#h3nqk1Ydi*PysKH1za+xNN`*JbV(Q-Y`)?A@PH5gH1iH_qK#WQdlb^uIHPWyc8 ziYZ8B=wr&L-WYy@x1^BunrhHDI^M`}e31Dff6OvCxUY+RyH49R>;1{b)?A%@Yke38 zt2B!fMz6iWJ~17lT1jznIF{^0zecVt5Ef05JKF5Io92<5tl>2Ukg(xMB}6Q&tQXS< zXSWY8ZvQD0&lkaJMZg|Te}F4|x~2Tka(%iaVm=MzPviB#7>*`$(1mre&h2_7OYv?2 z=?$`sTBr52>~ilTi@RDe`s==-EA%KQ5+jM4J9_(&26SF<)>{Z59Xv7$rzP}m z?bt$Wy5MgR+mg%7#+hXyJUQG^2#~MBaYa^Q?$5O%l#neBc8N%d7V6If1U6rzLVBLe z&Aa|=ho;=tm*1H$_{Tse*a2K3rVWY}a)$~NQPE%6c}7caF%g>%BSflUe|t_j+FgOf zCG>GtSJr@SEZ-x-q4!PqfDs!T3@q%P;_WsYk?s8>&>6@B8X0?UZ2;8=fEL93wF7c{Pu_I)T9C)=f^gi}o7 zB)oh0q2C~4eJJKw-1-iMTNA~$URO^simWnC)`Z8=wUkpfe9^0;aQ~P;DTNE_BJ}B3 zim@vGe8u>kFmLCiA zMvmH^ef6*v5eQ1RPmW1dnjsa!nX1;(@K49Aii>OQLl4fpf~?K>EGmlar7&xdwB55K z$T;U|>u6_3zf3b~y68Oe+QD@j9bbF_C_3qSpC?+De0pM{(NnR#o127=4xzHL@_Ecy z$ntK0RZV&fq$maaw}{}Y0G?b&|EKb-o+7U*guG&!*4)YL=PhhPII zciE9klMc3lAvIlpTVbe&YC;_BlL>`83EGMI3S2!mU7w!BHxCL2m6MWXnq4>ITby}r zmF3f3)s^q*d|u@1-JxFdCfp6Pv$>&u`V?AVoYKi|>HeO@P{KT0y7{jv)(jIBVOUmv zhHi$-2)}k+F}eV<+>n_~%lcZCGq4nbj}>V7Q3(r%XW#mVz~K<8+w!=0esDWL83Ogg z=YOF&L8N1AGG}iCK)DVCGX&U<1-b9JcffTHojd2kX{I}Pjd;?@a*w%Qx6X75^*)`$ z`ZQ4mw^aTg0k^vKHv&#$E}vJ+l7%G#lf)^f;qq(=i*878aQJ$6TG|%}_@2vyRDA~L zQ)Lwuq6Rqjd!0Y-^N=n#V7+bWY-eO$4~J%yQsDGr^QOFx9Kh4|(y2tr@AD^#}j(AdZ_qDB)OlcV+M8RVV|5j3{+ zQ6PCvLDG>u!2e)bCR7eJio1x0hiLlp0(JhCV$=Ntrry^ zl~Kv4^op5qJ?z*t(^R|1-bvyZ2uL|N2Nv1^*QU;AltrlH=Uvy=`;#IW_Evg7^;737 zdg$5Yl$3y>A$5Z7%@t?X`1svt&rcrY{}eSB*etDPrl(;N6YBb&es_EAp2^s?(aSCc z?q6L7Kt5V);pm=qy#R8|KH%Qqu7Cu5yU_5={=b zG$AZCj-G_sE1m;nuGDo*?(i#_&m^B0opc7*6&u!bi9wN 
zMmFzfd#@L*Sb`(0N3bQey{~vP{BJnZIrI&k=>X6Vi?R3i%Thd>emx^*zP%-Y7=P0j z`eVQ$W!ZM#FwxhXO~~dXdcSi_sJ_uAA3GMvZugt+^fUyOI0lc^)L(`F-?G6$1&p?= z(^FG4&A68XiBkD^_wyT`xVX9>{#?GAr7K=PpDrOe>jdyFctmHPx^TyW?{Sn~QMYyd z?aXRL1(OB5ou%)i9iW4_V>UgO6c3jf80JNb_ArYU`?$|qX{9d!l`$90ZG?lIn^GlT zN)#yqbEk$ivHaPSi^seB?FSCd8YCa)u}Yu5iC+EycYlYTjtC2;tq{^{loCJLYU6sT zvw|+GiQ3{rPsS^6O<`35ZdOU~BXY`HiH(t0dOPDxve;?&e!&hA0ko-9vYH?27srM$ z!2{GJ+gE}m?Byacd=58v6k?GESkXKTbj`n!Y_o8aQJOC2B(w`pXnj2Xr^;_;w9CI2 zTu!BSY@zOzd?-XrAzyk*egb-wc7riIL(j9J62~BF?R%dc{PA*)ZX|(SSNxzAVjVEL zstI6wuRbmKuvMil9R`rOgQ`ymt1J+VWO)s+x~2xE+@=uv7D|AX+@nsoa$#mFr4Z%juxs2%F{82##hDg6I-9E z_NOqS1^Zuvns*1 zqX@YYNIA{o)6%E~e*^sWsLsodiF#NQt7?$qhqi`>c`CogS$MSf_1l%-$j1{OeGQAn zyt((=7HqV)7+2T7%bJs?0Y7*L7055RFZ~6XQy^*Ht4r%*{9EZ>qJlnlM=V{y?ctfz z_5S7(+}tnBxVv9GQlL8KlsjgO^58A{MOv>}k0oQka%!eFym*GzPO0 zSsWHOV~}XieEOIw+-C65$oDqA;zFzC(kVg4)ug0pUg;B6p**=+YUrnOI zdWV>+BJS9f7Smo@5=I1m@jNQQoT!YBd!-*LFenmdoR(&s7P-sE%gvd>dGmgfkz_;_ z3^bw#`8H+b_UbO{vd_jRU^bFo6qYMCVMzjY&a!;18@ze@WrU5o`~a_Y=apP0GCI20 zGcuMLh$&B)wqFDScBWImW<%w_FIed5ufoU$ov6aBTd!IBfeTcAhkmz?U1=PEY7MCu&<}#loBk#Wyp$ke$S++wFTA7SkaVs%kpkfQx z!r%ZsSu+|#{mg|$BUN?v7AxQjC8SifWGr`FOU6f>a2Xnt<7rzbwlk^ppJ55?xlB^yas3PNGD^f|QQPov zrvPtdVLKZKlOF?CjNNyu2$qWHu1So0Q4s6G6Z?}tblARBOA$q3Wq|g?O;xrp1{cP? zi$UPzc_{G0ap~znJwBA}-V%Sk9vpz~?E}h;j2vHJs(x1VlHX;30632ygey2VYDF#U z%ETON6^SzZ<6{3-$ecXUchZ(@$q_>PF_LE}P1el{A4_dkMXq{}Ug44kUq)J)i#6(( zpWM9sYDLDbAYxQEfvfcp-oG*EU^SDB-1;}{g^=WXiN=|GK9M< z;=NUZNn(Fwz}Uz*8=CeIhhhCqiSYYYgfWa&Okxo)I=T)B;cuS9MRumZ@n(}0&vv;wI zU4&624ohVLK1OCpXeKK%XOrw48~`e{@pL_zHH5dxYA+2rRrwJY zo4_Wc)Zofh5izZWJTWM9sj7d(cY@I-HljsjCB^e&%|f(DSgCV>XR_cR1#|HvKlZW` z_-!twdPP?LqG+MG_1LxI3975Ak`HE_y$0y{JS;sQ3H;SuS=c!Dz?f}bi`Dzxb}txt z-}iPkGc1oJS|(0j<^@_V_u^Lh{Q>gUxm(ldJ9-K-~^!I8vy8h4~BhrFi| zX|k(O&g@E`;4v}CfK(a&gz$9n!G@!}jQh7ZBo+e@xj@1^zyzp29W7uz)5K2#dq07p`sR zv3gLhv~Uw+^J4&bj#f%J2S^;sUjMvuRp~nxGRJ4a74ov>vrHRFjvEarAbmu z&Oqk#+fp;y3z7)(X3Pb3WQ3`1cAts|D%z84e^vQfGWp+rDdB5B4=-_2W&mjIM6+2f z@3bG~U^YNkDN#+a{o7Vn0{%9^XbF<1@ovS#Q3xIcS zE4A3A>Nd%o5yM^8i{`PIVtnVXSvg@Z7C=f5O)k>~qV=gfvz3dTLfdRPVZaT4tNwKO-1>y}A1UAmMR@sq1;9O_*GH-&vUOK-1U9|{Zfs3^W?Ju&zRS>1^>38uG ztgQG>;v|D1cG>cE|B8=^6D+<3_sEkcMOYDB&%RY2j+|1Ck55ttcp74%&Q8S7@2^2! 
zPZoU}3XSbkJSQAAV!>xjkX1apZKIqC3e#viJoj!IpERQW36WfVrlf13TUmK?{cB~k z{yKvRxg+j6#pey%q)b*$5wNF>A<7?tf(Nt`_aPGn1uMy9j|hRe#Xli#aEto>9YF1@ zyec^vUjmr{v<&BF*Cg1Bf3=Nl8RMXTmmpRzgY6!XG|pY_@!>k(Q3q2k^r8!PLs13k zUG?LBK0ZEXAh!&lAs5pZl=SohJnno4we5Uj8cQqTMw^2&mu27*-!RIITxC&VJ!QdF z_m?ZRIi(w&l+H;qN(#+2pAfy^?N=@WK`x}qy98x)3?jqpe4|pFbb}Lj%JO}Rmsz>r zMSt&Ya}oAKdE)Jqqw~38@T5}m%~FF5;@s#al4@uQJdkds2}PX=Wd)B8enNR5oFD}8 z&BVlyZ(zZGXd9{g@MVxz!weYjz%HY0Nn!iRbjECuMxr?9cCx-_SKG0Z1m=pJl%lD7 zEJzuo7kENy1gx=1L1{)C9=dYCGYkIpE6gwKXev|vEY^masK_^4xjp{h-XGvXS#$EBvJI9v}9`~V!vd4#R$zJbowPLgDTYRdeaKB?9 z%M%qI4xVUs)B9~&!@&Wb|HT-b-~F#l67Ff)a1BwtK5d_b+=#KOtS5gx3z2T8K5qa8 zDiO<*NnQl0U&|rxp&hV&UnuvsE)jN5D9i|7<{NQ3U8gn3^M^zd=c8> z2{+HdpSwdrY|3ug%vTVJ{(lw#P+t0iI&0T)uTettqcM!slY&M9;3WMnilidP`t^mo z*vi%XgQT{jDG5$hE#DmwwzBGLb6u{MWx#lze&5gw_{95n?LL34Xx$K<su9tw{QpR`fNmcol*>9Sd~}6b_IADiT}JCdh1@9#;TrLnkPstf zgVo$9H}a>RM=Ug7q4tqla^k;oG88zdV{l4KgC-K?6P|W~Q|Tf@H{yGyp(^%^+)#d$(q*+LpSpKE{d{Tg>-fHaC%_ms&Rhm3jQXPe6GJ}E!YwR1no27Yn zE0qSWg51hIQ1pHUqmaH@1fwsU|nL z?~j%K!S_fhoB+I88-OsS^Py_Zz7~{$cK)VTh1*Q$jW`f8yg!r*a@n&r#{IWS@1`Ir zD2o3@UtO=(V?SUM=JI`b?yt5n$;43#QazSMH8xtW`lIj|4fXfFe&cXH!HC&S3ONyR zakWm>^Q2*D^9P0K)MaI->#}hH2 z1EgO*`MyJ*SwrgzqIC_+1+N^3&$_pw1YA{gS;r@g?Hw$c-JN$!=VsQ3aG*CE31c76 z0ZuHZZTkaMjy4lAMvA<=X zfGB>i*bNShn)pYcE0NER*c}L6;e=v^{RBk9f0HbM99MW1sh+5~j!#(44a6<5?IMH9 zim(P1_|s!oOnrV86kw<@a*)E~F_DCP65)yFUG3sG^2<^!);R!%GX9&<`}4lxab-7r zk=TDKW-(A@Cuu$JB*MKw<{@+vys)!SeMa)nIZp%`{E72w2kH~*8U9zx3mB$&=`as# z{^eOUJql&*3y6A!!~Isg z=w(nVa)^y%*^0W*BH|bUJkn#bRCSgj2m^O>ZC(f`kST(*EmkiNK}WYgfp+0}R#zAA zKRl&7@HuI!hq-mhSy(U#hdP05)y}2wXWp}u4-Nip876_xAe$jRao#=p>;WtGSp~Kl#<;-M92S%p1dZP(bBZM*0FID z@xq{}!>MwMX%bnD@hq7PF_GIzKnHYI5Wd}e=5(?%Wfi!RQPVq7k8we@{MjgXumwW% z0B=kdFhLF%nm%}$*ETp~3UMk0>(9g4R{YbRB zbqx0|-juq%CI7JOusabS-(zqgs>`wsUr zRZeuZ$zk@TMLuNZbRNTSNJr{C&F{ZBuk}L|@F$SZzodg#?WgOAmZQj0I_6>0q~RG! 
zOd7|>#j?wJd6BXBLidT5FLM4VpuVaqjJg!M3qo{=J6JU$kDsJ)d&-n*0e&3{F~1I# zpxgVi!_E9Ji*39xXlUs20mGNu1JdAx;LNA<-A&&iV8}}_3K4hMC}A!p$kzAzPm-K` zyK#$9P9k8~m)r^}DKP^Q)>rP~uksa^l}2Yw=5g^#3v^qJTlIo3sA< zuyK7WbEOjAXA9w~7l^%}Ez^rfR<{qXd6`G{5#ww!2xbPk&KXD~&;dKF&m8vU5~1E` zPVG+-@V=Kw1s5%HTtfrWW_YZbQ&E4uS$33wd8+2BJFfQ!X&@)wBaz->OE0K`gC=O6 zU6I%<;Fs+)tW!Yt$8FrK)0Ql3X&Rzhv>EuP3Mm$mZYel2ssEljLRd z8hpr>*676#Zpd64=i9vcqF0$Aukt6;)S!H+kN$PSl9MLM!ojvmt1YPCgot@G{CRBL zaW@9&ZS^5KaGv6`^f43sYc6xCdgsiByJlw<(ZiJ`_A(lQ&=Pp1S~Y}3Ey#Tb{6R6X z_6+iv|K$&6{IoalrqJ9X-iCsdkoVyCxGgG+kBl}M{|T53>1APs4gfV2-RAV~b3nUH zD#PgLD4J4_@5gI-C?JgIN%6*|xudayeB)1Lp0sppMMBX?HBZFmxDRajR=o9G*Gvjr zH!2D-c_vamqSf_2{a4dbELV8Cpx55=M2jciDOx^AkW@3G&5_pIljd(qvp8z<)i%Aq zu1CVdHb)Pxu0fq}o@4(nHIcjmUzjBdGo|z{IseWIm60XW%4T-*gn4EfipCAJj$Tvm zF%!0LXW54^&ec>Y78e}tRD83?pq=l+INfG}XiFNVL2GJwlU)Ep|9PeNvlwLQa`x*N z6!THN(a8E#cA-C^jdq*;S70tBJoaq)k&gRru0@;_EdrKs6hIic{WEFI*XX&RF@=Cl+{UE4_@3igI@b(%e$vQoU zkU)lr^lYWvzn9jdWt~@gzQ8S$WG&6aFMGT^|IbeG{z%w8+881^G7qsl7sgcOMSTAN zvDJ5fP}zl6L~4xOUtL$(I74{p1W1v;Lmy<~3au1#Y<8fAGDMsf(R>rm*aIr3e>j9Z z^I%Y!;(i(}OSJq`gIiy9v;k6T?=CP10q274Bet**u==V_>INQx+<#n&npx-wen)(| z@fs(@$!Ho$Fg<-VF%!1Rj8bg*#i^EDe-p0+ALYuw?H_zOxAWp}P5;osNMg(u!Qjqo zXtyE`WLae4%Z@;Nn^qHguueZfPiV-J*Vj3Kh)(4dtnn#podAjZeFQW zojjqSkj4PP;R-knwBfkp+5Yh$bd(>efmv9t zcCJET^Ua2Rgzq_R$f3UWv6$^5lG(jVO*$|CRjOtRH2dQIb?-D1&3DN7Pt8^cVMU|S z7SGZ&NV0z|)l>z+G)-~{sW?ftI+fI|;L+m5iVwX2YV(PjRyk2g{3Ol9%T@8C24nYV zy>st}jADZ+9(oxPrl;#GgRb@~efnW3@&(irqB))eG*VIoV5ntfNI?`y6KfI7KSl%x z zt}$@%B@K_a;(VB3%^H$>y|QY$ygr2LMV%4yohK3P-?H8mb(O-C=*Kv_(o^I@H#{m9hve=o7%rO~XE#+YkRZOi31{K;b)ayY zq+S)1z!bjTFr`Y*l2oz01fQ*@dw+gfb0?Z#h*>fV!=KS@ zG-d4=($(0%=^)goza^L#XDx+XGC=|&6N~3SdMdQFl>?XyXD$X(-e|k(e$ulE)t+LO ziL+?mva}VHZ<6yeIB!GOX#F(n`glBY|J&U@=vposnvke~hBqq(;j#rNc384$vMB{U zN%lh)IXR%Vzn_`qJ@>TU_q*!P*M+t5`tGt|JQRkwFxBX**}Tc2Lw+`5igm=4I=o;* z>A{?Btl12uyym86S;JlGzC*2c^!a7FZc!baAY49A8jY@ZV^0+DPU9kL{$GnWG}O)D z|3}kVhDG(gU04wW1VLK58$m$2yE_Gj?(Xi+p}TYF?hZ-m?(S{@iTC*XU+?$%bmr`R z_Fm6g_Yy#SaDHu-p&Ol_rmxHlU1c1KAi(?JQnfFqV`vuKopP^##A#K$Z7k4fpGM-Q z9BpE_xa%~01f|9s%=yP?Q~jqReG(_KVl*0upy|hiPvXk?A&B8~;`!-SSiMw6?E1QN zPPPWoMa?DlfAYUrF(;;vmByZhbf2IJnjnwrIJ?X(S8q*7XhffSq%FJ}J zO#awnt@JtX>Kaq=<9dM-C7`uD`;8bJ75z@8yntnRa>n1}+0u7K#Qpra!`AxS0E>h5 z65c5oq5`;!X4CIMq@BDKXI81Z~%|IOb zBWvsIuO19+HmPD0M_qU6U9m09Q;w3H6hI9zF+{-p1##8kA_}OOm%IEtw}hdb)x9)g z`Eb}LoSFIQ^wV`j^=28yW$xi`m4zsQ;wFaqZ_BbrDUU_JRTYnodY3=wKi;NhPa4?% z2u7*KWo=;?*x-?%+w0#GUy03o#~r@Sm>PaW9ny%1@zE~hFWs9iH8=J4f|Gb;tZpH$ zV7>-{A5?tR<+Qr0h&M*}BoY}-x16sK?w8_sBMyXW;|=e!SalP6sNdQY)o2T`pu|)u zLW2fl-c7j49lv;?xV;pb{)skYuQSgN9ZxL4UBWmbrCxR7r_0JDMX~EghnB-5KmL~4 zC3y44Fq2c8MlNO)PUKXwsR9D0T9~4)qQ@KV{r0~9w|CINB_oMB1i@zEIo{dmBPW~4 zw6Edk5w(dBKN1(^3`CjlB9$w}W{1duO|M}rcmzAj~ zJb6D2w!qTQ;#JYZ2k>nAk6p)#5%d5V4EnUxyV!<#_0{zLh6y9Q;hkkJH9)trQhj%< zb-)^E`ToTI1!y8wP1aw6s>Cu}YuVj3MEkw>{CFc0Xb%+E0i&Z5K<0o1!h^v0TogtK z<=mt$8hNnx4mXT9k4-QoVS9W5pjOB3@`bElRQf$0KnqGJc3oEtLkU8)(ghdGU9s*j zt$JJPxqG#l6UK_r`Nw0@oEh~lwL6mi-(@y{ke<;HmzNE;C4gYY-odB5gn!Wu6-(pj-6Mz0Dvvh- zK!p8Pla z_}493_jqejkVK~YpxDm52ZLb9ko>7u19E!unaD?Goq1h+i8?T;OCRJGjenvRHzEb{ zy(PTnb-d<0^9m6v{A`;f>Ky};LV);NB@D2+uFGkz?<>zJg&hs8OT?;cxTcdvHKek1 z$FsPyJeCFy#W!csyWAPuLN81=S2DWxic1b=Kmp_6Z^#X4R>pmFYt5qH^>AdVf3hdV zyF`G|plJwRLSo2#$xdo+$66X!e&p<=6#yimFSLu;mwh>f));(&*B{6s2g!5FdKeE(_puP^@H%ZacyEvAe$S?-UM79hoVVN<2ErUy{z92w#G0^EfRVWx z5R;5I1&4((S~>KU|E#+Nx|!jUWMv5(e%$?KbiXyVRgUXf2Br`q!Wl~Rk3k4xmvmA2 zNu!_G9S`LJ?kKXLbcRz0d-(Vi2PR?Fw_|ILq*VYJ0Z&kz1KWgbGLfImo}E`vF#Cg? 
zO`jD{SoxX9!#S^M5eE^QNw`4U!_6)AY1YW#WFF-DSonu}yK45@PO=T4K4DTX#C~n5 zu*+Qg6#=`GM;lDaMDled>4Mz036|P}*<5-B;o=LO`T5sPCqi_Y3FG}sX$%Q=Lox!4 ze{`Jzf|Vz(6YRs{;3_E}hZj(YHom6pOXi*O%C7dy`>e>#C2u>LP`7l8Q`y%VoJ5#L zlyBWW;(Wp}S(!TRkB{BXk(eEF-0zLxHAON(B2Rj4CC9yoMwqeXu3$W=(^vxML8E|< z|D0$pc-*crikhMdWB$pyUv@*Lr>7OKqA3=}#!?uCge6Zwl$se=zE9obKdxzVgP0Fk zoCz!STK?(cMil?k#dB&Hj&2_^y!)3epLuqv7$Ypp8Vv@KuH+APc1ZK&Gm;h)k^q?& zIhn_zgG>zb(IsEQ$`?#dYTfnX{Jyu@M_&F5`&jDHB0%&vu#?6Y137R~_*A9P1yfouhk^;pRaRBLAxh>@1z3n3(j$LYy#A<1;uj+L9KSsYQtKm|Ek157;XlAE+_<5ov6r!y^6n>bu9lYeeg5c zqknGvQ+Qe%uW)!Cr9Zk9Eh!VY{!H0Ntic7Z=eNXgQ6HJD9*^Xk$ zZiP3hVPfhO0LIUAazl8K2)Gw#Lr$*Qp~m@b0pvlEvs1wPQ^2`NtX*u2Lw<5@SIS&v z!CZ;FyFm{WEe;L_Db@c*zTvj>QdwF&J{AFSa37-qBa-X94C!jq{R;?Go79`&~=&G>)VnevRDZh zj!kND*&*JBt(L`7g1x``)4O@qLG8YPe3jqU*C#+@)H4jF-=|w_&%$cN#7h z#(#0XQa|+cIt4cce-v`22vrNi)$W7I=@ZTOoYK?VI=+qm1!6 zwzK0xCk(!s5Y|aObnxF>{I3CKYDj~-Qo)n$t2GXH-Y6|n4_xqtER$QzDY)N@Lf*Lq z4BSBl@Q4kdaH$aZk-8a)u*{q=Us^3x@Vswlj}t!(F7^X$?VDwb9v^bnR+w>j3)AL( zWaxRs;Z91BCqosd@Y_XQX6mNpOrh}w(GO==2j>He%yvX>S_FWVrGlX>0leE5WAY5Vmky?Mez}3Bma+b)N8I;E{#cc~#s!zL-UEOO z{cAzzC6dP@t~}TP6@vjpTXO5D&NujGjYPO6HMaW>-zRjs_14QyU)>uc29c% zPTq8h3>eJzy2gn8JNSdTyrU5*IOYM-9pG4kwj8oBNa#n<=7Uyg`gVG>aFyG^h$kY3m!PVn6M)-nqg3VWF>)oEU?Z z$wUF8%6DYuO#R!mdY7@6U%$3WoQN?N-w!> zhFngx`y{-cHf{CDQKyyIPRkFKg(fF!jDya4mlW?Y)M^D8nML-*B@qsuTJ>3JAnsgi z##`s`ZNvt;3qDk%mY>BQoubXazrO`f)=c`hwRvP-8o+RhodIDE5g?&2lDyyHUBt$qmlf=)Xy2^JX>3b(Q$%(Q%qQDVv%1bBvZX66OUI@-ZDAT zgk#&{{Wn#Poy3kZdQZUS=P8>o=5A|d202+KU0e-+@E0~fV?U&E=Ki)lwnYqqzJ@gf z-W5D?vCWk8DeA~MWN5c(1l}4P{9uJ|pl}6AeA;MxL@C%+rfd&JY4IUUY3Z&lP&nMqg zxM_c$O0lg7cldxHYBvbgp8X#M&KHH`R?sv5)-%;n>L0+0(JCH)qhWT%(eXh>RXie} zW?HFNTgoIG(4LNI+pT*#QaJ{vOxEOl!cA=gtKyff7qO8~r#RE!8NYzq<)_y_&h0m;olbAJ0d#2akRDkiq&qT8}8&U=fh zR#?tbLXp0+9s=in&G{VH3D)YSOlRi&A2#XMUc<#1(8~UT^=vFBZW4!ScV|{l5%GbQ zM;Cs0e!~1Dlh0ptyIPUg-!u$ys-rVomtrTyqqAZQmQNY-9A}DTgiK9~-5ok)8ORD_ zVn%4HKTD&Iov`9MP(FABcDIib6p{xIgIo=Bh@7P_p97SSqLa~>g~!&sYlX$xW6Q;G z<_@m(tJ-1{k|dy`m3}mIoX`F>m8)`iS-V0=gVX-xwzW2ik~q1(5f_;e&=%O;1tQY& z0&NKrs`1Uu_Q=qfnH$U{+%(BviW~N~UDvM{wKr<=xEvKKGtq4Lmdmu+<_oKFOhcW2 z@KEI3r&)ws=X^aZbUWE*sVEb=SrR&l82AQoPK;8(YAmM1g5>;fl zJ&v~hTwR&G5vQt+fUIabeJGQ-itEm;9zqU7E)Kl%4gSWWd7Tr<_pi%0cr6D0l;T+_ zt<-RYX#5s`gL^bR@a(k--%DKHGEzg{;<3vbi469P6O0b1U5K)FW@{?G$lDz)g$d1# zoE23oYlGtg+Nkj3p_S=%W^ro@ePTivmu?SjYQ4SOu?p)1Q8}RkPfldBxWfPzO-^&O zIxNYYJ`k|QSl9UI5=VRYB_>c78MQ_5e`|skO1fA*^}6A0G{4IP1KDC9bN_IW5j~T$ zp$NyGO*)fP z&7dZkHy{tD1(&-N;nW&6|M?W^x|NGO&xNw|s`Z4GOtQepnL-mQ8etcAqg z1w1;|f>cFxQkuJF-VTOQr-37k8(c*IQ4_%Yg0{CONO)U|)r^5xS z)|+rCOKer{OPF$pKop$C^Rfq)ib}pyVfF=6Tmc;@Q4OQnFcchR_MBW$#hS7!r%x(x zd>cvZ7l0ZnZ5K8F$yMn`V^kF=JBR+&ZyxDdW8ADJc{W<*R;9sCT#!l7pS+l^FMgVz06`q zOsX7b>7842rje5noEn&;;%N_geoA0S8nv|4TX6Y)8Vn8!f|xzs9c{1W!LI@4Q5Xum z`}_M%&VW!jcLbrEU>r#K?b*u0Bt=W*Cup+VGAO*3|81{7-a)lFw}NFk>p33TTanLb^PC%d-cz`xvf0-b4_M<%I$I4y z>F>QsHQH4lt)e-T4)oEGt&&yA@f>2x90JM703bn%$8MX`Gb2Ci+f-jd=J@Rl{QD#k z*mfe{zh{MeyPY9qyx6f($RiKdl|eRL^QiCfxabH6x*U%`H{|5NF;eab9~*}tS)qti ztKTh+l*}9bPT~QqZELMoRNquSv0S#`S%FZnQKPoCn0F6UAI{C9`}ii6$cqGb>A7pS z1>%S2OI{A3TezwjgWJz(6y+PD7hKoi6%vtloz%L_eaeeUC|oe6I`GCg!MBNtIGEv6 z1ST^3R-uz6!kVkMzcisAZ|q=j%YUmfLz@d-;z`sroJ9)2r7yy&qRX)EsU~GcL}H}J z*0>{#+$6qTJgkK|{6Wea9}g?flCF6HDMoICH09f!zEzsgda*pZm-nrF%C}DkuK{(< z)gow|$jHd4) z5nwQSSu{aZkAuTT@6}F?@`7sY)8W=u@E~Cqax||qA>A6d*CeA`!5~ z*qMFQaq+1~GL0Dh-A_e=W+zl3(?%4Txs01nc^6!T$!NfQTsKG$XFr%nqvEy}Bnlgz z#>L8njQEGqG0j~^Ek#LOp);KC25mkqj>gd#Sgztu$5uHoMqAs3NeuGN=KGb>Z-YVuD;!?9ky#qD$)3N{HzyxxEo zjMUfx+dDeqcnqicBll#|Z=LnwK^ORFyR#`*78CwkeuQ{N48Sw=72Z_+qF(FzP22m? 
zs{mqR)~a_E8$#F_vHLm}^wM)jL-yP@weo1w<%LzDp(5P(1Ane>)Qu#|(x;mh4lADF zbT{IEXV$C zcS%z0WV(*Dcf+-4`fQ**_jHsxQQ@(~Ag@y1qkJDcUX9Bg2dymHyE^=E^6x=;yuJJZ z7~D2QtgM)Retye;6vh|Opnya2{Y7~HTc-v?B~z;d)hnZkJrAhnRP?_O;a5s1H#fDb zjW_S`Y#)u{(B3)6dg~*nT>D4;vwyE&NPEM)KO$4U646MGTP1@1F-5$*h ze_?^Ai*Q9iPs(W7T?FJoa<$V7^^VkO?9a5#;@KgA_@nX^D z`^)6q(r3`J%V0xUQnM}CYnNtPWRA4frMiD|3M8^y>EeT`U85XVLr22*myM}~D@^^m zux!LK<8M7lpWT~00|t;_Li>9DEi}ugn62D0vP!jry-GZOyoA_WBak8c7_sHPx-yFA zk^5*Fre}u4ouGm{D)?dkZrR)(4sM68dIrk9N2M%0@=ys1L$3LMp}P)s^5jQvha$Ak|! zxfidGFKR?kI3n{se+Powk-*U+?C$0~Wd2ZP^{eh0R z-XMf40L->COb_dFNm2H5vP#z*|2lkdr~MX)^AKzS49yv!r;J7XrZ@&sb8ull@$MHrbweG)$qpwqEQoz=KiU06baOKc+qjMO2pR| z(|)dB?b8RJvFL?qv~cTVkszi_XLnpG_eG>y7;uZPkA-T#i^cPx+`$;#)aWZi*PD($ zFPY|cU^wtSc|nZb3ol#9uFIh+F{y@7#0@I&FoYkwee__Fi^X(Lw%zMC%XsxdklDE{ ze`mRdH)VYMuD-?Uoux$0Tmmj`0;SKrRAKVwWMczxWImIl$+>;&on78okW{EO!3}RL z@3}*K;S^N;WvFj5Q%Ff=vPMN;RRRx+?;Meol0rlI==21~r%XXXfz!@t$xELuoKr{| z6Bp-SrdFj7G;1HuSKxX-?qS6dO%yZJ0bQWJU0q#=zo(~B0gi%4q8DSVp+_tLi!|6B ziiLxRfB(h@Xj_DrfIG+ls1Wy8zdxMHAMxH54da&*^F29uc+;}dR5eCYRIHRw72GKz zczkr*8Kxxsl5H@k9~3i!T6(5;!Q&()DGTM(oK&d^sob?&zzMU%9-5a7*?HviStaB3 zBje#Swzm;D7bI0)*f~A-)nj%KlVfyS%Unc@jxebr+S9^OM4h&K^yHLmP!^gy*r!e} z&}oSw<_%%Buwe{ni>?daWSlaH8=Ps?pfvsCU3P=_bL)xY71{}rbrHmYvSLXxjD0W@ zBdcC;lfa8Dflaj#He1ZqEE`)^AEP zJ3L{HF@n0o(w0b0PIS@c*4sCp)qZL@JOl1H)39gG_jGfI5NkS!@QWU%EoAN`h;wmq z5$AQQ4eZho6Tf$gnq=f~v{&pX(Q2_Lr;yDEEhTZ3rb>Nxoo>5!zZ)+X4u>x`!v`Q> z*2eyS-FW@g6Q{hnSZ>tu&I|#%9%k>W-8Z@-;rfbXxI{YEf-9G%?pye6GTA9bah2cd8^uT73kL29%vmN6hXKfcfwxDDNfsYmuT32hn{y$v4@ zd>!q@&zlEZoY&*(RQY7z3-b3jdElPOP;lt*YsZ`Inj|+}e0}i9U89qLnROV|b(Q%i zFFZBCP4UVJjVEVvkwPn1;`2#QsTtwtwsd?5gG7EIn_Mlt-iQvy(}@62vXE8R+Jtpi zkaU{`F&mG5X)wd{o3;kJQA@;CcBwGXQU=VCkdW@&X;@Or%pfXZSW&=0@C)aCQi2Sy z;S>|RcVSEBMB!v>YMuM7%9po%*3(NVP4^fdA0JY9`+azSPYg-z`J?Ur*uYu3@Su0+ z`XO^U;VXSje@(yvb*ADs?@P9AefrReb~da$%?ojX^WdlxBolliF|e_n2Dwiwl6BQr zTY756B64^iw-~ab>~cEX2ZtyD{YNe8x{h=Q6UC?$J?r zFb%eDWx@|@J)p))VwD{`ECv^o6Q?l zD3z=a^vkE&0aK>U+z4-}Mlb0oj=ek6K1qYo$ z;&i(>vUr@)xNIBE4$HeeJdpl_GoP1mZy`?I%}d$P^JSb0(&w_=yv8Haq1eTcJL51!vF5@D% zD2S{KU4TiVte%?YI-&LYV+2sfe#kBrl%(BaC`#t#-m|U6m+vB8adb5Jmq$3r|4{ih z(0k`*z9*weTK2QPKv}UeGa_Z0@YKlVDf*i@pX!wl!(Yo!&RzEiVDA>x*cn(7eS1E2 zuGAu#Kp9_T4IXcX=Znnn(K*f7PJOhI+M8b$FX`qkbxo&2_w!!1$BU|8xRSLqrALNV zdRMSyT}J);0U({N`$>mq&ARbLfwnwD|!$ zLn11w;=*-Gw>25xcV)cxC)0%yDfX>fz~Mr=oY+C$_e&<*dFm~ytia(v#~u3V{)9eM?#@H?R8NM+>xxWoy+umoN0EM+ z-KdCgVBEoaJP-;V1dPA}v2iWta}k(4UIX#_oeMOT~=^@mYdc0|G?JiA*czK=XZ zqhE{MEg|(#GX)vx)0K-3A7wr-b>-%EGGa z>nP#xt1rW8jF=O}wOdoVJkxe}Z$3k6!$a5iEO#dlMk{B+JDUmQWo~E5qVwJFE0Mxq za6J4-(*y!I!{t;@VTg0$+V4_Ce>PC@V2SbR_@xjokXm}$)Ms*h1-~zPjGLZL4g7eK z@-ExVAhi3e&{0}3T3T8`K=!S_k{}w5aD_<6RSp{_dh12k+JKvZy!5Yi3KwUO-l!$9Ds)tcIT*vhDFA!@;*rS zNQi+f9Zvn(w5z3eJ#PuIJDemA3<>F5S!o;a`Y{j2lfh%JPJU?r^D3n-GUAyL*OD&2 z9+l3*n}=lc+Zkut8Tiw8n~4GaZm`DdAT}}r2 z_VQJnH}Bay{ju_LV5njWLbhQYWJ#lvR8T-6@Oid-j|V9g5fRynm*efZ+F|)C#0rp6 zq*Iv+m)>otV=3z7QK3EYMn_;J_-Nst;2hg$Z7j4)+Sm#m+ZXSDrJ%&qOJ_DgYBnAp z82ET~b=B)4Q>TPBv06$Met&5}5}w!5<(|bu)&j0pDN{8RZx;(KDsCDs`QIG~9$w-r zry)gnI3gO%S$-;!on&5XP*8)QqvJbI&oxE>;*Utc-7Fw8ICs!7cT||5OsC!~DNKpB z9gpaYMS$Y}3V>MWM`4B!m}B^3^U`#GWwt1-C4_D&kVV52h+T8HYawPq8foi>*fU7o z{5Q=+?yqYrfufVeSiWrWC1r@sI+@yU?A@C zTe~_s+nKqkG4StXX9Aex6QHXtr|)`V!2M{TlFUbGrTHF{j`QWc#NqKr`YtOYorY&m z`l1kVl=0ucKR|@nfsRb4llu2yb92EzfS+$0K(hyCXA=Ut83hT*!PwaM$i@wU#j@j z+Mmo0w3UM{;o5B2|EH%&N#{`@DESc+d63U>H{A`lk%xYae%TpboxqoSmKZ7%yj-50?a5u8*-evRp;G??kcv!&*5dv!nq) z`zC+9Ia8D65^8w-1(bClE=V$GaWY?Fx}y5iRcFQ`=I=j6oCdjp?=8pi7f*<7hUdCE zOYMg+%xc>a0M;#PIHuOEtg0Fu&HIFO!~2SqQCVzVX_&@s%m1Uj9rUjP1YpZbK3kVG 
z0p!q=J=XFK`aI`!)-y$|^!vq%ii*hwT9o$Je?bu=w65k0@ENXSK%7n(aa>C!qfVUrG zYRTn(7W9V{S@!6Tp5f^AIDZN+CP3-JleC!wb1&@TJTHIL?L~-35!C1Syfh6agL|JS z)kZC%F!-rEZQAQ6caVtkX-A}xTVtjADpd!CGULoqzobNXXKa(#36bjt>!BMJGm=O^dBHiYA!L`-7rVl6zuHtC~3 zIh?5Bb*D4D%2^&Vf5*A-ehht;ZK@vV8Lk9sZI4^Et99QS1D&OiEcZ`<8KuUxZyAWr z+k(~r;>EWF`mz&)>)m1X8hsI`HtI&^SVRN_rSdZK+!DAoph+SOr~|@1KR@s7Br3daQ>b;s%~eS#xxLFVyHA|$mR6~#NL}eTdm+n@)o5Wgb{VC_(xoD= zFU|9now)HWvt*@pMMH^=RKLktWhu9nk_Q68VVMjlJa@Bf>LSnFo?4OfYSA(?%)>;W zD4^8 zq<=reTPI{Dv|Uh5w~U_~ict7IrIqt&ZL!#~bC=A|O_#iqtRBAt-6+66vOxULLwEb} zyb7nV!pO)7{t8_cch_|Aw54Ca|qM*XHj^yVxcP765gg-Qhg)c{^Z!=eg@ zkhn_U(v#Qadfe&jB#}{}OtH$(Zk6P@<-%D29HP&=Dn+Pj!zeUqc*6!d9e@_FNiv3| zvQ4Olrd1J|@C4mzk>~DzsbCmQ>bXuvVqi35isQzDUAY)CtcAG-K~!BuZ*VUWCxyz8 zbT!@JvS>XJBESKA|3S30{ctMKHkv)W}BJHL^mgS^*A(V(1vX76t0{*{^ru z_z~hV$uP?H&IG?U$j}Sc=!QP}2dTD0A_JqkqVJ0^xi)}5sLZZr5}Nk$s*g0C2?)cE z+8Le$B@bVj3_};YI%N`Rxm+AH7~nx`=M)gy8l00bG|Hiyl7J$#2Kf<9n~oqX9Z!Y2 z&KhGtyRq9J#U49)5ctNyf&Dv~K}=jcRDI&kQd?U)%_}8^Nx%s8Uq;Jp{sytk)*d}$ z1u#Ee0XKVnAi9Ht0HwWAV<46l+6YKydTZUt_;@9|#f8>&B>g4MXUj*GNx%QCp%27g z9IhjaDOlgZQB(rnHTwFc_xo(sH=a>Z(WX}HegupLH{uE}TyCf& zxkf0ngwoWsjG!M+1*l63at_ls7N9anCoAT|CR~=aa`Z5P$63vAD`UeCMSRfRI8z4K zm#VLr>1EL#)~L%!WK472XPhD!@$-b}PR=a{sk$`jQpHuFi$UP?_Xv_`tb5Mico2?$ zvynKQIcqJ{5^P!Mvv^7M6irTB=O#0>L(iapY3MUZ@K*R!g-a2CJhh^~0-^xk4g_vT+=uMbcbvLyQMb2_SNeHsRod<=xb`A|vc3<{S@x*eza@yE%tlpU zJ?u$9WHWEMw*h-_FaTyKbzxyQqI-zuY)U(wFmwUBOh5jOqa_Ds+`%G1)nGK=K!X#; z6ek5;G@myiP^3p_*rl&EmGmSW0IzLn*&e)kw&d`&rG=(&dt`h^`cq!VUq_57F7T#^ zm{|C&q3km!qQG46j19@j}glg#UIh?4Gi3PUbJvmy>lZ&PwxpEbqAvhri%+*%>_zoU#R~f z2!B%7F>1&@lM9^2u@X?wzB4+@CNFFc2A_pFHzE5uRGUGl?4OM2A$&@l##^bvJf>Ej z+*>?u(A=WP{y`lT>-YPuc_OHsovB*s|2jrOXTN)I8s71<>hJ%Mx-W`1tn}`@j+>lA z2G?Q%_i26-T^hJhQ+yFXx`b2&Vq-7i-0*n>pqpt1Sm-_OMKj{WEntmn@r`04XXFiM zcX%MjV?-&Yf&OS!R*oOeG^Gb`HrNp6y5;u&ttEa6x+w`)9%F{YDQGA%6QkK&t0;+F z($Jx6iRac+YkIR5+ABU$h~54$%Sc!rj_o|W}*+*bcSYkmo@*X?&9@OLL6?)SBLx2$%6zFwi1M7u10`6TkBtRN_xCE!^sI4Sg0u{YVp;MFhyra$#I`v@$&B@aip`ZoZdz?> z0?)NdH^=4P6_R_7K+~;YV*D&S?JNh6{DSg$V24S~^_me`L0-Z-@;PcaJcFzs-xB^0 zRayr6{uY(LL&cuBl|_cfne!c1UzX#2CCl_cy6gNTT<(-sIT(e>L%LaX$hEBJy-u0Q zxW-BQwxom-*(;)byd(vFqLa64ji!Qwmt6prL$D+6&B37+TI?`obZKdcz6BWX-v zCHAVY8$7Uw0h$aLU^G`aI5~Cu`r!wn7FAU-6v?Dh9P6C8;>2@DhAjd^?KHUc*B3E` z0@9}(5526cbLGDqAhezcQ?$~y2zBGW8)gW7DcrhDUUG_Up(ymw>L?!ZR%hA|8StTq zXoG)LY-)IN?00cMLA#t)S~x3#9AVX2m_Av5;Vo9=P@%~UcH<6ly0jD8jT8t?nZ;67 zjxZakcs75}G+e-)IXd#Y_U4wpzr>(`$eEr@+3=Vqrxokp=@&OIDM9)9Krx`_lc$d7 zRnQmlttU<)L*ODjP#~>1w=h4e1Pl_}8Z&;e5i($1Ivx}FndoFL&B_no{5ue#`WZ*O z6FXUKIxkSI2kR6#=a|YTS8kEL8WO*Lj0P%0`(G=Ju03u|BZD{bxoz?K`=2K0mUU1G zOR*HuxK*ctL-721c?ts!-TQ@2J2E+SDm*E{ks!zEsY!)tdAvf6&A(`<`yn~2SV2jK5ypx#<7lBlI zJLz9urp{;c8C*`ZHXb?!l?uJ*w!Ft634=K za9Qd0cfGyXn9*6kOFmY`rp+z0UbSxnwxs_&j0{lH>iY^-_#O*p3-e}TC#$Qg((^Ix z-cwMOAlkmIKFZKdd{jYNSY!e*Gqbq{dvEXc|GtzlF{M#OH}(iYUj8F+pv2l#=R$MZ zk83CO6hA%Rh~%JAdJxF`vCst#<)k}M_)ld!5x-auf!8D^jtj@#TjD$?$Q2nU zDpn(E^A!83Q>#CIJ|cTQ<<1vGUy-&WeiM)UPBX!WC-h6h4JL^me2K${u8kJlGcqR9 zGW9D-13mXI5b63i>s&a(?6{qldYJHeRe~63UJJxcHte=BRDu3`Du<-D;fdb~eMDyC zOPb8ZPMXInfgDk9jMh_EDGF3 z>!^H~j2N}S3QA6nfFqilslHNb=T^psj}2yNr#OqV_2AIbliHgk@rDGUe)V=`#`VHz z+g|abdV^9MhvF}*P>TomrFuPpJ^UDA0t@MQJ9)M;4p(`ISJ zMac}ts!}wcql6m3(z(N8mHwMKzJG{R44%`J7#$tCYV3yTpa2u8{Mn=wQ%4b156?!(<4y^HkZTm+? 
zo9JBQ3B{TYYAZ!h*JHL8YU{pF^d5%fe-EIQ#P)dhXZXB=)mj+43>IF+$tpP_FqUNr zEBcYe(P;dRf>TM{p9WT3UBoKB+n<)MT~WnitrY`>8}sK8wfU8&mxW3280#mq-L%eXsSYnIvR>HgQNl~6^TcUQ%%?~POd*a9eSdj z{sZe|xy<)mhTA*wf#bhya_~C;+Z#-Ep+|nVG@g5SK5VgtFSoK5Yz}ogtsO>jur*W* zEAbAoeMi4r=FucMUGWeWx?E)1YL#uxIfw5f6{Io06qlL#eKP-x(O&$ku+TjfNP$@n z`a5la%z{&Rnj@_8aBW2e3)GOcfWUpRb9BDft6x@P5wGerC{qm!D@p%IExqPC#Hv}8 zV&`~ZW6CeSb!(hsC$Oi*z2p3Lwg_!krwO%1IJ`kK{nd)`wPpCSz{0#VDLEm$nXhU4 zc{!lD(kN|wXN!}|>DX+t8~pWl{j>;Yzal$%TJPo_UuZebso6cRS6^hUCNYGzof0YF zpqaSZ(OhFf^ezQahCG)!FkDN`22Na1%|?C%{6EusooKgwJ^18^!0S{Sz^ED|D7pp; z3nC#zd?3tDEBwT2(QWQ#_xPwbyqW=gVF}xnAmecP*CVZ4@6>UxJ9cJAUtio~w%pXJ ztpvRqNJxY-dP^jCl;b*-nU0M&I$M9-|F|U=x(Zhw|CEb~X;I@+T_V+_LJ;r90o&zO zjl-peH`VUC`N1;ORW^m?s}cdhrxnR@O;KUn!D!8zose%-GbFN@=orT?f8HUwnL=CA za5Rnx%*_nMWdJwp_IO^d?wi)J`8gDx>}+@nXXD$qxdcrGPZDZ_O0n_r4-aixKWWVb z{!AO}z60hL@aZ^3G;9=@ag!_*%ik`wfX%Txou1*J#{;mvqr69{NEe%GMNXJ%t-jQC zJjX-C!s5~sv=rkD92mWIGVHC@Vurn@j*jAF3mrheKmGf(r=avOtmqcmZVGz)`L#? zHsZ_<%+0q)P}wE^UNZ~7`>Y2iUqEi>1gz+LCxwUW4diW9J~3g5N~C<>J>!dN@IuFAdVnc;vG%m-QO>X{hD@LvOee+*}xH1 zN74#8YdVlMEH{oczi8cgxLZ>xNJuR?*N_fadD`j3qRdlz6YqCF3kvkQQolQAD#(_a z(~eTj6bc*@;BPCfphAab`32=gjWni7Vu_6CG!fgc+Q&(1zs&C{8iKRjZ#WNPpb?#K z#Rsf;WW3*`Y*vwCb8XL82^y7c1_!QWGgo@1Tg-;a$sFCA#43yB{z=1fKa6z#cShYK z$_bmVL8Wl3ElH0k`|uGdG#E<+Izmz?#iqVRa@{(hwlC1GyL*bU4TJ`#JfEs5m za_Kl|AcFkl=jY7^pY0tu0hEaSiP2zSem^!cq!hzrm7BC`DT`eWBQ=Nf{_j~!Ge>V` zp6eNB@Mfx-ac`=yAdSTySyb14GD!V)NfdpbHfppD$(kV)q}#kTyM>!@-t=bz^nWyc zV|ZQf*L0Jnjjg7!8ryEv*ftw9wrx9U8rybGY}>YN=iT}JpSM@O_R7iG`(F22GqYwE zUV6XAqoJ8ecjjjG&uD+U?D@vt$YC_LdgKJW^L@AZ8m)`R!ekg%YmFbPSvy>*=m$9VdOx(fo>8OQynC{>FB;K^^ltzx#z}kL^_2fzc z1yuLm(9lq8z>JNAlr*FUkN_Zzjg8fPADk^wsro&PL1-JDo-TdJ?rE}IPXkz#!eL{l zh(9;m?!o|4z$id5eIclMzSi1{?W&=%EFKku{ZYB6e|hIPWQ;t6;a8&MOV?U{5X+M7T#`|_trZPuZmA+~BQClhF!M86)JVSx=O7&>_!(OpO(JM6dBET- zPFYg_Lz!R`&oy|u%}t`a(eG2#INW8f=3ejyNE_xJ+LLvejyk?sPF8+kF9-Ukl~_T% zf4R%*X#$nXmd^VVCZO1m5d{%I7l z!5PcrSlBIp@{c(ELRGBAJnMM$o-Q&-s-(zxmE+aPP;Bby2J}0#R?#8h8YrBoO>!(a zdmo7)9}&Z+OLfFng_Zyp@&zEjg7Z(@>1s4UIQtUVAn`+gvq!V3T4rbAWH;ybVHCS}(|{42ZzfHmm7-rQXU-y4m1##cVfFo7J5OTdX` zv#K9Y!8{K35V4Py`9s1SR7o44)9U$QLLWHi{i$_Mx z(HE!7H;J?R57Uxp^C;mC&BDIU8@Rf;DMoV>xl?L#_P8-P#gA z0zDS^0L>#B02aQU)f63dBl5wKk&#&&N`h>T2oJOT_IsLJUz5e*7n0{qen`j9M zLGh-&bN)uLN22q2*hOxqjt$%yJs&Eb&)+?IBEO?5s+QhQ=vhQerCg<=SPvr3J!34gZ zhC`^R0Nue@8SQj)sd*U>7r~9JTdqeTMF!@*8K>HI!V=kIS^woaZUgo z9yWJ@cLJRvD7E|n@ZeXbAdtVWnLil_B(TT%z0KJKEW}^}GRvB8qMn|-STLYqGa3qt zN$iyvVi6%2@&b*k<%glQ{3Bd)m5-r!6x?Sh+hMyO@y8_zY?l9EJ1k(whmQYnyLmg~{a<9qM)7eAssUZdQ}f@}OZ1>=oD$u!mwglY-jmO;5_Y|; z+T_IiNojnRy#QvNUAptn2o#X_i>PV%@P%kbSF!CC@p{( z;O5~0wn78H$N{QW^0_oVFTAX*tk6?iexSoKd4sw*?=Q%B5p7S>p+ZB~Dy+Bc=zt-W zjGVHmrDbPteDou#s8ojl3~~SvTIebGpy>3QYFbTgtuMi$V_#9sHW^&EogF?uSLl+> zu;h66{y9zOReNEd6xotv|L1P{=OSopqGg}rrOr`?huySNKmhRj)h6d(64Z7fyB#!{uZm1e2gwizUI7aApQzoAxW9mi>C_Apg}BXbE;wLTd>U zQaz)8jCuETU8x*)9X%1#R9yB41oB86Pqn(8;I2&UjZ2XQG8vJ8zD5a@afbl#AiniT zA$P2T*SAs!+U}RaEdV3C1_lg>i>b-ccg3L6>s)d`6tD5yt* z+*eu6V4U-ASc~4P^`d%utwNz)bme|Z2%_P3Ub`%r&xye2=s1yxhES;{(pcRArqdsj zj5yOH8mLz^l?AIDo2Ct)A2H+o6Z=iK#nT;8{{o*UMw%4sVj(Ei%%qkyi@tyau9ty{OD-$3^ts3q?zFI!2NlitIDRuj;(>rH%IaLM^tII57zQL zh8h3GPSGZq0~sV1CxTq%_RV6c7Gv4bk~1=DP1U={e>l`buKEz8Y{PE2h2(AhJ zslJL(%_C#+g+Hfp`>Ka~^!7D;9mdt!844Mx4W}G_d#MV@BGDcB%kaxOpqOw5Gwb=V zAcG#r_hSsY?FA}e0C;{@*pA6AAS^5D47lIq`%@Y?sQfiGGWqY{U?5LeFlgA}qxQoJ zZrmI~Y-$*@t7DcIHS8w+G!G!MkWpX2+`*u8dcN}zI%(?x5Ir=W%k?}81rfcmRFMHx z1_;0__U72p{dSXaCz>GG@p!Ri8zvGhcJs6LYhB(|iB z$+sW8?BC|TmZHGaz*fFZv#G`5YG0kl>ii#-{w!qHwmN?sYk3&p-=$G!HI!TnbaE1ZQ)&XhQ8jkW(7$_epGI@Xfjm7 
zW20J)YU?Alq4qc)UCZ?@z<_0nsuug=7pG;)7Sm$UcbN%kU)*cth*eP>;1o8fKNziS zq(Jz3JF-EEWBefmDxyJa7S+R*bFqxQ)do!+_GD1o@X_wySw7Yj<@y1QoOZ;<(fxF{ zWO+=lsaC55c;!j4ufs;4;(ar*>SGjS;EUq(@!bi*)G;|H!mP<-;Ij`m%^c)jKr?ME-~3ON~`#Qa(Oe6Ts%6(XTe+FIwI%YZy85AzN#Yltz@Pn zmeLNIoy>PnnjCAm2s^llB69TlyiPp*gY^k$9&5!7+W=j#E(^Y(JG76Aj?Lv~&Vzpb zNWY&fM98jvE6clxE*?S=V$)xz0Ox^R`wF>iFnzJF=hL=h`ZFT6SFF%&0Z_c6ul7rJ zLgcl_yi9|p5_`)9E+&88IDb%xsIbh@YvLiUcXkP%#A~GJgq4>=H043L`s(r+uI_kz zNqQ~fO5XC`QFG*Sleg%1Hda2e7gxrY5|&JeI`&obk~eosJMzkv(6!vD9`WTKa;Gbq zxM@i`I&6{_0XE8Cxd&TX1+Q9e4|%8Cd~ybHWfeqsHd@M5m?2bU%v#R{>sK#77Q%w1 zq314{XH$dO-jmIwhKPldQXM2ETHIK!Lx8kHz*=Pk960z@ooF7vf>~|fIgBY2ucBEf z`Y1WD*u`i#YsA6GDfPi6_*S|X7=^t zm$ChLUnqFp*a`KcA$Fh2=U9QQ{8Y9?DO4fJV#gleck}M_dZ@lZvq%1+6_S8+9kIae*0XET1-umU~KfX>n8YO=6 zk4zh%uN98Fv-e|FiGP0PLt#%+HZe<|)RwmGu09FwJu<>SE$NHyTB133gm^uP3YLP8 zykZOmg#=ox$(wD0rDJz_@Ps*}fr=M}mA+*OJk9pczD zt-HJsfy6SS2cgMm$?R{_aXH`bhV!7Zmy=%>+mIijr0{lmO=Sg9yy{TAjh;|e?$top z8v$6ORSqvd<`8>&8VVCnILQZnf})Xx_r$2<1%zk59hy7pwCbQfo?1 z^a?2Ky+o5HNk>i;Y>>ku@TzOvYJd;911d_#Y~qF40mb>(TbW7~{?KOV^1%`~;vx9L zXfRPUiLPXV6JTvlFiPjlk|z4!4Bfq`@BgzcB`sqnA34* zz8h&RxsmTikxFcY=J6D)Q2Oy>eMTHBMovt$e8RLkfRYQ(*-yA!%Z(Nj zRrM}5t%WkaW|mg?#raO7ZDZN|Zbd#v6LFWaA-{)xzMZdq)B{S4TKtElUtxoAJ-H67v|Zf537gRrb4!zkVepGuqyO);MVp z^;sib=8YN@yAD#jpXyXMo#|UI?*glIt>3uY7{uYhC@DR$3U^gP3^6 zH2bBRzWezsEN-CojC4g%L^C>xqBunGwIA9N1!`jkmg3D2tX2z#(>`MkRK~>LpvH+- zi8D_;!!jo3m-L?yr*4DIFjHcSJ#YJ7$`eY_$sdSkIN3J+rZ=A!o8dX%6#M9uf4lY+ z3jYX33R|@&{$W}=pW$~E@woP>@1{`>r(-&%a!jwTVuo7!e8Yd{ib$X(doU3vfKQ#h zuI&35KJA1Nrp}9&M)9!qLpA;mOUOxKpxtuPCf;Z|q}=ERnAdO5-g-`T0$$I+go|9H zX}T=-*$YLH(MHC@27E!J67KcRog+>p!amTFNghuPGK>f&|EF!x0&4ypn^-4L>QSkB zRVnLu^brgCsZr=AJgnbYaSJ$2_CS854MXobuYv&0(^_SpmMmC>pRRiOH(A!i(;U|? z`sw9vnDxfh<%y6nB|DfU3a9+YuA+;pRNxqP4DO$!k>(8y>)s^;MmmR=TPL0$WJ_|l zH}TBJwFhK}mV_S+`UJxWN{Q;DI66AiF&dw+7U1BG57o@uhYZH}s z+pp-1coG_9h+%&btBQH5-(7Ll*H*l&!MlRP{mR9PYazTYUh^o?Rk_FOB8dFskS0>8 zrH0A1RW?K;H*vZi5veQAD56=Pf_vsdjB(`L$5%L-YegY6zO>RFEH~;nXOpWW$S+&j z3T9z)P@edzELJEo@w@iIU$&a(Nd>LP2y#o)=?6E#)0IP`lOpnyYGV3Ti>2)B^iKkq ztCeNO#DzYx>E_l#217cp&$=UVcHpn!G{dmI`ia}b)OZ$ z?fr|5N%%0!_%@ts*EfIP_F3}t)vU{_$%M-68Xhx>i>(!xhOj@A5)}rj!RU!J{Nv6I zMm8C0JlA)H@in8d?3k}FDrZGDLP|(aoAi-teDX7p=l1)8Q^b(-Gmr07v}C6R4(D;N z5lW6U?~=3nOFZnqAFxb%K5di+naw(2)|s;|nnPFuU$KPn)3xM1 zl8ZR<<_eQSj7xjVE(S(@5o*?lG(10 zg`6oHi4~Od`1Xbx)>ojQ9{66k;=z#x6Z?TZpB;T36HL&mNkD8&|5pp7i$*=i;W8Ot zuRw9PilLiKoldVpsfpVjlD;G5(|3ua)io~N1qE>;ZjgDuOLgw^zV)uU~N39cU` z^l{j$Cr}D##8(_|Hf2@P3@4EGwymb>f&@fD4~$)X1IuRsCJ1q3H3Dsr*O8nw(%dUv z>5ta@k!yI@ucPyAtQ3O>i)q%g!jkJBGEM6am8HT~gw z5)Yq+@hZzwk$CaH;%Ie_4_jyH=AzaSx@=O9{;HR8m6DaSBbxk`cs(a1a94HsY; z4MG`65O>%Nad@?C{Qx}D*z|`w@BMsWdcElv%|n6tEu*-i+q*<3DO}BUF>)vZniXDY zuaWWUz7&tW0^h>eMOgXkk~BGYJ0am`(a`m?a5L@Jk4UK(6_w=?1`pGpG788>86-15 z+*oZ-zLQ?ZeyADFWA{ywuBanBFlC|6zC(*E;u6HXS!}!X%);_yz+2E`ro|HMqbKNl z*z;ReyX}x?-h|-s($CITjhVmJW*$mHMF&l^gD-eejq za_r~gtvVVHS^6)ASM-MHaSe_{1KQ--agcA_jo082>yR_XF{#y1cVr$%LY8)Vi`-1t zPZCg5vd+;vO}cQ0k9tY6k(s>ZM$}4+q;CQ7_L}~;HT6akzrnEc zZnQ?`$Cpqh^O^dS`4CvSm`7)Fmdogbjrk@=CnfzmqnPW#@n@Lwn$lZQ=BVwO9Xxty zbiJLg2I_@_7Qd0~dKo+q2fdz5pb@b4>Sg&rDd-$@2SuOJFfuU^cZZss<BqwkS7Fz++1_7?K7WG+;rg0z4xfbh}H1qLUXhy)4E!U?SdT**+-d zUuKmk0?_3RB@6yv2h5N54|u%6uW2g(S{nN5RjPK;{{^o$OsoI^ literal 0 HcmV?d00001 From 7dc57d782b2689cca16b28e90a1c5659bcd9e13c Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Mon, 2 Dec 2024 15:25:10 -0800 Subject: [PATCH 069/170] Change default for coupling strength and quench duration --- helpers/layouts_components.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/helpers/layouts_components.py b/helpers/layouts_components.py index 6ed6ac5..142eecc 100644 --- a/helpers/layouts_components.py +++ b/helpers/layouts_components.py @@ -42,7 +42,7 @@ {"label": "160 ns", "value": 160}, {"label": "320 ns", "value": 320}, ], - value=5, # default value + value=80, # default value style={"max-width": "95%"}, ) @@ -112,7 +112,7 @@ [ Slider( id="coupling_strength", - value=-1.4, + value=-1.8, marks=j_marks, step=None, min=-1.8, From 1ecd6baad1f87d9846b2d181f80140e5348e6865 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Mon, 2 Dec 2024 16:12:36 -0800 Subject: [PATCH 070/170] Added color labels for coupling strength in background plotting func --- helpers/plots.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/helpers/plots.py b/helpers/plots.py index daaff15..fb3d043 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -164,18 +164,30 @@ def plot_kink_densities_bg( xaxis=x_axis1, yaxis=y_axis1, ) + coupling_label = {-1.8: False,-1.6:False, -1.4:False,-1.2:False,-1:False, -0.8:False, -0.6:False} fig_data = [predicted_plus, predicted_minus] for ta_str, data_points in coupling_data.items(): for point in data_points: - color = coupling_color_theme[point["coupling_strength"]] + _J = point["coupling_strength"] + color = coupling_color_theme[_J] + + if not coupling_label[_J]: + legend = True + coupling_label[_J] = True + else: + legend = False + kink_density = point["kink_density"] + fig_data.append( go.Scatter( x=[ta_str], y=[kink_density], xaxis="x1", yaxis="y1", - showlegend=False, + mode="markers", + name=f"Coupling Strength: {_J}", + showlegend=legend, marker=dict(size=10, color=color, symbol="x"), ) ) From 636a956330253b474b8c8021f8a234fa41c546d2 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Mon, 2 Dec 2024 16:24:36 -0800 Subject: [PATCH 071/170] Plot coupling strength label when user run simulation --- helpers/plots.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/helpers/plots.py b/helpers/plots.py index fb3d043..7decb77 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -39,7 +39,7 @@ -0.8: "#9467BD", # Dark Purple -0.6: "#8C564B", # Brown } - +coupling_label = {-1.8: False,-1.6:False, -1.4:False,-1.2:False,-1:False, -0.8:False, -0.6:False} def plot_kink_densities_bg( display, time_range, J_base, schedule_name, coupling_data, zne_estimates @@ -164,16 +164,16 @@ def plot_kink_densities_bg( xaxis=x_axis1, yaxis=y_axis1, ) - coupling_label = {-1.8: False,-1.6:False, -1.4:False,-1.2:False,-1:False, -0.8:False, -0.6:False} + _coupling_label = {-1.8: False,-1.6:False, -1.4:False,-1.2:False,-1:False, -0.8:False, -0.6:False} fig_data = [predicted_plus, predicted_minus] for ta_str, data_points in coupling_data.items(): for point in data_points: _J = point["coupling_strength"] color = coupling_color_theme[_J] - if not coupling_label[_J]: + if not _coupling_label[_J]: legend = True - coupling_label[_J] = True + _coupling_label[_J] = True else: legend = False @@ -393,6 +393,12 @@ def plot_kink_density(display, fig_dict, kink_density, anneal_time, J): color = coupling_color_theme[J] else: color = "black" + + if not coupling_label[J]: + legend = True + coupling_label[J] = True + else: + legend = False fig.add_trace( go.Scatter( @@ -400,7 +406,9 @@ def plot_kink_density(display, fig_dict, kink_density, anneal_time, J): y=[kink_density], xaxis="x1", yaxis="y1", - showlegend=False, + mode="markers", + name=f"Coupling Strength: {J}", + showlegend=legend, marker=dict( size=10, color=color, 
From 6abda7b13b6dfe30269b67023a2bb253a89bada5 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Mon, 2 Dec 2024 16:32:57 -0800 Subject: [PATCH 072/170] Change the name for remaining two plots --- helpers/layouts_components.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/helpers/layouts_components.py b/helpers/layouts_components.py index 142eecc..fda69f5 100644 --- a/helpers/layouts_components.py +++ b/helpers/layouts_components.py @@ -50,9 +50,9 @@ id="kz_graph_display", options=[ {"label": "Both", "value": "both", "disabled": False}, - {"label": "Kink density", "value": "kink_density", "disabled": False}, + {"label": "Kink density vs Anneal time", "value": "kink_density", "disabled": False}, {"label": "Schedule", "value": "schedule", "disabled": False}, - {"label": "Noise level (lambda)", "value": "coupling", "disabled": False}, + {"label": "Kink density vs Noise level", "value": "coupling", "disabled": False}, ], value="both", inputStyle={"margin-right": "10px", "margin-bottom": "5px"}, From 1878b256f0a7d7da6f37fd1607c62e0f5c2a38d6 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Mon, 2 Dec 2024 18:14:03 -0800 Subject: [PATCH 073/170] removed unused imports --- helpers/layouts_components.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helpers/layouts_components.py b/helpers/layouts_components.py index fda69f5..ee444be 100644 --- a/helpers/layouts_components.py +++ b/helpers/layouts_components.py @@ -13,7 +13,7 @@ # limitations under the License. import dash_bootstrap_components as dbc -from dash.dcc import Checklist, Dropdown, Input, Link, RadioItems, Slider +from dash.dcc import Checklist, Dropdown, Link, RadioItems, Slider from dash import html, dcc __all__ = [ From 5bf37480864e59e5ae821b34c4a69b418e749221 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Mon, 2 Dec 2024 18:16:48 -0800 Subject: [PATCH 074/170] Removed unnecessary plots and default kink density vs noise level to default plot --- helpers/layouts_components.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/helpers/layouts_components.py b/helpers/layouts_components.py index ee444be..90084da 100644 --- a/helpers/layouts_components.py +++ b/helpers/layouts_components.py @@ -49,12 +49,12 @@ config_kz_graph = RadioItems( id="kz_graph_display", options=[ - {"label": "Both", "value": "both", "disabled": False}, + # {"label": "Both", "value": "both", "disabled": False}, {"label": "Kink density vs Anneal time", "value": "kink_density", "disabled": False}, - {"label": "Schedule", "value": "schedule", "disabled": False}, + # {"label": "Schedule", "value": "schedule", "disabled": False}, {"label": "Kink density vs Noise level", "value": "coupling", "disabled": False}, ], - value="both", + value="coupling", inputStyle={"margin-right": "10px", "margin-bottom": "5px"}, labelStyle={ "color": "rgb(3, 184, 255)", From 211952821acf6f16bf8ea6275d60d908433d72eb Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Mon, 2 Dec 2024 18:42:37 -0800 Subject: [PATCH 075/170] Update title and description of the demo --- helpers/layouts_cards.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/helpers/layouts_cards.py b/helpers/layouts_cards.py index 1b92256..b147374 100644 --- a/helpers/layouts_cards.py +++ b/helpers/layouts_cards.py @@ -53,14 +53,13 @@ def control_card(solvers={}, init_job_status="READY"): dbc.Col( [ html.H4( - "Coherent Annealing: KZ Simulation", + "Coherent Annealing: Zero-Noise Extrapolation", className="card-title", style={"color": "rgb(243, 120, 
32)"}, ), html.P( """ -Use a quantum computer to simulate the formation of topological defects in a 1D ring -of spins undergoing a phase transition, described by the Kibble-Zurek mechanism. +Simulate zero-temperature and zero-time extrapolations on a quantum computer, leveraging the Kibble-Zurek mechanism. """, style={"color": "white", "fontSize": 14}, ), From e5513a8535ab2d7d8d9966a5f0a7cf3c989bbf30 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Mon, 2 Dec 2024 19:32:59 -0800 Subject: [PATCH 076/170] Updated tooltips --- app.py | 19 +++++++++---------- helpers/tooltips.py | 10 ++++++---- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/app.py b/app.py index a9c709b..7339b5b 100644 --- a/app.py +++ b/app.py @@ -239,13 +239,13 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): file for file in os.listdir("helpers") if ".json" in file and "emb_" in file ]: - # if qpu_name == 'mock_dwave_solver' and 'Advantage_system6.4' in filename: - # with open(f'helpers/{filename}', 'r') as fp: - # embeddings_cached = json.load(fp) - # print(filename) - # embeddings_cached = json_to_dict(embeddings_cached) - - if qpu_name.split(".")[0] in filename: + if qpu_name == 'mock_dwave_solver': + _qpu_name = 'Advantage_system6.4' + else: + _qpu_name = qpu_name + + # splitting seemed unsafe since the graph can change between versions + if _qpu_name in filename: with open(f"helpers/{filename}", "r") as fp: embeddings_cached = json.load(fp) @@ -258,13 +258,12 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): source_graph = dimod.to_networkx_graph( create_bqm(num_spins=length) ).edges - target_graph = qpus[qpu_name].edges + target_graph = qpus[_qpu_name].edges emb = embeddings_cached[length] if not is_valid_embedding(emb, source_graph, target_graph): del embeddings_cached[length] - if trigger_id == "embeddings_found": if not isinstance( @@ -744,7 +743,7 @@ def activate_tooltips(tooltips_show): dict(display="none"), ) - return dict(), dict(), dict(), dict(), dict(), dict(), dict(), dict(), dict() + return dict(), dict(), dict(), dict(), dict(), dict(), dict(), dict(), dict(), dict() if __name__ == "__main__": diff --git a/helpers/tooltips.py b/helpers/tooltips.py index 7fddcae..1f1b8e5 100644 --- a/helpers/tooltips.py +++ b/helpers/tooltips.py @@ -14,14 +14,14 @@ tool_tips = { "anneal_duration": -f"""Duration of the quantum anneal. Range of 5 to 100 nanoseconds.""", +f"""Duration of the quantum anneal. Range of 5 to 320 nanoseconds.""", "kz_graph_display": -f"""Plot selection: Kibble-Zurek prediction and/or QPU energies (either separate or combined).""", +f"""Plot selection: Defects vs anneal duration or defects vs noise level""", "spins": f"""Number of spins in the 1D ring.""", "coupling_strength": -f"""Coupling strength, J, between spins in the ring. -Range of -2 (ferromagnetic) to +1 (anti-ferromagnetic). +f"""Coupling strength, J, between spins in the ferromagnetic ring. +Range of -1.8 to -0.6. 
""", "qpu_selection": f"""Selection from quantum computers available to your account/project token.""", @@ -42,4 +42,6 @@ """, "job_submit_state": f"""Status of the last submission to the quantum computer (or initial state).""", + "btn_reset": +f"""Clear all existing data stored for the current run and reset all plots.""", } \ No newline at end of file From da2954e5093cd284f934fa549726776269bf725b Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 3 Dec 2024 12:47:42 -0800 Subject: [PATCH 077/170] Fixed the issue of MockSampler unable to grab Advantage embeddings. Haven't cleaned up the code yet. --- app.py | 52 ++++++++++++++++++++++++++++++---------------------- 1 file changed, 30 insertions(+), 22 deletions(-) diff --git a/app.py b/app.py index 7339b5b..9c82a63 100644 --- a/app.py +++ b/app.py @@ -220,18 +220,18 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): if trigger_id == "qpu_selection": - if qpu_name == "mock_dwave_solver": - - embeddings_cached = {} - L = spins - edges = [(i, (i + 1) % L) for i in range(L)] - emb = find_subgraph( - target=qpus["mock_dwave_solver"].to_networkx_graph(), - source=nx.from_edgelist(edges), - ) - emb = {u: [v] for u, v in emb.items()} # Wrap target nodes in lists - embeddings_cached[spins] = emb # Store embedding in cache - return embeddings_cached, [spins] + # if qpu_name == "mock_dwave_solver": + + # embeddings_cached = {} + # L = spins + # edges = [(i, (i + 1) % L) for i in range(L)] + # emb = find_subgraph( + # target=qpus["mock_dwave_solver"].to_networkx_graph(), + # source=nx.from_edgelist(edges), + # ) + # emb = {u: [v] for u, v in emb.items()} # Wrap target nodes in lists + # embeddings_cached[spins] = emb # Store embedding in cache + # return embeddings_cached, [spins] embeddings_cached = {} # Wipe out previous QPU's embeddings @@ -517,18 +517,26 @@ def submit_job(job_submit_time, qpu_name, spins, J, ta_ns, embeddings_cached): bqm = create_bqm(num_spins=spins, coupling_strength=J) if qpu_name == "mock_dwave_solver": - embedding = embeddings_cached - emb = find_subgraph( - target=qpus["mock_dwave_solver"].to_networkx_graph(), - source=dimod.to_networkx_graph(bqm), - ) - emb = {u: [v] for u, v in emb.items()} + # Seems like we are calculating the embeddings on the fly in both cached_embedding and submit_job. 
+ # If mock sampler is using Advantage embeddings, we should simply follow the code in the else block + embeddings_cached = json_to_dict(embeddings_cached) + embedding = embeddings_cached[spins] + # emb = find_subgraph( + # target=qpus["mock_dwave_solver"].to_networkx_graph(), + # source=dimod.to_networkx_graph(bqm), + # ) + # emb = {u: [v] for u, v in emb.items()} + # bqm_embedded = embed_bqm( + # bqm, + # emb, + # MockKibbleZurekSampler( + # topology_type="pegasus", topology_shape=[16] + # ).adjacency, + # ) bqm_embedded = embed_bqm( bqm, - emb, - MockKibbleZurekSampler( - topology_type="pegasus", topology_shape=[16] - ).adjacency, + embedding, + qpus["mock_dwave_solver"].adjacency, ) # Calculate annealing_time in microseconds as per your setup annealing_time = ta_ns / 1000 # ta_ns is in nanoseconds From 9292eac9517830b8ba91a710956a4daf6443828d Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 3 Dec 2024 13:48:38 -0800 Subject: [PATCH 078/170] Reformat code and removed previously commented out code --- app.py | 61 +++++++++++++++++++++------------------------------------- 1 file changed, 22 insertions(+), 39 deletions(-) diff --git a/app.py b/app.py index 9c82a63..15cab71 100644 --- a/app.py +++ b/app.py @@ -220,30 +220,17 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): if trigger_id == "qpu_selection": - # if qpu_name == "mock_dwave_solver": - - # embeddings_cached = {} - # L = spins - # edges = [(i, (i + 1) % L) for i in range(L)] - # emb = find_subgraph( - # target=qpus["mock_dwave_solver"].to_networkx_graph(), - # source=nx.from_edgelist(edges), - # ) - # emb = {u: [v] for u, v in emb.items()} # Wrap target nodes in lists - # embeddings_cached[spins] = emb # Store embedding in cache - # return embeddings_cached, [spins] - embeddings_cached = {} # Wipe out previous QPU's embeddings for filename in [ file for file in os.listdir("helpers") if ".json" in file and "emb_" in file ]: - if qpu_name == 'mock_dwave_solver': - _qpu_name = 'Advantage_system6.4' + if qpu_name == "mock_dwave_solver": + _qpu_name = "Advantage_system6.4" else: _qpu_name = qpu_name - + # splitting seemed unsafe since the graph can change between versions if _qpu_name in filename: @@ -516,30 +503,18 @@ def submit_job(job_submit_time, qpu_name, spins, J, ta_ns, embeddings_cached): bqm = create_bqm(num_spins=spins, coupling_strength=J) + embeddings_cached = json_to_dict(embeddings_cached) + embedding = embeddings_cached[spins] + annealing_time = calc_lambda(J, J_baseline) * (ta_ns / 1000) + if qpu_name == "mock_dwave_solver": - # Seems like we are calculating the embeddings on the fly in both cached_embedding and submit_job. 
- # If mock sampler is using Advantage embeddings, we should simply follow the code in the else block - embeddings_cached = json_to_dict(embeddings_cached) - embedding = embeddings_cached[spins] - # emb = find_subgraph( - # target=qpus["mock_dwave_solver"].to_networkx_graph(), - # source=dimod.to_networkx_graph(bqm), - # ) - # emb = {u: [v] for u, v in emb.items()} - # bqm_embedded = embed_bqm( - # bqm, - # emb, - # MockKibbleZurekSampler( - # topology_type="pegasus", topology_shape=[16] - # ).adjacency, - # ) + bqm_embedded = embed_bqm( bqm, embedding, qpus["mock_dwave_solver"].adjacency, ) - # Calculate annealing_time in microseconds as per your setup - annealing_time = ta_ns / 1000 # ta_ns is in nanoseconds + sampleset = qpus["mock_dwave_solver"].sample( bqm_embedded, annealing_time=annealing_time ) @@ -547,9 +522,6 @@ def submit_job(job_submit_time, qpu_name, spins, J, ta_ns, embeddings_cached): else: - embeddings_cached = json_to_dict(embeddings_cached) - embedding = embeddings_cached[spins] - bqm_embedded = embed_bqm( bqm, embedding, DWaveSampler(solver=solver.name).adjacency ) @@ -557,7 +529,7 @@ def submit_job(job_submit_time, qpu_name, spins, J, ta_ns, embeddings_cached): computation = solver.sample_bqm( bqm=bqm_embedded, fast_anneal=True, - annealing_time=calc_lambda(J, J_baseline) * (ta_ns / 1000), + annealing_time=annealing_time, auto_scale=False, answer_mode="raw", # Easier than accounting for num_occurrences num_reads=100, @@ -751,7 +723,18 @@ def activate_tooltips(tooltips_show): dict(display="none"), ) - return dict(), dict(), dict(), dict(), dict(), dict(), dict(), dict(), dict(), dict() + return ( + dict(), + dict(), + dict(), + dict(), + dict(), + dict(), + dict(), + dict(), + dict(), + dict(), + ) if __name__ == "__main__": From 0678e7c212e90ac779b29e1b4a8a697f5f7a6881 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 3 Dec 2024 13:49:14 -0800 Subject: [PATCH 079/170] Removed unused import --- app.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/app.py b/app.py index 15cab71..1c958ab 100644 --- a/app.py +++ b/app.py @@ -20,7 +20,6 @@ import json import numpy as np import os -import warnings import dimod from dwave.cloud import Client @@ -35,8 +34,6 @@ from helpers.qa import * from helpers.tooltips import tool_tips -import networkx as nx -from minorminer.subgraph import find_subgraph import plotly.graph_objects as go app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP]) From 7641a00371673fd2fac31162df8e656969f27a61 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 3 Dec 2024 16:26:04 -0800 Subject: [PATCH 080/170] Changed the position of legends on both plots --- helpers/plots.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helpers/plots.py b/helpers/plots.py index 7decb77..8bc3cd7 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -314,7 +314,7 @@ def plot_kink_densities_bg( ) fig = go.Figure(data=fig_data, layout=fig_layout) - fig.update_layout(legend=dict(x=0.7, y=0.9), margin=dict(b=5, l=5, r=20, t=10)) + fig.update_layout(legend=dict(x=0.1, y=0.1), margin=dict(b=5, l=5, r=20, t=10)) if display != "schedule" and display != "coupling": From 99a86a82532e73f328c06ad70babd42842564654 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 3 Dec 2024 16:30:34 -0800 Subject: [PATCH 081/170] Added anneal time option for 680ns and 1280ns --- helpers/layouts_components.py | 2 ++ helpers/plots.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/helpers/layouts_components.py b/helpers/layouts_components.py index 
90084da..031ef9a 100644 --- a/helpers/layouts_components.py +++ b/helpers/layouts_components.py @@ -41,6 +41,8 @@ {"label": "80 ns", "value": 80}, {"label": "160 ns", "value": 160}, {"label": "320 ns", "value": 320}, + {"label": "640 ns", "value": 640}, + {"label": "1280 ns", "value": 1280}, ], value=80, # default value style={"max-width": "95%"}, diff --git a/helpers/plots.py b/helpers/plots.py index 8bc3cd7..cc4d552 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -29,6 +29,8 @@ 80: "#9467BD", # Dark Purple 160: "#8C564B", # Brown 320: "#E377C2", # Dark Pink + 640: "#17BECF", # Teal + 1280: "#BCBD22", # Olive Green } coupling_color_theme = { -1.8: "#1F77B4", # Dark Blue From 547bed70208e65486981144c61cb194b48628216 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 3 Dec 2024 17:24:51 -0800 Subject: [PATCH 082/170] Fixed the issure of x-axis label disappearing --- helpers/plots.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/helpers/plots.py b/helpers/plots.py index cc4d552..a8738af 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -219,8 +219,8 @@ def plot_kink_densities_bg( elif display == "coupling": fig_layout = go.Layout( - xaxis=x_axis3, - yaxis=y_axis1, + xaxis3=x_axis3, + yaxis1=y_axis1, ) fig_data = [] @@ -389,6 +389,10 @@ def plot_kink_density(display, fig_dict, kink_density, anneal_time, J): ), ) ) + fig.update_layout( + xaxis3=fig.layout.xaxis3, + yaxis1=fig.layout.yaxis1, + ) return fig if display == "kink_density": From 14c656b4a20c9f3aab11f43935828b6f0c56a949 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 3 Dec 2024 17:30:08 -0800 Subject: [PATCH 083/170] Give a more reasonable bound to y-axis value on coupling plot --- helpers/plots.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/helpers/plots.py b/helpers/plots.py index a8738af..e4f8f7f 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -137,9 +137,13 @@ def plot_kink_densities_bg( range=[np.log10(time_range[0] - 1), np.log10(time_range[1] + 10)], ) + y_min = (0.9 * n).min() + y_max = (1.1 * n).max() + y_axis1 = dict( title="Kink Density", type="log", + range=[np.log10(y_min), np.log10(y_max)], ) x_axis2 = dict( From 2c6adab193efb552c79dbc4f50382367eec1deb3 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 3 Dec 2024 17:34:21 -0800 Subject: [PATCH 084/170] Black reformat --- helpers/layouts_components.py | 12 ++++++++++-- helpers/plots.py | 23 ++++++++++++++++++++--- 2 files changed, 30 insertions(+), 5 deletions(-) diff --git a/helpers/layouts_components.py b/helpers/layouts_components.py index 031ef9a..b973db9 100644 --- a/helpers/layouts_components.py +++ b/helpers/layouts_components.py @@ -52,9 +52,17 @@ id="kz_graph_display", options=[ # {"label": "Both", "value": "both", "disabled": False}, - {"label": "Kink density vs Anneal time", "value": "kink_density", "disabled": False}, + { + "label": "Kink density vs Anneal time", + "value": "kink_density", + "disabled": False, + }, # {"label": "Schedule", "value": "schedule", "disabled": False}, - {"label": "Kink density vs Noise level", "value": "coupling", "disabled": False}, + { + "label": "Kink density vs Noise level", + "value": "coupling", + "disabled": False, + }, ], value="coupling", inputStyle={"margin-right": "10px", "margin-bottom": "5px"}, diff --git a/helpers/plots.py b/helpers/plots.py index e4f8f7f..9427a8b 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -41,7 +41,16 @@ -0.8: "#9467BD", # Dark Purple -0.6: "#8C564B", # Brown } -coupling_label = {-1.8: False,-1.6:False, 
-1.4:False,-1.2:False,-1:False, -0.8:False, -0.6:False} +coupling_label = { + -1.8: False, + -1.6: False, + -1.4: False, + -1.2: False, + -1: False, + -0.8: False, + -0.6: False, +} + def plot_kink_densities_bg( display, time_range, J_base, schedule_name, coupling_data, zne_estimates @@ -170,7 +179,15 @@ def plot_kink_densities_bg( xaxis=x_axis1, yaxis=y_axis1, ) - _coupling_label = {-1.8: False,-1.6:False, -1.4:False,-1.2:False,-1:False, -0.8:False, -0.6:False} + _coupling_label = { + -1.8: False, + -1.6: False, + -1.4: False, + -1.2: False, + -1: False, + -0.8: False, + -0.6: False, + } fig_data = [predicted_plus, predicted_minus] for ta_str, data_points in coupling_data.items(): for point in data_points: @@ -403,7 +420,7 @@ def plot_kink_density(display, fig_dict, kink_density, anneal_time, J): color = coupling_color_theme[J] else: color = "black" - + if not coupling_label[J]: legend = True coupling_label[J] = True From 72db74d24eeb8c3c59b69476160729a884cfaef1 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 3 Dec 2024 17:39:46 -0800 Subject: [PATCH 085/170] Change spins or qpu selection triggers the reset functionality --- app.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/app.py b/app.py index 1c958ab..4f1aaf0 100644 --- a/app.py +++ b/app.py @@ -277,7 +277,7 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): Input("job_submit_state", "children"), Input("job_id", "children"), Input("anneal_duration", "value"), - State("spins", "value"), + Input("spins", "value"), State("embeddings_cached", "data"), State("sample_vs_theory", "figure"), State("coupling_data", "data"), # access previously stored data @@ -304,7 +304,7 @@ def display_graphics_kink_density( ta_min = 2 ta_max = 350 - if trigger_id == "btn_reset": + if trigger_id == "btn_reset" or trigger_id == "qpu_selection" or trigger_id == "spins": coupling_data = {} zne_estimates = {} From fa15ed3bcfb7ddab1f86d5ad2eae445e7ebbfe85 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 3 Dec 2024 17:40:10 -0800 Subject: [PATCH 086/170] Black reformat --- app.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/app.py b/app.py index 4f1aaf0..277973d 100644 --- a/app.py +++ b/app.py @@ -304,7 +304,11 @@ def display_graphics_kink_density( ta_min = 2 ta_max = 350 - if trigger_id == "btn_reset" or trigger_id == "qpu_selection" or trigger_id == "spins": + if ( + trigger_id == "btn_reset" + or trigger_id == "qpu_selection" + or trigger_id == "spins" + ): coupling_data = {} zne_estimates = {} From 0437ff547b72fb375aede58bb7cc23a3ebbb58a7 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 3 Dec 2024 18:10:26 -0800 Subject: [PATCH 087/170] Refactor the fitted line and ZNE plotting to plots.py --- app.py | 71 ++++------------------------------------------------------ 1 file changed, 4 insertions(+), 67 deletions(-) diff --git a/app.py b/app.py index 277973d..c5ea86f 100644 --- a/app.py +++ b/app.py @@ -363,73 +363,9 @@ def display_graphics_kink_density( coupling_data[ta_str].append( {"kappa": kappa, "kink_density": kink_density, "coupling_strength": J} ) - - if kz_graph_display == "coupling": - # Check if more than two data points exist for this anneal_time - if len(coupling_data[ta_str]) > 2: - # Perform a polynomial fit (e.g., linear) - - data_points = coupling_data[ta_str] - x = np.array([point["kappa"] for point in data_points]) - y = np.array([point["kink_density"] for point in data_points]) - - # Ensure there are enough unique x values for fitting - if 
len(np.unique(x)) > 1: - # Fit a 1st degree polynomial (linear fit) - if qpu_name == "mock_dwave_solver": - # Fancy non-linear function - y_func_x = fitted_function( - x, y, method="mixture_of_exponentials" - ) - else: - # Pure quadratic (see paper) # y = a + b x^2 - y_func_x = fitted_function(x, y, method="pure_quadratic") - - zne_estimates[ta_str] = y_func_x(0) - # Generate fit curve points - x_fit = np.linspace(0, max(x), 100) - y_fit = y_func_x(x_fit) - - # Remove existing fitting curve traces to prevent duplication - fig.data = [ - trace for trace in fig.data if trace.name != "Fitting Curve" - ] - # Remove existing ZNE Estimate traces to prevent duplication - fig.data = [ - trace for trace in fig.data if trace.name != "ZNE Estimate" - ] - - # Add the new fitting curve - fig.add_trace( - go.Scatter( - x=x_fit, - y=y_fit, - mode="lines", - name="Fitting Curve", - line=dict(color="green", dash="dash"), - showlegend=True, - xaxis="x3", - yaxis="y1", - ) - ) - - for ta_str, a in zne_estimates.items(): - # print(f'anneal itme: {ta_str}, a: {a}') - fig.add_trace( - # Add the ZNE point at kappa=0 - go.Scatter( - x=[0], - y=[a], - mode="markers", - name="ZNE Estimate", - marker=dict( - size=12, color="purple", symbol="diamond" - ), - showlegend=False, - xaxis="x3", - yaxis="y1", - ) - ) + + plot_zne_fitted_line(fig, coupling_data, qpu_name, zne_estimates, kz_graph_display, ta_str) + return fig, coupling_data, zne_estimates @@ -722,6 +658,7 @@ def activate_tooltips(tooltips_show): dict(display="none"), dict(display="none"), dict(display="none"), + dict(display="none"), ) return ( From 38f110cd4031a40d46a46e977699d1fefa0533eb Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 3 Dec 2024 18:11:26 -0800 Subject: [PATCH 088/170] Added function plot_zne_fitted_line. 
Should display zne point on both coupling and kink density plot but only display fitted line and coupling plot --- helpers/plots.py | 74 +++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 73 insertions(+), 1 deletion(-) diff --git a/helpers/plots.py b/helpers/plots.py index 9427a8b..5641e60 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -16,10 +16,11 @@ import numpy as np import pandas as pd import plotly.graph_objects as go +from helpers.qa import fitted_function from helpers.kz_calcs import theoretical_kink_density -__all__ = ["plot_kink_densities_bg", "plot_kink_density", "plot_spin_orientation"] +__all__ = ["plot_kink_densities_bg", "plot_kink_density", "plot_spin_orientation", "plot_zne_fitted_line"] ta_color_theme = { 5: "#1F77B4", # Dark Blue @@ -544,3 +545,74 @@ def plot_spin_orientation(num_spins=512, sample=None): ) return fig + +def plot_zne_fitted_line(fig, coupling_data, qpu_name, zne_estimates, kz_graph_display, ta_str): + + if len(coupling_data[ta_str]) > 2: + + data_points = coupling_data[ta_str] + x = np.array([point["kappa"] for point in data_points]) + y = np.array([point["kink_density"] for point in data_points]) + + # Ensure there are enough unique x values for fitting + if len(np.unique(x)) > 1: + # Fit a 1st degree polynomial (linear fit) + if qpu_name == "mock_dwave_solver": + # Fancy non-linear function + y_func_x = fitted_function( + x, y, method="mixture_of_exponentials" + ) + else: + # Pure quadratic (see paper) # y = a + b x^2 + y_func_x = fitted_function(x, y, method="pure_quadratic") + + zne_estimates[ta_str] = y_func_x(0) + # Generate fit curve points + x_fit = np.linspace(0, max(x), 100) + y_fit = y_func_x(x_fit) + + # Remove existing fitting curve traces to prevent duplication + fig.data = [ + trace for trace in fig.data if trace.name != "Fitting Curve" + ] + # Remove existing ZNE Estimate traces to prevent duplication + fig.data = [ + trace for trace in fig.data if trace.name != "ZNE Estimate" + ] + + if kz_graph_display == "coupling": + x_axis = "x3" + y_axis = "y1" + _x = [0] + # Add the new fitting curve + fig.add_trace( + go.Scatter( + x=x_fit, + y=y_fit, + mode="lines", + name="Fitting Curve", + line=dict(color="green", dash="dash"), + showlegend=True, + xaxis=x_axis, + yaxis=y_axis, + ) + ) + else: + x_axis = "x1" + y_axis = "y1" + _x = [ta_str] + for ta_str, a in zne_estimates.items(): + fig.add_trace( + go.Scatter( + x=_x, + y=[a], + mode="markers", + name="ZNE Estimate", + marker=dict( + size=12, color="purple", symbol="diamond" + ), + showlegend=False, + xaxis=x_axis, + yaxis=y_axis, + ) + ) \ No newline at end of file From 12f110b652c5ba65fe27e27ffa8c364570e88bd9 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 3 Dec 2024 18:24:57 -0800 Subject: [PATCH 089/170] Add consistent display of previously calculated ane points --- helpers/plots.py | 43 ++++++++++++++++++++++++------------------- 1 file changed, 24 insertions(+), 19 deletions(-) diff --git a/helpers/plots.py b/helpers/plots.py index 5641e60..5f44913 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -573,17 +573,19 @@ def plot_zne_fitted_line(fig, coupling_data, qpu_name, zne_estimates, kz_graph_d # Remove existing fitting curve traces to prevent duplication fig.data = [ - trace for trace in fig.data if trace.name != "Fitting Curve" + trace for trace in fig.data + if not (trace.name == "Fitting Curve" and trace.legendgroup == f"ta_{ta_str}") ] # Remove existing ZNE Estimate traces to prevent duplication fig.data = [ - trace for trace in fig.data if 
trace.name != "ZNE Estimate" + trace for trace in fig.data + if not (trace.name == "ZNE Estimate" and trace.legendgroup == f"ta_{ta_str}") ] if kz_graph_display == "coupling": x_axis = "x3" y_axis = "y1" - _x = [0] + x_zne = 0 # Add the new fitting curve fig.add_trace( go.Scatter( @@ -591,6 +593,7 @@ def plot_zne_fitted_line(fig, coupling_data, qpu_name, zne_estimates, kz_graph_d y=y_fit, mode="lines", name="Fitting Curve", + legendgroup=f"ta_{ta_str}", line=dict(color="green", dash="dash"), showlegend=True, xaxis=x_axis, @@ -600,19 +603,21 @@ def plot_zne_fitted_line(fig, coupling_data, qpu_name, zne_estimates, kz_graph_d else: x_axis = "x1" y_axis = "y1" - _x = [ta_str] - for ta_str, a in zne_estimates.items(): - fig.add_trace( - go.Scatter( - x=_x, - y=[a], - mode="markers", - name="ZNE Estimate", - marker=dict( - size=12, color="purple", symbol="diamond" - ), - showlegend=False, - xaxis=x_axis, - yaxis=y_axis, - ) - ) \ No newline at end of file + x_zne = float(ta_str) + # for ta_str, a in zne_estimates.items(): + fig.add_trace( + go.Scatter( + x=[x_zne], + y=[zne_estimates[ta_str]], + mode="markers", + name="ZNE Estimate", + legendgroup=f"ta_{ta_str}", + marker=dict( + size=12, color="purple", symbol="diamond" + ), + showlegend=False, + xaxis=x_axis, + yaxis=y_axis, + ) + ) + return zne_estimates \ No newline at end of file From 316431278517d264dfc6632735dfd6a2ccfa306d Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 3 Dec 2024 18:25:34 -0800 Subject: [PATCH 090/170] Black reformat --- app.py | 7 ++++--- helpers/plots.py | 41 ++++++++++++++++++++++++++--------------- 2 files changed, 30 insertions(+), 18 deletions(-) diff --git a/app.py b/app.py index c5ea86f..b7cb240 100644 --- a/app.py +++ b/app.py @@ -363,9 +363,10 @@ def display_graphics_kink_density( coupling_data[ta_str].append( {"kappa": kappa, "kink_density": kink_density, "coupling_strength": J} ) - - plot_zne_fitted_line(fig, coupling_data, qpu_name, zne_estimates, kz_graph_display, ta_str) - + + zne_estimates = plot_zne_fitted_line( + fig, coupling_data, qpu_name, zne_estimates, kz_graph_display, ta_str + ) return fig, coupling_data, zne_estimates diff --git a/helpers/plots.py b/helpers/plots.py index 5f44913..e07770c 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -20,7 +20,12 @@ from helpers.kz_calcs import theoretical_kink_density -__all__ = ["plot_kink_densities_bg", "plot_kink_density", "plot_spin_orientation", "plot_zne_fitted_line"] +__all__ = [ + "plot_kink_densities_bg", + "plot_kink_density", + "plot_spin_orientation", + "plot_zne_fitted_line", +] ta_color_theme = { 5: "#1F77B4", # Dark Blue @@ -546,8 +551,11 @@ def plot_spin_orientation(num_spins=512, sample=None): return fig -def plot_zne_fitted_line(fig, coupling_data, qpu_name, zne_estimates, kz_graph_display, ta_str): - + +def plot_zne_fitted_line( + fig, coupling_data, qpu_name, zne_estimates, kz_graph_display, ta_str +): + if len(coupling_data[ta_str]) > 2: data_points = coupling_data[ta_str] @@ -559,9 +567,7 @@ def plot_zne_fitted_line(fig, coupling_data, qpu_name, zne_estimates, kz_graph_d # Fit a 1st degree polynomial (linear fit) if qpu_name == "mock_dwave_solver": # Fancy non-linear function - y_func_x = fitted_function( - x, y, method="mixture_of_exponentials" - ) + y_func_x = fitted_function(x, y, method="mixture_of_exponentials") else: # Pure quadratic (see paper) # y = a + b x^2 y_func_x = fitted_function(x, y, method="pure_quadratic") @@ -573,13 +579,20 @@ def plot_zne_fitted_line(fig, coupling_data, qpu_name, zne_estimates, kz_graph_d 
# Remove existing fitting curve traces to prevent duplication fig.data = [ - trace for trace in fig.data - if not (trace.name == "Fitting Curve" and trace.legendgroup == f"ta_{ta_str}") + trace + for trace in fig.data + if not ( + trace.name == "Fitting Curve" + and trace.legendgroup == f"ta_{ta_str}" + ) ] # Remove existing ZNE Estimate traces to prevent duplication fig.data = [ - trace for trace in fig.data - if not (trace.name == "ZNE Estimate" and trace.legendgroup == f"ta_{ta_str}") + trace + for trace in fig.data + if not ( + trace.name == "ZNE Estimate" and trace.legendgroup == f"ta_{ta_str}" + ) ] if kz_graph_display == "coupling": @@ -612,12 +625,10 @@ def plot_zne_fitted_line(fig, coupling_data, qpu_name, zne_estimates, kz_graph_d mode="markers", name="ZNE Estimate", legendgroup=f"ta_{ta_str}", - marker=dict( - size=12, color="purple", symbol="diamond" - ), + marker=dict(size=12, color="purple", symbol="diamond"), showlegend=False, xaxis=x_axis, yaxis=y_axis, ) - ) - return zne_estimates \ No newline at end of file + ) + return zne_estimates From 23ccbea819eb247592dc0083723bc742db15d3f1 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 6 Dec 2024 11:51:05 -0800 Subject: [PATCH 091/170] Change label on x-axis --- helpers/plots.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helpers/plots.py b/helpers/plots.py index e07770c..ec51528 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -179,7 +179,7 @@ def plot_kink_densities_bg( type="linear", ) - x_axis3 = dict(title="kappa", type="linear", range=[-3, 1.5]) + x_axis3 = dict(title="Noise Level (-1.8/J)", type="linear", range=[-3, 1.5]) if display == "kink_density": fig_layout = go.Layout( xaxis=x_axis1, From 033aa5d58ff20921d62a98b6e30440bbc2154490 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 6 Dec 2024 11:54:02 -0800 Subject: [PATCH 092/170] Add explanation for fitting line and -1.8 in description --- helpers/layouts_cards.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helpers/layouts_cards.py b/helpers/layouts_cards.py index b147374..a5aa88f 100644 --- a/helpers/layouts_cards.py +++ b/helpers/layouts_cards.py @@ -59,7 +59,7 @@ def control_card(solvers={}, init_job_status="READY"): ), html.P( """ -Simulate zero-temperature and zero-time extrapolations on a quantum computer, leveraging the Kibble-Zurek mechanism. +Simulate zero-temperature and zero-time extrapolations on a quantum computer using the Kibble-Zurek mechanism. Fitting occurs once three or more data points are plotted, with -1.8 representing the highest energy scale corresponding to the lowest noise level. 
""", style={"color": "white", "fontSize": 14}, ), From 3b044dd06b6f3674605a91e53314e0ee1531478d Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 6 Dec 2024 11:56:05 -0800 Subject: [PATCH 093/170] Add J to coupling strength widget --- helpers/layouts_cards.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helpers/layouts_cards.py b/helpers/layouts_cards.py index a5aa88f..edac6de 100644 --- a/helpers/layouts_cards.py +++ b/helpers/layouts_cards.py @@ -65,7 +65,7 @@ def control_card(solvers={}, init_job_status="READY"): ), html.H5("Spins", style=control_header_style), html.Div([config_spins]), - html.H5("Coupling Strength", style=control_header_style), + html.H5("Coupling Strength (J)", style=control_header_style), html.Div([config_coupling_strength]), html.H5("Quench Duration [ns]", style=control_header_style), html.Div([config_anneal_duration]), From c567e82aa1ed9a7819edf246a6947f95867283f2 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 6 Dec 2024 12:02:06 -0800 Subject: [PATCH 094/170] Added hyperlink with reference to the paper --- helpers/layouts_cards.py | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/helpers/layouts_cards.py b/helpers/layouts_cards.py index edac6de..402802e 100644 --- a/helpers/layouts_cards.py +++ b/helpers/layouts_cards.py @@ -58,14 +58,28 @@ def control_card(solvers={}, init_job_status="READY"): style={"color": "rgb(243, 120, 32)"}, ), html.P( - """ -Simulate zero-temperature and zero-time extrapolations on a quantum computer using the Kibble-Zurek mechanism. Fitting occurs once three or more data points are plotted, with -1.8 representing the highest energy scale corresponding to the lowest noise level. + [ + """ +Simulate zero-temperature and zero-time extrapolations on a quantum computer using the Kibble-Zurek mechanism. Fitting occurs once three or more data points are plotted, with -1.8 representing the highest energy scale corresponding to the lowest noise level. Learn more in the """, + html.A( + "paper", + href="https://arxiv.org/abs/2311.01306", # Replace with the actual URL + target="_blank", # Opens the link in a new tab + style={ + "color": "rgb(3, 184, 255)", + "textDecoration": "none", + }, # Optional styling + ), + ".", + ], style={"color": "white", "fontSize": 14}, ), html.H5("Spins", style=control_header_style), html.Div([config_spins]), - html.H5("Coupling Strength (J)", style=control_header_style), + html.H5( + "Coupling Strength (J)", style=control_header_style + ), html.Div([config_coupling_strength]), html.H5("Quench Duration [ns]", style=control_header_style), html.Div([config_anneal_duration]), From 9612ec140b4053e39a19eb1d0a80a36ddb016d4e Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 6 Dec 2024 12:02:17 -0800 Subject: [PATCH 095/170] Minor reformat --- helpers/kz_calcs.py | 61 ++++++++++++++++++++++++--------------------- helpers/tooltips.py | 32 ++++++++---------------- 2 files changed, 43 insertions(+), 50 deletions(-) diff --git a/helpers/kz_calcs.py b/helpers/kz_calcs.py index 9ff0d06..6a67246 100644 --- a/helpers/kz_calcs.py +++ b/helpers/kz_calcs.py @@ -14,54 +14,57 @@ import numpy as np -__all__ = ['kink_stats', 'theoretical_kink_density'] +__all__ = ["kink_stats", "theoretical_kink_density"] + def theoretical_kink_density(annealing_times_ns, J, schedule, schedule_name): """ - Calculate the kink density predicted for given the coupling strength and annealing times. + Calculate the kink density predicted for given the coupling strength and annealing times. 
Args: annealing_times_ns: Iterable of annealing times, in nanoseconds. J: Coupling strength between the spins of the ring. - schedule: Anneal schedule for the selected QPU. + schedule: Anneal schedule for the selected QPU. - schedule_name: Filename of anneal schedule. Used to compensate for schedule energy - overestimate. + schedule_name: Filename of anneal schedule. Used to compensate for schedule energy + overestimate. Returns: - Kink density per anneal time, as a NumPy array. + Kink density per anneal time, as a NumPy array. """ # See the Code section of the README.md file for an explanation of the - # following code. + # following code. + + COMPENSATION_SCHEDULE_ENERGY = 0.8 if "Advantage_system" in schedule_name else 1.0 - COMPENSATION_SCHEDULE_ENERGY = 0.8 if 'Advantage_system' in schedule_name else 1.0 + A = COMPENSATION_SCHEDULE_ENERGY * schedule["A(s) (GHz)"] + B = COMPENSATION_SCHEDULE_ENERGY * schedule["B(s) (GHz)"] + s = schedule["s"] - A = COMPENSATION_SCHEDULE_ENERGY * schedule['A(s) (GHz)'] - B = COMPENSATION_SCHEDULE_ENERGY * schedule['B(s) (GHz)'] - s = schedule['s'] + A_tag = A.diff() / s.diff() # Derivatives of the energies for fast anneal + B_tag = B.diff() / s.diff() - A_tag = A.diff()/s.diff() # Derivatives of the energies for fast anneal - B_tag = B.diff()/s.diff() + sc_indx = abs(A - B * abs(J)).idxmin() # Anneal fraction, s, at the critical point - sc_indx = abs(A - B*abs(J)).idxmin() # Anneal fraction, s, at the critical point + b_numerator = 1e9 * np.pi * A[sc_indx] # D-Wave's schedules are in GHz + b_denominator = B_tag[sc_indx] / B[sc_indx] - A_tag[sc_indx] / A[sc_indx] + b = b_numerator / b_denominator - - b_numerator = 1e9 * np.pi * A[sc_indx] # D-Wave's schedules are in GHz - b_denominator = B_tag[sc_indx]/B[sc_indx] - A_tag[sc_indx]/A[sc_indx] - b = b_numerator / b_denominator + return np.power([1e-9 * t for t in annealing_times_ns], -0.5) / ( + 2 * np.pi * np.sqrt(2 * b) + ) - return np.power([1e-9 * t for t in annealing_times_ns], -0.5) / (2 * np.pi * np.sqrt(2 * b)) def kink_stats(sampleset, J): """ - Calculate kink density for the sample set. + Calculate kink density for the sample set. Calculation is the number of sign switches per sample divided by the length of the ring for ferromagnetic coupling. For anti-ferromagnetic coupling, - kinks are any pairs of identically-oriented spins. + kinks are any pairs of identically-oriented spins. Args: sampleset: dimod sample set. @@ -69,22 +72,22 @@ def kink_stats(sampleset, J): J: Coupling strength between the spins of the ring. Returns: - Switches/non-switches per sample and the average kink density across - all samples. + Switches/non-switches per sample and the average kink density across + all samples. 
""" samples_array = sampleset.record.sample - sign_switches = np.diff(samples_array, - prepend=samples_array[:,-1].reshape(len(samples_array), 1)) - + sign_switches = np.diff( + samples_array, prepend=samples_array[:, -1].reshape(len(samples_array), 1) + ) + if J < 0: switches_per_sample = np.count_nonzero(sign_switches, 1) kink_density = np.mean(switches_per_sample) / sampleset.record.sample.shape[1] return switches_per_sample, kink_density - - non_switches_per_sample = np.count_nonzero(sign_switches==0, 1) + + non_switches_per_sample = np.count_nonzero(sign_switches == 0, 1) kink_density = np.mean(non_switches_per_sample) / sampleset.record.sample.shape[1] return non_switches_per_sample, kink_density - \ No newline at end of file diff --git a/helpers/tooltips.py b/helpers/tooltips.py index 1f1b8e5..66df7f5 100644 --- a/helpers/tooltips.py +++ b/helpers/tooltips.py @@ -13,35 +13,25 @@ # limitations under the License. tool_tips = { - "anneal_duration": -f"""Duration of the quantum anneal. Range of 5 to 320 nanoseconds.""", - "kz_graph_display": -f"""Plot selection: Defects vs anneal duration or defects vs noise level""", - "spins": -f"""Number of spins in the 1D ring.""", - "coupling_strength": -f"""Coupling strength, J, between spins in the ferromagnetic ring. + "anneal_duration": f"""Duration of the quantum anneal. Range of 5 to 320 nanoseconds.""", + "kz_graph_display": f"""Plot selection: Defects vs anneal duration or defects vs noise level""", + "spins": f"""Number of spins in the 1D ring.""", + "coupling_strength": f"""Coupling strength, J, between spins in the ferromagnetic ring. Range of -1.8 to -0.6. """, - "qpu_selection": -f"""Selection from quantum computers available to your account/project token.""", - "embedding_is_cached": -f"""Whether or not a minor-embedding is cached for the selected QPU, for each + "qpu_selection": f"""Selection from quantum computers available to your account/project token.""", + "embedding_is_cached": f"""Whether or not a minor-embedding is cached for the selected QPU, for each of the available number of spins. If not available, an attempt is made to find an embedding the first time you submit a problem. """, - "btn_simulate": -f"""Click to (minor-embed if a cached embedding is unavailable) and + "btn_simulate": f"""Click to (minor-embed if a cached embedding is unavailable) and submit the problem to your selected QPU. """, - "quench_schedule_filename": -f"""CSV file with the fast-anneal schedule for the selected quantum computer. + "quench_schedule_filename": f"""CSV file with the fast-anneal schedule for the selected quantum computer. If none exists, uses one from a different quantum computer (expect inaccuracies). 
You can download schedules from https://docs.dwavesys.com/docs/latest/doc_physical_properties.html """, - "job_submit_state": -f"""Status of the last submission to the quantum computer (or initial state).""", - "btn_reset": -f"""Clear all existing data stored for the current run and reset all plots.""", -} \ No newline at end of file + "job_submit_state": f"""Status of the last submission to the quantum computer (or initial state).""", + "btn_reset": f"""Clear all existing data stored for the current run and reset all plots.""", +} From 69f397fa38440156dc8ef1496cff894b77383ccb Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 6 Dec 2024 12:28:26 -0800 Subject: [PATCH 096/170] Modify the fitting_function to return None if fitting fails --- helpers/qa.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/helpers/qa.py b/helpers/qa.py index 0ee52c6..64bc76a 100644 --- a/helpers/qa.py +++ b/helpers/qa.py @@ -216,8 +216,7 @@ def mixture_of_exponentials(x, p_0, p_1, p_2): f=mixture_of_exponentials, xdata=xdata, ydata=ydata, p0=p0 ) except: - warnings.warn("Should modify to check exception is no solution") - p = p0 + return None def y_func_x(x): return mixture_of_exponentials(x, *p) @@ -257,8 +256,7 @@ def sigmoidal_crossover(x, p_0, p_1, p_2, p_3): f=sigmoidal_crossover, xdata=xdata, ydata=ydata, p0=p0 ) except: - warnings.warn("Should modify to check exception is no solution") - p = p0 + return None def y_func_x(x): return sigmoidal_crossover(x, *p) From fe357302b0a083fdd0e4401269b72e9586d18c76 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 6 Dec 2024 12:47:35 -0800 Subject: [PATCH 097/170] Catch the error when fitting fails and do not display the fitting line or ZNE point --- helpers/plots.py | 40 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/helpers/plots.py b/helpers/plots.py index ec51528..d642aaa 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -571,12 +571,13 @@ def plot_zne_fitted_line( else: # Pure quadratic (see paper) # y = a + b x^2 y_func_x = fitted_function(x, y, method="pure_quadratic") - - zne_estimates[ta_str] = y_func_x(0) - # Generate fit curve points - x_fit = np.linspace(0, max(x), 100) - y_fit = y_func_x(x_fit) - + + if y_func_x is not None: + zne_estimates[ta_str] = y_func_x(0) + x_fit = np.linspace(0, max(x), 100) + y_fit = y_func_x(x_fit) + + # Remove existing fitting curve traces to prevent duplication fig.data = [ trace @@ -595,7 +596,7 @@ def plot_zne_fitted_line( ) ] - if kz_graph_display == "coupling": + if kz_graph_display == "coupling" and y_func_x is not None: x_axis = "x3" y_axis = "y1" x_zne = 0 @@ -618,17 +619,18 @@ def plot_zne_fitted_line( y_axis = "y1" x_zne = float(ta_str) # for ta_str, a in zne_estimates.items(): - fig.add_trace( - go.Scatter( - x=[x_zne], - y=[zne_estimates[ta_str]], - mode="markers", - name="ZNE Estimate", - legendgroup=f"ta_{ta_str}", - marker=dict(size=12, color="purple", symbol="diamond"), - showlegend=False, - xaxis=x_axis, - yaxis=y_axis, + if y_func_x is not None: + fig.add_trace( + go.Scatter( + x=[x_zne], + y=[zne_estimates[ta_str]], + mode="markers", + name="ZNE Estimate", + legendgroup=f"ta_{ta_str}", + marker=dict(size=12, color="purple", symbol="diamond"), + showlegend=False, + xaxis=x_axis, + yaxis=y_axis, + ) ) - ) return zne_estimates + ) + return zne_estimates From 63700ad6d11836a75bd8d8ef6df6009b84e6d6e7 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 6 Dec 2024 12:48:07 -0800 Subject: [PATCH 098/170] minor reformat --- helpers/plots.py | 5 ++--- 1 file
changed, 2 insertions(+), 3 deletions(-) diff --git a/helpers/plots.py b/helpers/plots.py index d642aaa..3860a97 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -571,13 +571,12 @@ def plot_zne_fitted_line( else: # Pure quadratic (see paper) # y = a + b x^2 y_func_x = fitted_function(x, y, method="pure_quadratic") - + if y_func_x is not None: zne_estimates[ta_str] = y_func_x(0) x_fit = np.linspace(0, max(x), 100) y_fit = y_func_x(x_fit) - - + # Remove existing fitting curve traces to prevent duplication fig.data = [ trace From 4e22922bb98e3e4a41cf26fcc575de7754acb727 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 6 Dec 2024 13:11:55 -0800 Subject: [PATCH 099/170] Add modal to warn user of fitting failure --- app.py | 33 ++++++++++++++++++++++++++++----- helpers/plots.py | 7 ++++--- 2 files changed, 32 insertions(+), 8 deletions(-) diff --git a/app.py b/app.py index b7cb240..730e35f 100644 --- a/app.py +++ b/app.py @@ -111,6 +111,17 @@ dcc.Store(id="coupling_data", data={}), # store zero noise extrapolation dcc.Store(id="zne_estimates", data={}), + dcc.Store(id="modal_trigger", data=False), + dbc.Modal( + [ + dbc.ModalHeader(dbc.ModalTitle("Error")), + dbc.ModalBody( + "Fitting function failed likely due to ill data, please collect more." + ), + ], + id="error-modal", + is_open=False, + ), ], fluid=True, ) @@ -269,6 +280,7 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): Output("sample_vs_theory", "figure"), Output("coupling_data", "data"), # store data using dcc Output("zne_estimates", "data"), # update zne_estimates + Output("modal_trigger", "data"), Input("btn_reset", "n_clicks"), Input("qpu_selection", "value"), Input("kz_graph_display", "value"), @@ -321,7 +333,7 @@ def display_graphics_kink_density( zne_estimates, ) - return fig, coupling_data, zne_estimates + return fig, coupling_data, zne_estimates, False if trigger_id in [ "kz_graph_display", @@ -338,7 +350,7 @@ def display_graphics_kink_density( zne_estimates, ) - return fig, coupling_data, zne_estimates + return fig, coupling_data, zne_estimates, False if trigger_id == "job_submit_state": @@ -364,11 +376,11 @@ def display_graphics_kink_density( {"kappa": kappa, "kink_density": kink_density, "coupling_strength": J} ) - zne_estimates = plot_zne_fitted_line( + zne_estimates, modal_trigger = plot_zne_fitted_line( fig, coupling_data, qpu_name, zne_estimates, kz_graph_display, ta_str ) - return fig, coupling_data, zne_estimates + return fig, coupling_data, zne_estimates, modal_trigger else: return dash.no_update @@ -382,7 +394,7 @@ def display_graphics_kink_density( coupling_data, zne_estimates, ) - return fig, coupling_data, zne_estimates + return fig, coupling_data, zne_estimates, False @app.callback( @@ -676,5 +688,16 @@ def activate_tooltips(tooltips_show): ) +@app.callback( + Output("error-modal", "is_open"), + Input("modal_trigger", "data"), + State("error-modal", "is_open"), +) +def toggle_modal(trigger, is_open): + if trigger: + return True + return is_open + + if __name__ == "__main__": app.run_server(debug=True) diff --git a/helpers/plots.py b/helpers/plots.py index 3860a97..df20fd6 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -555,7 +555,7 @@ def plot_spin_orientation(num_spins=512, sample=None): def plot_zne_fitted_line( fig, coupling_data, qpu_name, zne_estimates, kz_graph_display, ta_str ): - + modal_trigger = False if len(coupling_data[ta_str]) > 2: data_points = coupling_data[ta_str] @@ -576,7 +576,8 @@ def plot_zne_fitted_line( zne_estimates[ta_str] = y_func_x(0) 
x_fit = np.linspace(0, max(x), 100) y_fit = y_func_x(x_fit) - + else: + modal_trigger = True # Remove existing fitting curve traces to prevent duplication fig.data = [ trace @@ -632,4 +633,4 @@ def plot_zne_fitted_line( yaxis=y_axis, ) ) - return zne_estimates + return zne_estimates, modal_trigger From 255f7c7ac317d4a3c165ac412fa6fa39ad543de3 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 6 Dec 2024 13:14:02 -0800 Subject: [PATCH 100/170] Minor fix --- app.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app.py b/app.py index 730e35f..4a89967 100644 --- a/app.py +++ b/app.py @@ -116,7 +116,7 @@ [ dbc.ModalHeader(dbc.ModalTitle("Error")), dbc.ModalBody( - "Fitting function failed likely due to ill data, please collect more." + "Fitting function failed likely due to ill conditioned data, please collect more." ), ], id="error-modal", From 40af557d59127cb898e9b54438a3f9a8947a0459 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 6 Dec 2024 14:34:49 -0800 Subject: [PATCH 101/170] Add documentation for fitted_function --- helpers/qa.py | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/helpers/qa.py b/helpers/qa.py index 64bc76a..d145f86 100644 --- a/helpers/qa.py +++ b/helpers/qa.py @@ -179,7 +179,25 @@ def json_to_dict(emb_json): def fitted_function(xdata, ydata, method=("polynomial", 1)): - """ """ + """ + Generate a fitting function based on the provided data points and method. + + Args: + xdata: Array-like, independent variable data points. + ydata: Array-like, dependent variable data points. + method: Tuple or string specifying the fitting method. Options include: + - ("polynomial", deg): Fits a polynomial of degree `deg`. + - "pure_quadratic": Fits a pure quadratic model, y = a + b*x^2. + - "mixture_of_exponentials": Fits a mixture of exponential functions. + - "sigmoidal_crossover": Fits a sigmoidal crossover model. + + Returns: + Callable function that takes a single argument `x` and returns the fitted value. + Returns `None` if the fitting process fails. + + Raises: + ValueError: If the specified method is unknown. + """ if type(method) is tuple and method[0] == "polynomial": coeffs = Polynomial.fit(xdata, ydata, deg=method[1]).convert().coef From e30e5019f9ff97b6c7d8fffaa57f961230affc07 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 6 Dec 2024 15:20:06 -0800 Subject: [PATCH 102/170] Updated documentation --- helpers/plots.py | 111 ++++++++++++++++++++++++++++++++++++----------- 1 file changed, 86 insertions(+), 25 deletions(-) diff --git a/helpers/plots.py b/helpers/plots.py index df20fd6..e2ac55d 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -62,20 +62,34 @@ def plot_kink_densities_bg( display, time_range, J_base, schedule_name, coupling_data, zne_estimates ): """ - Plot background of theoretical kink-density and QPU energy scales. + Plot the background of theoretical kink density and QPU energy scales. - Args: - - display: Displays plots of type "both", "kink_density", or "schedule". - - time_range: Maximum and minimum quench times, as a list. + This function generates a Plotly figure that displays the theoretical + predictions for kink densities along with QPU energy scales based on + the provided anneal schedule. It supports different display modes + such as "both", "kink_density", "schedule", and "coupling". - coupling_strength: Coupling strength between spins in ring. - - schedule_name: Filename of anneal schedule. + Args: + display (str): The type of plot to display. 
Options are: + - "both": Display both kink density and schedule. + - "kink_density": Display only the kink density plot. + - "schedule": Display only the anneal schedule. + - "coupling": Display coupling-related plots. + time_range (list of float): A list containing the minimum and maximum + quench times [min_quench_time, max_quench_time] in nanoseconds. + J_base (float): The base coupling strength between spins in the ring. + schedule_name (str): The filename of the anneal schedule CSV file. + If not provided, a fallback schedule is used. + coupling_data (dict): A dictionary containing coupling-related data + structured as {ta_str: [data_points]}, where each data point + includes "coupling_strength" and "kink_density". + zne_estimates (dict): A dictionary to store Zero-Noise Extrapolation + (ZNE) estimates structured as {ta_str: estimate}. Returns: - Plotly figure of predicted kink densities and/or QPU energy scales. + plotly.graph_objs.Figure: A Plotly figure object containing the + predicted kink densities and/or QPU energy scales based on the + specified display mode. """ if schedule_name: schedule = pd.read_csv(f"helpers/{schedule_name}") @@ -377,20 +391,29 @@ def plot_kink_densities_bg( def plot_kink_density(display, fig_dict, kink_density, anneal_time, J): - """Add kink density from QPU samples to plot. - - Args: - - display: Displays plots of type "both", "kink_density", or "schedule". - - fig_dict: Existing background Plotly figure, as a dict. + """ + Add a kink density marker from QPU samples to an existing plot. - kink_density: Calculated kink density derived from QPU sample set. + Depending on the display mode, this function updates the provided + Plotly figure with a new marker representing the calculated kink + density at a specific anneal time and coupling strength. - anneal_time: Anneal time used for the kink density. + Args: + display (str): The type of plot to display. Options are: + - "both": Display both kink density and schedule. + - "kink_density": Display only the kink density plot. + - "schedule": Display only the anneal schedule. + - "coupling": Display coupling-related plots. + fig_dict (dict): The existing background Plotly figure as a dictionary. + kink_density (float): The calculated kink density derived from QPU samples. + anneal_time (str or float): The anneal time corresponding to the kink density. + It can be a string identifier or a numerical value in nanoseconds. + J (float): The coupling strength associated with the kink density. Returns: - Updated Plotly figure with a marker at (anneal time, kink-density). + plotly.graph_objs.Figure or dash.no_update: + - If display is "schedule", returns `no_update` indicating no changes. + - Otherwise, returns the updated Plotly figure with the new kink density marker. """ if display == "schedule": return no_update @@ -454,15 +477,25 @@ def plot_kink_density(display, fig_dict, kink_density, anneal_time, J): def plot_spin_orientation(num_spins=512, sample=None): - """Plot the ring of spins. + """ + Visualize the orientation of spins in a ring using 3D cones. - Args: - num_spins: Number of spins in the ring. + This function generates a 3D Plotly figure representing the orientation + of spins arranged in a ring. Each spin is depicted as a cone pointing + upwards or downwards based on the provided sample. If no sample is + provided, all spins are shown pointing upwards by default. - sample: Single sample from a sample set. + Args: + num_spins (int, optional): The total number of spins in the ring. + Defaults to 512. 
+ sample (array-like, optional): A single sample from a sample set + indicating spin orientations. Each element should be: + - `1` for spin up. + - `-1` or any other value for spin down. + If `None`, all spins are assumed to be up. Returns: - Plotly figure of orientation for all spins in the ring. + plotly.graph_objs.Figure: A 3D Plotly figure displaying the spin orientations. """ cone_size = 0.5 # Based on how it looks @@ -555,6 +588,34 @@ def plot_spin_orientation(num_spins=512, sample=None): def plot_zne_fitted_line( fig, coupling_data, qpu_name, zne_estimates, kz_graph_display, ta_str ): + """ + Fit a curve to the coupling data and plot the Zero-Noise Extrapolation (ZNE) estimate. + + This function performs curve fitting on the provided coupling data for a + specific anneal time and adds the fitted curve along with the ZNE estimate + to the existing Plotly figure. The fitting method varies based on the QPU + used. It also handles the removal of any existing fitting curves to avoid + duplication. + + Args: + fig (plotly.graph_objs.Figure): The existing Plotly figure to update. + coupling_data (dict): A dictionary containing coupling-related data + structured as {ta_str: [data_points]}, where each data point + includes "kappa" and "kink_density". + qpu_name (str): The name of the Quantum Processing Unit (QPU) used. + Determines the fitting method. + zne_estimates (dict): A dictionary to store Zero-Noise Extrapolation + (ZNE) estimates structured as {ta_str: estimate}. + kz_graph_display (str): The type of graph display. Typically aligns with + the `display` parameter in other functions, such as "coupling". + ta_str (str): The anneal time identifier as a string. + + Returns: + tuple: + - zne_estimates (dict): Updated dictionary with new ZNE estimates. + - modal_trigger (bool): A flag indicating whether a modal was triggered + due to ill conditioned data for fitting. 
+ """ modal_trigger = False if len(coupling_data[ta_str]) > 2: From 0d488bdc64fc64255e18faa318c86a387eda01f3 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 10 Dec 2024 15:55:30 -0800 Subject: [PATCH 103/170] Added navbar to app.py and separated corresponding title and description for two demos --- app.py | 171 +++++++++++++++++++++++++++------------ helpers/layouts_cards.py | 38 ++++++--- helpers/plots.py | 2 +- 3 files changed, 146 insertions(+), 65 deletions(-) diff --git a/app.py b/app.py index 4a89967..d3c90f1 100644 --- a/app.py +++ b/app.py @@ -64,64 +64,118 @@ client = "dummy" -# Dashboard-organization section -app.layout = dbc.Container( - [ - dbc.Row( - [ # Top: logo - dbc.Col( - [ - html.Img( - src="assets/dwave_logo.png", - height="25px", - style={"textAlign": "left", "margin": "10px 0px 15px 0px"}, - ) - ], - width=3, - ) - ] - ), - dbc.Row( - [ - dbc.Col( # Left: control panel - [ - control_card(solvers=qpus, init_job_status=init_job_status), - *dbc_modal("modal_solver"), - *[ - dbc.Tooltip( - message, - target=target, - id=f"tooltip_{target}", - style=dict(), - ) - for target, message in tool_tips.items() +def demo1_layout(): + return dbc.Container( + [ + dbc.Row( + [ + dbc.Col( # Left: control panel + [ + control_card(solvers=qpus, init_job_status=init_job_status, demo_type="Kibble-Zurek"), + *dbc_modal("modal_solver"), + *[ + dbc.Tooltip( + message, + target=target, + id=f"tooltip_{target}", + style=dict(), + ) + for target, message in tool_tips.items() + ], ], + width=4, + style={"minWidth": "30rem"}, + ), + dbc.Col( # Right: display area + graphs_card(), + width=8, + style={"minWidth": "60rem"}, + ), + ] + ), + # store coupling data points + dcc.Store(id="coupling_data", data={}), + # store zero noise extrapolation + dcc.Store(id="zne_estimates", data={}), + dcc.Store(id="modal_trigger", data=False), + dbc.Modal( + [ + dbc.ModalHeader(dbc.ModalTitle("Error")), + dbc.ModalBody( + "Fitting function failed likely due to ill conditioned data, please collect more." + ), + ], + id="error-modal", + is_open=False, + ), + ], + fluid=True, + ) + +def demo2_layout(): + return dbc.Container([ + dbc.Row([ + dbc.Col( # Left: control panel + [ + control_card( + solvers=qpus, + init_job_status=init_job_status, + demo_type="Zero-Noise" + ), + *dbc_modal('modal_solver'), + *[dbc.Tooltip( + message, target=target, id=f'tooltip_{target}', style = dict()) + for target, message in tool_tips.items()] ], width=4, - style={"minWidth": "30rem"}, + style={'minWidth': "30rem"}, ), - dbc.Col( # Right: display area + dbc.Col( # Right: display area graphs_card(), width=8, - style={"minWidth": "60rem"}, + style={'minWidth': "60rem"}, ), - ] - ), - # store coupling data points - dcc.Store(id="coupling_data", data={}), - # store zero noise extrapolation - dcc.Store(id="zne_estimates", data={}), - dcc.Store(id="modal_trigger", data=False), - dbc.Modal( - [ - dbc.ModalHeader(dbc.ModalTitle("Error")), - dbc.ModalBody( - "Fitting function failed likely due to ill conditioned data, please collect more." 
- ), - ], - id="error-modal", - is_open=False, - ), + ]), + ], + fluid=True, + ) + +# Define the Navbar with two tabs +navbar = dbc.Navbar( + dbc.Container( + [ + # Navbar Brand/Logo + dbc.NavbarBrand( + [ + html.Img( + src="assets/dwave_logo.png", + height="30px", + style={"margin-right": "10px"}, + ), + ], + href="/demo1", # Default route + ), + + # Navbar Tabs + dbc.Nav( + [ + dbc.NavItem(dbc.NavLink("Demo 1", href="/demo1", active="exact")), + dbc.NavItem(dbc.NavLink("Demo 2", href="/demo2", active="exact")), + ], + pills=True, + ), + ] + ), + color="dark", + dark=True, + sticky="top", +) + +app.layout = dbc.Container( + [ + dcc.Location(id="url", refresh=False), # Tracks the URL + navbar, # Includes the Navbar at the top + html.Div(id="page-content", style={"paddingTop": "20px"}), # Dynamic page content ], fluid=True, ) @@ -131,6 +185,21 @@ # Callbacks Section +@app.callback( + Output("page-content", "children"), + Input("url", "pathname") +) +def display_page(pathname): + # If the user goes to the "/demo1" route + if pathname == "/demo1": + return demo1_layout() + # If the user goes to the "/demo2" route + elif pathname == "/demo2": + return demo2_layout() + # Default fallback if no path matches: + else: + return demo1_layout() # or redirect to a "404" or default page + @app.callback( Output("solver_modal", "is_open"), diff --git a/helpers/layouts_cards.py b/helpers/layouts_cards.py index 402802e..d970744 100644 --- a/helpers/layouts_cards.py +++ b/helpers/layouts_cards.py @@ -27,7 +27,7 @@ control_header_style = {"color": "rgb(3, 184, 255)", "marginTop": "10px"} -def control_card(solvers={}, init_job_status="READY"): +def control_card(solvers={}, init_job_status="READY", demo_type="Kibble-Zurek"): """Lay out the configuration and job-submission card. Args: @@ -46,18 +46,17 @@ def control_card(solvers={}, init_job_status="READY"): else: job_status_color = "white" - return dbc.Card( - [ - dbc.Row( - [ - dbc.Col( - [ - html.H4( - "Coherent Annealing: Zero-Noise Extrapolation", - className="card-title", - style={"color": "rgb(243, 120, 32)"}, - ), - html.P( + if demo_type == 'Kibble-Zurek': + demo_title = 'Coherent Annealing: KZ Simulation' + demo_description = html.P( + """ + Use a quantum computer to simulate the formation of topological defects in a 1D ring + of spins undergoing a phase transition, described by the Kibble-Zurek mechanism. + """, + style={'color': 'white', 'fontSize': 14}) + else: + demo_title = "Coherent Annealing: Zero-Noise Extrapolation" + demo_description = html.P( [ """ Simulate zero-temperature and zero-time extrapolations on a quantum computer using the Kibble-Zurek mechanism. Fitting occurs once three or more data points are plotted, with -1.8 representing the highest energy scale corresponding to the lowest noise level. 
Learn more in the @@ -74,7 +73,20 @@ def control_card(solvers={}, init_job_status="READY"): ".", ], style={"color": "white", "fontSize": 14}, + ) + + return dbc.Card( + [ + dbc.Row( + [ + dbc.Col( + [ + html.H4( + demo_title, + className="card-title", + style={"color": "rgb(243, 120, 32)"}, ), + demo_description, html.H5("Spins", style=control_header_style), html.Div([config_spins]), html.H5( diff --git a/helpers/plots.py b/helpers/plots.py index e2ac55d..f2760d9 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -193,7 +193,7 @@ def plot_kink_densities_bg( type="linear", ) - x_axis3 = dict(title="Noise Level (-1.8/J)", type="linear", range=[-3, 1.5]) + x_axis3 = dict(title="Noise Level (-1.8/J)", type="linear", range=[0, 3]) if display == "kink_density": fig_layout = go.Layout( xaxis=x_axis1, From cdda359833318d80bd728a493c15f8613bb67c82 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 10 Dec 2024 16:24:58 -0800 Subject: [PATCH 104/170] Removed reset button and separated plot display for both demos --- app.py | 9 +++---- helpers/layouts_cards.py | 23 ++++++++--------- helpers/layouts_components.py | 47 +++++++++++++++++++++++++++++++---- 3 files changed, 56 insertions(+), 23 deletions(-) diff --git a/app.py b/app.py index d3c90f1..b7f743d 100644 --- a/app.py +++ b/app.py @@ -87,7 +87,7 @@ def demo1_layout(): style={"minWidth": "30rem"}, ), dbc.Col( # Right: display area - graphs_card(), + graphs_card(demo_type="Kibble-Zurek"), width=8, style={"minWidth": "60rem"}, ), @@ -131,7 +131,7 @@ def demo2_layout(): style={'minWidth': "30rem"}, ), dbc.Col( # Right: display area - graphs_card(), + graphs_card(demo_type="Zero-Noise"), width=8, style={'minWidth': "60rem"}, ), @@ -350,7 +350,6 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): Output("coupling_data", "data"), # store data using dcc Output("zne_estimates", "data"), # update zne_estimates Output("modal_trigger", "data"), - Input("btn_reset", "n_clicks"), Input("qpu_selection", "value"), Input("kz_graph_display", "value"), State("coupling_strength", "value"), # previously input @@ -386,9 +385,7 @@ def display_graphics_kink_density( ta_max = 350 if ( - trigger_id == "btn_reset" - or trigger_id == "qpu_selection" - or trigger_id == "spins" + trigger_id == "qpu_selection" or trigger_id == "spins" ): coupling_data = {} zne_estimates = {} diff --git a/helpers/layouts_cards.py b/helpers/layouts_cards.py index d970744..07694fa 100644 --- a/helpers/layouts_cards.py +++ b/helpers/layouts_cards.py @@ -54,6 +54,8 @@ def control_card(solvers={}, init_job_status="READY", demo_type="Kibble-Zurek") of spins undergoing a phase transition, described by the Kibble-Zurek mechanism. 
""", style={'color': 'white', 'fontSize': 14}) + demo_anneal_duration = config_anneal_duration_kz + else: demo_title = "Coherent Annealing: Zero-Noise Extrapolation" demo_description = html.P( @@ -74,6 +76,8 @@ def control_card(solvers={}, init_job_status="READY", demo_type="Kibble-Zurek") ], style={"color": "white", "fontSize": 14}, ) + demo_anneal_duration = config_anneal_duration_zne + return dbc.Card( [ @@ -94,7 +98,7 @@ def control_card(solvers={}, init_job_status="READY", demo_type="Kibble-Zurek") ), html.Div([config_coupling_strength]), html.H5("Quench Duration [ns]", style=control_header_style), - html.Div([config_anneal_duration]), + html.Div([demo_anneal_duration]), html.H5("QPU", style=control_header_style), html.Div( [ @@ -129,15 +133,6 @@ def control_card(solvers={}, init_job_status="READY", demo_type="Kibble-Zurek") ), width="auto", ), - dbc.Col( - dbc.Button( - "Reset", - id="btn_reset", - color="danger", - style={"marginTop": "10px"}, - ), - width="auto", - ), ], justify="start", # Aligns buttons to the left align="center", # Vertically centers buttons @@ -220,7 +215,11 @@ def control_card(solvers={}, init_job_status="READY", demo_type="Kibble-Zurek") } -def graphs_card(): +def graphs_card(demo_type="Kibble-Zurek"): + if demo_type == "Kibble-Zurek": + demo_graph = config_kz_graph_kz + else: + demo_graph = config_kz_graph_zne return dbc.Card( [ dbc.Row( @@ -257,7 +256,7 @@ def graphs_card(): "QPU Samples Vs. Kibble-Zurek Prediction", style=graphic_header_style, ), - html.Div([config_kz_graph]), + html.Div([demo_graph]), ] ), ] diff --git a/helpers/layouts_components.py b/helpers/layouts_components.py index b973db9..7742291 100644 --- a/helpers/layouts_components.py +++ b/helpers/layouts_components.py @@ -13,12 +13,14 @@ # limitations under the License. 
import dash_bootstrap_components as dbc -from dash.dcc import Checklist, Dropdown, Link, RadioItems, Slider +from dash.dcc import Checklist, Dropdown, Link, RadioItems, Slider, Input from dash import html, dcc __all__ = [ - "config_anneal_duration", - "config_kz_graph", + "config_anneal_duration_zne", + "config_anneal_duration_kz", + "config_kz_graph_zne", + "config_kz_graph_kz", "config_spins", "config_coupling_strength", "config_qpu_selection", @@ -31,7 +33,7 @@ ring_lengths = [512, 1024, 2048] -config_anneal_duration = dcc.Dropdown( +config_anneal_duration_zne = dcc.Dropdown( id="anneal_duration", options=[ {"label": "5 ns", "value": 5}, @@ -48,7 +50,17 @@ style={"max-width": "95%"}, ) -config_kz_graph = RadioItems( +config_anneal_duration_kz = Input( + id='anneal_duration', + type='number', + min=5, + max=100, + step=1, + value=7, + style={'max-width': '95%'} +) + +config_kz_graph_zne = RadioItems( id="kz_graph_display", options=[ # {"label": "Both", "value": "both", "disabled": False}, @@ -75,6 +87,31 @@ inline=True, # Currently requires above 'inline-block' ) +config_kz_graph_kz = RadioItems( + id='kz_graph_display', + options=[ + { + 'label': 'Both', + 'value': 'both', + 'disabled': False + }, + { + 'label': 'Kink density', + 'value': 'kink_density', + 'disabled': False + }, + { + 'label': 'Schedule', + 'value': 'schedule', + 'disabled': False + }, + ], + value='both', + inputStyle={'margin-right': '10px', 'margin-bottom': '5px'}, + labelStyle={'color': 'rgb(3, 184, 255)', 'font-size': 12, 'display': 'inline-block', 'marginLeft': 20}, + inline=True, # Currently requires above 'inline-block' +) + config_spins = RadioItems( id="spins", options=[ From 106b2bdefdcb4bb6b1b06973da1a6d63703082b1 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 10 Dec 2024 16:42:11 -0800 Subject: [PATCH 105/170] Changed anneal time and graph display config into functions --- helpers/layouts_cards.py | 9 +- helpers/layouts_components.py | 162 +++++++++++++++++----------------- helpers/plots.py | 4 +- 3 files changed, 86 insertions(+), 89 deletions(-) diff --git a/helpers/layouts_cards.py b/helpers/layouts_cards.py index 07694fa..e1f6bbd 100644 --- a/helpers/layouts_cards.py +++ b/helpers/layouts_cards.py @@ -54,7 +54,7 @@ def control_card(solvers={}, init_job_status="READY", demo_type="Kibble-Zurek") of spins undergoing a phase transition, described by the Kibble-Zurek mechanism. 
""", style={'color': 'white', 'fontSize': 14}) - demo_anneal_duration = config_anneal_duration_kz + demo_anneal_duration = get_config_anneal_duration(demo_type) else: demo_title = "Coherent Annealing: Zero-Noise Extrapolation" @@ -76,7 +76,7 @@ def control_card(solvers={}, init_job_status="READY", demo_type="Kibble-Zurek") ], style={"color": "white", "fontSize": 14}, ) - demo_anneal_duration = config_anneal_duration_zne + demo_anneal_duration = get_config_anneal_duration(demo_type) return dbc.Card( @@ -216,10 +216,7 @@ def control_card(solvers={}, init_job_status="READY", demo_type="Kibble-Zurek") def graphs_card(demo_type="Kibble-Zurek"): - if demo_type == "Kibble-Zurek": - demo_graph = config_kz_graph_kz - else: - demo_graph = config_kz_graph_zne + demo_graph = get_config_kz_graph(demo_type) return dbc.Card( [ dbc.Row( diff --git a/helpers/layouts_components.py b/helpers/layouts_components.py index 7742291..9abce87 100644 --- a/helpers/layouts_components.py +++ b/helpers/layouts_components.py @@ -17,10 +17,8 @@ from dash import html, dcc __all__ = [ - "config_anneal_duration_zne", - "config_anneal_duration_kz", - "config_kz_graph_zne", - "config_kz_graph_kz", + "get_config_anneal_duration", + "get_config_kz_graph", "config_spins", "config_coupling_strength", "config_qpu_selection", @@ -33,84 +31,86 @@ ring_lengths = [512, 1024, 2048] -config_anneal_duration_zne = dcc.Dropdown( - id="anneal_duration", - options=[ - {"label": "5 ns", "value": 5}, - {"label": "10 ns", "value": 10}, - {"label": "20 ns", "value": 20}, - {"label": "40 ns", "value": 40}, - {"label": "80 ns", "value": 80}, - {"label": "160 ns", "value": 160}, - {"label": "320 ns", "value": 320}, - {"label": "640 ns", "value": 640}, - {"label": "1280 ns", "value": 1280}, - ], - value=80, # default value - style={"max-width": "95%"}, -) - -config_anneal_duration_kz = Input( - id='anneal_duration', - type='number', - min=5, - max=100, - step=1, - value=7, - style={'max-width': '95%'} -) - -config_kz_graph_zne = RadioItems( - id="kz_graph_display", - options=[ - # {"label": "Both", "value": "both", "disabled": False}, - { - "label": "Kink density vs Anneal time", - "value": "kink_density", - "disabled": False, - }, - # {"label": "Schedule", "value": "schedule", "disabled": False}, - { - "label": "Kink density vs Noise level", - "value": "coupling", - "disabled": False, - }, - ], - value="coupling", - inputStyle={"margin-right": "10px", "margin-bottom": "5px"}, - labelStyle={ - "color": "rgb(3, 184, 255)", - "font-size": 12, - "display": "inline-block", - "marginLeft": 20, - }, - inline=True, # Currently requires above 'inline-block' -) +def get_config_anneal_duration(demo_type): + if demo_type == "Kibble-Zurek": + return dcc.Dropdown( + id="anneal_duration_kz", + options=[ + {"label": "5 ns", "value": 5}, + {"label": "10 ns", "value": 10}, + {"label": "20 ns", "value": 20}, + {"label": "40 ns", "value": 40}, + {"label": "80 ns", "value": 80}, + {"label": "160 ns", "value": 160}, + {"label": "320 ns", "value": 320}, + {"label": "640 ns", "value": 640}, + {"label": "1280 ns", "value": 1280}, + ], + value=80, # default value + style={"max-width": "95%"}, + ) + else: + return dbc.Input( + id="anneal_duration_zne", + type='number', + min=5, + max=100, + step=1, + value=7, + style={'max-width': '95%'} + ) -config_kz_graph_kz = RadioItems( - id='kz_graph_display', - options=[ - { - 'label': 'Both', - 'value': 'both', - 'disabled': False - }, - { - 'label': 'Kink density', - 'value': 'kink_density', - 'disabled': False - }, - { - 'label': 
'Schedule', - 'value': 'schedule', - 'disabled': False - }, - ], - value='both', - inputStyle={'margin-right': '10px', 'margin-bottom': '5px'}, - labelStyle={'color': 'rgb(3, 184, 255)', 'font-size': 12, 'display': 'inline-block', 'marginLeft': 20}, - inline=True, # Currently requires above 'inline-block' -) +def get_config_kz_graph(demo_type): + if demo_type == "Kibble-Zurek": + return RadioItems( + id="kz_graph_display", + options=[ + { + 'label': 'Both', + 'value': 'both', + 'disabled': False + }, + { + 'label': 'Kink density', + 'value': 'kink_density', + 'disabled': False + }, + { + 'label': 'Schedule', + 'value': 'schedule', + 'disabled': False + }, + ], + value='both', + inputStyle={'margin-right': '10px', 'margin-bottom': '5px'}, + labelStyle={'color': 'rgb(3, 184, 255)', 'font-size': 12, 'display': 'inline-block', 'marginLeft': 20}, + inline=True, + ) + else: + return RadioItems( + id="zne_graph_display", + options=[ + { + "label": "Kink density vs Anneal time", + "value": "kink_density", + "disabled": False, + }, + { + "label": "Kink density vs Noise level", + "value": "coupling", + "disabled": False, + }, + ], + value="coupling", + inputStyle={"margin-right": "10px", "margin-bottom": "5px"}, + labelStyle={ + "color": "rgb(3, 184, 255)", + "font-size": 12, + "display": "inline-block", + "marginLeft": 20, + }, + inline=True, + ) config_spins = RadioItems( id="spins", diff --git a/helpers/plots.py b/helpers/plots.py index f2760d9..bef9e41 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -586,7 +586,7 @@ def plot_spin_orientation(num_spins=512, sample=None): def plot_zne_fitted_line( - fig, coupling_data, qpu_name, zne_estimates, kz_graph_display, ta_str + fig, coupling_data, qpu_name, zne_estimates, kz_graph_display_zne, ta_str ): """ Fit a curve to the coupling data and plot the Zero-Noise Extrapolation (ZNE) estimate. 
@@ -657,7 +657,7 @@ def plot_zne_fitted_line( ) ] - if kz_graph_display == "coupling" and y_func_x is not None: + if kz_graph_display_zne == "coupling" and y_func_x is not None: x_axis = "x3" y_axis = "y1" x_zne = 0 From 71239c787e55450bba931de260fde8785e3dbb06 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Wed, 11 Dec 2024 14:29:34 -0800 Subject: [PATCH 106/170] Major fix to let both demo share the same id and callbacks --- app.py | 204 +++++++++++++++++----------------- helpers/layouts_components.py | 10 +- helpers/plots.py | 4 +- 3 files changed, 111 insertions(+), 107 deletions(-) diff --git a/app.py b/app.py index b7f743d..d95e0a3 100644 --- a/app.py +++ b/app.py @@ -63,15 +63,15 @@ if not client: client = "dummy" +def demo_layout(demo_type): -def demo1_layout(): return dbc.Container( [ dbc.Row( [ dbc.Col( # Left: control panel [ - control_card(solvers=qpus, init_job_status=init_job_status, demo_type="Kibble-Zurek"), + control_card(solvers=qpus, init_job_status=init_job_status, demo_type=demo_type), *dbc_modal("modal_solver"), *[ dbc.Tooltip( @@ -87,7 +87,7 @@ def demo1_layout(): style={"minWidth": "30rem"}, ), dbc.Col( # Right: display area - graphs_card(demo_type="Kibble-Zurek"), + graphs_card(demo_type=demo_type), width=8, style={"minWidth": "60rem"}, ), @@ -112,34 +112,6 @@ def demo1_layout(): fluid=True, ) -def demo2_layout(): - return dbc.Container([ - dbc.Row([ - dbc.Col( # Left: control panel - [ - control_card( - solvers=qpus, - init_job_status=init_job_status, - demo_type="Zero-Noise" - ), - *dbc_modal('modal_solver'), - *[dbc.Tooltip( - message, target=target, id=f'tooltip_{target}', style = dict()) - for target, message in tool_tips.items()] - ], - width=4, - style={'minWidth': "30rem"}, - ), - dbc.Col( # Right: display area - graphs_card(demo_type="Zero-Noise"), - width=8, - style={'minWidth': "60rem"}, - ), - ]), - ], - fluid=True, - ) - # Define the Navbar with two tabs navbar = dbc.Navbar( dbc.Container( @@ -192,13 +164,15 @@ def demo2_layout(): def display_page(pathname): # If the user goes to the "/demo1" route if pathname == "/demo1": - return demo1_layout() + + return demo_layout("Kibble-Zurek") # If the user goes to the "/demo2" route elif pathname == "/demo2": - return demo2_layout() - # Default fallback if no path matches: + + return demo_layout("Zero-Noise") else: - return demo1_layout() # or redirect to a "404" or default page + return demo_layout("Kibble-Zurek") + @app.callback( @@ -351,28 +325,32 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): Output("zne_estimates", "data"), # update zne_estimates Output("modal_trigger", "data"), Input("qpu_selection", "value"), - Input("kz_graph_display", "value"), + #Input("zne_graph_display", "value"), + Input("graph_display", "value"), State("coupling_strength", "value"), # previously input Input("quench_schedule_filename", "children"), Input("job_submit_state", "children"), Input("job_id", "children"), + #Input("anneal_duration_zne", "value"), Input("anneal_duration", "value"), Input("spins", "value"), + Input("url", "pathname"), State("embeddings_cached", "data"), State("sample_vs_theory", "figure"), State("coupling_data", "data"), # access previously stored data State("zne_estimates", "data"), # Access ZNE estimates + ) def display_graphics_kink_density( - dummy, qpu_name, - kz_graph_display, + graph_display, J, schedule_filename, job_submit_state, job_id, ta, spins, + pathname, embeddings_cached, figure, coupling_data, @@ -384,84 +362,110 @@ def display_graphics_kink_density( ta_min = 
2 ta_max = 350 - if ( - trigger_id == "qpu_selection" or trigger_id == "spins" - ): - coupling_data = {} - zne_estimates = {} + if pathname == "/demo2": + + if ( + trigger_id == "qpu_selection" or trigger_id == "spins" + ): + coupling_data = {} + zne_estimates = {} + + fig = plot_kink_densities_bg( + graph_display, + [ta_min, ta_max], + J_baseline, + schedule_filename, + coupling_data, + zne_estimates, + ) - fig = plot_kink_densities_bg( - kz_graph_display, - [ta_min, ta_max], - J_baseline, - schedule_filename, - coupling_data, - zne_estimates, - ) + return fig, coupling_data, zne_estimates, False - return fig, coupling_data, zne_estimates, False + if trigger_id in [ + "zne_graph_display", + "coupling_strength", + "quench_schedule_filename", + ]: + + fig = plot_kink_densities_bg( + graph_display, + [ta_min, ta_max], + J_baseline, + schedule_filename, + coupling_data, + zne_estimates, + ) + + return fig, coupling_data, zne_estimates, False + + if trigger_id == "job_submit_state": + + if job_submit_state == "COMPLETED": + + embeddings_cached = embeddings_cached = json_to_dict(embeddings_cached) + + sampleset_unembedded = get_samples( + client, job_id, spins, J, embeddings_cached[spins] + ) + _, kink_density = kink_stats(sampleset_unembedded, J) + + fig = plot_kink_density(graph_display, figure, kink_density, ta, J) - if trigger_id in [ - "kz_graph_display", - "coupling_strength", - "quench_schedule_filename", - ]: + # Calculate kappa + kappa = calc_kappa(J, J_baseline) + # Initialize the list for this anneal_time if not present + ta_str = str(ta) + if ta_str not in coupling_data: + coupling_data[ta_str] = [] + # Append the new data point + coupling_data[ta_str].append( + {"kappa": kappa, "kink_density": kink_density, "coupling_strength": J} + ) + zne_estimates, modal_trigger = plot_zne_fitted_line( + fig, coupling_data, qpu_name, zne_estimates, graph_display, ta_str + ) + + return fig, coupling_data, zne_estimates, modal_trigger + + else: + return dash.no_update + + # use global J value fig = plot_kink_densities_bg( - kz_graph_display, + graph_display, [ta_min, ta_max], J_baseline, schedule_filename, coupling_data, zne_estimates, ) - return fig, coupling_data, zne_estimates, False + else: + if trigger_id in ['kz_graph_display', 'coupling_strength', 'quench_schedule_filename'] : + + + fig = plot_kink_densities_bg(graph_display, [ta_min, ta_max], J_baseline, schedule_filename, coupling_data, zne_estimates) - if trigger_id == "job_submit_state": - - if job_submit_state == "COMPLETED": - - embeddings_cached = embeddings_cached = json_to_dict(embeddings_cached) - - sampleset_unembedded = get_samples( - client, job_id, spins, J, embeddings_cached[spins] - ) - _, kink_density = kink_stats(sampleset_unembedded, J) - - fig = plot_kink_density(kz_graph_display, figure, kink_density, ta, J) - - # Calculate kappa - kappa = calc_kappa(J, J_baseline) - # Initialize the list for this anneal_time if not present - ta_str = str(ta) - if ta_str not in coupling_data: - coupling_data[ta_str] = [] - # Append the new data point - coupling_data[ta_str].append( - {"kappa": kappa, "kink_density": kink_density, "coupling_strength": J} - ) - - zne_estimates, modal_trigger = plot_zne_fitted_line( - fig, coupling_data, qpu_name, zne_estimates, kz_graph_display, ta_str - ) - - return fig, coupling_data, zne_estimates, modal_trigger + return fig, coupling_data, zne_estimates, False + + if trigger_id == 'job_submit_state': - else: - return dash.no_update + if job_submit_state == 'COMPLETED': - # use global J value - fig = 
plot_kink_densities_bg( - kz_graph_display, - [ta_min, ta_max], - J_baseline, - schedule_filename, - coupling_data, - zne_estimates, - ) - return fig, coupling_data, zne_estimates, False + embeddings_cached = embeddings_cached = json_to_dict(embeddings_cached) + sampleset_unembedded = get_samples(client, job_id, spins, J, embeddings_cached[spins]) + _, kink_density = kink_stats(sampleset_unembedded, J) + + fig = plot_kink_density(graph_display, figure, kink_density, ta, J) + return fig, coupling_data, zne_estimates, False + + else: + return dash.no_update + + fig = plot_kink_densities_bg(graph_display, [ta_min, ta_max], J_baseline, schedule_filename, coupling_data, zne_estimates) + return fig, coupling_data, zne_estimates, False @app.callback( Output("spin_orientation", "figure"), diff --git a/helpers/layouts_components.py b/helpers/layouts_components.py index 9abce87..679cb4f 100644 --- a/helpers/layouts_components.py +++ b/helpers/layouts_components.py @@ -32,9 +32,9 @@ ring_lengths = [512, 1024, 2048] def get_config_anneal_duration(demo_type): - if demo_type == "Kibble-Zurek": + if demo_type == "Zero-Noise": return dcc.Dropdown( - id="anneal_duration_kz", + id="anneal_duration", options=[ {"label": "5 ns", "value": 5}, {"label": "10 ns", "value": 10}, @@ -51,7 +51,7 @@ def get_config_anneal_duration(demo_type): ) else: return dbc.Input( - id="anneal_duration_zne", + id="anneal_duration", type='number', min=5, max=100, @@ -63,7 +63,7 @@ def get_config_anneal_duration(demo_type): def get_config_kz_graph(demo_type): if demo_type == "Kibble-Zurek": return RadioItems( - id="kz_graph_display", + id="graph_display", options=[ { 'label': 'Both', @@ -88,7 +88,7 @@ def get_config_kz_graph(demo_type): ) else: return RadioItems( - id="zne_graph_display", + id="graph_display", options=[ { "label": "Kink density vs Anneal time", diff --git a/helpers/plots.py b/helpers/plots.py index bef9e41..63520b3 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -586,7 +586,7 @@ def plot_spin_orientation(num_spins=512, sample=None): def plot_zne_fitted_line( - fig, coupling_data, qpu_name, zne_estimates, kz_graph_display_zne, ta_str + fig, coupling_data, qpu_name, zne_estimates, zne_graph_display, ta_str ): """ Fit a curve to the coupling data and plot the Zero-Noise Extrapolation (ZNE) estimate. 
@@ -657,7 +657,7 @@ def plot_zne_fitted_line( ) ] - if kz_graph_display_zne == "coupling" and y_func_x is not None: + if zne_graph_display == "coupling" and y_func_x is not None: x_axis = "x3" y_axis = "y1" x_zne = 0 From 54b7411cecbb14c23699580b6cd69f83b327ea8d Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 12 Dec 2024 19:11:48 -0800 Subject: [PATCH 107/170] remove plotting zne history in background plots --- helpers/plots.py | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/helpers/plots.py b/helpers/plots.py index 63520b3..e95028d 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -299,20 +299,6 @@ def plot_kink_densities_bg( marker=dict(size=10, color=color, symbol="x"), ) ) - # Plot ZNE estimates - for ta_str, a in zne_estimates.items(): - fig_data.append( - go.Scatter( - x=[0], - y=[a], - mode="markers", - name="ZNE Estimate", - marker=dict(size=12, color="purple", symbol="diamond"), - showlegend=False, - xaxis="x3", - yaxis="y1", - ) - ) else: # Display both plots together From 8caef9f047d45c8a1e8cda5a7358928ebc1d3043 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 12 Dec 2024 19:16:23 -0800 Subject: [PATCH 108/170] Change mock sampler name --- app.py | 14 +++++++------- helpers/layouts_components.py | 4 ++-- helpers/plots.py | 2 +- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/app.py b/app.py index d95e0a3..857883e 100644 --- a/app.py +++ b/app.py @@ -56,7 +56,7 @@ client = None init_job_status = "NO SOLVER" if os.getenv("ZNE") == "YES": - qpus["mock_dwave_solver"] = MockKibbleZurekSampler( + qpus["Diffusion [Classical]"] = MockKibbleZurekSampler( topology_type="pegasus", topology_shape=[16] ) # Change sampler to mock init_job_status = "READY" @@ -277,7 +277,7 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): file for file in os.listdir("helpers") if ".json" in file and "emb_" in file ]: - if qpu_name == "mock_dwave_solver": + if qpu_name == "Diffusion [Classical]": _qpu_name = "Advantage_system6.4" else: _qpu_name = qpu_name @@ -527,15 +527,15 @@ def submit_job(job_submit_time, qpu_name, spins, J, ta_ns, embeddings_cached): embedding = embeddings_cached[spins] annealing_time = calc_lambda(J, J_baseline) * (ta_ns / 1000) - if qpu_name == "mock_dwave_solver": + if qpu_name == "Diffusion [Classical]": bqm_embedded = embed_bqm( bqm, embedding, - qpus["mock_dwave_solver"].adjacency, + qpus["Diffusion [Classical]"].adjacency, ) - sampleset = qpus["mock_dwave_solver"].sample( + sampleset = qpus["Diffusion [Classical]"].sample( bqm_embedded, annealing_time=annealing_time ) return json.dumps(sampleset.to_serializable()) @@ -607,10 +607,10 @@ def simulate( if trigger_id == "btn_simulate": - if spins in cached_embedding_lengths or qpu_name == "mock_dwave_solver": + if spins in cached_embedding_lengths or qpu_name == "Diffusion [Classical]": submit_time = datetime.datetime.now().strftime("%c") - if qpu_name == "mock_dwave_solver": # Hack to fix switch from SA to QPU + if qpu_name == "Diffusion [Classical]": # Hack to fix switch from SA to QPU submit_time = "SA" job_submit_state = "SUBMITTED" embedding = dash.no_update diff --git a/helpers/layouts_components.py b/helpers/layouts_components.py index 679cb4f..68ae328 100644 --- a/helpers/layouts_components.py +++ b/helpers/layouts_components.py @@ -172,8 +172,8 @@ def get_config_kz_graph(demo_type): ) -def config_qpu_selection(solvers, default="mock_dwave_solver"): - default = "mock_dwave_solver" if "mock_dwave_solver" in solvers else None +def 
config_qpu_selection(solvers, default="Diffusion [Classical]"): + default = "Diffusion [Classical]" if "Diffusion [Classical]" in solvers else None return Dropdown( id="qpu_selection", options=[{"label": qpu_name, "value": qpu_name} for qpu_name in solvers], diff --git a/helpers/plots.py b/helpers/plots.py index e95028d..c96e481 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -612,7 +612,7 @@ def plot_zne_fitted_line( # Ensure there are enough unique x values for fitting if len(np.unique(x)) > 1: # Fit a 1st degree polynomial (linear fit) - if qpu_name == "mock_dwave_solver": + if qpu_name == "Diffusion [Classical]": # Fancy non-linear function y_func_x = fitted_function(x, y, method="mixture_of_exponentials") else: From 190834f7dee0ac763f972477977969dd8e8a2b50 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 13 Dec 2024 11:24:45 -0800 Subject: [PATCH 109/170] Improved branching on kink density background plots --- app.py | 36 ++++++++----- helpers/plots.py | 137 ++++++++++++++++++++++++++++++----------------- 2 files changed, 112 insertions(+), 61 deletions(-) diff --git a/app.py b/app.py index 857883e..c3e9bc6 100644 --- a/app.py +++ b/app.py @@ -98,6 +98,7 @@ def demo_layout(demo_type): # store zero noise extrapolation dcc.Store(id="zne_estimates", data={}), dcc.Store(id="modal_trigger", data=False), + dcc.Store(id="kz_data", data={}), dbc.Modal( [ dbc.ModalHeader(dbc.ModalTitle("Error")), @@ -324,6 +325,7 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): Output("coupling_data", "data"), # store data using dcc Output("zne_estimates", "data"), # update zne_estimates Output("modal_trigger", "data"), + Output("kz_data", "data"), Input("qpu_selection", "value"), #Input("zne_graph_display", "value"), Input("graph_display", "value"), @@ -339,7 +341,7 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): State("sample_vs_theory", "figure"), State("coupling_data", "data"), # access previously stored data State("zne_estimates", "data"), # Access ZNE estimates - + State("kz_data", "data") # get kibble zurek data point ) def display_graphics_kink_density( qpu_name, @@ -355,6 +357,7 @@ def display_graphics_kink_density( figure, coupling_data, zne_estimates, + kz_data, ): """Generate graphics for kink density based on theory and QPU samples.""" @@ -377,9 +380,10 @@ def display_graphics_kink_density( schedule_filename, coupling_data, zne_estimates, + url="Demo2", ) - return fig, coupling_data, zne_estimates, False + return fig, coupling_data, zne_estimates, False, kz_data if trigger_id in [ "zne_graph_display", @@ -394,9 +398,10 @@ def display_graphics_kink_density( schedule_filename, coupling_data, zne_estimates, + url="Demo2", ) - return fig, coupling_data, zne_estimates, False + return fig, coupling_data, zne_estimates, False, kz_data if trigger_id == "job_submit_state": @@ -426,7 +431,7 @@ def display_graphics_kink_density( fig, coupling_data, qpu_name, zne_estimates, graph_display, ta_str ) - return fig, coupling_data, zne_estimates, modal_trigger + return fig, coupling_data, zne_estimates, modal_trigger, kz_data else: return dash.no_update @@ -439,15 +444,16 @@ def display_graphics_kink_density( schedule_filename, coupling_data, zne_estimates, + url='Demo2' ) - return fig, coupling_data, zne_estimates, False + return fig, coupling_data, zne_estimates, False, kz_data else: if trigger_id in ['kz_graph_display', 'coupling_strength', 'quench_schedule_filename'] : - - fig = plot_kink_densities_bg(graph_display, [ta_min, ta_max], 
J_baseline, schedule_filename, coupling_data, zne_estimates) + kz_data = {} + fig = plot_kink_densities_bg(graph_display, [ta_min, ta_max], J_baseline, schedule_filename, coupling_data, zne_estimates, url="Demo1") - return fig, coupling_data, zne_estimates, False + return fig, coupling_data, zne_estimates, False, kz_data if trigger_id == 'job_submit_state': @@ -458,14 +464,20 @@ def display_graphics_kink_density( sampleset_unembedded = get_samples(client, job_id, spins, J, embeddings_cached[spins]) _, kink_density = kink_stats(sampleset_unembedded, J) - fig = plot_kink_density(graph_display, figure, kink_density, ta, J) - return fig, coupling_data, zne_estimates, False + if J not in kz_data: + kz_data[J] = [] + # Append the new data point + kz_data[J].append( + {"kink_density": kink_density, "ta_ns": ta} + ) + fig = plot_kink_density(graph_display, figure, kink_density, ta, J, url="Demo1") + return fig, coupling_data, zne_estimates, False, kz_data else: return dash.no_update - fig = plot_kink_densities_bg(graph_display, [ta_min, ta_max], J_baseline, schedule_filename, coupling_data, zne_estimates) - return fig, coupling_data, zne_estimates, False + fig = plot_kink_densities_bg(graph_display, [ta_min, ta_max], J_baseline, schedule_filename, coupling_data, zne_estimates, kz_data, url="Demo1") + return fig, coupling_data, zne_estimates, False, kz_data @app.callback( Output("spin_orientation", "figure"), diff --git a/helpers/plots.py b/helpers/plots.py index c96e481..2d5f5d2 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -59,7 +59,7 @@ def plot_kink_densities_bg( - display, time_range, J_base, schedule_name, coupling_data, zne_estimates + display, time_range, J_base, schedule_name, coupling_data, zne_estimates, kz_data=None, url=None ): """ Plot the background of theoretical kink density and QPU energy scales. 
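One detail worth keeping in mind with the kz_data store used here: dcc.Store round-trips its contents through JSON, so float dictionary keys such as the coupling strength J come back as strings and tuples come back as lists; a later patch in this series switches kz_data to a fixed string key for exactly this kind of reason. A small stand-alone illustration using only the json module, with placeholder values:

    import json

    kz_data = {}
    J, kink_density, ta = -1.8, 0.021, 7          # placeholder values, not from a real run

    kz_data.setdefault(J, []).append({"kink_density": kink_density, "ta_ns": ta})

    # dcc.Store serializes its contents to JSON between callbacks; simulate one round trip.
    round_tripped = json.loads(json.dumps(kz_data))

    print(list(kz_data.keys()))          # [-1.8]   (float key going in)
    print(list(round_tripped.keys()))    # ['-1.8'] (string key coming back out)
    print(J in round_tripped)            # False: a check like `if J not in kz_data` misses it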
@@ -194,61 +194,65 @@ def plot_kink_densities_bg( ) x_axis3 = dict(title="Noise Level (-1.8/J)", type="linear", range=[0, 3]) + if display == "kink_density": fig_layout = go.Layout( xaxis=x_axis1, yaxis=y_axis1, ) - _coupling_label = { - -1.8: False, - -1.6: False, - -1.4: False, - -1.2: False, - -1: False, - -0.8: False, - -0.6: False, - } - fig_data = [predicted_plus, predicted_minus] - for ta_str, data_points in coupling_data.items(): - for point in data_points: - _J = point["coupling_strength"] - color = coupling_color_theme[_J] - - if not _coupling_label[_J]: - legend = True - _coupling_label[_J] = True - else: - legend = False - - kink_density = point["kink_density"] + if url == 'Demo2': + _coupling_label = { + -1.8: False, + -1.6: False, + -1.4: False, + -1.2: False, + -1: False, + -0.8: False, + -0.6: False, + } + fig_data = [predicted_plus, predicted_minus] + for ta_str, data_points in coupling_data.items(): + for point in data_points: + _J = point["coupling_strength"] + color = coupling_color_theme[_J] + + if not _coupling_label[_J]: + legend = True + _coupling_label[_J] = True + else: + legend = False + + kink_density = point["kink_density"] - fig_data.append( - go.Scatter( - x=[ta_str], - y=[kink_density], - xaxis="x1", - yaxis="y1", - mode="markers", - name=f"Coupling Strength: {_J}", - showlegend=legend, - marker=dict(size=10, color=color, symbol="x"), + fig_data.append( + go.Scatter( + x=[ta_str], + y=[kink_density], + xaxis="x1", + yaxis="y1", + mode="markers", + name=f"Coupling Strength: {_J}", + showlegend=legend, + marker=dict(size=10, color=color, symbol="x"), + ) ) - ) - # Plot ZNE estimates - for ta_str, a in zne_estimates.items(): - fig_data.append( - go.Scatter( - x=[ta_str], - y=[a], - mode="markers", - name="ZNE Estimate", - marker=dict(size=12, color="purple", symbol="diamond"), - showlegend=False, - xaxis="x1", - yaxis="y1", + # Plot ZNE estimates + for ta_str, a in zne_estimates.items(): + fig_data.append( + go.Scatter( + x=[ta_str], + y=[a], + mode="markers", + name="ZNE Estimate", + marker=dict(size=12, color="purple", symbol="diamond"), + showlegend=False, + xaxis="x1", + yaxis="y1", + ) ) - ) - + else: + fig_data = [predicted_plus, predicted_minus] + elif display == "schedule": fig_layout = go.Layout( @@ -373,10 +377,28 @@ def plot_kink_densities_bg( arrowhead=5, ) + if kz_data is not None: + for J, pair in kz_data.items(): + for p in pair: + fig_data.append( + go.Scatter( + x=[p["kink_density"]], + y=[p["ta_ns"]], + xaxis="x1", + yaxis="y1", + mode="markers", + marker=dict( + size=10, + color="black", + symbol="x", + ) + ) + ) + return fig -def plot_kink_density(display, fig_dict, kink_density, anneal_time, J): +def plot_kink_density(display, fig_dict, kink_density, anneal_time, J, url=None): """ Add a kink density marker from QPU samples to an existing plot. 
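For anyone following the axis bookkeeping in plot_kink_densities_bg: traces are routed to named axes through their xaxis/yaxis properties ("x1" or "x3" against a shared "y1"), and the layout defines those axes with their own titles, types, ranges, and domains. A stripped-down sketch of that mechanism with placeholder data points, not the demo's figure-building code:

    import plotly.graph_objects as go

    layout = go.Layout(
        xaxis=dict(title="Quench Duration [ns]", type="log", domain=[0.0, 0.45]),
        xaxis3=dict(title="Noise Level (-1.8/J)", type="linear", range=[-1, 4],
                    domain=[0.55, 1.0], anchor="y"),
        yaxis=dict(title="Kink Density", type="log"),
    )

    fig = go.Figure(
        data=[
            # Left panel: kink density against anneal time (axes "x1"/"y1").
            go.Scatter(x=[5, 20, 80], y=[0.04, 0.02, 0.01], mode="markers",
                       xaxis="x1", yaxis="y1", name="vs anneal time"),
            # Right panel: same y axis, plotted against the noise level (axis "x3").
            go.Scatter(x=[1.0, 1.5, 3.0], y=[0.02, 0.03, 0.05], mode="markers",
                       xaxis="x3", yaxis="y1", name="vs noise level"),
        ],
        layout=layout,
    )
    fig.write_html("axes_sketch.html")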
@@ -442,6 +464,23 @@ def plot_kink_density(display, fig_dict, kink_density, anneal_time, J): else: legend = False + if url == "Demo1": + fig.add_trace( + go.Scatter( + x=[anneal_time], + y=[kink_density], + xaxis='x1', + yaxis='y1', + showlegend=False, + marker=dict(size=10, + color='black', + symbol='x', + ) + ) + ) + + return fig + fig.add_trace( go.Scatter( x=[anneal_time], From 620bdf0b57bc1d93d58c4aac8e7078cc7de6ebbe Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 13 Dec 2024 11:40:30 -0800 Subject: [PATCH 110/170] Change demo tab name --- app.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/app.py b/app.py index c3e9bc6..b758efa 100644 --- a/app.py +++ b/app.py @@ -132,8 +132,8 @@ def demo_layout(demo_type): # Navbar Tabs dbc.Nav( [ - dbc.NavItem(dbc.NavLink("Demo 1", href="/demo1", active="exact")), - dbc.NavItem(dbc.NavLink("Demo 2", href="/demo2", active="exact")), + dbc.NavItem(dbc.NavLink("Kibble-Zurek Mechanism", href="/demo1", active="exact")), + dbc.NavItem(dbc.NavLink("Kibble-Zurek Mechanism with Noise Mitigation", href="/demo2", active="exact")), ], pills=True, ), From ccf53792f8d416e4ee0bb31a3084ca0edf63298f Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 13 Dec 2024 11:53:41 -0800 Subject: [PATCH 111/170] Adjust the styling of coupling strength slider --- helpers/layouts_components.py | 48 +++++++++++++++++------------------ 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/helpers/layouts_components.py b/helpers/layouts_components.py index 68ae328..81fc220 100644 --- a/helpers/layouts_components.py +++ b/helpers/layouts_components.py @@ -129,29 +129,29 @@ def get_config_kz_graph(demo_type): inline=True, # Currently requires above 'inline-block' ) -j_marks = { - round(0.1 * val, 1): ( - {"label": f"{round(0.1*val, 1)}", "style": {"color": "blue"}} - if round(0.1 * val, 0) != 0.1 * val - else {"label": f"{round(0.1*val)}", "style": {"color": "blue"}} - ) - for val in range(-18, 0, 2) -} -j_marks.update( - { - round(0.1 * val, 1): ( - {"label": f"{round(0.1*val, 1)}", "style": {"color": "red"}} - if round(0.1 * val, 0) != 0.1 * val - else {"label": f"{round(0.1*val)}", "style": {"color": "red"}} - ) - for val in range(2, 10, 2) - } -) +# j_marks = { +# round(0.1 * val, 1): ( +# {"label": f"{round(0.1*val, 1)}", "style": {"color": "blue"}} +# if round(0.1 * val, 0) != 0.1 * val +# else {"label": f"{round(0.1*val)}", "style": {"color": "blue"}} +# ) +# for val in range(-18, 0, 2) +# } +# j_marks.update( +# { +# round(0.1 * val, 1): ( +# {"label": f"{round(0.1*val, 1)}", "style": {"color": "red"}} +# if round(0.1 * val, 0) != 0.1 * val +# else {"label": f"{round(0.1*val)}", "style": {"color": "red"}} +# ) +# for val in range(2, 10, 2) +# } +# ) # Dash Slider has some issue with int values having a zero after the decimal point -j_marks[-2] = {"label": "-2", "style": {"color": "blue"}} -del j_marks[-1.0] -j_marks[-1] = {"label": "-1", "style": {"color": "blue"}} -j_marks[1] = {"label": "1", "style": {"color": "red"}} +# j_marks[-2] = {"label": "-2", "style": {"color": "blue"}} +# del j_marks[-1.0] +# j_marks[-1] = {"label": "-1", "style": {"color": "blue"}} +# j_marks[1] = {"label": "1", "style": {"color": "red"}} config_coupling_strength = dbc.Row( [ dbc.Col( @@ -160,10 +160,10 @@ def get_config_kz_graph(demo_type): Slider( id="coupling_strength", value=-1.8, - marks=j_marks, - step=None, + step=0.2, min=-1.8, max=-0.6, + tooltip={"placement": "bottom", "always_visible": True} ) ] ), From fde444dd7c523147bca49fa54d75a9abfdce6853 
Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 13 Dec 2024 12:01:33 -0800 Subject: [PATCH 112/170] Added branching in coupling strength slider for demos --- helpers/layouts_components.py | 42 +++++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 17 deletions(-) diff --git a/helpers/layouts_components.py b/helpers/layouts_components.py index 81fc220..0561d94 100644 --- a/helpers/layouts_components.py +++ b/helpers/layouts_components.py @@ -152,24 +152,32 @@ def get_config_kz_graph(demo_type): # del j_marks[-1.0] # j_marks[-1] = {"label": "-1", "style": {"color": "blue"}} # j_marks[1] = {"label": "1", "style": {"color": "red"}} -config_coupling_strength = dbc.Row( - [ - dbc.Col( - html.Div( - [ - Slider( - id="coupling_strength", - value=-1.8, - step=0.2, - min=-1.8, - max=-0.6, - tooltip={"placement": "bottom", "always_visible": True} - ) - ] +def get_config_coupling_strength(demo_type): + max_J = -2 + min_J = 1 + default = -1.4 + if demo_type == "Zero-Noise": + max_J = -0.6 + min_J = -1.8 + default = -1.8 + return dbc.Row( + [ + dbc.Col( + html.Div( + [ + Slider( + id="coupling_strength", + value=default, + step=0.2, + min=min_J, + max=-max_J, + tooltip={"placement": "bottom", "always_visible": True} + ) + ] + ), ), - ), - ] -) + ] + ) def config_qpu_selection(solvers, default="Diffusion [Classical]"): From 017f4487d629b8432b8dd61d69cd960d16c62690 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 13 Dec 2024 12:02:47 -0800 Subject: [PATCH 113/170] Call coupling strength function with demo type --- helpers/layouts_cards.py | 2 +- helpers/layouts_components.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/helpers/layouts_cards.py b/helpers/layouts_cards.py index e1f6bbd..0ff36d5 100644 --- a/helpers/layouts_cards.py +++ b/helpers/layouts_cards.py @@ -96,7 +96,7 @@ def control_card(solvers={}, init_job_status="READY", demo_type="Kibble-Zurek") html.H5( "Coupling Strength (J)", style=control_header_style ), - html.Div([config_coupling_strength]), + html.Div([get_config_coupling_strength(demo_type)]), html.H5("Quench Duration [ns]", style=control_header_style), html.Div([demo_anneal_duration]), html.H5("QPU", style=control_header_style), diff --git a/helpers/layouts_components.py b/helpers/layouts_components.py index 0561d94..bc95daa 100644 --- a/helpers/layouts_components.py +++ b/helpers/layouts_components.py @@ -20,7 +20,7 @@ "get_config_anneal_duration", "get_config_kz_graph", "config_spins", - "config_coupling_strength", + "get_config_coupling_strength", "config_qpu_selection", "dbc_modal", "embeddings", From e4f42168c972efc6726ff7713b9c410d027dec8e Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 13 Dec 2024 12:10:03 -0800 Subject: [PATCH 114/170] Final adjustment to coupling strength slider --- helpers/layouts_components.py | 96 +++++++++++++++++++---------------- 1 file changed, 52 insertions(+), 44 deletions(-) diff --git a/helpers/layouts_components.py b/helpers/layouts_components.py index bc95daa..aab6b05 100644 --- a/helpers/layouts_components.py +++ b/helpers/layouts_components.py @@ -129,55 +129,63 @@ def get_config_kz_graph(demo_type): inline=True, # Currently requires above 'inline-block' ) -# j_marks = { -# round(0.1 * val, 1): ( -# {"label": f"{round(0.1*val, 1)}", "style": {"color": "blue"}} -# if round(0.1 * val, 0) != 0.1 * val -# else {"label": f"{round(0.1*val)}", "style": {"color": "blue"}} -# ) -# for val in range(-18, 0, 2) -# } -# j_marks.update( -# { -# round(0.1 * val, 1): ( -# {"label": f"{round(0.1*val, 1)}", 
"style": {"color": "red"}} -# if round(0.1 * val, 0) != 0.1 * val -# else {"label": f"{round(0.1*val)}", "style": {"color": "red"}} -# ) -# for val in range(2, 10, 2) -# } -# ) -# Dash Slider has some issue with int values having a zero after the decimal point -# j_marks[-2] = {"label": "-2", "style": {"color": "blue"}} -# del j_marks[-1.0] -# j_marks[-1] = {"label": "-1", "style": {"color": "blue"}} -# j_marks[1] = {"label": "1", "style": {"color": "red"}} +j_marks = { + round(0.1 * val, 1): ( + {"label": f"{round(0.1*val, 1)}", "style": {"color": "white"}} + if round(0.1 * val, 0) != 0.1 * val + else {"label": f"{round(0.1*val)}", "style": {"color": "white"}} + ) + for val in range(-18, 0, 2) +} +j_marks.update( + { + round(0.1 * val, 1): ( + {"label": f"{round(0.1*val, 1)}", "style": {"color": "white"}} + if round(0.1 * val, 0) != 0.1 * val + else {"label": f"{round(0.1*val)}", "style": {"color": "white"}} + ) + for val in range(2, 10, 2) + } +) +#Dash Slider has some issue with int values having a zero after the decimal point +j_marks[-2] = {"label": "-2", "style": {"color": "white"}} +del j_marks[-1.0] +j_marks[-1] = {"label": "-1", "style": {"color": "white"}} +j_marks[1] = {"label": "1", "style": {"color": "white"}} def get_config_coupling_strength(demo_type): - max_J = -2 - min_J = 1 - default = -1.4 if demo_type == "Zero-Noise": - max_J = -0.6 - min_J = -1.8 - default = -1.8 - return dbc.Row( - [ - dbc.Col( - html.Div( - [ + return dbc.Row( + [ + dbc.Col( + html.Div( + [ + Slider( + id="coupling_strength", + value=-1.8, + marks=j_marks, + min=-1.8, + max=-0.6, + step=None, + tooltip={"placement": "bottom", "always_visible": True} + ) + ] + ), + ), + ] + ) + return dbc.Row([ + dbc.Col( + html.Div([ Slider( - id="coupling_strength", - value=default, - step=0.2, - min=min_J, - max=-max_J, + id='coupling_strength', + value=-1.4, + marks=j_marks, + step=None, tooltip={"placement": "bottom", "always_visible": True} ) - ] - ), - ), - ] - ) + ]), + ), + ]) def config_qpu_selection(solvers, default="Diffusion [Classical]"): From 741b12bbdbcd754c769e2acc3ae56731b8e9fdd3 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 13 Dec 2024 13:05:49 -0800 Subject: [PATCH 115/170] Added persistent storage on kz_data to be displayed on background plot --- app.py | 11 ++++---- helpers/plots.py | 72 +++++++++++++++++++++++------------------------- 2 files changed, 39 insertions(+), 44 deletions(-) diff --git a/app.py b/app.py index b758efa..90ac41d 100644 --- a/app.py +++ b/app.py @@ -450,8 +450,8 @@ def display_graphics_kink_density( else: if trigger_id in ['kz_graph_display', 'coupling_strength', 'quench_schedule_filename'] : - kz_data = {} - fig = plot_kink_densities_bg(graph_display, [ta_min, ta_max], J_baseline, schedule_filename, coupling_data, zne_estimates, url="Demo1") + kz_data = {"k":[]} + fig = plot_kink_densities_bg(graph_display, [ta_min, ta_max], J_baseline, schedule_filename, coupling_data, zne_estimates, kz_data=kz_data, url="Demo1") return fig, coupling_data, zne_estimates, False, kz_data @@ -464,11 +464,10 @@ def display_graphics_kink_density( sampleset_unembedded = get_samples(client, job_id, spins, J, embeddings_cached[spins]) _, kink_density = kink_stats(sampleset_unembedded, J) - if J not in kz_data: - kz_data[J] = [] + # Append the new data point - kz_data[J].append( - {"kink_density": kink_density, "ta_ns": ta} + kz_data["k"].append( + (kink_density, ta) ) fig = plot_kink_density(graph_display, figure, kink_density, ta, J, url="Demo1") return fig, coupling_data, zne_estimates, 
False, kz_data diff --git a/helpers/plots.py b/helpers/plots.py index 2d5f5d2..e7e6e33 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -317,32 +317,46 @@ def plot_kink_densities_bg( ) fig_data = [predicted_plus, predicted_minus, energy_transverse, energy_problem] - for ta_str, data_points in coupling_data.items(): - for point in data_points: - color = "black" - kink_density = point["kink_density"] - fig_data.append( - go.Scatter( - x=[ta_str], - y=[kink_density], - xaxis="x1", - yaxis="y1", - showlegend=False, - marker=dict(size=10, color=color, symbol="x"), - ) - ) + # for ta_str, data_points in coupling_data.items(): + # for point in data_points: + # color = "black" + # kink_density = point["kink_density"] + # fig_data.append( + # go.Scatter( + # x=[ta_str], + # y=[kink_density], + # xaxis="x1", + # yaxis="y1", + # showlegend=False, + # marker=dict(size=10, color=color, symbol="x"), + # ) + # ) # Plot ZNE estimates - for ta_str, a in zne_estimates.items(): + # for ta_str, a in zne_estimates.items(): + # fig_data.append( + # go.Scatter( + # x=[ta_str], + # y=[a], + # mode="markers", + # name="ZNE Estimate", + # marker=dict(size=12, color="purple", symbol="diamond"), + # showlegend=False, + # xaxis="x1", + # yaxis="y1", + # ) + # ) + # Add previously computed kz_data points + if kz_data is not None and kz_data["k"] is not None: + for pair in kz_data["k"]: fig_data.append( go.Scatter( - x=[ta_str], - y=[a], + x=[pair[1]], + y=[pair[0]], mode="markers", - name="ZNE Estimate", - marker=dict(size=12, color="purple", symbol="diamond"), - showlegend=False, + marker=dict(size=10, color="black", symbol="x"), xaxis="x1", yaxis="y1", + showlegend=False ) ) fig = go.Figure(data=fig_data, layout=fig_layout) @@ -377,24 +391,6 @@ def plot_kink_densities_bg( arrowhead=5, ) - if kz_data is not None: - for J, pair in kz_data.items(): - for p in pair: - fig_data.append( - go.Scatter( - x=[p["kink_density"]], - y=[p["ta_ns"]], - xaxis="x1", - yaxis="y1", - mode="markers", - marker=dict( - size=10, - color="black", - symbol="x", - ) - ) - ) - return fig From 1e1f79b7e703340fc343a09b1d73c63e88404ca8 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 13 Dec 2024 13:16:06 -0800 Subject: [PATCH 116/170] Persistent plotting background data in demo1, only refresh when user change spins or qpu selections --- app.py | 2 +- helpers/plots.py | 36 ++++++++++++++++++------------------ 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/app.py b/app.py index 90ac41d..4d2d06a 100644 --- a/app.py +++ b/app.py @@ -448,7 +448,7 @@ def display_graphics_kink_density( ) return fig, coupling_data, zne_estimates, False, kz_data else: - if trigger_id in ['kz_graph_display', 'coupling_strength', 'quench_schedule_filename'] : + if trigger_id == "qpu_selection" or trigger_id == "spins": kz_data = {"k":[]} fig = plot_kink_densities_bg(graph_display, [ta_min, ta_max], J_baseline, schedule_filename, coupling_data, zne_estimates, kz_data=kz_data, url="Demo1") diff --git a/helpers/plots.py b/helpers/plots.py index e7e6e33..a1e1eab 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -346,7 +346,7 @@ def plot_kink_densities_bg( # ) # ) # Add previously computed kz_data points - if kz_data is not None and kz_data["k"] is not None: + if "k" in kz_data: for pair in kz_data["k"]: fig_data.append( go.Scatter( @@ -424,6 +424,23 @@ def plot_kink_density(display, fig_dict, kink_density, anneal_time, J, url=None) fig = go.Figure(fig_dict) + if url == "Demo1": + fig.add_trace( + go.Scatter( + x=[anneal_time], + 
y=[kink_density], + xaxis='x1', + yaxis='y1', + showlegend=False, + marker=dict(size=10, + color='black', + symbol='x', + ) + ) + ) + + return fig + ta_value = float(anneal_time) if display == "coupling": @@ -459,23 +476,6 @@ def plot_kink_density(display, fig_dict, kink_density, anneal_time, J, url=None) coupling_label[J] = True else: legend = False - - if url == "Demo1": - fig.add_trace( - go.Scatter( - x=[anneal_time], - y=[kink_density], - xaxis='x1', - yaxis='y1', - showlegend=False, - marker=dict(size=10, - color='black', - symbol='x', - ) - ) - ) - - return fig fig.add_trace( go.Scatter( From fdd4a3c61f00fa7edbaa7d8359921a46416726da Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 13 Dec 2024 13:19:18 -0800 Subject: [PATCH 117/170] Added persistent data point plotting in kin_density background plot for demo1 --- helpers/plots.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/helpers/plots.py b/helpers/plots.py index a1e1eab..395b9c5 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -252,6 +252,19 @@ def plot_kink_densities_bg( ) else: fig_data = [predicted_plus, predicted_minus] + if "k" in kz_data: + for pair in kz_data["k"]: + fig_data.append( + go.Scatter( + x=[pair[1]], + y=[pair[0]], + mode="markers", + marker=dict(size=10, color="black", symbol="x"), + xaxis="x1", + yaxis="y1", + showlegend=False + ) + ) elif display == "schedule": From 9858d8bbb7a11b9e1b98b3bc68253dac4f6af9f1 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 13 Dec 2024 14:48:38 -0800 Subject: [PATCH 118/170] Minor fix on kz_data dictionary --- helpers/plots.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helpers/plots.py b/helpers/plots.py index 395b9c5..1f5c0f4 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -359,7 +359,7 @@ def plot_kink_densities_bg( # ) # ) # Add previously computed kz_data points - if "k" in kz_data: + if kz_data is not None and "k" in kz_data: for pair in kz_data["k"]: fig_data.append( go.Scatter( From 5ba9253412c7a512e92fc2f277f5b788a76be2eb Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 13 Dec 2024 15:08:53 -0800 Subject: [PATCH 119/170] Branch tooltips based on the different demo used, default to Kibble-Zurek --- app.py | 12 ++++++++---- helpers/tooltips.py | 37 ++++++++++++++++++++++++++++++++++--- 2 files changed, 42 insertions(+), 7 deletions(-) diff --git a/app.py b/app.py index 4d2d06a..453c433 100644 --- a/app.py +++ b/app.py @@ -32,7 +32,7 @@ from helpers.layouts_components import * from helpers.plots import * from helpers.qa import * -from helpers.tooltips import tool_tips +from helpers.tooltips import tool_tips_demo1, tool_tips_demo2 import plotly.graph_objects as go @@ -63,8 +63,14 @@ if not client: client = "dummy" +tool_tips = tool_tips_demo1 def demo_layout(demo_type): + if demo_type == "Kibble-Zurek": + tool_tips = tool_tips_demo1 + else: + tool_tips = tool_tips_demo2 + return dbc.Container( [ dbc.Row( @@ -739,7 +745,7 @@ def activate_tooltips(tooltips_show): trigger = dash.callback_context.triggered trigger_id = trigger[0]["prop_id"].split(".")[0] - + if trigger_id == "tooltips_show": if tooltips_show == "off": return ( @@ -752,7 +758,6 @@ def activate_tooltips(tooltips_show): dict(display="none"), dict(display="none"), dict(display="none"), - dict(display="none"), ) return ( @@ -765,7 +770,6 @@ def activate_tooltips(tooltips_show): dict(), dict(), dict(), - dict(), ) diff --git a/helpers/tooltips.py b/helpers/tooltips.py index 66df7f5..920ce0a 100644 --- a/helpers/tooltips.py +++ b/helpers/tooltips.py 
@@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -tool_tips = { +tool_tips_demo2 = { "anneal_duration": f"""Duration of the quantum anneal. Range of 5 to 320 nanoseconds.""", - "kz_graph_display": f"""Plot selection: Defects vs anneal duration or defects vs noise level""", + "graph_display": f"""Plot selection: Defects vs anneal duration or defects vs noise level""", "spins": f"""Number of spins in the 1D ring.""", "coupling_strength": f"""Coupling strength, J, between spins in the ferromagnetic ring. Range of -1.8 to -0.6. @@ -33,5 +33,36 @@ https://docs.dwavesys.com/docs/latest/doc_physical_properties.html """, "job_submit_state": f"""Status of the last submission to the quantum computer (or initial state).""", - "btn_reset": f"""Clear all existing data stored for the current run and reset all plots.""", } + +tool_tips_demo1 = { + "anneal_duration": +f"""Duration of the quantum anneal. Range of 5 to 100 nanoseconds.""", + "graph_display": +f"""Plot selection: Kibble-Zurek prediction and/or QPU energies (either separate or combined).""", + "spins": +f"""Number of spins in the 1D ring.""", + "coupling_strength": +f"""Coupling strength, J, between spins in the ring. +Range of -2 (ferromagnetic) to +1 (anti-ferromagnetic). +""", + "qpu_selection": +f"""Selection from quantum computers available to your account/project token.""", + "embedding_is_cached": +f"""Whether or not a minor-embedding is cached for the selected QPU, for each +of the available number of spins. If not available, an attempt is made to find +an embedding the first time you submit a problem. +""", + "btn_simulate": +f"""Click to (minor-embed if a cached embedding is unavailable) and +submit the problem to your selected QPU. +""", + "quench_schedule_filename": +f"""CSV file with the fast-anneal schedule for the selected quantum computer. +If none exists, uses one from a different quantum computer (expect inaccuracies). 
+You can download schedules from +https://docs.dwavesys.com/docs/latest/doc_physical_properties.html +""", + "job_submit_state": +f"""Status of the last submission to the quantum computer (or initial state).""", +} \ No newline at end of file From 5f610f40965c06ce9cc7415bc39787ff7d10a672 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 13 Dec 2024 15:19:53 -0800 Subject: [PATCH 120/170] Kibble-Zurek demo change J refresh plots and new theoretical predictions are calculated using J --- app.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/app.py b/app.py index 453c433..18b3c53 100644 --- a/app.py +++ b/app.py @@ -335,7 +335,7 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): Input("qpu_selection", "value"), #Input("zne_graph_display", "value"), Input("graph_display", "value"), - State("coupling_strength", "value"), # previously input + Input("coupling_strength", "value"), # previously input Input("quench_schedule_filename", "children"), Input("job_submit_state", "children"), Input("job_id", "children"), @@ -454,10 +454,10 @@ def display_graphics_kink_density( ) return fig, coupling_data, zne_estimates, False, kz_data else: - if trigger_id == "qpu_selection" or trigger_id == "spins": + if trigger_id == "qpu_selection" or trigger_id == "spins" or trigger_id == "coupling_strength": kz_data = {"k":[]} - fig = plot_kink_densities_bg(graph_display, [ta_min, ta_max], J_baseline, schedule_filename, coupling_data, zne_estimates, kz_data=kz_data, url="Demo1") + fig = plot_kink_densities_bg(graph_display, [ta_min, ta_max], J, schedule_filename, coupling_data, zne_estimates, kz_data=kz_data, url="Demo1") return fig, coupling_data, zne_estimates, False, kz_data @@ -481,7 +481,7 @@ def display_graphics_kink_density( else: return dash.no_update - fig = plot_kink_densities_bg(graph_display, [ta_min, ta_max], J_baseline, schedule_filename, coupling_data, zne_estimates, kz_data, url="Demo1") + fig = plot_kink_densities_bg(graph_display, [ta_min, ta_max], J, schedule_filename, coupling_data, zne_estimates, kz_data, url="Demo1") return fig, coupling_data, zne_estimates, False, kz_data @app.callback( From b8f7e7b36e3f368bdeef7eb9bc97772c104d5e44 Mon Sep 17 00:00:00 2001 From: Jack Raymond <10591246+jackraymond@users.noreply.github.com> Date: Sat, 14 Dec 2024 22:19:13 -0800 Subject: [PATCH 121/170] correct lambda definition --- app.py | 10 ++++--- helpers/kz_calcs.py | 69 +++++++++++++++++++++++++++++++++++++++------ helpers/qa.py | 16 ----------- 3 files changed, 66 insertions(+), 29 deletions(-) diff --git a/app.py b/app.py index 18b3c53..5e3a3fe 100644 --- a/app.py +++ b/app.py @@ -40,7 +40,6 @@ # global variable for a default J value J_baseline = -1.8 - # Initialize: available QPUs, initial progress-bar status try: client = Client.from_config(client="qpu") @@ -378,7 +377,6 @@ def display_graphics_kink_density( ): coupling_data = {} zne_estimates = {} - fig = plot_kink_densities_bg( graph_display, [ta_min, ta_max], @@ -528,9 +526,13 @@ def display_graphics_spin_ring(spins, job_submit_state, job_id, J, embeddings_ca State("coupling_strength", "value"), State("anneal_duration", "value"), State("embeddings_cached", "data"), + # State("ta_multiplier", "value") ) def submit_job(job_submit_time, qpu_name, spins, J, ta_ns, embeddings_cached): + """Submit job and provide job ID.""" + # ta_multiplier should be 1, unless (withNoiseMitigation and [J or schedule]) changes. 
In which case recalculate as ta_multiplier=calc_lambda(coupling_strength, schedule, J_baseline=-1.8) as a function of the correct schedule + ta_multiplier = calc_lambda(J, schedule=None, J_baseline=J_baseline) trigger_id = dash.callback_context.triggered[0]["prop_id"].split(".")[0] @@ -542,7 +544,7 @@ def submit_job(job_submit_time, qpu_name, spins, J, ta_ns, embeddings_cached): embeddings_cached = json_to_dict(embeddings_cached) embedding = embeddings_cached[spins] - annealing_time = calc_lambda(J, J_baseline) * (ta_ns / 1000) + annealing_time = (ta_ns / 1000) if qpu_name == "Diffusion [Classical]": @@ -566,7 +568,7 @@ def submit_job(job_submit_time, qpu_name, spins, J, ta_ns, embeddings_cached): computation = solver.sample_bqm( bqm=bqm_embedded, fast_anneal=True, - annealing_time=annealing_time, + annealing_time=annealing_time*ta_multiplier, auto_scale=False, answer_mode="raw", # Easier than accounting for num_occurrences num_reads=100, diff --git a/helpers/kz_calcs.py b/helpers/kz_calcs.py index 6a67246..1b67cc8 100644 --- a/helpers/kz_calcs.py +++ b/helpers/kz_calcs.py @@ -14,22 +14,22 @@ import numpy as np -__all__ = ["kink_stats", "theoretical_kink_density"] +__all__ = ["kink_stats", "theoretical_kink_density_prefactor", "theoretical_kink_density", "calc_kappa", "calc_lambda"] -def theoretical_kink_density(annealing_times_ns, J, schedule, schedule_name): +def theoretical_kink_density_prefactor(J, schedule=None, schedule_name=None): """ Calculate the kink density predicted for given the coupling strength and annealing times. - Args: - annealing_times_ns: Iterable of annealing times, in nanoseconds. + See: "" + Args: J: Coupling strength between the spins of the ring. schedule: Anneal schedule for the selected QPU. - schedule_name: Filename of anneal schedule. Used to compensate for schedule energy - overestimate. + schedule_name: Filename of anneal schedule. Used to compensate for + schedule energy overestimate. Returns: Kink density per anneal time, as a NumPy array. @@ -37,8 +37,13 @@ def theoretical_kink_density(annealing_times_ns, J, schedule, schedule_name): # See the Code section of the README.md file for an explanation of the # following code. + if schedule is None: + if schedule_name: + schedule = pd.read_csv(f"helpers/{schedule_name}") + else: + schedule = pd.read_csv("helpers/FALLBACK_SCHEDULE.csv") - COMPENSATION_SCHEDULE_ENERGY = 0.8 if "Advantage_system" in schedule_name else 1.0 + COMPENSATION_SCHEDULE_ENERGY = 0.8 if (schedule_name is not None and "Advantage_system" in schedule_name) else 1.0 A = COMPENSATION_SCHEDULE_ENERGY * schedule["A(s) (GHz)"] B = COMPENSATION_SCHEDULE_ENERGY * schedule["B(s) (GHz)"] @@ -53,10 +58,56 @@ def theoretical_kink_density(annealing_times_ns, J, schedule, schedule_name): b_denominator = B_tag[sc_indx] / B[sc_indx] - A_tag[sc_indx] / A[sc_indx] b = b_numerator / b_denominator - return np.power([1e-9 * t for t in annealing_times_ns], -0.5) / ( - 2 * np.pi * np.sqrt(2 * b) + return b + +def theoretical_kink_density(annealing_times_ns, J=None, schedule=None, schedule_name=None, b=None): + """ + Calculate the kink density as a function of anneal time + + Args: + annealing_times_ns: Iterable of annealing times, in nanoseconds. + + b: A timescale based on linearization of the schedule at criticality + + J: Coupling strength between the spins of the ring. + + schedule: Anneal schedule for the selected QPU. + + schedule_name: Filename of anneal schedule. Used to compensate for + schedule energy overestimate. 
+ + Returns: + Kink density per anneal time, as a NumPy array. + """ + if b is None: + b = theoretical_kink_density_prefactor(J, schedule, schedule_name) + return np.power([1e-9 * t / b for t in annealing_times_ns], -0.5) / ( + 2 * np.pi * np.sqrt(2) ) +def calc_kappa(J, J_baseline=-1.8): + """Coupling ratio + + See "Quantum error mitigation in quantum annealing" usage.""" + return abs(J_baseline / J) + +def calc_lambda(J, *, schedule=None, schedule_name=None, J_baseline=-1.8): + """Time rescaling factor (relative to J_baseline) + + Rate through the transition is modified non-linearly by the + rescaling of J. If |J| is smaller than |J_baseline| we effectively move + more slowly through the critical region, the ratio of timescales is > 1. + See "Quantum error mitigation in quantum annealing" usage. + """ + if schedule is None: + # Fallback, assume ideal linear schedule + kappa = calc_kappa(J, J_baseline) + return kappa + else: + b_ref = theoretical_kink_density_prefactor(J_baseline, schedule, schedule_name) + b = theoretical_kink_density_prefactor(J, schedule, schedule_name) + + return b/b_ref def kink_stats(sampleset, J): """ diff --git a/helpers/qa.py b/helpers/qa.py index d145f86..061684d 100644 --- a/helpers/qa.py +++ b/helpers/qa.py @@ -23,8 +23,6 @@ import minorminer __all__ = [ - "calc_lambda", - "calc_kappa", "create_bqm", "find_one_to_one_embedding", "get_job_status", @@ -34,20 +32,6 @@ ] -def calc_kappa(coupling_strength, J_baseline=-1.8): - """Downgraded energy scale, see paper.""" - return abs(J_baseline / coupling_strength) - - -def calc_lambda(coupling_strength, J_baseline=-1.8): - """Time rescaling factor (relative to J_baseline) - - lambda is approximately linear in kappa (see paper). - kappa used as a placeholder (update later) - """ - kappa = calc_kappa(coupling_strength, J_baseline) - return kappa - def create_bqm(num_spins=512, coupling_strength=-1.4): """ From 523095f4d6683d5f2ac3b1d027751b22556c22b7 Mon Sep 17 00:00:00 2001 From: Jack Raymond <10591246+jackraymond@users.noreply.github.com> Date: Sat, 14 Dec 2024 22:43:56 -0800 Subject: [PATCH 122/170] Move ta_rescaling to relevant conditional branch --- app.py | 10 +++++----- helpers/kz_calcs.py | 8 ++++---- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/app.py b/app.py index 5e3a3fe..9a69de2 100644 --- a/app.py +++ b/app.py @@ -526,14 +526,10 @@ def display_graphics_spin_ring(spins, job_submit_state, job_id, J, embeddings_ca State("coupling_strength", "value"), State("anneal_duration", "value"), State("embeddings_cached", "data"), - # State("ta_multiplier", "value") ) def submit_job(job_submit_time, qpu_name, spins, J, ta_ns, embeddings_cached): """Submit job and provide job ID.""" - # ta_multiplier should be 1, unless (withNoiseMitigation and [J or schedule]) changes. In which case recalculate as ta_multiplier=calc_lambda(coupling_strength, schedule, J_baseline=-1.8) as a function of the correct schedule - ta_multiplier = calc_lambda(J, schedule=None, J_baseline=J_baseline) - trigger_id = dash.callback_context.triggered[0]["prop_id"].split(".")[0] if trigger_id == "job_submit_time": @@ -564,7 +560,11 @@ def submit_job(job_submit_time, qpu_name, spins, J, ta_ns, embeddings_cached): bqm_embedded = embed_bqm( bqm, embedding, DWaveSampler(solver=solver.name).adjacency ) - + # ta_multiplier should be 1, unless (withNoiseMitigation and [J or schedule]) changes, shouldn't change for MockSampler. 
In which case recalculate as ta_multiplier=calc_lambda(coupling_strength, schedule, J_baseline=-1.8) as a function of the correct schedule + # State("ta_multiplier", "value") ? Should recalculate when J or schedule changes IFF noise mitigation tab? + ta_multiplier = calc_lambda(J, schedule=None, J_baseline=J_baseline) + print(f'{ta_multiplier}: qpu_name') + computation = solver.sample_bqm( bqm=bqm_embedded, fast_anneal=True, diff --git a/helpers/kz_calcs.py b/helpers/kz_calcs.py index 1b67cc8..3a9a196 100644 --- a/helpers/kz_calcs.py +++ b/helpers/kz_calcs.py @@ -28,7 +28,7 @@ def theoretical_kink_density_prefactor(J, schedule=None, schedule_name=None): schedule: Anneal schedule for the selected QPU. - schedule_name: Filename of anneal schedule. Used to compensate for + schedule_name: Filename of anneal schedule. Used to compensate for schedule energy overestimate. Returns: @@ -86,7 +86,7 @@ def theoretical_kink_density(annealing_times_ns, J=None, schedule=None, schedule ) def calc_kappa(J, J_baseline=-1.8): - """Coupling ratio + """Coupling ratio See "Quantum error mitigation in quantum annealing" usage.""" return abs(J_baseline / J) @@ -95,7 +95,7 @@ def calc_lambda(J, *, schedule=None, schedule_name=None, J_baseline=-1.8): """Time rescaling factor (relative to J_baseline) Rate through the transition is modified non-linearly by the - rescaling of J. If |J| is smaller than |J_baseline| we effectively move + rescaling of J. If |J| is smaller than |J_baseline| we effectively move more slowly through the critical region, the ratio of timescales is > 1. See "Quantum error mitigation in quantum annealing" usage. """ @@ -106,7 +106,7 @@ def calc_lambda(J, *, schedule=None, schedule_name=None, J_baseline=-1.8): else: b_ref = theoretical_kink_density_prefactor(J_baseline, schedule, schedule_name) b = theoretical_kink_density_prefactor(J, schedule, schedule_name) - + return b/b_ref def kink_stats(sampleset, J): From f4c5b9f7663fbf79b4217faa560a2dc0e758b479 Mon Sep 17 00:00:00 2001 From: Jack Raymond <10591246+jackraymond@users.noreply.github.com> Date: Mon, 16 Dec 2024 10:45:09 -0800 Subject: [PATCH 123/170] Move simulator curve upwards by changing sweep per ta ratio --- MockKibbleZurekSampler.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/MockKibbleZurekSampler.py b/MockKibbleZurekSampler.py index e4fd1af..6987305 100644 --- a/MockKibbleZurekSampler.py +++ b/MockKibbleZurekSampler.py @@ -28,7 +28,7 @@ def __init__( # At large time (equilibrium) for long chains # lessthansimilarto t, # At J=-1 we want a kink density to bottom out. Therefore: - beta = np.atanh(1 - 2 * kink_density_limit_absJ1) + beta = np.arctanh(1 - 2 * kink_density_limit_absJ1) substitute_kwargs = { "beta_range": [beta, beta], # Quench "randomize_order": True, @@ -52,7 +52,7 @@ def sample(self, bqm, **kwargs): # Extract annealing_time from kwargs (if provided) annealing_time = kwargs.pop("annealing_time", 20) # 20us default. 
- num_sweeps = int(annealing_time * 3000) # 3000 sweeps per microsecond + num_sweeps = int(annealing_time * 1000) # 1000 sweeps per microsecond # Extract flux biases from kwargs (if provided) # flux_biases = kwargs.pop('flux_biases', {}) # flux_to_h_factor = fluxbias_to_h() From 6f26054614cbcf1e32c0f2a8524bea0847b272dc Mon Sep 17 00:00:00 2001 From: Jack Raymond <10591246+jackraymond@users.noreply.github.com> Date: Mon, 16 Dec 2024 10:45:35 -0800 Subject: [PATCH 124/170] Correct error division versus multiplication error in use of b --- helpers/kz_calcs.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/helpers/kz_calcs.py b/helpers/kz_calcs.py index 3a9a196..079184b 100644 --- a/helpers/kz_calcs.py +++ b/helpers/kz_calcs.py @@ -18,10 +18,11 @@ def theoretical_kink_density_prefactor(J, schedule=None, schedule_name=None): - """ - Calculate the kink density predicted for given the coupling strength and annealing times. + """Time rescaling factor - See: "" + Calculate the rescaling of time necessary to replicate + the behaviour of a linearized schedule at coupling strength 1. + See: "Error Mitigation in Quantum Annealing" Args: J: Coupling strength between the spins of the ring. @@ -81,7 +82,7 @@ def theoretical_kink_density(annealing_times_ns, J=None, schedule=None, schedule """ if b is None: b = theoretical_kink_density_prefactor(J, schedule, schedule_name) - return np.power([1e-9 * t / b for t in annealing_times_ns], -0.5) / ( + return np.power([1e-9 * t * b for t in annealing_times_ns], -0.5) / ( 2 * np.pi * np.sqrt(2) ) From d95ce19006522160810fff5f3c61ff7d208ae4ea Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Mon, 16 Dec 2024 11:52:53 -0800 Subject: [PATCH 125/170] Added branching for schedule file inside submit_job. 
Changed environment variable to config file --- app.py | 31 ++++++++++++++++++++----------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/app.py b/app.py index 9a69de2..a2a0b8c 100644 --- a/app.py +++ b/app.py @@ -34,7 +34,7 @@ from helpers.qa import * from helpers.tooltips import tool_tips_demo1, tool_tips_demo2 -import plotly.graph_objects as go +import yaml app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP]) @@ -54,13 +54,16 @@ qpus = {} client = None init_job_status = "NO SOLVER" -if os.getenv("ZNE") == "YES": - qpus["Diffusion [Classical]"] = MockKibbleZurekSampler( - topology_type="pegasus", topology_shape=[16] - ) # Change sampler to mock - init_job_status = "READY" - if not client: - client = "dummy" +with open("config.yaml", "r") as file: + config = yaml.safe_load(file) + if config["ZNE"]: + qpus["Diffusion [Classical]"] = globals()[config["sampler"]["type"]]( + topology_type=config["sampler"]["topology_type"], + topology_shape=config["sampler"]["topology_shape"], + ) + init_job_status = config["init_job_status"] + if not client: + client = config["client"] tool_tips = tool_tips_demo1 def demo_layout(demo_type): @@ -526,8 +529,10 @@ def display_graphics_spin_ring(spins, job_submit_state, job_id, J, embeddings_ca State("coupling_strength", "value"), State("anneal_duration", "value"), State("embeddings_cached", "data"), + State("url", "pathname"), + State("quench_schedule_filename", "children"), ) -def submit_job(job_submit_time, qpu_name, spins, J, ta_ns, embeddings_cached): +def submit_job(job_submit_time, qpu_name, spins, J, ta_ns, embeddings_cached, pathname, filename): """Submit job and provide job ID.""" trigger_id = dash.callback_context.triggered[0]["prop_id"].split(".")[0] @@ -562,9 +567,13 @@ def submit_job(job_submit_time, qpu_name, spins, J, ta_ns, embeddings_cached): ) # ta_multiplier should be 1, unless (withNoiseMitigation and [J or schedule]) changes, shouldn't change for MockSampler. In which case recalculate as ta_multiplier=calc_lambda(coupling_strength, schedule, J_baseline=-1.8) as a function of the correct schedule # State("ta_multiplier", "value") ? Should recalculate when J or schedule changes IFF noise mitigation tab? - ta_multiplier = calc_lambda(J, schedule=None, J_baseline=J_baseline) + ta_multiplier = 1 + + if pathname == "/demo2": + ta_multiplier = calc_lambda(J, schedule_name=filename, J_baseline=J_baseline) + print(f'{ta_multiplier}: qpu_name') - + computation = solver.sample_bqm( bqm=bqm_embedded, fast_anneal=True, From 44d9e8ebe71f262f33e12567ee214995f31f5626 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Mon, 16 Dec 2024 11:53:09 -0800 Subject: [PATCH 126/170] Added config file for mock sampler --- config.yaml | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 config.yaml diff --git a/config.yaml b/config.yaml new file mode 100644 index 0000000..038da13 --- /dev/null +++ b/config.yaml @@ -0,0 +1,7 @@ +ZNE: YES +sampler: + type: MockKibbleZurekSampler + topology_type: pegasus + topology_shape: [16] +init_job_status: READY +client: dummy From 37162b5a4629b7860c58de7c5fac99a1c430aa9f Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Mon, 16 Dec 2024 16:23:41 -0800 Subject: [PATCH 127/170] Added a warning modal when user submit job with classical sampler for the first time. 
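A side note on the config.yaml introduced in PATCH 126: yaml.safe_load applies YAML 1.1 boolean rules, so the bare `ZNE: YES` loads as the Python boolean True rather than the string "YES", which is what the `if config["ZNE"]:` check in app.py relies on. A stand-alone way to verify, with the same file contents inlined for convenience:

    import textwrap
    import yaml

    config_text = textwrap.dedent("""\
        ZNE: YES
        sampler:
          type: MockKibbleZurekSampler
          topology_type: pegasus
          topology_shape: [16]
        init_job_status: READY
        client: dummy
        """)

    config = yaml.safe_load(config_text)
    print(type(config["ZNE"]), config["ZNE"])     # <class 'bool'> True
    print(config["sampler"]["topology_shape"])    # [16]
    print(config["init_job_status"])              # READY (plain strings stay strings)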
--- app.py | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/app.py b/app.py index a2a0b8c..6fb4029 100644 --- a/app.py +++ b/app.py @@ -40,6 +40,7 @@ # global variable for a default J value J_baseline = -1.8 + # Initialize: available QPUs, initial progress-bar status try: client = Client.from_config(client="qpu") @@ -106,6 +107,7 @@ def demo_layout(demo_type): # store zero noise extrapolation dcc.Store(id="zne_estimates", data={}), dcc.Store(id="modal_trigger", data=False), + dcc.Store(id="initial_warning", data=False), dcc.Store(id="kz_data", data={}), dbc.Modal( [ @@ -117,6 +119,17 @@ def demo_layout(demo_type): id="error-modal", is_open=False, ), + dbc.Modal( + [ + dbc.ModalHeader(dbc.ModalTitle("Warning", style={"color": "orange", "fontWeight": "bold"})), + dbc.ModalBody( + "The Classical [diffusion] option executes a Markov Chain method locally for purposes of testing the demo interface. Kinks diffuse to annihilate, but are also created/destroyed by thermal fluctuations. The number of updates performed is set proportional to the annealing time. In the limit of no thermal noise, kinks diffuse to eliminate producing a power law, this process produces a power-law but for reasons independent of the Kibble-Zurek mechanism. In the noise mitigation demo we fit the impact of thermal fluctuations with a mixture of exponentials, by contrast with the quadratic fit appropriate to quantum dynamics.", + style={"color": "black", "fontSize": "16px"}, + ), + ], + id="warning-modal", + is_open=False, + ), ], fluid=True, ) @@ -183,7 +196,6 @@ def display_page(pathname): return demo_layout("Kibble-Zurek") - @app.callback( Output("solver_modal", "is_open"), Input("btn_simulate", "n_clicks"), @@ -523,6 +535,8 @@ def display_graphics_spin_ring(spins, job_submit_state, job_id, J, embeddings_ca @app.callback( Output("job_id", "children"), + Output("initial_warning", "data"), + Output("warning-modal", "is_open"), Input("job_submit_time", "children"), State("qpu_selection", "value"), State("spins", "value"), @@ -531,8 +545,9 @@ def display_graphics_spin_ring(spins, job_submit_state, job_id, J, embeddings_ca State("embeddings_cached", "data"), State("url", "pathname"), State("quench_schedule_filename", "children"), + State("initial_warning", "data") ) -def submit_job(job_submit_time, qpu_name, spins, J, ta_ns, embeddings_cached, pathname, filename): +def submit_job(job_submit_time, qpu_name, spins, J, ta_ns, embeddings_cached, pathname, filename, initial_warning): """Submit job and provide job ID.""" trigger_id = dash.callback_context.triggered[0]["prop_id"].split(".")[0] @@ -558,7 +573,9 @@ def submit_job(job_submit_time, qpu_name, spins, J, ta_ns, embeddings_cached, pa sampleset = qpus["Diffusion [Classical]"].sample( bqm_embedded, annealing_time=annealing_time ) - return json.dumps(sampleset.to_serializable()) + if not initial_warning: + return json.dumps(sampleset.to_serializable()), True, True + return json.dumps(sampleset.to_serializable()), True, False else: @@ -584,7 +601,7 @@ def submit_job(job_submit_time, qpu_name, spins, J, ta_ns, embeddings_cached, pa label=f"Examples - Kibble-Zurek Simulation, submitted: {job_submit_time}", ) - return computation.wait_id() + return computation.wait_id(), False, False return dash.no_update @@ -794,6 +811,5 @@ def toggle_modal(trigger, is_open): return True return is_open - if __name__ == "__main__": app.run_server(debug=True) From aefabd8f87e9ffcc2bd39f81ff222442f2f8e3bf Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: 
Mon, 16 Dec 2024 16:48:16 -0800 Subject: [PATCH 128/170] Fixed schedule file branching, only pass in filename --- helpers/kz_calcs.py | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/helpers/kz_calcs.py b/helpers/kz_calcs.py index 079184b..c2df0d3 100644 --- a/helpers/kz_calcs.py +++ b/helpers/kz_calcs.py @@ -13,11 +13,12 @@ # limitations under the License. import numpy as np +import pandas as pd __all__ = ["kink_stats", "theoretical_kink_density_prefactor", "theoretical_kink_density", "calc_kappa", "calc_lambda"] -def theoretical_kink_density_prefactor(J, schedule=None, schedule_name=None): +def theoretical_kink_density_prefactor(J, schedule_name=None): """Time rescaling factor Calculate the rescaling of time necessary to replicate @@ -27,8 +28,6 @@ def theoretical_kink_density_prefactor(J, schedule=None, schedule_name=None): Args: J: Coupling strength between the spins of the ring. - schedule: Anneal schedule for the selected QPU. - schedule_name: Filename of anneal schedule. Used to compensate for schedule energy overestimate. @@ -38,11 +37,10 @@ def theoretical_kink_density_prefactor(J, schedule=None, schedule_name=None): # See the Code section of the README.md file for an explanation of the # following code. - if schedule is None: - if schedule_name: - schedule = pd.read_csv(f"helpers/{schedule_name}") - else: - schedule = pd.read_csv("helpers/FALLBACK_SCHEDULE.csv") + if schedule_name is None: + schedule = pd.read_csv("helpers/FALLBACK_SCHEDULE.csv") + else: + schedule = pd.read_csv(f"helpers/{schedule_name}") COMPENSATION_SCHEDULE_ENERGY = 0.8 if (schedule_name is not None and "Advantage_system" in schedule_name) else 1.0 @@ -72,8 +70,6 @@ def theoretical_kink_density(annealing_times_ns, J=None, schedule=None, schedule J: Coupling strength between the spins of the ring. - schedule: Anneal schedule for the selected QPU. - schedule_name: Filename of anneal schedule. Used to compensate for schedule energy overestimate. @@ -81,7 +77,7 @@ def theoretical_kink_density(annealing_times_ns, J=None, schedule=None, schedule Kink density per anneal time, as a NumPy array. """ if b is None: - b = theoretical_kink_density_prefactor(J, schedule, schedule_name) + b = theoretical_kink_density_prefactor(J, schedule_name) return np.power([1e-9 * t * b for t in annealing_times_ns], -0.5) / ( 2 * np.pi * np.sqrt(2) ) @@ -92,7 +88,7 @@ def calc_kappa(J, J_baseline=-1.8): See "Quantum error mitigation in quantum annealing" usage.""" return abs(J_baseline / J) -def calc_lambda(J, *, schedule=None, schedule_name=None, J_baseline=-1.8): +def calc_lambda(J, *, schedule_name=None, J_baseline=-1.8): """Time rescaling factor (relative to J_baseline) Rate through the transition is modified non-linearly by the @@ -100,13 +96,13 @@ def calc_lambda(J, *, schedule=None, schedule_name=None, J_baseline=-1.8): more slowly through the critical region, the ratio of timescales is > 1. See "Quantum error mitigation in quantum annealing" usage. 
""" - if schedule is None: + if schedule_name is None: # Fallback, assume ideal linear schedule kappa = calc_kappa(J, J_baseline) return kappa else: - b_ref = theoretical_kink_density_prefactor(J_baseline, schedule, schedule_name) - b = theoretical_kink_density_prefactor(J, schedule, schedule_name) + b_ref = theoretical_kink_density_prefactor(J_baseline, schedule_name) + b = theoretical_kink_density_prefactor(J, schedule_name) return b/b_ref From f1f197cf2221e3d50dfa9f5ab2f9ae82c7c5f407 Mon Sep 17 00:00:00 2001 From: Jack Raymond <10591246+jackraymond@users.noreply.github.com> Date: Tue, 17 Dec 2024 14:12:06 -0800 Subject: [PATCH 129/170] Fix another division instead of multiplication error --- helpers/kz_calcs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helpers/kz_calcs.py b/helpers/kz_calcs.py index c2df0d3..fb0ce53 100644 --- a/helpers/kz_calcs.py +++ b/helpers/kz_calcs.py @@ -104,7 +104,7 @@ def calc_lambda(J, *, schedule_name=None, J_baseline=-1.8): b_ref = theoretical_kink_density_prefactor(J_baseline, schedule_name) b = theoretical_kink_density_prefactor(J, schedule_name) - return b/b_ref + return b_ref/b def kink_stats(sampleset, J): """ From a668318be8ac6846dcb3c87a3a96ec57871bab9a Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 17 Dec 2024 16:20:34 -0800 Subject: [PATCH 130/170] Update maxmimum anneal time for demo2 plot' y-axis --- app.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/app.py b/app.py index 6fb4029..5c97573 100644 --- a/app.py +++ b/app.py @@ -387,6 +387,9 @@ def display_graphics_kink_density( if pathname == "/demo2": + # update the maximum anneal time for zne demo + ta_max = 1280 + if ( trigger_id == "qpu_selection" or trigger_id == "spins" ): From c5a88b62098e9a068e4ea6040c66e8f27fae5033 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 17 Dec 2024 16:29:05 -0800 Subject: [PATCH 131/170] Minor fix --- app.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/app.py b/app.py index 5c97573..aa6f3ae 100644 --- a/app.py +++ b/app.py @@ -388,8 +388,8 @@ def display_graphics_kink_density( if pathname == "/demo2": # update the maximum anneal time for zne demo - ta_max = 1280 - + ta_max = 1400 + if ( trigger_id == "qpu_selection" or trigger_id == "spins" ): From 77a5d02f721ed37d18e265cc1b513e92ca3e0113 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 17 Dec 2024 16:55:35 -0800 Subject: [PATCH 132/170] Quick fix to duplicate zne points --- helpers/plots.py | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/helpers/plots.py b/helpers/plots.py index 1f5c0f4..5e0b5d5 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -714,18 +714,18 @@ def plot_zne_fitted_line( y_axis = "y1" x_zne = float(ta_str) # for ta_str, a in zne_estimates.items(): - if y_func_x is not None: - fig.add_trace( - go.Scatter( - x=[x_zne], - y=[zne_estimates[ta_str]], - mode="markers", - name="ZNE Estimate", - legendgroup=f"ta_{ta_str}", - marker=dict(size=12, color="purple", symbol="diamond"), - showlegend=False, - xaxis=x_axis, - yaxis=y_axis, - ) - ) + # if y_func_x is not None: + # fig.add_trace( + # go.Scatter( + # x=[x_zne], + # y=[zne_estimates[ta_str]], + # mode="markers", + # name="ZNE Estimate", + # legendgroup=f"ta_{ta_str}", + # marker=dict(size=12, color="purple", symbol="diamond"), + # showlegend=False, + # xaxis=x_axis, + # yaxis=y_axis, + # ) + # ) return zne_estimates, modal_trigger From efc7b2971e2c53625d137062965ceba14585ec41 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: 
Tue, 17 Dec 2024 17:00:36 -0800 Subject: [PATCH 133/170] Minimize delay by adding an extra call to plot background function --- app.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/app.py b/app.py index aa6f3ae..ce421bc 100644 --- a/app.py +++ b/app.py @@ -453,6 +453,15 @@ def display_graphics_kink_density( fig, coupling_data, qpu_name, zne_estimates, graph_display, ta_str ) + fig = plot_kink_densities_bg( + graph_display, + [ta_min, ta_max], + J_baseline, + schedule_filename, + coupling_data, + zne_estimates, + url='Demo2' + ) return fig, coupling_data, zne_estimates, modal_trigger, kz_data else: From 47f38ebb1b04f5c41d43b24b1f2784b6e9e97e99 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 17 Dec 2024 19:26:43 -0800 Subject: [PATCH 134/170] Minor fix for zne and fitting function display on demo2 --- app.py | 27 +++++++++++++++------------ helpers/plots.py | 33 +++++++++++++++++---------------- 2 files changed, 32 insertions(+), 28 deletions(-) diff --git a/app.py b/app.py index ce421bc..afac454 100644 --- a/app.py +++ b/app.py @@ -388,7 +388,7 @@ def display_graphics_kink_density( if pathname == "/demo2": # update the maximum anneal time for zne demo - ta_max = 1400 + ta_max = 1500 if ( trigger_id == "qpu_selection" or trigger_id == "spins" @@ -436,10 +436,11 @@ def display_graphics_kink_density( ) _, kink_density = kink_stats(sampleset_unembedded, J) + # Calculate kappa + kappa = calc_kappa(J, J_baseline) + fig = plot_kink_density(graph_display, figure, kink_density, ta, J) - # Calculate kappa - kappa = calc_kappa(J, J_baseline) # Initialize the list for this anneal_time if not present ta_str = str(ta) if ta_str not in coupling_data: @@ -453,15 +454,17 @@ def display_graphics_kink_density( fig, coupling_data, qpu_name, zne_estimates, graph_display, ta_str ) - fig = plot_kink_densities_bg( - graph_display, - [ta_min, ta_max], - J_baseline, - schedule_filename, - coupling_data, - zne_estimates, - url='Demo2' - ) + if graph_display == "kink_density": + fig = plot_kink_densities_bg( + graph_display, + [ta_min, ta_max], + J_baseline, + schedule_filename, + coupling_data, + zne_estimates, + url='Demo2' + ) + return fig, coupling_data, zne_estimates, modal_trigger, kz_data else: diff --git a/helpers/plots.py b/helpers/plots.py index 5e0b5d5..8e171a3 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -635,7 +635,7 @@ def plot_zne_fitted_line( fig (plotly.graph_objs.Figure): The existing Plotly figure to update. coupling_data (dict): A dictionary containing coupling-related data structured as {ta_str: [data_points]}, where each data point - includes "kappa" and "kink_density". + includes "lambda" and "kink_density". qpu_name (str): The name of the Quantum Processing Unit (QPU) used. Determines the fitting method. 
zne_estimates (dict): A dictionary to store Zero-Noise Extrapolation @@ -709,23 +709,24 @@ def plot_zne_fitted_line( yaxis=y_axis, ) ) + + fig.add_trace( + go.Scatter( + x=[x_zne], + y=[zne_estimates[ta_str]], + mode="markers", + name="ZNE Estimate", + legendgroup=f"ta_{ta_str}", + marker=dict(size=12, color="purple", symbol="diamond"), + showlegend=False, + xaxis=x_axis, + yaxis=y_axis, + ) + ) + else: x_axis = "x1" y_axis = "y1" x_zne = float(ta_str) - # for ta_str, a in zne_estimates.items(): - # if y_func_x is not None: - # fig.add_trace( - # go.Scatter( - # x=[x_zne], - # y=[zne_estimates[ta_str]], - # mode="markers", - # name="ZNE Estimate", - # legendgroup=f"ta_{ta_str}", - # marker=dict(size=12, color="purple", symbol="diamond"), - # showlegend=False, - # xaxis=x_axis, - # yaxis=y_axis, - # ) - # ) + return zne_estimates, modal_trigger From 03e789ae8719db91f1f730d7c8f0631f5fdae417 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 17 Dec 2024 19:31:22 -0800 Subject: [PATCH 135/170] Changed kappa to lambda globally (use calc kappa only inside kz_calc script) --- app.py | 9 +++++---- helpers/plots.py | 16 ++++++++-------- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/app.py b/app.py index afac454..d21da8f 100644 --- a/app.py +++ b/app.py @@ -436,10 +436,11 @@ def display_graphics_kink_density( ) _, kink_density = kink_stats(sampleset_unembedded, J) - # Calculate kappa - kappa = calc_kappa(J, J_baseline) + # Calculate lambda (previously kappa) + # Added _ to avoid keyword restriction + _lambda = calc_lambda(J=J, J_baseline=J_baseline) - fig = plot_kink_density(graph_display, figure, kink_density, ta, J) + fig = plot_kink_density(graph_display, figure, kink_density, ta, J, _lambda) # Initialize the list for this anneal_time if not present ta_str = str(ta) @@ -447,7 +448,7 @@ def display_graphics_kink_density( coupling_data[ta_str] = [] # Append the new data point coupling_data[ta_str].append( - {"kappa": kappa, "kink_density": kink_density, "coupling_strength": J} + {"lambda": _lambda, "kink_density": kink_density, "coupling_strength": J} ) zne_estimates, modal_trigger = plot_zne_fitted_line( diff --git a/helpers/plots.py b/helpers/plots.py index 8e171a3..b554f11 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -289,12 +289,12 @@ def plot_kink_densities_bg( ta_value = float(ta_str) color = ta_color_theme[ta_value] for point in data_points: - kappa = point["kappa"] + _lambda = point["lambda"] kink_density = point["kink_density"] if not label: fig_data.append( go.Scatter( - x=[kappa], + x=[_lambda], y=[kink_density], xaxis="x3", yaxis="y1", @@ -308,7 +308,7 @@ def plot_kink_densities_bg( else: fig_data.append( go.Scatter( - x=[kappa], + x=[_lambda], y=[kink_density], xaxis="x3", yaxis="y1", @@ -407,7 +407,7 @@ def plot_kink_densities_bg( return fig -def plot_kink_density(display, fig_dict, kink_density, anneal_time, J, url=None): +def plot_kink_density(display, fig_dict, kink_density, anneal_time, J, _lambda, url=None): """ Add a kink density marker from QPU samples to an existing plot. 
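For orientation, a minimal sketch of the pattern this helper follows (names are illustrative, not the demo's exact code): the figure arrives from the Dash callback as a plain dict, is rehydrated, and a single marker is added per completed job. The demo's own version additionally routes the marker to the correct subplot axes and colors it by anneal time.

import plotly.graph_objects as go

def add_kink_marker(fig_dict, x_value, kink_density):
    # Rehydrate the figure that Dash passes around as a dict
    fig = go.Figure(fig_dict)
    # One marker per completed job: x is the anneal time (or the noise level), y the measured kink density
    fig.add_trace(go.Scatter(
        x=[x_value],
        y=[kink_density],
        mode="markers",
        marker=dict(size=10, color="black", symbol="x"),
        showlegend=False,
    ))
    return fig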
@@ -458,10 +458,10 @@ def plot_kink_density(display, fig_dict, kink_density, anneal_time, J, url=None) if display == "coupling": color = ta_color_theme[ta_value] - kappa = -1.8 / J + #kappa = -1.8 / J fig.add_trace( go.Scatter( - x=[kappa], + x=[_lambda], y=[kink_density], xaxis="x3", yaxis="y1", @@ -654,7 +654,7 @@ def plot_zne_fitted_line( if len(coupling_data[ta_str]) > 2: data_points = coupling_data[ta_str] - x = np.array([point["kappa"] for point in data_points]) + x = np.array([point["lambda"] for point in data_points]) y = np.array([point["kink_density"] for point in data_points]) # Ensure there are enough unique x values for fitting @@ -723,7 +723,7 @@ def plot_zne_fitted_line( yaxis=y_axis, ) ) - + else: x_axis = "x1" y_axis = "y1" From dba6e511194eee11f4e387d718eed8f3543874d4 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 17 Dec 2024 19:46:27 -0800 Subject: [PATCH 136/170] Minor adjustment to axixes --- helpers/plots.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helpers/plots.py b/helpers/plots.py index b554f11..54143c0 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -193,7 +193,7 @@ def plot_kink_densities_bg( type="linear", ) - x_axis3 = dict(title="Noise Level (-1.8/J)", type="linear", range=[0, 3]) + x_axis3 = dict(title="Noise Level (-1.8/J)", type="linear", range=[-1, 4]) if display == "kink_density": fig_layout = go.Layout( From 9d3d0f52acc554629dc1394850e5dc4e8a2cbcf7 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 17 Dec 2024 19:56:28 -0800 Subject: [PATCH 137/170] Added persistent display of fitting curve for zero-noise demo plot2 --- app.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/app.py b/app.py index d21da8f..e9a2231 100644 --- a/app.py +++ b/app.py @@ -423,6 +423,11 @@ def display_graphics_kink_density( url="Demo2", ) + if graph_display == "coupling": + zne_estimates, modal_trigger = plot_zne_fitted_line( + fig, coupling_data, qpu_name, zne_estimates, graph_display, str(ta) + ) + return fig, coupling_data, zne_estimates, False, kz_data if trigger_id == "job_submit_state": From 19f608fd36f3285451c3bcb84e998ef6cea8bdc9 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 17 Dec 2024 20:14:07 -0800 Subject: [PATCH 138/170] Added branching inside lambda function for classical sampler --- helpers/kz_calcs.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/helpers/kz_calcs.py b/helpers/kz_calcs.py index fb0ce53..43ac24a 100644 --- a/helpers/kz_calcs.py +++ b/helpers/kz_calcs.py @@ -59,7 +59,7 @@ def theoretical_kink_density_prefactor(J, schedule_name=None): return b -def theoretical_kink_density(annealing_times_ns, J=None, schedule=None, schedule_name=None, b=None): +def theoretical_kink_density(annealing_times_ns, J=None, schedule_name=None, b=None): """ Calculate the kink density as a function of anneal time @@ -88,7 +88,7 @@ def calc_kappa(J, J_baseline=-1.8): See "Quantum error mitigation in quantum annealing" usage.""" return abs(J_baseline / J) -def calc_lambda(J, *, schedule_name=None, J_baseline=-1.8): +def calc_lambda(J, *, qpu_name=None, schedule_name=None, J_baseline=-1.8): """Time rescaling factor (relative to J_baseline) Rate through the transition is modified non-linearly by the @@ -96,7 +96,7 @@ def calc_lambda(J, *, schedule_name=None, J_baseline=-1.8): more slowly through the critical region, the ratio of timescales is > 1. See "Quantum error mitigation in quantum annealing" usage. 
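    Roughly, the returned factor is b(J_baseline) / b(J), where b is the kink-density
    prefactor computed by theoretical_kink_density_prefactor for the selected schedule;
    for the classical diffusion sampler it falls back to the linear-schedule ratio
    |J_baseline / J| (calc_kappa).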
""" - if schedule_name is None: + if qpu_name == "Diffusion [Classical]": # Fallback, assume ideal linear schedule kappa = calc_kappa(J, J_baseline) return kappa From 2a0c2f05d82460e4c68b36935bb6323e4e206d45 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 17 Dec 2024 20:34:14 -0800 Subject: [PATCH 139/170] Slight modification to config file --- app.py | 20 +++++++++----------- config.yaml | 10 +++------- 2 files changed, 12 insertions(+), 18 deletions(-) diff --git a/app.py b/app.py index e9a2231..c19afbb 100644 --- a/app.py +++ b/app.py @@ -38,8 +38,6 @@ app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP]) -# global variable for a default J value -J_baseline = -1.8 # Initialize: available QPUs, initial progress-bar status try: @@ -57,14 +55,14 @@ init_job_status = "NO SOLVER" with open("config.yaml", "r") as file: config = yaml.safe_load(file) - if config["ZNE"]: - qpus["Diffusion [Classical]"] = globals()[config["sampler"]["type"]]( - topology_type=config["sampler"]["topology_type"], - topology_shape=config["sampler"]["topology_shape"], - ) - init_job_status = config["init_job_status"] - if not client: - client = config["client"] + J_baseline = config["J_baseline"] + if config["use_classical"]: + qpus["Diffusion [Classical]"] = MockKibbleZurekSampler( + topology_type="pegasus", topology_shape=[16] + ) # Change sampler to mock + init_job_status = "READY" + if not client: + client = "dummy" tool_tips = tool_tips_demo1 def demo_layout(demo_type): @@ -443,7 +441,7 @@ def display_graphics_kink_density( # Calculate lambda (previously kappa) # Added _ to avoid keyword restriction - _lambda = calc_lambda(J=J, J_baseline=J_baseline) + _lambda = calc_lambda(J=J, qpu_name=qpu_name, J_baseline=J_baseline) fig = plot_kink_density(graph_display, figure, kink_density, ta, J, _lambda) diff --git a/config.yaml b/config.yaml index 038da13..8839269 100644 --- a/config.yaml +++ b/config.yaml @@ -1,7 +1,3 @@ -ZNE: YES -sampler: - type: MockKibbleZurekSampler - topology_type: pegasus - topology_shape: [16] -init_job_status: READY -client: dummy +use_classical: YES +J_baseline = -1.8 + From 0cafff0e35f01894de41eb1576a1817a72e882f6 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Tue, 17 Dec 2024 20:35:02 -0800 Subject: [PATCH 140/170] Applied black format --- app.py | 162 +++++++++++++++++++++++----------- helpers/kz_calcs.py | 22 ++++- helpers/layouts_cards.py | 48 +++++----- helpers/layouts_components.py | 62 ++++++------- helpers/plots.py | 80 +++++++++-------- helpers/qa.py | 3 +- helpers/tooltips.py | 29 +++--- 7 files changed, 241 insertions(+), 165 deletions(-) diff --git a/app.py b/app.py index c19afbb..fbc9084 100644 --- a/app.py +++ b/app.py @@ -58,13 +58,15 @@ J_baseline = config["J_baseline"] if config["use_classical"]: qpus["Diffusion [Classical]"] = MockKibbleZurekSampler( - topology_type="pegasus", topology_shape=[16] - ) # Change sampler to mock + topology_type="pegasus", topology_shape=[16] + ) # Change sampler to mock init_job_status = "READY" if not client: client = "dummy" tool_tips = tool_tips_demo1 + + def demo_layout(demo_type): if demo_type == "Kibble-Zurek": @@ -78,7 +80,11 @@ def demo_layout(demo_type): [ dbc.Col( # Left: control panel [ - control_card(solvers=qpus, init_job_status=init_job_status, demo_type=demo_type), + control_card( + solvers=qpus, + init_job_status=init_job_status, + demo_type=demo_type, + ), *dbc_modal("modal_solver"), *[ dbc.Tooltip( @@ -105,7 +111,7 @@ def demo_layout(demo_type): # store zero noise extrapolation 
dcc.Store(id="zne_estimates", data={}), dcc.Store(id="modal_trigger", data=False), - dcc.Store(id="initial_warning", data=False), + dcc.Store(id="initial_warning", data=False), dcc.Store(id="kz_data", data={}), dbc.Modal( [ @@ -119,10 +125,14 @@ def demo_layout(demo_type): ), dbc.Modal( [ - dbc.ModalHeader(dbc.ModalTitle("Warning", style={"color": "orange", "fontWeight": "bold"})), + dbc.ModalHeader( + dbc.ModalTitle( + "Warning", style={"color": "orange", "fontWeight": "bold"} + ) + ), dbc.ModalBody( "The Classical [diffusion] option executes a Markov Chain method locally for purposes of testing the demo interface. Kinks diffuse to annihilate, but are also created/destroyed by thermal fluctuations. The number of updates performed is set proportional to the annealing time. In the limit of no thermal noise, kinks diffuse to eliminate producing a power law, this process produces a power-law but for reasons independent of the Kibble-Zurek mechanism. In the noise mitigation demo we fit the impact of thermal fluctuations with a mixture of exponentials, by contrast with the quadratic fit appropriate to quantum dynamics.", - style={"color": "black", "fontSize": "16px"}, + style={"color": "black", "fontSize": "16px"}, ), ], id="warning-modal", @@ -132,6 +142,7 @@ def demo_layout(demo_type): fluid=True, ) + # Define the Navbar with two tabs navbar = dbc.Navbar( dbc.Container( @@ -147,12 +158,21 @@ def demo_layout(demo_type): ], href="/demo1", # Default route ), - # Navbar Tabs dbc.Nav( [ - dbc.NavItem(dbc.NavLink("Kibble-Zurek Mechanism", href="/demo1", active="exact")), - dbc.NavItem(dbc.NavLink("Kibble-Zurek Mechanism with Noise Mitigation", href="/demo2", active="exact")), + dbc.NavItem( + dbc.NavLink( + "Kibble-Zurek Mechanism", href="/demo1", active="exact" + ) + ), + dbc.NavItem( + dbc.NavLink( + "Kibble-Zurek Mechanism with Noise Mitigation", + href="/demo2", + active="exact", + ) + ), ], pills=True, ), @@ -167,7 +187,9 @@ def demo_layout(demo_type): [ dcc.Location(id="url", refresh=False), # Tracks the URL navbar, # Includes the Navbar at the top - html.Div(id="page-content", style={"paddingTop": "20px"}), # Dynamic page content + html.Div( + id="page-content", style={"paddingTop": "20px"} + ), # Dynamic page content ], fluid=True, ) @@ -177,18 +199,16 @@ def demo_layout(demo_type): # Callbacks Section -@app.callback( - Output("page-content", "children"), - Input("url", "pathname") -) + +@app.callback(Output("page-content", "children"), Input("url", "pathname")) def display_page(pathname): # If the user goes to the "/demo1" route if pathname == "/demo1": - + return demo_layout("Kibble-Zurek") # If the user goes to the "/demo2" route elif pathname == "/demo2": - + return demo_layout("Zero-Noise") else: return demo_layout("Kibble-Zurek") @@ -345,13 +365,13 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): Output("modal_trigger", "data"), Output("kz_data", "data"), Input("qpu_selection", "value"), - #Input("zne_graph_display", "value"), + # Input("zne_graph_display", "value"), Input("graph_display", "value"), Input("coupling_strength", "value"), # previously input Input("quench_schedule_filename", "children"), Input("job_submit_state", "children"), Input("job_id", "children"), - #Input("anneal_duration_zne", "value"), + # Input("anneal_duration_zne", "value"), Input("anneal_duration", "value"), Input("spins", "value"), Input("url", "pathname"), @@ -359,7 +379,7 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): State("sample_vs_theory", 
"figure"), State("coupling_data", "data"), # access previously stored data State("zne_estimates", "data"), # Access ZNE estimates - State("kz_data", "data") # get kibble zurek data point + State("kz_data", "data"), # get kibble zurek data point ) def display_graphics_kink_density( qpu_name, @@ -388,9 +408,7 @@ def display_graphics_kink_density( # update the maximum anneal time for zne demo ta_max = 1500 - if ( - trigger_id == "qpu_selection" or trigger_id == "spins" - ): + if trigger_id == "qpu_selection" or trigger_id == "spins": coupling_data = {} zne_estimates = {} fig = plot_kink_densities_bg( @@ -440,10 +458,12 @@ def display_graphics_kink_density( _, kink_density = kink_stats(sampleset_unembedded, J) # Calculate lambda (previously kappa) - # Added _ to avoid keyword restriction + # Added _ to avoid keyword restriction _lambda = calc_lambda(J=J, qpu_name=qpu_name, J_baseline=J_baseline) - fig = plot_kink_density(graph_display, figure, kink_density, ta, J, _lambda) + fig = plot_kink_density( + graph_display, figure, kink_density, ta, J, _lambda + ) # Initialize the list for this anneal_time if not present ta_str = str(ta) @@ -451,7 +471,11 @@ def display_graphics_kink_density( coupling_data[ta_str] = [] # Append the new data point coupling_data[ta_str].append( - {"lambda": _lambda, "kink_density": kink_density, "coupling_strength": J} + { + "lambda": _lambda, + "kink_density": kink_density, + "coupling_strength": J, + } ) zne_estimates, modal_trigger = plot_zne_fitted_line( @@ -466,7 +490,7 @@ def display_graphics_kink_density( schedule_filename, coupling_data, zne_estimates, - url='Demo2' + url="Demo2", ) return fig, coupling_data, zne_estimates, modal_trigger, kz_data @@ -482,40 +506,64 @@ def display_graphics_kink_density( schedule_filename, coupling_data, zne_estimates, - url='Demo2' + url="Demo2", ) return fig, coupling_data, zne_estimates, False, kz_data else: - if trigger_id == "qpu_selection" or trigger_id == "spins" or trigger_id == "coupling_strength": - - kz_data = {"k":[]} - fig = plot_kink_densities_bg(graph_display, [ta_min, ta_max], J, schedule_filename, coupling_data, zne_estimates, kz_data=kz_data, url="Demo1") + if ( + trigger_id == "qpu_selection" + or trigger_id == "spins" + or trigger_id == "coupling_strength" + ): + + kz_data = {"k": []} + fig = plot_kink_densities_bg( + graph_display, + [ta_min, ta_max], + J, + schedule_filename, + coupling_data, + zne_estimates, + kz_data=kz_data, + url="Demo1", + ) return fig, coupling_data, zne_estimates, False, kz_data - - if trigger_id == 'job_submit_state': - if job_submit_state == 'COMPLETED': + if trigger_id == "job_submit_state": + + if job_submit_state == "COMPLETED": embeddings_cached = embeddings_cached = json_to_dict(embeddings_cached) - sampleset_unembedded = get_samples(client, job_id, spins, J, embeddings_cached[spins]) + sampleset_unembedded = get_samples( + client, job_id, spins, J, embeddings_cached[spins] + ) _, kink_density = kink_stats(sampleset_unembedded, J) - - + # Append the new data point - kz_data["k"].append( - (kink_density, ta) + kz_data["k"].append((kink_density, ta)) + fig = plot_kink_density( + graph_display, figure, kink_density, ta, J, url="Demo1" ) - fig = plot_kink_density(graph_display, figure, kink_density, ta, J, url="Demo1") return fig, coupling_data, zne_estimates, False, kz_data - + else: return dash.no_update - - fig = plot_kink_densities_bg(graph_display, [ta_min, ta_max], J, schedule_filename, coupling_data, zne_estimates, kz_data, url="Demo1") + + fig = plot_kink_densities_bg( + 
graph_display, + [ta_min, ta_max], + J, + schedule_filename, + coupling_data, + zne_estimates, + kz_data, + url="Demo1", + ) return fig, coupling_data, zne_estimates, False, kz_data + @app.callback( Output("spin_orientation", "figure"), Input("spins", "value"), @@ -564,10 +612,19 @@ def display_graphics_spin_ring(spins, job_submit_state, job_id, J, embeddings_ca State("embeddings_cached", "data"), State("url", "pathname"), State("quench_schedule_filename", "children"), - State("initial_warning", "data") + State("initial_warning", "data"), ) -def submit_job(job_submit_time, qpu_name, spins, J, ta_ns, embeddings_cached, pathname, filename, initial_warning): - +def submit_job( + job_submit_time, + qpu_name, + spins, + J, + ta_ns, + embeddings_cached, + pathname, + filename, + initial_warning, +): """Submit job and provide job ID.""" trigger_id = dash.callback_context.triggered[0]["prop_id"].split(".")[0] @@ -579,7 +636,7 @@ def submit_job(job_submit_time, qpu_name, spins, J, ta_ns, embeddings_cached, pa embeddings_cached = json_to_dict(embeddings_cached) embedding = embeddings_cached[spins] - annealing_time = (ta_ns / 1000) + annealing_time = ta_ns / 1000 if qpu_name == "Diffusion [Classical]": @@ -606,14 +663,16 @@ def submit_job(job_submit_time, qpu_name, spins, J, ta_ns, embeddings_cached, pa ta_multiplier = 1 if pathname == "/demo2": - ta_multiplier = calc_lambda(J, schedule_name=filename, J_baseline=J_baseline) + ta_multiplier = calc_lambda( + J, schedule_name=filename, J_baseline=J_baseline + ) - print(f'{ta_multiplier}: qpu_name') + print(f"{ta_multiplier}: qpu_name") computation = solver.sample_bqm( bqm=bqm_embedded, fast_anneal=True, - annealing_time=annealing_time*ta_multiplier, + annealing_time=annealing_time * ta_multiplier, auto_scale=False, answer_mode="raw", # Easier than accounting for num_occurrences num_reads=100, @@ -792,7 +851,7 @@ def activate_tooltips(tooltips_show): trigger = dash.callback_context.triggered trigger_id = trigger[0]["prop_id"].split(".")[0] - + if trigger_id == "tooltips_show": if tooltips_show == "off": return ( @@ -830,5 +889,6 @@ def toggle_modal(trigger, is_open): return True return is_open + if __name__ == "__main__": app.run_server(debug=True) diff --git a/helpers/kz_calcs.py b/helpers/kz_calcs.py index 43ac24a..650f016 100644 --- a/helpers/kz_calcs.py +++ b/helpers/kz_calcs.py @@ -15,7 +15,13 @@ import numpy as np import pandas as pd -__all__ = ["kink_stats", "theoretical_kink_density_prefactor", "theoretical_kink_density", "calc_kappa", "calc_lambda"] +__all__ = [ + "kink_stats", + "theoretical_kink_density_prefactor", + "theoretical_kink_density", + "calc_kappa", + "calc_lambda", +] def theoretical_kink_density_prefactor(J, schedule_name=None): @@ -42,7 +48,11 @@ def theoretical_kink_density_prefactor(J, schedule_name=None): else: schedule = pd.read_csv(f"helpers/{schedule_name}") - COMPENSATION_SCHEDULE_ENERGY = 0.8 if (schedule_name is not None and "Advantage_system" in schedule_name) else 1.0 + COMPENSATION_SCHEDULE_ENERGY = ( + 0.8 + if (schedule_name is not None and "Advantage_system" in schedule_name) + else 1.0 + ) A = COMPENSATION_SCHEDULE_ENERGY * schedule["A(s) (GHz)"] B = COMPENSATION_SCHEDULE_ENERGY * schedule["B(s) (GHz)"] @@ -59,6 +69,7 @@ def theoretical_kink_density_prefactor(J, schedule_name=None): return b + def theoretical_kink_density(annealing_times_ns, J=None, schedule_name=None, b=None): """ Calculate the kink density as a function of anneal time @@ -70,7 +81,7 @@ def theoretical_kink_density(annealing_times_ns, J=None, 
schedule_name=None, b=N J: Coupling strength between the spins of the ring. - schedule_name: Filename of anneal schedule. Used to compensate for + schedule_name: Filename of anneal schedule. Used to compensate for schedule energy overestimate. Returns: @@ -82,12 +93,14 @@ def theoretical_kink_density(annealing_times_ns, J=None, schedule_name=None, b=N 2 * np.pi * np.sqrt(2) ) + def calc_kappa(J, J_baseline=-1.8): """Coupling ratio See "Quantum error mitigation in quantum annealing" usage.""" return abs(J_baseline / J) + def calc_lambda(J, *, qpu_name=None, schedule_name=None, J_baseline=-1.8): """Time rescaling factor (relative to J_baseline) @@ -104,7 +117,8 @@ def calc_lambda(J, *, qpu_name=None, schedule_name=None, J_baseline=-1.8): b_ref = theoretical_kink_density_prefactor(J_baseline, schedule_name) b = theoretical_kink_density_prefactor(J, schedule_name) - return b_ref/b + return b_ref / b + def kink_stats(sampleset, J): """ diff --git a/helpers/layouts_cards.py b/helpers/layouts_cards.py index 0ff36d5..ee68500 100644 --- a/helpers/layouts_cards.py +++ b/helpers/layouts_cards.py @@ -27,7 +27,7 @@ control_header_style = {"color": "rgb(3, 184, 255)", "marginTop": "10px"} -def control_card(solvers={}, init_job_status="READY", demo_type="Kibble-Zurek"): +def control_card(solvers={}, init_job_status="READY", demo_type="Kibble-Zurek"): """Lay out the configuration and job-submission card. Args: @@ -46,39 +46,39 @@ def control_card(solvers={}, init_job_status="READY", demo_type="Kibble-Zurek") else: job_status_color = "white" - if demo_type == 'Kibble-Zurek': - demo_title = 'Coherent Annealing: KZ Simulation' + if demo_type == "Kibble-Zurek": + demo_title = "Coherent Annealing: KZ Simulation" demo_description = html.P( - """ + """ Use a quantum computer to simulate the formation of topological defects in a 1D ring of spins undergoing a phase transition, described by the Kibble-Zurek mechanism. - """, - style={'color': 'white', 'fontSize': 14}) + """, + style={"color": "white", "fontSize": 14}, + ) demo_anneal_duration = get_config_anneal_duration(demo_type) - + else: demo_title = "Coherent Annealing: Zero-Noise Extrapolation" demo_description = html.P( - [ - """ + [ + """ Simulate zero-temperature and zero-time extrapolations on a quantum computer using the Kibble-Zurek mechanism. Fitting occurs once three or more data points are plotted, with -1.8 representing the highest energy scale corresponding to the lowest noise level. 
Learn more in the """, - html.A( - "paper", - href="https://arxiv.org/abs/2311.01306", # Replace with the actual URL - target="_blank", # Opens the link in a new tab - style={ - "color": "rgb(3, 184, 255)", - "textDecoration": "none", - }, # Optional styling - ), - ".", - ], - style={"color": "white", "fontSize": 14}, - ) + html.A( + "paper", + href="https://arxiv.org/abs/2311.01306", # Replace with the actual URL + target="_blank", # Opens the link in a new tab + style={ + "color": "rgb(3, 184, 255)", + "textDecoration": "none", + }, # Optional styling + ), + ".", + ], + style={"color": "white", "fontSize": 14}, + ) demo_anneal_duration = get_config_anneal_duration(demo_type) - - + return dbc.Card( [ dbc.Row( diff --git a/helpers/layouts_components.py b/helpers/layouts_components.py index aab6b05..27c99df 100644 --- a/helpers/layouts_components.py +++ b/helpers/layouts_components.py @@ -31,6 +31,7 @@ ring_lengths = [512, 1024, 2048] + def get_config_anneal_duration(demo_type): if demo_type == "Zero-Noise": return dcc.Dropdown( @@ -52,38 +53,32 @@ def get_config_anneal_duration(demo_type): else: return dbc.Input( id="anneal_duration", - type='number', + type="number", min=5, max=100, step=1, value=7, - style={'max-width': '95%'} + style={"max-width": "95%"}, ) + def get_config_kz_graph(demo_type): if demo_type == "Kibble-Zurek": return RadioItems( id="graph_display", options=[ - { - 'label': 'Both', - 'value': 'both', - 'disabled': False - }, - { - 'label': 'Kink density', - 'value': 'kink_density', - 'disabled': False - }, - { - 'label': 'Schedule', - 'value': 'schedule', - 'disabled': False - }, + {"label": "Both", "value": "both", "disabled": False}, + {"label": "Kink density", "value": "kink_density", "disabled": False}, + {"label": "Schedule", "value": "schedule", "disabled": False}, ], - value='both', - inputStyle={'margin-right': '10px', 'margin-bottom': '5px'}, - labelStyle={'color': 'rgb(3, 184, 255)', 'font-size': 12, 'display': 'inline-block', 'marginLeft': 20}, + value="both", + inputStyle={"margin-right": "10px", "margin-bottom": "5px"}, + labelStyle={ + "color": "rgb(3, 184, 255)", + "font-size": 12, + "display": "inline-block", + "marginLeft": 20, + }, inline=True, ) else: @@ -112,6 +107,7 @@ def get_config_kz_graph(demo_type): inline=True, ) + config_spins = RadioItems( id="spins", options=[ @@ -147,11 +143,13 @@ def get_config_kz_graph(demo_type): for val in range(2, 10, 2) } ) -#Dash Slider has some issue with int values having a zero after the decimal point +# Dash Slider has some issue with int values having a zero after the decimal point j_marks[-2] = {"label": "-2", "style": {"color": "white"}} del j_marks[-1.0] j_marks[-1] = {"label": "-1", "style": {"color": "white"}} j_marks[1] = {"label": "1", "style": {"color": "white"}} + + def get_config_coupling_strength(demo_type): if demo_type == "Zero-Noise": return dbc.Row( @@ -166,26 +164,30 @@ def get_config_coupling_strength(demo_type): min=-1.8, max=-0.6, step=None, - tooltip={"placement": "bottom", "always_visible": True} + tooltip={"placement": "bottom", "always_visible": True}, ) ] ), ), ] ) - return dbc.Row([ - dbc.Col( - html.Div([ + return dbc.Row( + [ + dbc.Col( + html.Div( + [ Slider( - id='coupling_strength', + id="coupling_strength", value=-1.4, marks=j_marks, step=None, - tooltip={"placement": "bottom", "always_visible": True} + tooltip={"placement": "bottom", "always_visible": True}, ) - ]), - ), - ]) + ] + ), + ), + ] + ) def config_qpu_selection(solvers, default="Diffusion [Classical]"): diff --git 
a/helpers/plots.py b/helpers/plots.py index 54143c0..378f817 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -59,7 +59,14 @@ def plot_kink_densities_bg( - display, time_range, J_base, schedule_name, coupling_data, zne_estimates, kz_data=None, url=None + display, + time_range, + J_base, + schedule_name, + coupling_data, + zne_estimates, + kz_data=None, + url=None, ): """ Plot the background of theoretical kink density and QPU energy scales. @@ -200,7 +207,7 @@ def plot_kink_densities_bg( xaxis=x_axis1, yaxis=y_axis1, ) - if url == 'Demo2': + if url == "Demo2": _coupling_label = { -1.8: False, -1.6: False, @@ -256,16 +263,16 @@ def plot_kink_densities_bg( for pair in kz_data["k"]: fig_data.append( go.Scatter( - x=[pair[1]], + x=[pair[1]], y=[pair[0]], mode="markers", marker=dict(size=10, color="black", symbol="x"), xaxis="x1", yaxis="y1", - showlegend=False + showlegend=False, ) ) - + elif display == "schedule": fig_layout = go.Layout( @@ -344,32 +351,32 @@ def plot_kink_densities_bg( # marker=dict(size=10, color=color, symbol="x"), # ) # ) - # Plot ZNE estimates - # for ta_str, a in zne_estimates.items(): - # fig_data.append( - # go.Scatter( - # x=[ta_str], - # y=[a], - # mode="markers", - # name="ZNE Estimate", - # marker=dict(size=12, color="purple", symbol="diamond"), - # showlegend=False, - # xaxis="x1", - # yaxis="y1", - # ) - # ) + # Plot ZNE estimates + # for ta_str, a in zne_estimates.items(): + # fig_data.append( + # go.Scatter( + # x=[ta_str], + # y=[a], + # mode="markers", + # name="ZNE Estimate", + # marker=dict(size=12, color="purple", symbol="diamond"), + # showlegend=False, + # xaxis="x1", + # yaxis="y1", + # ) + # ) # Add previously computed kz_data points if kz_data is not None and "k" in kz_data: for pair in kz_data["k"]: fig_data.append( go.Scatter( - x=[pair[1]], + x=[pair[1]], y=[pair[0]], mode="markers", marker=dict(size=10, color="black", symbol="x"), xaxis="x1", yaxis="y1", - showlegend=False + showlegend=False, ) ) fig = go.Figure(data=fig_data, layout=fig_layout) @@ -407,7 +414,9 @@ def plot_kink_densities_bg( return fig -def plot_kink_density(display, fig_dict, kink_density, anneal_time, J, _lambda, url=None): +def plot_kink_density( + display, fig_dict, kink_density, anneal_time, J, _lambda, url=None +): """ Add a kink density marker from QPU samples to an existing plot. @@ -439,26 +448,27 @@ def plot_kink_density(display, fig_dict, kink_density, anneal_time, J, _lambda, if url == "Demo1": fig.add_trace( - go.Scatter( - x=[anneal_time], - y=[kink_density], - xaxis='x1', - yaxis='y1', + go.Scatter( + x=[anneal_time], + y=[kink_density], + xaxis="x1", + yaxis="y1", showlegend=False, - marker=dict(size=10, - color='black', - symbol='x', - ) + marker=dict( + size=10, + color="black", + symbol="x", + ), ) ) return fig - + ta_value = float(anneal_time) if display == "coupling": color = ta_color_theme[ta_value] - #kappa = -1.8 / J + # kappa = -1.8 / J fig.add_trace( go.Scatter( x=[_lambda], @@ -489,7 +499,7 @@ def plot_kink_density(display, fig_dict, kink_density, anneal_time, J, _lambda, coupling_label[J] = True else: legend = False - + fig.add_trace( go.Scatter( x=[anneal_time], @@ -651,7 +661,7 @@ def plot_zne_fitted_line( due to ill conditioned data for fitting. 
""" modal_trigger = False - if len(coupling_data[ta_str]) > 2: + if len(coupling_data) > 0 and len(coupling_data[ta_str]) > 2: data_points = coupling_data[ta_str] x = np.array([point["lambda"] for point in data_points]) diff --git a/helpers/qa.py b/helpers/qa.py index 061684d..3a7f1c7 100644 --- a/helpers/qa.py +++ b/helpers/qa.py @@ -32,7 +32,6 @@ ] - def create_bqm(num_spins=512, coupling_strength=-1.4): """ Create a binary quadratic model (BQM) representing a magnetic 1D ring. @@ -218,7 +217,7 @@ def mixture_of_exponentials(x, p_0, p_1, p_2): f=mixture_of_exponentials, xdata=xdata, ydata=ydata, p0=p0 ) except: - return None + return None def y_func_x(x): return mixture_of_exponentials(x, *p) diff --git a/helpers/tooltips.py b/helpers/tooltips.py index 920ce0a..8e4eb51 100644 --- a/helpers/tooltips.py +++ b/helpers/tooltips.py @@ -36,33 +36,24 @@ } tool_tips_demo1 = { - "anneal_duration": -f"""Duration of the quantum anneal. Range of 5 to 100 nanoseconds.""", - "graph_display": -f"""Plot selection: Kibble-Zurek prediction and/or QPU energies (either separate or combined).""", - "spins": -f"""Number of spins in the 1D ring.""", - "coupling_strength": -f"""Coupling strength, J, between spins in the ring. + "anneal_duration": f"""Duration of the quantum anneal. Range of 5 to 100 nanoseconds.""", + "graph_display": f"""Plot selection: Kibble-Zurek prediction and/or QPU energies (either separate or combined).""", + "spins": f"""Number of spins in the 1D ring.""", + "coupling_strength": f"""Coupling strength, J, between spins in the ring. Range of -2 (ferromagnetic) to +1 (anti-ferromagnetic). """, - "qpu_selection": -f"""Selection from quantum computers available to your account/project token.""", - "embedding_is_cached": -f"""Whether or not a minor-embedding is cached for the selected QPU, for each + "qpu_selection": f"""Selection from quantum computers available to your account/project token.""", + "embedding_is_cached": f"""Whether or not a minor-embedding is cached for the selected QPU, for each of the available number of spins. If not available, an attempt is made to find an embedding the first time you submit a problem. """, - "btn_simulate": -f"""Click to (minor-embed if a cached embedding is unavailable) and + "btn_simulate": f"""Click to (minor-embed if a cached embedding is unavailable) and submit the problem to your selected QPU. """, - "quench_schedule_filename": -f"""CSV file with the fast-anneal schedule for the selected quantum computer. + "quench_schedule_filename": f"""CSV file with the fast-anneal schedule for the selected quantum computer. If none exists, uses one from a different quantum computer (expect inaccuracies). 
You can download schedules from https://docs.dwavesys.com/docs/latest/doc_physical_properties.html """, - "job_submit_state": -f"""Status of the last submission to the quantum computer (or initial state).""", -} \ No newline at end of file + "job_submit_state": f"""Status of the last submission to the quantum computer (or initial state).""", +} From 113d495ef4ec64d2653c093a0c96fefa46abd974 Mon Sep 17 00:00:00 2001 From: Jack Raymond <10591246+jackraymond@users.noreply.github.com> Date: Wed, 18 Dec 2024 12:29:08 -0800 Subject: [PATCH 141/170] Update yaml --- config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config.yaml b/config.yaml index 8839269..334137d 100644 --- a/config.yaml +++ b/config.yaml @@ -1,3 +1,3 @@ use_classical: YES -J_baseline = -1.8 +J_baseline: -1.8 From 933dffacebec3154f2d0e1fb17f776c11bf9823b Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Wed, 18 Dec 2024 15:34:49 -0800 Subject: [PATCH 142/170] Minor fix on theoretical calc parameter --- app.py | 2 +- helpers/plots.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/app.py b/app.py index fbc9084..cc85b9b 100644 --- a/app.py +++ b/app.py @@ -459,7 +459,7 @@ def display_graphics_kink_density( # Calculate lambda (previously kappa) # Added _ to avoid keyword restriction - _lambda = calc_lambda(J=J, qpu_name=qpu_name, J_baseline=J_baseline) + _lambda = calc_lambda(J=J, qpu_name=qpu_name, schedule_name=schedule_filename, J_baseline=J_baseline) fig = plot_kink_density( graph_display, figure, kink_density, ta, J, _lambda diff --git a/helpers/plots.py b/helpers/plots.py index 378f817..90535a8 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -111,7 +111,7 @@ def plot_kink_densities_bg( A_joule = A_ghz / 1.5092e24 B_joule = B_ghz / 1.5092e24 - n = theoretical_kink_density(time_range, J_base, schedule, schedule_name) + n = theoretical_kink_density(time_range, J_base, schedule_name) predicted_plus = go.Scatter( x=np.asarray(time_range), From 0e3aef6a60f235ef0b4f780c4bd7cdb8b2479cb1 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Thu, 19 Dec 2024 22:47:08 -0800 Subject: [PATCH 143/170] Removed unused code --- helpers/plots.py | 29 +---------------------------- 1 file changed, 1 insertion(+), 28 deletions(-) diff --git a/helpers/plots.py b/helpers/plots.py index 90535a8..7cfeae3 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -337,34 +337,7 @@ def plot_kink_densities_bg( ) fig_data = [predicted_plus, predicted_minus, energy_transverse, energy_problem] - # for ta_str, data_points in coupling_data.items(): - # for point in data_points: - # color = "black" - # kink_density = point["kink_density"] - # fig_data.append( - # go.Scatter( - # x=[ta_str], - # y=[kink_density], - # xaxis="x1", - # yaxis="y1", - # showlegend=False, - # marker=dict(size=10, color=color, symbol="x"), - # ) - # ) - # Plot ZNE estimates - # for ta_str, a in zne_estimates.items(): - # fig_data.append( - # go.Scatter( - # x=[ta_str], - # y=[a], - # mode="markers", - # name="ZNE Estimate", - # marker=dict(size=12, color="purple", symbol="diamond"), - # showlegend=False, - # xaxis="x1", - # yaxis="y1", - # ) - # ) + # Add previously computed kz_data points if kz_data is not None and "k" in kz_data: for pair in kz_data["k"]: From d637422c7e276df8b421160af796c1877c53abc0 Mon Sep 17 00:00:00 2001 From: Andy Zhang Date: Fri, 20 Dec 2024 14:51:03 -0800 Subject: [PATCH 144/170] Minor bugfix and variable name adjustments --- app.py | 15 ++++++++------- helpers/kz_calcs.py | 5 +++-- helpers/plots.py | 12 
++++++------ 3 files changed, 17 insertions(+), 15 deletions(-) diff --git a/app.py b/app.py index cc85b9b..f557101 100644 --- a/app.py +++ b/app.py @@ -53,17 +53,18 @@ qpus = {} client = None init_job_status = "NO SOLVER" +# Load base coupling strength and user configuration for mock sampler with open("config.yaml", "r") as file: config = yaml.safe_load(file) J_baseline = config["J_baseline"] if config["use_classical"]: qpus["Diffusion [Classical]"] = MockKibbleZurekSampler( topology_type="pegasus", topology_shape=[16] - ) # Change sampler to mock + ) init_job_status = "READY" if not client: client = "dummy" - +# Demo defaults to use tool_tips = tool_tips_demo1 @@ -459,10 +460,10 @@ def display_graphics_kink_density( # Calculate lambda (previously kappa) # Added _ to avoid keyword restriction - _lambda = calc_lambda(J=J, qpu_name=qpu_name, schedule_name=schedule_filename, J_baseline=J_baseline) + lambda_ = calclambda_(J=J, qpu_name=qpu_name, schedule_name=schedule_filename, J_baseline=J_baseline) fig = plot_kink_density( - graph_display, figure, kink_density, ta, J, _lambda + graph_display, figure, kink_density, ta, J, lambda_ ) # Initialize the list for this anneal_time if not present @@ -472,7 +473,7 @@ def display_graphics_kink_density( # Append the new data point coupling_data[ta_str].append( { - "lambda": _lambda, + "lambda": lambda_, "kink_density": kink_density, "coupling_strength": J, } @@ -658,12 +659,12 @@ def submit_job( bqm_embedded = embed_bqm( bqm, embedding, DWaveSampler(solver=solver.name).adjacency ) - # ta_multiplier should be 1, unless (withNoiseMitigation and [J or schedule]) changes, shouldn't change for MockSampler. In which case recalculate as ta_multiplier=calc_lambda(coupling_strength, schedule, J_baseline=-1.8) as a function of the correct schedule + # ta_multiplier should be 1, unless (withNoiseMitigation and [J or schedule]) changes, shouldn't change for MockSampler. In which case recalculate as ta_multiplier=calclambda_(coupling_strength, schedule, J_baseline=-1.8) as a function of the correct schedule # State("ta_multiplier", "value") ? Should recalculate when J or schedule changes IFF noise mitigation tab? 
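        # In the noise-mitigation flow the requested anneal time is multiplied by the
        # lambda factor from helpers/kz_calcs.py (b_ref/b for the selected schedule), so
        # runs at weaker |J| pass through the critical region at a rate comparable to the
        # J_baseline case.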
ta_multiplier = 1 if pathname == "/demo2": - ta_multiplier = calc_lambda( + ta_multiplier = calclambda_( J, schedule_name=filename, J_baseline=J_baseline ) diff --git a/helpers/kz_calcs.py b/helpers/kz_calcs.py index 650f016..818e4bd 100644 --- a/helpers/kz_calcs.py +++ b/helpers/kz_calcs.py @@ -20,7 +20,7 @@ "theoretical_kink_density_prefactor", "theoretical_kink_density", "calc_kappa", - "calc_lambda", + "calclambda_", ] @@ -89,6 +89,7 @@ def theoretical_kink_density(annealing_times_ns, J=None, schedule_name=None, b=N """ if b is None: b = theoretical_kink_density_prefactor(J, schedule_name) + return np.power([1e-9 * t * b for t in annealing_times_ns], -0.5) / ( 2 * np.pi * np.sqrt(2) ) @@ -101,7 +102,7 @@ def calc_kappa(J, J_baseline=-1.8): return abs(J_baseline / J) -def calc_lambda(J, *, qpu_name=None, schedule_name=None, J_baseline=-1.8): +def calclambda_(J, *, qpu_name=None, schedule_name=None, J_baseline=-1.8): """Time rescaling factor (relative to J_baseline) Rate through the transition is modified non-linearly by the diff --git a/helpers/plots.py b/helpers/plots.py index 7cfeae3..1318456 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -296,12 +296,12 @@ def plot_kink_densities_bg( ta_value = float(ta_str) color = ta_color_theme[ta_value] for point in data_points: - _lambda = point["lambda"] + lambda_ = point["lambda"] kink_density = point["kink_density"] if not label: fig_data.append( go.Scatter( - x=[_lambda], + x=[lambda_], y=[kink_density], xaxis="x3", yaxis="y1", @@ -315,7 +315,7 @@ def plot_kink_densities_bg( else: fig_data.append( go.Scatter( - x=[_lambda], + x=[lambda_], y=[kink_density], xaxis="x3", yaxis="y1", @@ -388,7 +388,7 @@ def plot_kink_densities_bg( def plot_kink_density( - display, fig_dict, kink_density, anneal_time, J, _lambda, url=None + display, fig_dict, kink_density, anneal_time, J, lambda_=None, url=None ): """ Add a kink density marker from QPU samples to an existing plot. @@ -444,7 +444,7 @@ def plot_kink_density( # kappa = -1.8 / J fig.add_trace( go.Scatter( - x=[_lambda], + x=[lambda_], y=[kink_density], xaxis="x3", yaxis="y1", @@ -634,7 +634,7 @@ def plot_zne_fitted_line( due to ill conditioned data for fitting. 
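    For reference, coupling_data has the illustrative shape
    {"80": [{"lambda": 1.0, "kink_density": 0.012, "coupling_strength": -1.8}, ...]},
    keyed by the anneal time as a string; a fit is only attempted when that key exists
    and holds more than two points.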
""" modal_trigger = False - if len(coupling_data) > 0 and len(coupling_data[ta_str]) > 2: + if ta_str in coupling_data.keys() and len(coupling_data[ta_str]) > 2: data_points = coupling_data[ta_str] x = np.array([point["lambda"] for point in data_points]) From c511dac3bd3da5951200185637b3da10f42f4c49 Mon Sep 17 00:00:00 2001 From: Jack Raymond <10591246+jackraymond@users.noreply.github.com> Date: Fri, 20 Dec 2024 16:19:03 -0800 Subject: [PATCH 145/170] Remove debugging print statement --- app.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/app.py b/app.py index f557101..8fdf6da 100644 --- a/app.py +++ b/app.py @@ -668,8 +668,6 @@ def submit_job( J, schedule_name=filename, J_baseline=J_baseline ) - print(f"{ta_multiplier}: qpu_name") - computation = solver.sample_bqm( bqm=bqm_embedded, fast_anneal=True, From d972161555c8637147d7a5eb9c481d55866e6124 Mon Sep 17 00:00:00 2001 From: Kate Culver Date: Wed, 8 Jan 2025 16:16:00 -0800 Subject: [PATCH 146/170] Minor code refactor and clean up --- README.md | 14 +- app.py | 837 ++++++++---------- assets/custom.css | 39 +- config.yaml | 3 - demo_configs.py | 34 + helpers/kz_calcs.py | 48 +- helpers/layouts_cards.py | 317 ++----- helpers/layouts_components.py | 182 ++-- helpers/plots.py | 67 +- helpers/qa.py | 52 +- ...ibbleZurekSampler.py => mock_kz_sampler.py | 25 +- src/demo_enums.py | 28 + tests/test_cb_activate_tooltips.py | 41 - 13 files changed, 736 insertions(+), 951 deletions(-) delete mode 100644 config.yaml create mode 100644 demo_configs.py rename MockKibbleZurekSampler.py => mock_kz_sampler.py (76%) create mode 100644 src/demo_enums.py delete mode 100644 tests/test_cb_activate_tooltips.py diff --git a/README.md b/README.md index af96dd3..379aabd 100644 --- a/README.md +++ b/README.md @@ -118,7 +118,9 @@ Your development environment should be configured to You can see information about supported IDEs and authorizing access to your Leap account [here](https://docs.dwavesys.com/docs/latest/doc_leap_dev_env.html). -The default configuration uses `DWaveSampler` with specific models accessed through the Leap API. To run experiments using `MockDKibbleZurekSampler` locally, set the environment variable in your terminal before running the application. +The default configuration uses `DWaveSampler` with specific models accessed through the Leap API. +To run experiments using `MockDKibbleZurekSampler` locally, set the environment variable in your +terminal before running the application. **Windows terminal**: ``` @@ -203,10 +205,16 @@ the kink density away from the predicted value. ## Zero-Noise Extrapolation -Another feature showcased in this demo is the result achieved in Quantum Error Mitigation. In this paper, we demonstrate a practical implementation of zero-noise extrapolation as a method of quantum error mitigation specifically used for quantum annealing. +Another feature showcased in this demo is the result achieved in Quantum Error Mitigation. +In [this paper](https://arxiv.org/abs/2311.01306), we demonstrate a practical implementation +of zero-noise extrapolation as a method of quantum error mitigation specifically used for quantum +annealing. -For various coupling strengths at the same annealing time, we used a fitting function—quadratic for the Advantage solver and a multi-polynomial for the MockDwaveSampler—to calculate the theoretical zero-noise point. As the experiment runs for a longer time, we expect this zero-noise point to follow the same trend as the other data points. 
+For various coupling strengths at the same annealing time, we used a fitting function—quadratic +for the Advantage solver and a multi-polynomial for the MockDwaveSampler to calculate the +theoretical zero-noise point. As the experiment runs for a longer time, we expect this zero-noise +point to follow the same trend as the other data points. Experimental results diff --git a/app.py b/app.py index 8fdf6da..6f0e806 100644 --- a/app.py +++ b/app.py @@ -1,4 +1,4 @@ -# Copyright 2024 D-Wave +# Copyright 2025 D-Wave # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Union import dash import dash_bootstrap_components as dbc -from dash import html, Input, Output, State -from dash import dcc +from dash import ALL, ctx, dcc, html, Input, Output, State +from dash.exceptions import PreventUpdate import datetime import json +from demo_configs import DESCRIPTION, DESCRIPTION_NM, J_BASELINE, MAIN_HEADER, MAIN_HEADER_NM, THUMBNAIL, USE_CLASSICAL import numpy as np import os @@ -25,7 +27,7 @@ from dwave.cloud import Client from dwave.embedding import embed_bqm, is_valid_embedding from dwave.system import DWaveSampler -from MockKibbleZurekSampler import MockKibbleZurekSampler +from mock_kz_sampler import MockKibbleZurekSampler from helpers.kz_calcs import * from helpers.layouts_cards import * @@ -34,7 +36,7 @@ from helpers.qa import * from helpers.tooltips import tool_tips_demo1, tool_tips_demo2 -import yaml +from src.demo_enums import ProblemType app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP]) @@ -53,95 +55,17 @@ qpus = {} client = None init_job_status = "NO SOLVER" -# Load base coupling strength and user configuration for mock sampler -with open("config.yaml", "r") as file: - config = yaml.safe_load(file) - J_baseline = config["J_baseline"] - if config["use_classical"]: - qpus["Diffusion [Classical]"] = MockKibbleZurekSampler( - topology_type="pegasus", topology_shape=[16] - ) - init_job_status = "READY" - if not client: - client = "dummy" -# Demo defaults to use -tool_tips = tool_tips_demo1 +# Load base coupling strength and user configuration for mock sampler +if USE_CLASSICAL: + qpus["Diffusion [Classical]"] = MockKibbleZurekSampler( + topology_type="pegasus", topology_shape=[16] + ) -def demo_layout(demo_type): - - if demo_type == "Kibble-Zurek": - tool_tips = tool_tips_demo1 - else: - tool_tips = tool_tips_demo2 +init_job_status = "READY" - return dbc.Container( - [ - dbc.Row( - [ - dbc.Col( # Left: control panel - [ - control_card( - solvers=qpus, - init_job_status=init_job_status, - demo_type=demo_type, - ), - *dbc_modal("modal_solver"), - *[ - dbc.Tooltip( - message, - target=target, - id=f"tooltip_{target}", - style=dict(), - ) - for target, message in tool_tips.items() - ], - ], - width=4, - style={"minWidth": "30rem"}, - ), - dbc.Col( # Right: display area - graphs_card(demo_type=demo_type), - width=8, - style={"minWidth": "60rem"}, - ), - ] - ), - # store coupling data points - dcc.Store(id="coupling_data", data={}), - # store zero noise extrapolation - dcc.Store(id="zne_estimates", data={}), - dcc.Store(id="modal_trigger", data=False), - dcc.Store(id="initial_warning", data=False), - dcc.Store(id="kz_data", data={}), - dbc.Modal( - [ - dbc.ModalHeader(dbc.ModalTitle("Error")), - dbc.ModalBody( - "Fitting function failed likely due to ill 
conditioned data, please collect more." - ), - ], - id="error-modal", - is_open=False, - ), - dbc.Modal( - [ - dbc.ModalHeader( - dbc.ModalTitle( - "Warning", style={"color": "orange", "fontWeight": "bold"} - ) - ), - dbc.ModalBody( - "The Classical [diffusion] option executes a Markov Chain method locally for purposes of testing the demo interface. Kinks diffuse to annihilate, but are also created/destroyed by thermal fluctuations. The number of updates performed is set proportional to the annealing time. In the limit of no thermal noise, kinks diffuse to eliminate producing a power law, this process produces a power-law but for reasons independent of the Kibble-Zurek mechanism. In the noise mitigation demo we fit the impact of thermal fluctuations with a mixture of exponentials, by contrast with the quadratic fit appropriate to quantum dynamics.", - style={"color": "black", "fontSize": "16px"}, - ), - ], - id="warning-modal", - is_open=False, - ), - ], - fluid=True, - ) +if not client: + client = "dummy" # Define the Navbar with two tabs @@ -152,28 +76,22 @@ def demo_layout(demo_type): dbc.NavbarBrand( [ html.Img( - src="assets/dwave_logo.png", + src=THUMBNAIL, height="30px", style={"margin-right": "10px"}, ), ], - href="/demo1", # Default route ), # Navbar Tabs dbc.Nav( [ dbc.NavItem( dbc.NavLink( - "Kibble-Zurek Mechanism", href="/demo1", active="exact" - ) - ), - dbc.NavItem( - dbc.NavLink( - "Kibble-Zurek Mechanism with Noise Mitigation", - href="/demo2", - active="exact", + problem_type.label, + id={"type": "problem-type", "index": index}, + active="exact" ) - ), + ) for index, problem_type in enumerate(ProblemType) ], pills=True, ), @@ -184,35 +102,152 @@ def demo_layout(demo_type): sticky="top", ) -app.layout = dbc.Container( + +def tooltips(problem_type: Union[ProblemType, int]) -> list[dbc.Tooltip]: + """Tooltip generator. + + Args: + problem_type: Either ProblemType.KZ or ProblemType.KZ_NM. + """ + tool_tips = tool_tips_demo1 if problem_type is ProblemType.KZ else tool_tips_demo2 + + return [ + dbc.Tooltip( + message, + target=target, + id=f"tooltip_{target}", + style=dict(), + ) + for target, message in tool_tips.items() + ] + +app.layout = html.Div( [ - dcc.Location(id="url", refresh=False), # Tracks the URL + dcc.Store(id="coupling_data", data={}), + # store zero noise extrapolation + dcc.Store(id="zne_estimates", data={}), + dcc.Store(id="modal_trigger", data=False), + dcc.Store(id="initial_warning", data=False), + dcc.Store(id="kz_data", data={}), + dcc.Store(id="selected-problem"), navbar, # Includes the Navbar at the top html.Div( - id="page-content", style={"paddingTop": "20px"} - ), # Dynamic page content + [ + dbc.Container( + [ + dbc.Row( + [ + dbc.Col( # Left: control panel + [ + control_card( + solvers=qpus, + init_job_status=init_job_status, + ), + *dbc_modal("modal_solver"), + html.Div(tooltips(ProblemType.KZ), id="tooltips") + ], + width=4, + style={"minWidth": "30rem"}, + ), + dbc.Col( # Right: display area + graphs_card(problem_type=ProblemType.KZ), + width=8, + style={"minWidth": "60rem"}, + ), + ] + ), + dbc.Modal( + [ + dbc.ModalHeader(dbc.ModalTitle("Error")), + dbc.ModalBody( + "Fitting function failed likely due to ill conditioned data, please collect more." 
+ ), + ], + id="error-modal", + is_open=False, + ), + dbc.Modal( + [ + dbc.ModalHeader( + dbc.ModalTitle( + "Warning", style={"color": "orange", "fontWeight": "bold"} + ) + ), + dbc.ModalBody( + "The Classical [diffusion] option executes a Markov Chain method locally for purposes of testing the demo interface. Kinks diffuse to annihilate, but are also created/destroyed by thermal fluctuations. The number of updates performed is set proportional to the annealing time. In the limit of no thermal noise, kinks diffuse to eliminate producing a power law, this process produces a power-law but for reasons independent of the Kibble-Zurek mechanism. In the noise mitigation demo we fit the impact of thermal fluctuations with a mixture of exponentials, by contrast with the quadratic fit appropriate to quantum dynamics.", + style={"color": "black", "fontSize": "16px"}, + ), + ], + id="warning-modal", + is_open=False, + ), + ], + fluid=True, + ) + ], + style={"paddingTop": "20px"} + ), ], - fluid=True, ) server = app.server app.config["suppress_callback_exceptions"] = True -# Callbacks Section - -@app.callback(Output("page-content", "children"), Input("url", "pathname")) -def display_page(pathname): - # If the user goes to the "/demo1" route - if pathname == "/demo1": - - return demo_layout("Kibble-Zurek") - # If the user goes to the "/demo2" route - elif pathname == "/demo2": +@dash.callback( + Output({"type": "problem-type", "index": ALL}, "className"), + Output("selected-problem", "data"), + Output("graph-radio-options", "children"), + Output("tooltips", "children"), + Output("anneal-duration-dropdown", "children"), + Output("coupling-strength-slider", "children"), + Output("main-header", "children"), + Output("main-description", "children"), + inputs=[ + Input({"type": "problem-type", "index": ALL}, "n_clicks"), + State("selected-problem", "data"), + ], +) +def update_selected_problem_type( + problem_options: list[int], + selected_problem: Union[ProblemType, int], +) -> tuple[str, int, list, list]: + """Updates the problem that is selected (KZ or KZ_NM), hides/shows settings accordingly, + and updates the navigation options to indicate the currently active problem option. + + Args: + problem_options: A list containing the number of times each problem option has been clicked. + selected_problem: The currently selected problem. + + Returns: + problem-type-class (list): A list of classes for the header problem navigation options. + selected-period (int): Either KZ (``0`` or ``ProblemType.KZ``) or + KZ_NM (``1`` or ``ProblemType.KZ_NM``). + graph-radio-options: The radio options for the graph. + tooltips: The tooltips for the settings form. + anneal-duration-dropdown: The duration dropdown setting. + coupling-strength-slider: The coupling strength slider setting. + main-header: The main header of the problem in the left column. + main-description: The description of the problem in the left column. 
+ """ + if ctx.triggered_id and selected_problem == ctx.triggered_id["index"]: + raise PreventUpdate + + nav_class_names = [""] * len(problem_options) + new_problem_type = ctx.triggered_id["index"] if ctx.triggered_id else ProblemType.KZ.value + + nav_class_names[new_problem_type] = "active" - return demo_layout("Zero-Noise") - else: - return demo_layout("Kibble-Zurek") + return ( + nav_class_names, + new_problem_type, + get_kz_graph_radio_options(ProblemType(new_problem_type)), + tooltips(ProblemType(new_problem_type)), + get_anneal_duration_setting(ProblemType(new_problem_type)), + get_coupling_strength_slider(ProblemType(new_problem_type)), + MAIN_HEADER if new_problem_type is ProblemType.KZ.value else MAIN_HEADER_NM, + DESCRIPTION if new_problem_type is ProblemType.KZ.value else DESCRIPTION_NM, + ) @app.callback( @@ -222,13 +257,7 @@ def display_page(pathname): def alert_no_solver(dummy): """Notify if no quantum computer is accessible.""" - trigger_id = dash.callback_context.triggered[0]["prop_id"].split(".")[0] - - if trigger_id == "btn_simulate": - if not client: - return True - - return False + return ctx.triggered_id == "btn_simulate" and not client @app.callback( @@ -236,34 +265,25 @@ def alert_no_solver(dummy): Output("coupling_strength", "disabled"), Output("spins", "options"), Output("qpu_selection", "disabled"), - Input("job_submit_state", "children"), - State("spins", "options"), + inputs=[ + Input("job_submit_state", "children"), + State("spins", "options"), + ], + prevent_initial_call=True, ) def disable_buttons(job_submit_state, spins_options): """Disable user input during job submissions.""" + running_states = ["EMBEDDING", "SUBMITTED", "PENDING", "IN_PROGRESS"] + done_states = ["COMPLETED", "CANCELLED", "FAILED"] + is_running = job_submit_state in running_states - trigger_id = dash.callback_context.triggered[0]["prop_id"].split(".")[0] - - if trigger_id != "job_submit_state": - return dash.no_update, dash.no_update, dash.no_update, dash.no_update - - if job_submit_state in ["EMBEDDING", "SUBMITTED", "PENDING", "IN_PROGRESS"]: - - for inx, _ in enumerate(spins_options): - - spins_options[inx]["disabled"] = True - - return True, True, spins_options, True - - elif job_submit_state in ["COMPLETED", "CANCELLED", "FAILED"]: - - for inx, _ in enumerate(spins_options): - spins_options[inx]["disabled"] = False + if job_submit_state not in running_states + done_states: + raise PreventUpdate - return False, False, spins_options, False + for inx, _ in enumerate(spins_options): + spins_options[inx]["disabled"] = is_running - else: - return dash.no_update, dash.no_update, dash.no_update, dash.no_update + return is_running, is_running, spins_options, is_running @app.callback( @@ -274,23 +294,18 @@ def disable_buttons(job_submit_state, spins_options): def set_schedule(qpu_name): """Set the schedule for the selected QPU.""" - trigger_id = dash.callback_context.triggered[0]["prop_id"].split(".")[0] - schedule_filename = "FALLBACK_SCHEDULE.csv" schedule_filename_style = {"color": "red", "fontSize": 12} - if trigger_id == "qpu_selection": - + if ctx.triggered_id: for filename in [ file for file in os.listdir("helpers") if "schedule.csv" in file.lower() ]: if qpu_name.split(".")[0] in filename: # Accepts & reddens older versions - schedule_filename = filename if qpu_name in filename: - schedule_filename_style = {"color": "white", "fontSize": 12} return schedule_filename, schedule_filename_style @@ -299,17 +314,16 @@ def set_schedule(qpu_name): @app.callback( Output("embeddings_cached", 
"data"), Output("embedding_is_cached", "value"), - Input("qpu_selection", "value"), - Input("embeddings_found", "data"), - State("embeddings_cached", "data"), - State("spins", "value"), + inputs=[ + Input("qpu_selection", "value"), + Input("embeddings_found", "data"), + State("embeddings_cached", "data"), + ] ) -def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): +def cache_embeddings(qpu_name, embeddings_found, embeddings_cached): """Cache embeddings for the selected QPU.""" - trigger_id = dash.callback_context.triggered[0]["prop_id"].split(".")[0] - - if trigger_id == "qpu_selection": + if ctx.triggered_id == "qpu_selection": embeddings_cached = {} # Wipe out previous QPU's embeddings @@ -317,14 +331,9 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): file for file in os.listdir("helpers") if ".json" in file and "emb_" in file ]: - if qpu_name == "Diffusion [Classical]": - _qpu_name = "Advantage_system6.4" - else: - _qpu_name = qpu_name + _qpu_name = "Advantage_system6.4" if qpu_name == "Diffusion [Classical]" else qpu_name - # splitting seemed unsafe since the graph can change between versions if _qpu_name in filename: - with open(f"helpers/{filename}", "r") as fp: embeddings_cached = json.load(fp) @@ -333,28 +342,21 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): # Validate that loaded embeddings' edges are still available on the selected QPU for length in list(embeddings_cached.keys()): - source_graph = dimod.to_networkx_graph( - create_bqm(num_spins=length) - ).edges + source_graph = dimod.to_networkx_graph(create_bqm(num_spins=length)).edges target_graph = qpus[_qpu_name].edges emb = embeddings_cached[length] if not is_valid_embedding(emb, source_graph, target_graph): - del embeddings_cached[length] - if trigger_id == "embeddings_found": - if not isinstance( - embeddings_found, str - ): # embeddings_found != 'needed' or 'not found' + if ctx.triggered_id == "embeddings_found": + if isinstance(embeddings_found, str): # embeddings_found = 'needed' or 'not found' + raise PreventUpdate - embeddings_cached = json_to_dict(embeddings_cached) - embeddings_found = json_to_dict(embeddings_found) - new_embedding = list(embeddings_found.keys())[0] - embeddings_cached[new_embedding] = embeddings_found[new_embedding] - - else: - return dash.no_update, dash.no_update + embeddings_cached = json_to_dict(embeddings_cached) + embeddings_found = json_to_dict(embeddings_found) + new_embedding = list(embeddings_found.keys())[0] + embeddings_cached[new_embedding] = embeddings_found[new_embedding] return embeddings_cached, list(embeddings_cached.keys()) @@ -365,22 +367,22 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached, spins): Output("zne_estimates", "data"), # update zne_estimates Output("modal_trigger", "data"), Output("kz_data", "data"), - Input("qpu_selection", "value"), - # Input("zne_graph_display", "value"), - Input("graph_display", "value"), - Input("coupling_strength", "value"), # previously input - Input("quench_schedule_filename", "children"), - Input("job_submit_state", "children"), - Input("job_id", "children"), - # Input("anneal_duration_zne", "value"), - Input("anneal_duration", "value"), - Input("spins", "value"), - Input("url", "pathname"), - State("embeddings_cached", "data"), - State("sample_vs_theory", "figure"), - State("coupling_data", "data"), # access previously stored data - State("zne_estimates", "data"), # Access ZNE estimates - State("kz_data", "data"), # get kibble zurek 
data point + inputs=[ + Input("qpu_selection", "value"), + Input("graph_display", "value"), + Input("coupling_strength", "value"), # previously input + Input("quench_schedule_filename", "children"), + Input("job_submit_state", "children"), + Input("job_id", "children"), + Input("anneal_duration", "value"), + Input("spins", "value"), + Input("selected-problem", "data"), + State("embeddings_cached", "data"), + State("sample_vs_theory", "figure"), + State("coupling_data", "data"), # access previously stored data + State("zne_estimates", "data"), # Access ZNE estimates + State("kz_data", "data"), # get kibble zurek data point + ] ) def display_graphics_kink_density( qpu_name, @@ -391,7 +393,7 @@ def display_graphics_kink_density( job_id, ta, spins, - pathname, + problem_type, embeddings_cached, figure, coupling_data, @@ -400,44 +402,38 @@ def display_graphics_kink_density( ): """Generate graphics for kink density based on theory and QPU samples.""" - trigger_id = dash.callback_context.triggered[0]["prop_id"].split(".")[0] ta_min = 2 ta_max = 350 + problem_type = ProblemType(problem_type) - if pathname == "/demo2": - + if problem_type is ProblemType.KZ_NM: # update the maximum anneal time for zne demo ta_max = 1500 - if trigger_id == "qpu_selection" or trigger_id == "spins": + if ctx.triggered_id == "qpu_selection" or ctx.triggered_id == "spins": coupling_data = {} zne_estimates = {} fig = plot_kink_densities_bg( graph_display, [ta_min, ta_max], - J_baseline, + J_BASELINE, schedule_filename, coupling_data, zne_estimates, - url="Demo2", + problem_type=problem_type, ) return fig, coupling_data, zne_estimates, False, kz_data - if trigger_id in [ - "zne_graph_display", - "coupling_strength", - "quench_schedule_filename", - ]: - + if ctx.triggered_id in ["zne_graph_display", "coupling_strength", "quench_schedule_filename"]: fig = plot_kink_densities_bg( graph_display, [ta_min, ta_max], - J_baseline, + J_BASELINE, schedule_filename, coupling_data, zne_estimates, - url="Demo2", + problem_type=problem_type, ) if graph_display == "coupling": @@ -447,111 +443,68 @@ def display_graphics_kink_density( return fig, coupling_data, zne_estimates, False, kz_data - if trigger_id == "job_submit_state": + if ctx.triggered_id == "job_submit_state": + if job_submit_state != "COMPLETED": + raise PreventUpdate - if job_submit_state == "COMPLETED": + embeddings_cached = embeddings_cached = json_to_dict(embeddings_cached) - embeddings_cached = embeddings_cached = json_to_dict(embeddings_cached) + sampleset_unembedded = get_samples( + client, job_id, spins, J, embeddings_cached[spins] + ) + _, kink_density = kink_stats(sampleset_unembedded, J) - sampleset_unembedded = get_samples( - client, job_id, spins, J, embeddings_cached[spins] - ) - _, kink_density = kink_stats(sampleset_unembedded, J) + # Calculate lambda (previously kappa) + # Added _ to avoid keyword restriction + lambda_ = calclambda_(J=J, qpu_name=qpu_name, schedule_name=schedule_filename, J_baseline=J_BASELINE) - # Calculate lambda (previously kappa) - # Added _ to avoid keyword restriction - lambda_ = calclambda_(J=J, qpu_name=qpu_name, schedule_name=schedule_filename, J_baseline=J_baseline) + fig = plot_kink_density( + graph_display, figure, kink_density, ta, J, lambda_ + ) - fig = plot_kink_density( - graph_display, figure, kink_density, ta, J, lambda_ - ) + # Initialize the list for this anneal_time if not present + ta_str = str(ta) + if ta_str not in coupling_data: + coupling_data[ta_str] = [] + # Append the new data point + coupling_data[ta_str].append( 
+ { + "lambda": lambda_, + "kink_density": kink_density, + "coupling_strength": J, + } + ) - # Initialize the list for this anneal_time if not present - ta_str = str(ta) - if ta_str not in coupling_data: - coupling_data[ta_str] = [] - # Append the new data point - coupling_data[ta_str].append( - { - "lambda": lambda_, - "kink_density": kink_density, - "coupling_strength": J, - } - ) + zne_estimates, modal_trigger = plot_zne_fitted_line( + fig, coupling_data, qpu_name, zne_estimates, graph_display, ta_str + ) - zne_estimates, modal_trigger = plot_zne_fitted_line( - fig, coupling_data, qpu_name, zne_estimates, graph_display, ta_str + if graph_display == "kink_density": + fig = plot_kink_densities_bg( + graph_display, + [ta_min, ta_max], + J_BASELINE, + schedule_filename, + coupling_data, + zne_estimates, + problem_type=problem_type, ) - if graph_display == "kink_density": - fig = plot_kink_densities_bg( - graph_display, - [ta_min, ta_max], - J_baseline, - schedule_filename, - coupling_data, - zne_estimates, - url="Demo2", - ) - - return fig, coupling_data, zne_estimates, modal_trigger, kz_data + return fig, coupling_data, zne_estimates, modal_trigger, kz_data - else: - return dash.no_update - - # use global J value fig = plot_kink_densities_bg( graph_display, [ta_min, ta_max], - J_baseline, + J_BASELINE, schedule_filename, coupling_data, zne_estimates, - url="Demo2", + problem_type=problem_type, ) return fig, coupling_data, zne_estimates, False, kz_data - else: - if ( - trigger_id == "qpu_selection" - or trigger_id == "spins" - or trigger_id == "coupling_strength" - ): - - kz_data = {"k": []} - fig = plot_kink_densities_bg( - graph_display, - [ta_min, ta_max], - J, - schedule_filename, - coupling_data, - zne_estimates, - kz_data=kz_data, - url="Demo1", - ) - - return fig, coupling_data, zne_estimates, False, kz_data - - if trigger_id == "job_submit_state": - - if job_submit_state == "COMPLETED": - - embeddings_cached = embeddings_cached = json_to_dict(embeddings_cached) - - sampleset_unembedded = get_samples( - client, job_id, spins, J, embeddings_cached[spins] - ) - _, kink_density = kink_stats(sampleset_unembedded, J) - - # Append the new data point - kz_data["k"].append((kink_density, ta)) - fig = plot_kink_density( - graph_display, figure, kink_density, ta, J, url="Demo1" - ) - return fig, coupling_data, zne_estimates, False, kz_data - - else: - return dash.no_update + if ctx.triggered_id in ["qpu_selection", "spins", "coupling_strength"]: + kz_data = {"k": []} fig = plot_kink_densities_bg( graph_display, [ta_min, ta_max], @@ -559,43 +512,70 @@ def display_graphics_kink_density( schedule_filename, coupling_data, zne_estimates, - kz_data, - url="Demo1", + kz_data=kz_data, + problem_type=problem_type, + ) + + return fig, coupling_data, zne_estimates, False, kz_data + + if ctx.triggered_id == "job_submit_state": + if job_submit_state != "COMPLETED": + raise PreventUpdate + + embeddings_cached = embeddings_cached = json_to_dict(embeddings_cached) + + sampleset_unembedded = get_samples( + client, job_id, spins, J, embeddings_cached[spins] + ) + _, kink_density = kink_stats(sampleset_unembedded, J) + + # Append the new data point + kz_data["k"].append((kink_density, ta)) + fig = plot_kink_density( + graph_display, figure, kink_density, ta, J, problem_type=problem_type ) return fig, coupling_data, zne_estimates, False, kz_data + fig = plot_kink_densities_bg( + graph_display, + [ta_min, ta_max], + J, + schedule_filename, + coupling_data, + zne_estimates, + kz_data, + problem_type=problem_type, + 
) + return fig, coupling_data, zne_estimates, False, kz_data + @app.callback( Output("spin_orientation", "figure"), - Input("spins", "value"), - Input("job_submit_state", "children"), - State("job_id", "children"), - State("coupling_strength", "value"), - State("embeddings_cached", "data"), + inputs=[ + Input("spins", "value"), + Input("job_submit_state", "children"), + State("job_id", "children"), + State("coupling_strength", "value"), + State("embeddings_cached", "data"), + ] ) def display_graphics_spin_ring(spins, job_submit_state, job_id, J, embeddings_cached): """Generate graphics for spin-ring display.""" - trigger_id = dash.callback_context.triggered[0]["prop_id"].split(".")[0] + if ctx.triggered_id == "job_submit_state": + if job_submit_state != "COMPLETED": + raise PreventUpdate - if trigger_id == "job_submit_state": - - if job_submit_state == "COMPLETED": - - embeddings_cached = embeddings_cached = json_to_dict(embeddings_cached) - sampleset_unembedded = get_samples( - client, job_id, spins, J, embeddings_cached[spins] - ) - kinks_per_sample, kink_density = kink_stats(sampleset_unembedded, J) - best_indx = np.abs(kinks_per_sample - kink_density).argmin() - best_sample = sampleset_unembedded.record.sample[best_indx] - - fig = plot_spin_orientation(num_spins=spins, sample=best_sample) - return fig - - else: + embeddings_cached = embeddings_cached = json_to_dict(embeddings_cached) + sampleset_unembedded = get_samples( + client, job_id, spins, J, embeddings_cached[spins] + ) + kinks_per_sample, kink_density = kink_stats(sampleset_unembedded, J) + best_indx = np.abs(kinks_per_sample - kink_density).argmin() + best_sample = sampleset_unembedded.record.sample[best_indx] - return dash.no_update + fig = plot_spin_orientation(num_spins=spins, sample=best_sample) + return fig fig = plot_spin_orientation(num_spins=spins, sample=None) return fig @@ -605,15 +585,18 @@ def display_graphics_spin_ring(spins, job_submit_state, job_id, J, embeddings_ca Output("job_id", "children"), Output("initial_warning", "data"), Output("warning-modal", "is_open"), - Input("job_submit_time", "children"), - State("qpu_selection", "value"), - State("spins", "value"), - State("coupling_strength", "value"), - State("anneal_duration", "value"), - State("embeddings_cached", "data"), - State("url", "pathname"), - State("quench_schedule_filename", "children"), - State("initial_warning", "data"), + inputs=[ + Input("job_submit_time", "children"), + State("qpu_selection", "value"), + State("spins", "value"), + State("coupling_strength", "value"), + State("anneal_duration", "value"), + State("embeddings_cached", "data"), + State("selected-problem", "data"), + State("quench_schedule_filename", "children"), + State("initial_warning", "data"), + ], + prevent_initial_call=True, ) def submit_job( job_submit_time, @@ -622,65 +605,56 @@ def submit_job( J, ta_ns, embeddings_cached, - pathname, + problem_type, filename, initial_warning, ): """Submit job and provide job ID.""" - trigger_id = dash.callback_context.triggered[0]["prop_id"].split(".")[0] - - if trigger_id == "job_submit_time": - solver = qpus[qpu_name] + solver = qpus[qpu_name] - bqm = create_bqm(num_spins=spins, coupling_strength=J) + bqm = create_bqm(num_spins=spins, coupling_strength=J) - embeddings_cached = json_to_dict(embeddings_cached) - embedding = embeddings_cached[spins] - annealing_time = ta_ns / 1000 + embeddings_cached = json_to_dict(embeddings_cached) + embedding = embeddings_cached[spins] + annealing_time = ta_ns / 1000 - if qpu_name == "Diffusion 
[Classical]": - - bqm_embedded = embed_bqm( - bqm, - embedding, - qpus["Diffusion [Classical]"].adjacency, - ) + if qpu_name == "Diffusion [Classical]": + bqm_embedded = embed_bqm( + bqm, + embedding, + qpus["Diffusion [Classical]"].adjacency, + ) - sampleset = qpus["Diffusion [Classical]"].sample( - bqm_embedded, annealing_time=annealing_time - ) - if not initial_warning: - return json.dumps(sampleset.to_serializable()), True, True - return json.dumps(sampleset.to_serializable()), True, False + sampleset = qpus["Diffusion [Classical]"].sample( + bqm_embedded, annealing_time=annealing_time + ) - else: + return json.dumps(sampleset.to_serializable()), True, not initial_warning - bqm_embedded = embed_bqm( - bqm, embedding, DWaveSampler(solver=solver.name).adjacency - ) - # ta_multiplier should be 1, unless (withNoiseMitigation and [J or schedule]) changes, shouldn't change for MockSampler. In which case recalculate as ta_multiplier=calclambda_(coupling_strength, schedule, J_baseline=-1.8) as a function of the correct schedule - # State("ta_multiplier", "value") ? Should recalculate when J or schedule changes IFF noise mitigation tab? - ta_multiplier = 1 + bqm_embedded = embed_bqm(bqm, embedding, DWaveSampler(solver=solver.name).adjacency) - if pathname == "/demo2": - ta_multiplier = calclambda_( - J, schedule_name=filename, J_baseline=J_baseline - ) + # ta_multiplier should be 1, unless (withNoiseMitigation and [J or schedule]) changes, + # shouldn't change for MockSampler. In which case recalculate as + # ta_multiplier=calclambda_(coupling_strength, schedule, J_baseline=-1.8) as a function of the + # correct schedule + # State("ta_multiplier", "value") ? Should recalculate when J or schedule changes IFF noise mitigation tab? + ta_multiplier = 1 - computation = solver.sample_bqm( - bqm=bqm_embedded, - fast_anneal=True, - annealing_time=annealing_time * ta_multiplier, - auto_scale=False, - answer_mode="raw", # Easier than accounting for num_occurrences - num_reads=100, - label=f"Examples - Kibble-Zurek Simulation, submitted: {job_submit_time}", - ) + if problem_type is ProblemType.KZ_NM.value: + ta_multiplier = calclambda_(J, schedule_name=filename, J_baseline=J_BASELINE) - return computation.wait_id(), False, False + computation = solver.sample_bqm( + bqm=bqm_embedded, + fast_anneal=True, + annealing_time=annealing_time * ta_multiplier, + auto_scale=False, + answer_mode="raw", # Easier than accounting for num_occurrences + num_reads=100, + label=f"Examples - Kibble-Zurek Simulation, submitted: {job_submit_time}", + ) - return dash.no_update + return computation.wait_id(), False, False @app.callback( @@ -691,15 +665,18 @@ def submit_job( Output("job_submit_state", "children"), Output("job_submit_time", "children"), Output("embeddings_found", "data"), - Input("btn_simulate", "n_clicks"), - Input("wd_job", "n_intervals"), - State("job_id", "children"), - State("job_submit_state", "children"), - State("job_submit_time", "children"), - State("embedding_is_cached", "value"), - State("spins", "value"), - State("qpu_selection", "value"), - State("embeddings_found", "data"), + inputs=[ + Input("btn_simulate", "n_clicks"), + Input("wd_job", "n_intervals"), + State("job_id", "children"), + State("job_submit_state", "children"), + State("job_submit_time", "children"), + State("embedding_is_cached", "value"), + State("spins", "value"), + State("qpu_selection", "value"), + State("embeddings_found", "data"), + ], + prevent_initial_call=True, ) def simulate( dummy1, @@ -714,23 +691,9 @@ def simulate( ): 
"""Manage simulation: embedding, job submission.""" - trigger_id = dash.callback_context.triggered[0]["prop_id"].split(".")[0] - - if not any(trigger_id == input for input in ["btn_simulate", "wd_job"]): - return ( - dash.no_update, - dash.no_update, - dash.no_update, - dash.no_update, - dash.no_update, - dash.no_update, - dash.no_update, - ) - - if trigger_id == "btn_simulate": + if ctx.triggered_id == "btn_simulate": if spins in cached_embedding_lengths or qpu_name == "Diffusion [Classical]": - submit_time = datetime.datetime.now().strftime("%c") if qpu_name == "Diffusion [Classical]": # Hack to fix switch from SA to QPU submit_time = "SA" @@ -738,7 +701,6 @@ def simulate( embedding = dash.no_update else: - submit_time = dash.no_update job_submit_state = "EMBEDDING" embedding = "needed" @@ -757,12 +719,10 @@ def simulate( ) if job_submit_state == "EMBEDDING": - submit_time = dash.no_update embedding = dash.no_update if embeddings_found == "needed": - try: embedding = find_one_to_one_embedding(spins, qpus[qpu_name].edges) if embedding: @@ -778,16 +738,12 @@ def simulate( embedding = "not found" else: # Found embedding last WD, so is cached, so now can submit job - submit_time = datetime.datetime.now().strftime("%c") job_submit_state = "SUBMITTED" return True, False, 0.2 * 1000, 0, job_submit_state, submit_time, embedding - if any( - job_submit_state == status for status in ["SUBMITTED", "PENDING", "IN_PROGRESS"] - ): - + if job_submit_state in ["SUBMITTED", "PENDING", "IN_PROGRESS"]: job_submit_state = get_job_status(client, job_id, job_submit_time) if not job_submit_state: job_submit_state = "SUBMITTED" @@ -797,10 +753,7 @@ def simulate( return True, False, wd_time, 0, job_submit_state, dash.no_update, dash.no_update - if any( - job_submit_state == status for status in ["COMPLETED", "CANCELLED", "FAILED"] - ): - + if job_submit_state in ["COMPLETED", "CANCELLED", "FAILED"]: disable_btn = False disable_watchdog = True @@ -814,8 +767,8 @@ def simulate( dash.no_update, ) - else: # Exception state: should only ever happen in testing - return False, True, 0, 0, "ERROR", dash.no_update, dash.no_update + # Exception state: should only ever happen in testing + return False, True, 0, 0, "ERROR", dash.no_update, dash.no_update @app.callback( @@ -826,58 +779,12 @@ def simulate( def set_progress_bar(job_submit_state): """Update progress bar for job submissions.""" - trigger_id = dash.callback_context.triggered[0]["prop_id"].split(".")[0] - - if trigger_id == "job_submit_state": - - return ( - job_bar_display[job_submit_state][0], - job_bar_display[job_submit_state][1], - ) + if ctx.triggered_id: + return job_bar_display[job_submit_state][0], job_bar_display[job_submit_state][1] return job_bar_display["READY"][0], job_bar_display["READY"][1] -@app.callback( - *[ - Output(f"tooltip_{target}", component_property="style") - for target in tool_tips.keys() - ], - Input("tooltips_show", "value"), -) -def activate_tooltips(tooltips_show): - """Activate or hide tooltips.""" - - trigger = dash.callback_context.triggered - trigger_id = trigger[0]["prop_id"].split(".")[0] - - if trigger_id == "tooltips_show": - if tooltips_show == "off": - return ( - dict(display="none"), - dict(display="none"), - dict(display="none"), - dict(display="none"), - dict(display="none"), - dict(display="none"), - dict(display="none"), - dict(display="none"), - dict(display="none"), - ) - - return ( - dict(), - dict(), - dict(), - dict(), - dict(), - dict(), - dict(), - dict(), - dict(), - ) - - @app.callback( Output("error-modal", 
"is_open"), Input("modal_trigger", "data"), diff --git a/assets/custom.css b/assets/custom.css index 13f8a25..3b9f5d2 100644 --- a/assets/custom.css +++ b/assets/custom.css @@ -5,4 +5,41 @@ body { .row { flex-wrap: nowrap; -} \ No newline at end of file +} + +.nav-link { + cursor: pointer; +} + +.nav-link.active { + opacity: 1; + background: rgba(255, 255, 255, 0.2); +} + +.nav-link.active:hover { + filter: none; + cursor: default; +} + +h1 { + font-size: 1.5rem; + color: rgb(243, 120, 32); +} + +h3 { + font-size: 1.25rem; + color: rgb(243, 120, 32); + margin: 15px 0 0 15px; +} + +label { + color: rgb(3, 184, 255); + margin-top: 10px; + font-weight: 600; + font-size: 1rem; +} + +p { + color: white; + font-size: 14px; +} diff --git a/config.yaml b/config.yaml deleted file mode 100644 index 334137d..0000000 --- a/config.yaml +++ /dev/null @@ -1,3 +0,0 @@ -use_classical: YES -J_baseline: -1.8 - diff --git a/demo_configs.py b/demo_configs.py new file mode 100644 index 0000000..880a3e5 --- /dev/null +++ b/demo_configs.py @@ -0,0 +1,34 @@ +# Copyright 2025 D-Wave +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This file stores input parameters for the app.""" + +THUMBNAIL = "assets/dwave_logo.png" + +APP_TITLE = "Coherent Annealing" +MAIN_HEADER = "Coherent Annealing: KZ Simulation" +DESCRIPTION = """\ +Use a quantum computer to simulate the formation of topological defects in a 1D ring +of spins undergoing a phase transition, described by the Kibble-Zurek mechanism. +""" +MAIN_HEADER_NM = "Coherent Annealing: Zero-Noise Extrapolation" +DESCRIPTION_NM = """\ +Simulate zero-temperature and zero-time extrapolations on a quantum computer using +the Kibble-Zurek mechanism. Fitting occurs once three or more data points are +plotted, with -1.8 representing the highest energy scale corresponding to the +lowest noise level. +""" + +USE_CLASSICAL = True +J_BASELINE = -1.8 diff --git a/helpers/kz_calcs.py b/helpers/kz_calcs.py index 818e4bd..aeddb4e 100644 --- a/helpers/kz_calcs.py +++ b/helpers/kz_calcs.py @@ -1,4 +1,4 @@ -# Copyright 2024 D-Wave +# Copyright 2025 D-Wave # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -25,15 +25,14 @@ def theoretical_kink_density_prefactor(J, schedule_name=None): - """Time rescaling factor + """Time rescaling factor. Calculate the rescaling of time necessary to replicate the behaviour of a linearized schedule at coupling strength 1. - See: "Error Mitigation in Quantum Annealing" + See: "Error Mitigation in Quantum Annealing". Args: J: Coupling strength between the spins of the ring. - schedule_name: Filename of anneal schedule. Used to compensate for schedule energy overestimate. @@ -43,16 +42,12 @@ def theoretical_kink_density_prefactor(J, schedule_name=None): # See the Code section of the README.md file for an explanation of the # following code. 
- if schedule_name is None: - schedule = pd.read_csv("helpers/FALLBACK_SCHEDULE.csv") - else: - schedule = pd.read_csv(f"helpers/{schedule_name}") - - COMPENSATION_SCHEDULE_ENERGY = ( - 0.8 - if (schedule_name is not None and "Advantage_system" in schedule_name) - else 1.0 - ) + if not schedule_name: + schedule_name = "FALLBACK_SCHEDULE.csv" + + schedule = pd.read_csv(f"helpers/{schedule_name}") + + COMPENSATION_SCHEDULE_ENERGY = 0.8 if "Advantage_system" in schedule_name else 1.0 A = COMPENSATION_SCHEDULE_ENERGY * schedule["A(s) (GHz)"] B = COMPENSATION_SCHEDULE_ENERGY * schedule["B(s) (GHz)"] @@ -71,16 +66,12 @@ def theoretical_kink_density_prefactor(J, schedule_name=None): def theoretical_kink_density(annealing_times_ns, J=None, schedule_name=None, b=None): - """ - Calculate the kink density as a function of anneal time + """Calculate the kink density as a function of anneal time. Args: annealing_times_ns: Iterable of annealing times, in nanoseconds. - - b: A timescale based on linearization of the schedule at criticality - + b: A timescale based on linearization of the schedule at criticality. J: Coupling strength between the spins of the ring. - schedule_name: Filename of anneal schedule. Used to compensate for schedule energy overestimate. @@ -90,9 +81,7 @@ def theoretical_kink_density(annealing_times_ns, J=None, schedule_name=None, b=N if b is None: b = theoretical_kink_density_prefactor(J, schedule_name) - return np.power([1e-9 * t * b for t in annealing_times_ns], -0.5) / ( - 2 * np.pi * np.sqrt(2) - ) + return np.power([1e-9 * t * b for t in annealing_times_ns], -0.5) / (2 * np.pi * np.sqrt(2)) def calc_kappa(J, J_baseline=-1.8): @@ -114,16 +103,15 @@ def calclambda_(J, *, qpu_name=None, schedule_name=None, J_baseline=-1.8): # Fallback, assume ideal linear schedule kappa = calc_kappa(J, J_baseline) return kappa - else: - b_ref = theoretical_kink_density_prefactor(J_baseline, schedule_name) - b = theoretical_kink_density_prefactor(J, schedule_name) - return b_ref / b + b_ref = theoretical_kink_density_prefactor(J_baseline, schedule_name) + b = theoretical_kink_density_prefactor(J, schedule_name) + + return b_ref / b def kink_stats(sampleset, J): - """ - Calculate kink density for the sample set. + """Calculate kink density for the sample set. Calculation is the number of sign switches per sample divided by the length of the ring for ferromagnetic coupling. For anti-ferromagnetic coupling, @@ -131,7 +119,6 @@ def kink_stats(sampleset, J): Args: sampleset: dimod sample set. - J: Coupling strength between the spins of the ring. Returns: @@ -144,7 +131,6 @@ def kink_stats(sampleset, J): ) if J < 0: - switches_per_sample = np.count_nonzero(sign_switches, 1) kink_density = np.mean(switches_per_sample) / sampleset.record.sample.shape[1] diff --git a/helpers/layouts_cards.py b/helpers/layouts_cards.py index ee68500..8a60dac 100644 --- a/helpers/layouts_cards.py +++ b/helpers/layouts_cards.py @@ -1,4 +1,4 @@ -# Copyright 2024 D-Wave +# Copyright 2025 D-Wave # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
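(Aside: the kink-density calculation that `kink_stats` in `helpers/kz_calcs.py` above performs reduces to counting sign switches around the ring and dividing by the ring length. The following is a minimal, self-contained sketch of that idea for the ferromagnetic case (J < 0), using a hypothetical hard-coded sample array in place of a dimod sample set; it is an illustration, not part of the patch.)

import numpy as np

# Hypothetical +/-1 spin samples (rows) on a 6-spin ring.
samples = np.array([
    [-1, -1, -1, +1, +1, +1],
    [-1, +1, -1, +1, -1, +1],
])

# A kink is a sign switch between neighbouring spins; the ring wraps around,
# so the last spin is also compared with the first (np.roll shifts by one).
neighbour_product = samples * np.roll(samples, -1, axis=1)
switches_per_sample = np.count_nonzero(neighbour_product == -1, axis=1)

# Kink density: mean number of switches per sample, divided by the ring length.
kink_density = switches_per_sample.mean() / samples.shape[1]
print(switches_per_sample, kink_density)  # [2 6] 0.666...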
@@ -15,6 +15,8 @@ from dash import dcc, html import dash_bootstrap_components as dbc +from demo_configs import DESCRIPTION, MAIN_HEADER +from src.demo_enums import ProblemType import plotly.graph_objects as go from helpers.layouts_components import * @@ -24,182 +26,105 @@ "graphs_card", ] -control_header_style = {"color": "rgb(3, 184, 255)", "marginTop": "10px"} - - -def control_card(solvers={}, init_job_status="READY", demo_type="Kibble-Zurek"): +def control_card(solvers={}, init_job_status="READY"): """Lay out the configuration and job-submission card. Args: - solvers: Dict of QPUs in the format {name: solver}. - init_job_status: Initial status of the submission progress bar. Returns: - Dash card. """ - if init_job_status == "NO SOLVER": - job_status_color = "red" - else: - job_status_color = "white" - - if demo_type == "Kibble-Zurek": - demo_title = "Coherent Annealing: KZ Simulation" - demo_description = html.P( - """ - Use a quantum computer to simulate the formation of topological defects in a 1D ring - of spins undergoing a phase transition, described by the Kibble-Zurek mechanism. - """, - style={"color": "white", "fontSize": 14}, - ) - demo_anneal_duration = get_config_anneal_duration(demo_type) - - else: - demo_title = "Coherent Annealing: Zero-Noise Extrapolation" - demo_description = html.P( - [ - """ -Simulate zero-temperature and zero-time extrapolations on a quantum computer using the Kibble-Zurek mechanism. Fitting occurs once three or more data points are plotted, with -1.8 representing the highest energy scale corresponding to the lowest noise level. Learn more in the -""", - html.A( - "paper", - href="https://arxiv.org/abs/2311.01306", # Replace with the actual URL - target="_blank", # Opens the link in a new tab - style={ - "color": "rgb(3, 184, 255)", - "textDecoration": "none", - }, # Optional styling - ), - ".", - ], - style={"color": "white", "fontSize": 14}, - ) - demo_anneal_duration = get_config_anneal_duration(demo_type) + job_status_color = "red" if init_job_status == "NO SOLVER" else "white" return dbc.Card( [ + html.H1(MAIN_HEADER, id="main-header"), + html.P(DESCRIPTION, id="main-description"), + html.Label("Spins"), + html.Div(config_spins), + html.Label("Coupling Strength (J)"), + html.Div(get_coupling_strength_slider(ProblemType.KZ), id="coupling-strength-slider"), + html.Label("Quench Duration [ns]"), + html.Div(get_anneal_duration_setting(ProblemType.KZ), id="anneal-duration-dropdown"), + html.Label("QPU"), + html.Div(config_qpu_selection(solvers)), + html.P( + [ + "Quench Schedule: ", + html.Span( + id="quench_schedule_filename", + style={"color": "white", "fontSize": 10}, + ), + ], + style={"marginTop": "10px"}, + ), + html.Label("Cached Embeddings"), + embeddings, + html.Label("Simulation"), dbc.Row( [ dbc.Col( - [ - html.H4( - demo_title, - className="card-title", - style={"color": "rgb(243, 120, 32)"}, - ), - demo_description, - html.H5("Spins", style=control_header_style), - html.Div([config_spins]), - html.H5( - "Coupling Strength (J)", style=control_header_style - ), - html.Div([get_config_coupling_strength(demo_type)]), - html.H5("Quench Duration [ns]", style=control_header_style), - html.Div([demo_anneal_duration]), - html.H5("QPU", style=control_header_style), - html.Div( - [ - config_qpu_selection(solvers), - ] - ), - html.P( - [ - "Quench Schedule: ", - html.Span( - id="quench_schedule_filename", - children="", - style={"color": "white", "fontSize": 10}, - ), - ], - style={"color": "white", "marginTop": "10px"}, - ), - html.H5("Cached 
Embeddings", style=control_header_style), - embeddings, - html.H5("Simulation", style=control_header_style), - dbc.Row( - [ - dbc.Col( - dbc.Button( - "Run", - id="btn_simulate", - color="primary", - className="me-2", # Adds spacing between buttons - style={ - "marginTop": "10px" - }, # Adds some vertical spacing - ), - width="auto", - ), - ], - justify="start", # Aligns buttons to the left - align="center", # Vertically centers buttons - ), - dbc.Progress( - id="bar_job_status", - value=0, - color="link", - className="mb-3", - style={"width": "60%"}, - ), - html.P( - [ - "Status: ", - html.Span( - id="job_submit_state", - children=f"{init_job_status}", - style={ - "color": job_status_color, - "fontSize": 12, - "marginTop": "10px", - }, - ), - ], - style={"color": "white", "marginTop": "5px"}, - ), - html.P( - "Tooltips (hover over fields for descriptions)", - style={ - "color": "white", - "fontSize": 12, - "marginBottom": 5, - "marginTop": "10px", - }, - ), - tooltips_activate, - # Non-displayed section - dcc.Interval( - id="wd_job", - interval=None, - n_intervals=0, - disabled=True, - max_intervals=1, - ), - # Used for storing job status. Can probably be replaced with dcc.Store. - html.P( - id="job_submit_time", - children="", - style=dict(display="none"), - ), - html.P( - id="job_id", children="", style=dict(display="none") - ), - dcc.Store( - id="embeddings_cached", - storage_type="memory", - data={}, - ), - dcc.Store( - id="embeddings_found", - storage_type="memory", - data={}, - ), - ] + dbc.Button( + "Run", + id="btn_simulate", + color="primary", + className="me-2", # Adds spacing between buttons + style={ + "marginTop": "10px" + }, + ), + width="auto", ), ], - id="tour_settings_row", + justify="start", # Aligns buttons to the left + align="center", # Vertically centers buttons + ), + dbc.Progress( + id="bar_job_status", + value=0, + color="link", + className="mb-3", + style={"width": "60%"}, + ), + html.P( + [ + "Status: ", + html.Span( + id="job_submit_state", + children=f"{init_job_status}", + style={ + "color": job_status_color, + "fontSize": 12, + "marginTop": "10px", + }, + ), + ], + style={"marginTop": "5px"}, + ), + tooltips_activate, + # Non-displayed section + dcc.Interval( + id="wd_job", + interval=None, + n_intervals=0, + disabled=True, + max_intervals=1, + ), + # Used for storing job status. Can probably be replaced with dcc.Store. + html.P(id="job_submit_time", style={"display": "none"}), + html.P(id="job_id", style={"display": "none"}), + dcc.Store( + id="embeddings_cached", + storage_type="memory", + data={}, + ), + dcc.Store( + id="embeddings_found", + storage_type="memory", + data={}, ), ], body=True, @@ -208,70 +133,22 @@ def control_card(solvers={}, init_job_status="READY", demo_type="Kibble-Zurek"): ) -graphic_header_style = { - "color": "rgb(243, 120, 32)", - "margin": "15px 0px 0px 15px", - "backgroundColor": "white", -} - - -def graphs_card(demo_type="Kibble-Zurek"): - demo_graph = get_config_kz_graph(demo_type) +def graphs_card(problem_type=ProblemType.KZ): return dbc.Card( [ - dbc.Row( - [ - dbc.Col( - [ - html.H5( - "Spin States of Qubits in a 1D Ring", - style=graphic_header_style, - ), - ] - ), - ] - ), - dbc.Row( - [ - dbc.Col( - [ - dcc.Graph( - id="spin_orientation", - figure=go.Figure(), - style={"height": "40vh", "minHeight": "20rem"}, - ), - ], - width=12, - ), - ], - ), - dbc.Row( - [ - dbc.Col( - [ - html.H5( - "QPU Samples Vs. 
Kibble-Zurek Prediction", - style=graphic_header_style, - ), - html.Div([demo_graph]), - ] - ), - ] - ), - dbc.Row( - [ - dbc.Col( - [ - dcc.Graph( - id="sample_vs_theory", - figure=go.Figure(), - style={"height": "40vh", "minHeight": "20rem"}, - ) - ], - width=12, - ), - ] + html.H3("Spin States of Qubits in a 1D Ring"), + dcc.Graph( + id="spin_orientation", + figure=go.Figure(), + style={"height": "40vh", "minHeight": "20rem"}, ), + html.H3("QPU Samples Vs. Kibble-Zurek Prediction"), + html.Div(get_kz_graph_radio_options(problem_type), id="graph-radio-options"), + dcc.Graph( + id="sample_vs_theory", + figure=go.Figure(), + style={"height": "40vh", "minHeight": "20rem"}, + ) ], color="white", style={"height": "100%", "minHeight": "50rem"}, diff --git a/helpers/layouts_components.py b/helpers/layouts_components.py index 27c99df..36acd17 100644 --- a/helpers/layouts_components.py +++ b/helpers/layouts_components.py @@ -1,4 +1,4 @@ -# Copyright 2024 D-Wave +# Copyright 2025 D-Wave # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,15 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. +from itertools import chain import dash_bootstrap_components as dbc -from dash.dcc import Checklist, Dropdown, Link, RadioItems, Slider, Input from dash import html, dcc +from src.demo_enums import ProblemType __all__ = [ - "get_config_anneal_duration", - "get_config_kz_graph", + "get_anneal_duration_setting", + "get_kz_graph_radio_options", "config_spins", - "get_config_coupling_strength", + "get_coupling_strength_slider", "config_qpu_selection", "dbc_modal", "embeddings", @@ -32,8 +33,8 @@ ring_lengths = [512, 1024, 2048] -def get_config_anneal_duration(demo_type): - if demo_type == "Zero-Noise": +def get_anneal_duration_setting(problem_type): + if problem_type is ProblemType.KZ_NM: return dcc.Dropdown( id="anneal_duration", options=[ @@ -50,39 +51,21 @@ def get_config_anneal_duration(demo_type): value=80, # default value style={"max-width": "95%"}, ) - else: - return dbc.Input( - id="anneal_duration", - type="number", - min=5, - max=100, - step=1, - value=7, - style={"max-width": "95%"}, - ) + return dbc.Input( + id="anneal_duration", + type="number", + min=5, + max=100, + step=1, + value=7, + style={"max-width": "95%"}, + ) -def get_config_kz_graph(demo_type): - if demo_type == "Kibble-Zurek": - return RadioItems( - id="graph_display", - options=[ - {"label": "Both", "value": "both", "disabled": False}, - {"label": "Kink density", "value": "kink_density", "disabled": False}, - {"label": "Schedule", "value": "schedule", "disabled": False}, - ], - value="both", - inputStyle={"margin-right": "10px", "margin-bottom": "5px"}, - labelStyle={ - "color": "rgb(3, 184, 255)", - "font-size": 12, - "display": "inline-block", - "marginLeft": 20, - }, - inline=True, - ) - else: - return RadioItems( + +def get_kz_graph_radio_options(problem_type): + if problem_type is ProblemType.KZ_NM: + return dcc.RadioItems( id="graph_display", options=[ { @@ -107,8 +90,26 @@ def get_config_kz_graph(demo_type): inline=True, ) + return dcc.RadioItems( + id="graph_display", + options=[ + {"label": "Both", "value": "both", "disabled": False}, + {"label": "Kink density", "value": "kink_density", "disabled": False}, + {"label": "Schedule", "value": "schedule", "disabled": False}, + ], + value="both", + inputStyle={"margin-right": "10px", "margin-bottom": "5px"}, + labelStyle={ + "color": 
"rgb(3, 184, 255)", + "font-size": 12, + "display": "inline-block", + "marginLeft": 20, + }, + inline=True, + ) -config_spins = RadioItems( + +config_spins = dcc.RadioItems( id="spins", options=[ {"label": f"{length}", "value": length, "disabled": False} @@ -122,81 +123,52 @@ def get_config_kz_graph(demo_type): "display": "inline-block", "marginLeft": 20, }, - inline=True, # Currently requires above 'inline-block' + inline=True, ) j_marks = { - round(0.1 * val, 1): ( - {"label": f"{round(0.1*val, 1)}", "style": {"color": "white"}} - if round(0.1 * val, 0) != 0.1 * val - else {"label": f"{round(0.1*val)}", "style": {"color": "white"}} + round(0.1 * val) if val % 10 == 0 else round(0.1 * val, 1): ( + {"label": f"{round(0.1*val)}", "style": {"color": "white"}} + if val % 10 == 0 + else {"label": f"{round(0.1*val, 1)}", "style": {"color": "white"}} ) - for val in range(-18, 0, 2) + for val in chain(range(-20, 0, 2), range(2, 12, 2)) } -j_marks.update( - { - round(0.1 * val, 1): ( - {"label": f"{round(0.1*val, 1)}", "style": {"color": "white"}} - if round(0.1 * val, 0) != 0.1 * val - else {"label": f"{round(0.1*val)}", "style": {"color": "white"}} - ) - for val in range(2, 10, 2) - } -) -# Dash Slider has some issue with int values having a zero after the decimal point -j_marks[-2] = {"label": "-2", "style": {"color": "white"}} -del j_marks[-1.0] -j_marks[-1] = {"label": "-1", "style": {"color": "white"}} -j_marks[1] = {"label": "1", "style": {"color": "white"}} - -def get_config_coupling_strength(demo_type): - if demo_type == "Zero-Noise": - return dbc.Row( +def get_coupling_strength_slider(problem_type): + if problem_type is ProblemType.KZ_NM: + return html.Div( [ - dbc.Col( - html.Div( - [ - Slider( - id="coupling_strength", - value=-1.8, - marks=j_marks, - min=-1.8, - max=-0.6, - step=None, - tooltip={"placement": "bottom", "always_visible": True}, - ) - ] - ), - ), + dcc.Slider( + id="coupling_strength", + value=-1.8, + marks=j_marks, + min=-1.8, + max=-0.6, + step=None, + tooltip={"placement": "bottom", "always_visible": True}, + ) ] ) - return dbc.Row( + + return html.Div( [ - dbc.Col( - html.Div( - [ - Slider( - id="coupling_strength", - value=-1.4, - marks=j_marks, - step=None, - tooltip={"placement": "bottom", "always_visible": True}, - ) - ] - ), - ), + dcc.Slider( + id="coupling_strength", + value=-1.4, + marks=j_marks, + step=None, + tooltip={"placement": "bottom", "always_visible": True}, + ) ] ) -def config_qpu_selection(solvers, default="Diffusion [Classical]"): - default = "Diffusion [Classical]" if "Diffusion [Classical]" in solvers else None - return Dropdown( +def config_qpu_selection(solvers): + return dcc.Dropdown( id="qpu_selection", options=[{"label": qpu_name, "value": qpu_name} for qpu_name in solvers], placeholder="Select a quantum computer", - # value=default ) @@ -222,10 +194,10 @@ def config_qpu_selection(solvers, default="Diffusion [Classical]"): html.Div( [ """ -If you are running locally, set environment variables or a -dwave-cloud-client configuration file as described in the -""", - Link( + If you are running locally, set environment variables or a + dwave-cloud-client configuration file as described in the + """, + dcc.Link( children=[html.Div(" Ocean")], href="https://docs.ocean.dwavesys.com/en/stable/overview/sapi.html", style={"display": "inline-block"}, @@ -237,7 +209,7 @@ def config_qpu_selection(solvers, default="Diffusion [Classical]"): html.Div( [ "If you are running in an online IDE, see the ", - Link( + dcc.Link( children=[html.Div("system 
documentation")], href="https://docs.dwavesys.com/docs/latest/doc_leap_dev_env.html", style={"display": "inline-block"}, @@ -271,7 +243,7 @@ def dbc_modal(name): ] -embeddings = Checklist( +embeddings = dcc.Checklist( options=[ { "label": html.Div( @@ -289,7 +261,7 @@ def dbc_modal(name): inline=True, ) -tooltips_activate = RadioItems( +tooltips_activate = dcc.RadioItems( id="tooltips_show", options=[ { @@ -309,5 +281,5 @@ def dbc_modal(name): "display": "inline-block", "marginLeft": 20, }, - inline=True, # Currently requires above 'inline-block' + inline=True, ) diff --git a/helpers/plots.py b/helpers/plots.py index 1318456..509433f 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -1,4 +1,4 @@ -# Copyright 2024 D-Wave +# Copyright 2025 D-Wave # # Licensed under the A_ghzpache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,11 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -from dash import no_update +from src.demo_enums import ProblemType import numpy as np import pandas as pd import plotly.graph_objects as go from helpers.qa import fitted_function +from dash.exceptions import PreventUpdate from helpers.kz_calcs import theoretical_kink_density @@ -66,7 +67,7 @@ def plot_kink_densities_bg( coupling_data, zne_estimates, kz_data=None, - url=None, + problem_type=None, ): """ Plot the background of theoretical kink density and QPU energy scales. @@ -98,10 +99,10 @@ def plot_kink_densities_bg( predicted kink densities and/or QPU energy scales based on the specified display mode. """ - if schedule_name: - schedule = pd.read_csv(f"helpers/{schedule_name}") - else: - schedule = pd.read_csv("helpers/FALLBACK_SCHEDULE.csv") + if not schedule_name: + schedule_name = "FALLBACK_SCHEDULE.csv" + + schedule = pd.read_csv(f"helpers/{schedule_name}") A_ghz = schedule["A(s) (GHz)"] B_ghz = schedule["B(s) (GHz)"] @@ -207,7 +208,7 @@ def plot_kink_densities_bg( xaxis=x_axis1, yaxis=y_axis1, ) - if url == "Demo2": + if problem_type is ProblemType.KZ_NM: _coupling_label = { -1.8: False, -1.6: False, @@ -274,15 +275,14 @@ def plot_kink_densities_bg( ) elif display == "schedule": - fig_layout = go.Layout( xaxis=x_axis2, yaxis=y_axis2, ) fig_data = [energy_transverse, energy_problem] - elif display == "coupling": + elif display == "coupling": fig_layout = go.Layout( xaxis3=x_axis3, yaxis1=y_axis1, @@ -325,7 +325,6 @@ def plot_kink_densities_bg( ) else: # Display both plots together - x_axis2.update({"overlaying": "x1"}) y_axis2.update({"overlaying": "y1"}) @@ -339,7 +338,7 @@ def plot_kink_densities_bg( fig_data = [predicted_plus, predicted_minus, energy_transverse, energy_problem] # Add previously computed kz_data points - if kz_data is not None and "k" in kz_data: + if kz_data and "k" in kz_data: for pair in kz_data["k"]: fig_data.append( go.Scatter( @@ -352,12 +351,12 @@ def plot_kink_densities_bg( showlegend=False, ) ) + fig = go.Figure(data=fig_data, layout=fig_layout) fig.update_layout(legend=dict(x=0.1, y=0.1), margin=dict(b=5, l=5, r=20, t=10)) if display != "schedule" and display != "coupling": - fig.add_annotation( xref="x", yref="y", @@ -388,7 +387,7 @@ def plot_kink_densities_bg( def plot_kink_density( - display, fig_dict, kink_density, anneal_time, J, lambda_=None, url=None + display, fig_dict, kink_density, anneal_time, J, lambda_=None, problem_type=None ): """ Add a kink density marker from QPU samples to an existing plot. 
@@ -415,11 +414,11 @@ def plot_kink_density(
     - Otherwise, returns the updated Plotly figure with the new kink density marker.
     """
     if display == "schedule":
-        return no_update
+        raise PreventUpdate

     fig = go.Figure(fig_dict)

-    if url == "Demo1":
+    if problem_type is ProblemType.KZ:
         fig.add_trace(
             go.Scatter(
                 x=[anneal_time],
@@ -462,10 +461,7 @@ def plot_kink_density(
         )
         return fig

-    if display == "kink_density":
-        color = coupling_color_theme[J]
-    else:
-        color = "black"
+    color = coupling_color_theme[J] if display == "kink_density" else "black"

     if not coupling_label[J]:
         legend = True
@@ -521,12 +517,10 @@ def plot_spin_orientation(num_spins=512, sample=None):
     x, y = z * np.cos(5 * z), z * np.sin(5 * z)

     if sample is None:
-
         cones_red = cones_blue = np.ones(num_spins, dtype=bool)
         num_cones_red = num_cones_blue = num_spins

     else:
-
         cones_red = ~np.isnan(np.where(sample == 1, z, np.nan))
         cones_blue = ~cones_red
         num_cones_red = np.count_nonzero(cones_red)
@@ -634,8 +628,8 @@ def plot_zne_fitted_line(
         due to ill conditioned data for fitting.
     """
     modal_trigger = False
-    if ta_str in coupling_data.keys() and len(coupling_data[ta_str]) > 2:
+    if ta_str in coupling_data.keys() and len(coupling_data[ta_str]) > 2:
         data_points = coupling_data[ta_str]
         x = np.array([point["lambda"] for point in data_points])
         y = np.array([point["kink_density"] for point in data_points])
@@ -643,12 +637,12 @@ def plot_zne_fitted_line(
         # Ensure there are enough unique x values for fitting
         if len(np.unique(x)) > 1:
             # Fit a 1st degree polynomial (linear fit)
-            if qpu_name == "Diffusion [Classical]":
-                # Fancy non-linear function
-                y_func_x = fitted_function(x, y, method="mixture_of_exponentials")
-            else:
-                # Pure quadratic (see paper) # y = a + b x^2
-                y_func_x = fitted_function(x, y, method="pure_quadratic")
+            # Fancy non-linear function for the classical diffusion sampler;
+            # pure quadratic, y = a + b x^2 (see paper), otherwise
+            y_func_x = fitted_function(
+                x,
+                y,
+                method="mixture_of_exponentials"
+                if qpu_name == "Diffusion [Classical]"
+                else "pure_quadratic",
+            )

             if y_func_x is not None:
                 zne_estimates[ta_str] = y_func_x(0)
@@ -656,23 +650,16 @@ def plot_zne_fitted_line(
                 y_fit = y_func_x(x_fit)
             else:
                 modal_trigger = True
-        # Remove existing fitting curve traces to prevent duplication
+
+        # Remove existing fitting curve traces and ZNE Estimate traces to prevent duplication
         fig.data = [
             trace
             for trace in fig.data
-            if not (
-                trace.name == "Fitting Curve"
+            if not (
+                trace.name in ["Fitting Curve", "ZNE Estimate"]
                 and trace.legendgroup == f"ta_{ta_str}"
             )
         ]
-        # Remove existing ZNE Estimate traces to prevent duplication
-        fig.data = [
-            trace
-            for trace in fig.data
-            if not (
-                trace.name == "ZNE Estimate" and trace.legendgroup == f"ta_{ta_str}"
-            )
-        ]

         if zne_graph_display == "coupling" and y_func_x is not None:
             x_axis = "x3"
diff --git a/helpers/qa.py b/helpers/qa.py
index 3a7f1c7..7242c16 100644
--- a/helpers/qa.py
+++ b/helpers/qa.py
@@ -38,7 +38,6 @@ def create_bqm(num_spins=512, coupling_strength=-1.4):
     Args:
         num_spins: Number of spins in the ring.
-
         coupling_strength: Coupling strength between spins in the ring.

     Returns:
@@ -58,7 +57,6 @@ def find_one_to_one_embedding(spins, sampler_edgelist):
     Args:
         spins: Number of spins.
-
         sampler_edgelist: Edges (couplers) of the QPU.
Returns: @@ -67,11 +65,9 @@ def find_one_to_one_embedding(spins, sampler_edgelist): bqm = create_bqm(spins) for _ in range(5): # 4 out of 5 times will find an embedding - embedding = minorminer.find_embedding(bqm.quadratic, sampler_edgelist) if max(len(val) for val in embedding.values()) == 1: - return embedding return {} @@ -82,9 +78,7 @@ def get_job_status(client, job_id, job_submit_time): Args: client: dwave-cloud-client Client instance. - job_id: Identification string of the job. - job_submit_time: Clock time of submission for identification. Returns: @@ -94,23 +88,19 @@ def get_job_status(client, job_id, job_submit_time): if '"type": "SampleSet"' in job_id and job_submit_time == "SA": return "COMPLETED" - else: - p = Problems.from_config(client.config) - - try: - - status = p.get_problem_status(job_id) - label_time = dict(status)["label"].split("submitted: ")[1] + p = Problems.from_config(client.config) - if label_time == job_submit_time: + try: + status = p.get_problem_status(job_id) + label_time = dict(status)["label"].split("submitted: ")[1] - return status.status.value + if label_time == job_submit_time: + return status.status.value - return None - - except exceptions.ResourceNotFoundError: + return None - return None + except exceptions.ResourceNotFoundError: + return None def get_samples(client, job_id, num_spins, J, embedding): @@ -118,15 +108,10 @@ def get_samples(client, job_id, num_spins, J, embedding): Args: client: dwave-cloud-client Client instance. - job_id: Identification string of the job. - num_spins: Number of spins in the ring. - coupling_strength: Coupling strength between spins in the ring. - qpu_name: Name of the quantum computer the job was submitted to. - embedding: Embedding used for the job. Returns: @@ -161,18 +146,19 @@ def json_to_dict(emb_json): } -def fitted_function(xdata, ydata, method=("polynomial", 1)): +def fitted_function(xdata, ydata, method="polynomial", degree=1): """ Generate a fitting function based on the provided data points and method. Args: xdata: Array-like, independent variable data points. ydata: Array-like, dependent variable data points. - method: Tuple or string specifying the fitting method. Options include: - - ("polynomial", deg): Fits a polynomial of degree `deg`. + method: A string specifying the fitting method. Options include: + - "polynomial": Fits a polynomial of degree `degree`. - "pure_quadratic": Fits a pure quadratic model, y = a + b*x^2. - "mixture_of_exponentials": Fits a mixture of exponential functions. - "sigmoidal_crossover": Fits a sigmoidal crossover model. + degree: The degree of the polynomial. Returns: Callable function that takes a single argument `x` and returns the fitted value. @@ -181,8 +167,8 @@ def fitted_function(xdata, ydata, method=("polynomial", 1)): Raises: ValueError: If the specified method is unknown. """ - if type(method) is tuple and method[0] == "polynomial": - coeffs = Polynomial.fit(xdata, ydata, deg=method[1]).convert().coef + if method == "polynomial": + coeffs = Polynomial.fit(xdata, ydata, deg=degree).convert().coef def y_func_x(x): return np.polynomial.polynomial.polyval(x, coeffs) @@ -203,8 +189,7 @@ def y_func_x(x): # This type of function is quite difficult to fit. def mixture_of_exponentials(x, p_0, p_1, p_2): # Strictly positive form. - # To do: Change to force saturation. Large x should go sigmoidally - # towards 0.5 + # TODO: Change to force saturation. 
Large x should go sigmoidally towards 0.5 return np.exp(p_2) / 2 * (1 + np.exp(p_1 + np.exp(p_0) * x)) # Take p_1 = 1; p_2 = min(x); take max(y) occurs at max(x) @@ -212,6 +197,7 @@ def mixture_of_exponentials(x, p_0, p_1, p_2): maxx = np.max(xdata) miny = np.min(ydata) p0 = [np.log(np.log(2 * maxy / miny - 1) / (maxx - 1)), 0, np.log(miny)] + try: p, _ = scipy.optimize.curve_fit( f=mixture_of_exponentials, xdata=xdata, ydata=ydata, p0=p0 @@ -229,8 +215,7 @@ def y_func_x(x): # This type of function is quite difficult to fit. def sigmoidal_crossover(x, p_0, p_1, p_2, p_3): # Strictly positive form. - # To do: Change to force saturation. Large x should go sigmoidally - # towards 0.5 + # TODO: Change to force saturation. Large x should go sigmoidally towards 0.5 return np.exp(p_3) * ( 1 + np.exp(p_2) * np.tanh(np.exp(p_1) * (x - np.exp(p_0))) ) @@ -264,4 +249,5 @@ def y_func_x(x): else: raise ValueError("Unknown method") + return y_func_x diff --git a/MockKibbleZurekSampler.py b/mock_kz_sampler.py similarity index 76% rename from MockKibbleZurekSampler.py rename to mock_kz_sampler.py index 6987305..2e3398d 100644 --- a/MockKibbleZurekSampler.py +++ b/mock_kz_sampler.py @@ -1,8 +1,21 @@ +# Copyright 2025 D-Wave +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import numpy as np from dimod import SampleSet from dwave.samplers import SimulatedAnnealingSampler -from dwave.system.temperatures import fluxbias_to_h from dwave.system.testing import MockDWaveSampler @@ -47,22 +60,16 @@ def __init__( self.parameters.update({"num_sweeps": []}) def sample(self, bqm, **kwargs): - # TO DO: corrupt bqm with noise proportional to annealing_time + # TODO: corrupt bqsm with noise proportional to annealing_time _bqm = bqm.change_vartype("SPIN", inplace=False) # Extract annealing_time from kwargs (if provided) annealing_time = kwargs.pop("annealing_time", 20) # 20us default. num_sweeps = int(annealing_time * 1000) # 1000 sweeps per microsecond - # Extract flux biases from kwargs (if provided) - # flux_biases = kwargs.pop('flux_biases', {}) - # flux_to_h_factor = fluxbias_to_h() - # for v in _bqm.variables: - # bias = _bqm.get_linear(v) - # _bqm.set_linear(v, bias + flux_to_h_factor * flux_biases[v]) ss = super().sample(bqm=_bqm, num_sweeps=num_sweeps, **kwargs) - ss.change_vartype(bqm.vartype) # Not required (but safe) this case ... + ss.change_vartype(bqm.vartype) # Not required but safe ss = SampleSet.from_samples_bqm(ss, bqm) diff --git a/src/demo_enums.py b/src/demo_enums.py new file mode 100644 index 0000000..47e9682 --- /dev/null +++ b/src/demo_enums.py @@ -0,0 +1,28 @@ +# Copyright 2025 D-Wave +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum + + +class ProblemType(Enum): + KZ = 0 + KZ_NM = 1 + + @property + def label(self): + return { + ProblemType.KZ: "Kibble-Zurek Mechanism", + ProblemType.KZ_NM: "Kibble-Zurek Mechanism with Noise Mitigation", + }[self] + diff --git a/tests/test_cb_activate_tooltips.py b/tests/test_cb_activate_tooltips.py deleted file mode 100644 index 4d69c14..0000000 --- a/tests/test_cb_activate_tooltips.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2024 D-Wave -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import pytest - -from contextvars import copy_context -from dash._callback_context import context_value -from dash._utils import AttributeDict - -from app import activate_tooltips -from helpers.tooltips import tool_tips - -turn_off = [dict(display='none') for _ in tool_tips.keys()] -turn_on = [dict() for _ in tool_tips.keys()] - -@pytest.mark.parametrize('input_val, output_vals', - [('off', turn_off), ('on', turn_on)]) -def test_activate_tooltips(input_val, output_vals): - """Test tooltips are shown or not.""" - - def run_callback(): - context_value.set(AttributeDict(**{'triggered_inputs': - [{'prop_id': 'tooltips_show.value'}]})) - - return activate_tooltips(input_val) - - ctx = copy_context() - - output = ctx.run(run_callback) - assert list(output) == output_vals \ No newline at end of file From 746ef1c451b11ffb2e3b71a93c9966aec51c5f74 Mon Sep 17 00:00:00 2001 From: Kate Culver Date: Wed, 8 Jan 2025 16:17:50 -0800 Subject: [PATCH 147/170] Fix tests --- tests/__init__.py | 0 tests/test_cb_cache_embeddings.py | 53 +++++++++++----------- tests/test_cb_disable_buttons.py | 31 +++++++------ tests/test_cb_graph_kink_density.py | 70 ++++++++++++++++++----------- tests/test_cb_graph_spins.py | 8 ++-- tests/test_cb_submit_job.py | 14 +++++- tests/test_kz_calcs.py | 13 +++--- tests/test_mock_kz_sampler.py | 16 +++---- 8 files changed, 116 insertions(+), 89 deletions(-) create mode 100644 tests/__init__.py diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/test_cb_cache_embeddings.py b/tests/test_cb_cache_embeddings.py index 897a3ca..ec4514e 100644 --- a/tests/test_cb_cache_embeddings.py +++ b/tests/test_cb_cache_embeddings.py @@ -18,7 +18,7 @@ from contextvars import copy_context from dash._callback_context import context_value from dash._utils import AttributeDict -from dash import no_update +from dash.exceptions import PreventUpdate from app import cache_embeddings @@ -42,25 +42,20 @@ class mock_qpu(object): def __init__(self): self.edges_per_qpu = { 'Advantage_system4.1': edges_5, - 'Advantage2_prototype2.55': 
edges_3_5, } + 'Advantage2_prototype2.3': edges_3_5 + } def __getitem__(self, indx): return mock_qpu_edges(self.edges_per_qpu[indx]) parametrize_vals = [ - ('Advantage_system4.1', - embedding_filenames, - json_embeddings_file,), - ('Advantage2_prototype2.55', - embedding_filenames, - json_embeddings_file,), - ('Advantage88_prototype7.3', - embedding_filenames, - json_embeddings_file,), ] - -@pytest.mark.parametrize(['qpu_name_val', 'embeddings', 'json_emb_file',], -parametrize_vals) -def test_cache_embeddings_qpu_selection(mocker, qpu_name_val, embeddings, json_emb_file,): + ('Advantage_system4.1', embedding_filenames, json_embeddings_file), + ('Advantage2_prototype2.3', embedding_filenames, json_embeddings_file), + ('Advantage88_prototype7.3', embedding_filenames, json_embeddings_file) +] + +@pytest.mark.parametrize(['qpu_name_val', 'embeddings', 'json_emb_file',], parametrize_vals) +def test_cache_embeddings_qpu_selection(mocker, qpu_name_val, embeddings, json_emb_file): """Test the caching of embeddings: triggered by QPU selection.""" mocker.patch('app.os.listdir', return_value=embeddings) @@ -68,8 +63,9 @@ def test_cache_embeddings_qpu_selection(mocker, qpu_name_val, embeddings, json_e mocker.patch('app.qpus', new=mock_qpu()) def run_callback(): - context_value.set(AttributeDict(** - {'triggered_inputs': [{'prop_id': 'qpu_selection.value'},]})) + context_value.set( + AttributeDict(**{'triggered_inputs': [{'prop_id': 'qpu_selection.value'},]}) + ) return cache_embeddings(qpu_name_val, 'dummy', 'dummy') @@ -79,19 +75,17 @@ def run_callback(): if qpu_name_val == 'Advantage_system4.1': assert output[1] == [5] - if qpu_name_val == 'Advantage2_prototype2.55': + if qpu_name_val == 'Advantage2_prototype2.3': assert output[1] == [3, 5] if qpu_name_val == 'Advantage88_prototype7.3': assert output == ({}, []) parametrize_vals = [ - ('{"22": {"1": [11], "0": [10], "2": [12]}}', - json_embeddings_file,), - ('needed', - json_embeddings_file,), - ('not found', - json_embeddings_file,), ] + ('{"22": {"1": [11], "0": [10], "2": [12]}}', json_embeddings_file), + ('needed', json_embeddings_file), + ('not found', json_embeddings_file), +] @pytest.mark.parametrize(['embeddings_found_val', 'embeddings_cached_val'], parametrize_vals) @@ -99,15 +93,18 @@ def test_cache_embeddings_found_embedding(embeddings_found_val, embeddings_cache """Test the caching of embeddings: triggered by found embedding.""" def run_callback(): - context_value.set(AttributeDict(** - {'triggered_inputs': [{'prop_id': 'embeddings_found.data'},]})) + context_value.set( + AttributeDict(**{'triggered_inputs': [{'prop_id': 'embeddings_found.data'},]}) + ) return cache_embeddings('dummy', embeddings_found_val, embeddings_cached_val) ctx = copy_context() - output = ctx.run(run_callback) if not isinstance(embeddings_found_val, dict): - assert output == (no_update, no_update) + with pytest.raises(PreventUpdate): + ctx.run(run_callback) else: + output = ctx.run(run_callback) + assert 22 in output[1] diff --git a/tests/test_cb_disable_buttons.py b/tests/test_cb_disable_buttons.py index f0c920b..4c22a9f 100644 --- a/tests/test_cb_disable_buttons.py +++ b/tests/test_cb_disable_buttons.py @@ -17,7 +17,7 @@ from contextvars import copy_context from dash._callback_context import context_value from dash._utils import AttributeDict -from dash import no_update +from dash.exceptions import PreventUpdate from app import disable_buttons from helpers.layouts_components import ring_lengths @@ -28,14 +28,15 @@ spins_disabled = [{'disabled': True} for _ in 
ring_lengths] spins_enabled = [{'disabled': False} for _ in ring_lengths] parametrize_vals = [ -('EMBEDDING', spins_disabled, True, True, spins_disabled, True), -('SUBMITTED', spins_disabled, True, True, spins_disabled, True), -('PENDING', spins_disabled, True, True, spins_disabled, True), -('IN_PROGRESS', spins_disabled, True, True, spins_disabled, True), -('COMPLETED', spins_enabled, False, False, spins_enabled, False), -('CANCELLED', spins_enabled, False, False, spins_enabled, False), -('FAILED', spins_enabled, False, False, spins_enabled, False), -('FAKE', no_update, no_update, no_update, no_update, no_update)] + ('EMBEDDING', spins_disabled, True, True, spins_disabled, True), + ('SUBMITTED', spins_disabled, True, True, spins_disabled, True), + ('PENDING', spins_disabled, True, True, spins_disabled, True), + ('IN_PROGRESS', spins_disabled, True, True, spins_disabled, True), + ('COMPLETED', spins_enabled, False, False, spins_enabled, False), + ('CANCELLED', spins_enabled, False, False, spins_enabled, False), + ('FAILED', spins_enabled, False, False, spins_enabled, False), + ('FAKE', spins_enabled, False, False, spins_enabled, False) +] @pytest.mark.parametrize(parametrize_names, parametrize_vals) def test_disable_buttons(job_submit_state_val, spins_val_in, anneal_duration_val, @@ -51,7 +52,11 @@ def run_callback(): ctx = copy_context() - output = ctx.run(run_callback) - - assert output == (anneal_duration_val, coupling_strength_val, - spins_val_out, qpu_selection_val) \ No newline at end of file + if job_submit_state_val == "FAKE": + with pytest.raises(PreventUpdate): + ctx.run(run_callback) + else: + output = ctx.run(run_callback) + assert output == ( + anneal_duration_val, coupling_strength_val, spins_val_out, qpu_selection_val + ) \ No newline at end of file diff --git a/tests/test_cb_graph_kink_density.py b/tests/test_cb_graph_kink_density.py index b9a57ba..b27f379 100644 --- a/tests/test_cb_graph_kink_density.py +++ b/tests/test_cb_graph_kink_density.py @@ -15,7 +15,7 @@ import pytest from contextvars import copy_context -from dash import no_update +from dash.exceptions import PreventUpdate from dash._callback_context import context_value from dash._utils import AttributeDict import numpy as np @@ -41,46 +41,64 @@ 'yaxis': {'anchor': 'x', 'domain': [0.0, 1.0], 'title': {'text': 'y'}}} }) -samples = dimod.as_samples([ - [-1, -1, -1, +1, +1], - [-1, -1, +1, +1, +1], - [-1, -1, -1, +1, +1],]) +samples = dimod.as_samples( + [ + [-1, -1, -1, +1, +1], + [-1, -1, +1, +1, +1], + [-1, -1, -1, +1, +1], + ] +) sampleset = dimod.SampleSet.from_samples(samples, 'SPIN', 0) -parametrize_vals = [('kz_graph_display.value', 'both', 'dummy'), - ('kz_graph_display.value', 'kink_density', 'dummy'), - ('kz_graph_display.value', 'schedule', 'dummy'), - ('coupling_strength.value', 'schedule', 'dummy'), - ('quench_schedule_filename.children', 'schedule', 'dummy'), - ('job_submit_state.children', 'dummy', 'SUBMITTED'), - ('job_submit_state.children', 'dummy', 'PENDING'), - ('job_submit_state.children', 'dummy', 'COMPLETED')] +parametrize_vals = [ + ('kz_graph_display.value', 'both', 'dummy'), + ('kz_graph_display.value', 'kink_density', 'dummy'), + ('kz_graph_display.value', 'schedule', 'dummy'), + ('coupling_strength.value', 'schedule', 'dummy'), + ('quench_schedule_filename.children', 'schedule', 'dummy'), + ('job_submit_state.children', 'dummy', 'SUBMITTED'), + ('job_submit_state.children', 'dummy', 'PENDING'), + ('job_submit_state.children', 'dummy', 'COMPLETED') +] 
-@pytest.mark.parametrize('trigger_val, kz_graph_display_val, job_submit_state_val', - parametrize_vals) +@pytest.mark.parametrize('trigger_val, kz_graph_display_val, job_submit_state_val', parametrize_vals) def test_graph_kink_density(mocker, trigger_val, kz_graph_display_val, job_submit_state_val): """Test graph of kink density.""" mocker.patch('app.get_samples', return_value=sampleset) def run_callback(): - context_value.set(AttributeDict(** - {'triggered_inputs': [{'prop_id': trigger_val},]})) + context_value.set( + AttributeDict(**{'triggered_inputs': [{'prop_id': trigger_val},]}) + ) return display_graphics_kink_density( - kz_graph_display_val, 2.5, "FALLBACK_SCHEDULE.csv", job_submit_state_val, '1234', - 5, 100, 7, 5, json_embeddings_file, sample_vs_theory) + None, + kz_graph_display_val, + 2.5, + "FALLBACK_SCHEDULE.csv", + job_submit_state_val, + '1234', + 7, + 5, + 0, + json_embeddings_file, + sample_vs_theory, + {}, + {}, + {"k": []}, + ) ctx = copy_context() - output = ctx.run(run_callback) - if trigger_val in [ 'kz_graph_display.value', 'coupling_strength.value', - 'quench_schedule_filename.children']: - assert type(output) == plotly.graph_objects.Figure - elif job_submit_state_val == "COMPLETED": - assert type(output) == plotly.graph_objects.Figure + 'quench_schedule_filename.children' + ] or job_submit_state_val == "COMPLETED": + output = ctx.run(run_callback) + + assert type(output[0]) == plotly.graph_objects.Figure else: - assert output == no_update \ No newline at end of file + with pytest.raises(PreventUpdate): + ctx.run(run_callback) diff --git a/tests/test_cb_graph_spins.py b/tests/test_cb_graph_spins.py index 2264cf0..5c29145 100644 --- a/tests/test_cb_graph_spins.py +++ b/tests/test_cb_graph_spins.py @@ -15,7 +15,7 @@ import pytest from contextvars import copy_context -from dash import no_update +from dash.exceptions import PreventUpdate from dash._callback_context import context_value from dash._utils import AttributeDict import plotly @@ -68,9 +68,9 @@ def run_callback(): ctx = copy_context() - output = ctx.run(run_callback) - if job_submit_state_val == 'COMPLETED': + output = ctx.run(run_callback) assert type(output) == plotly.graph_objects.Figure else: - output = no_update \ No newline at end of file + with pytest.raises(PreventUpdate): + ctx.run(run_callback) diff --git a/tests/test_cb_submit_job.py b/tests/test_cb_submit_job.py index 3b30385..635eb7c 100644 --- a/tests/test_cb_submit_job.py +++ b/tests/test_cb_submit_job.py @@ -55,11 +55,21 @@ def run_callback(): context_value.set(AttributeDict(** {'triggered_inputs': [{'prop_id': 'job_submit_time.children'},]})) - return submit_job('11:45AM', 'Advantage_system88.4', 3, 2.3, 7, json_embeddings_file) + return submit_job( + '11:45AM', + 'Advantage_system88.4', + 3, + 2.3, + 7, + json_embeddings_file, + 0, + "FALLBACK_SCHEDULE.csv", + False, + ) ctx = copy_context() output = ctx.run(run_callback) - assert output == (1234) + assert output == (1234, False, False) diff --git a/tests/test_kz_calcs.py b/tests/test_kz_calcs.py index c9d4ff7..5063a04 100644 --- a/tests/test_kz_calcs.py +++ b/tests/test_kz_calcs.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import os import pytest import pandas as pd @@ -19,7 +20,9 @@ from helpers.kz_calcs import * -schedule = pd.read_csv('helpers/FALLBACK_SCHEDULE.csv') +project_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + +schedule = pd.read_csv(project_dir + '/helpers/FALLBACK_SCHEDULE.csv') @pytest.mark.parametrize('t_a_ns, J1, J2', [([7, 25], -1.0, -0.3), ([10, 30], 1.0, 0.3), ]) @@ -29,14 +32,14 @@ def test_kz_theory(t_a_ns, J1, J2): output1 = theoretical_kink_density( annealing_times_ns=t_a_ns, J=J1, - schedule=schedule, - schedule_name='FALLBACK_SCHEDULE.csv') + schedule_name='FALLBACK_SCHEDULE.csv', + ) output2 = theoretical_kink_density( annealing_times_ns=t_a_ns, J=J2, - schedule=schedule, - schedule_name='FALLBACK_SCHEDULE.csv') + schedule_name='FALLBACK_SCHEDULE.csv', + ) assert output1[0] > output1[1] assert output1[0] < output2[0] diff --git a/tests/test_mock_kz_sampler.py b/tests/test_mock_kz_sampler.py index 15aea74..91c692b 100644 --- a/tests/test_mock_kz_sampler.py +++ b/tests/test_mock_kz_sampler.py @@ -1,4 +1,4 @@ -# Copyright 2024 D-Wave Systems Inc. +# Copyright 2025 D-Wave # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,17 +16,13 @@ import sys import os -import dimod from dimod.testing import * -from unittest.mock import patch -from dimod import SampleSet sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/..") from dimod import BinaryQuadraticModel from dwave.samplers import SimulatedAnnealingSampler -from dwave.system.testing import MockDWaveSampler -from MockKibbleZurekSampler import MockKibbleZurekSampler +from mock_kz_sampler import MockKibbleZurekSampler @pytest.fixture def default_sampler(): @@ -57,12 +53,11 @@ def sample_bqm(): return BinaryQuadraticModel({'a': 1.0, 'b': -1.0}, {('a', 'b'): 0.5}, 0.0, 'BINARY') - def test_initialization(default_sampler, custom_sampler): - # #assert default_sampler.topology_type == 'pegasus' - # #assert default_sampler.topology_shape == [16] + # assert default_sampler.topology_type == 'pegasus' + # assert default_sampler.topology_shape == [16] # assert isinstance(default_sampler.substitute_sampler, SimulatedAnnealingSampler) - # #assert default_sampler.substitute_kwargs['beta_range'] == [0, 3] + # assert default_sampler.substitute_kwargs['beta_range'] == [0, 3] # assert default_sampler.substitute_kwargs['beta_schedule_type'] == 'linear' # assert default_sampler.substitute_kwargs['num_sweeps'] == 100 # assert default_sampler.substitute_kwargs['randomize_order'] is True @@ -78,7 +73,6 @@ def test_initialization(default_sampler, custom_sampler): assert custom_sampler.substitute_kwargs['beta_range'] == [1, 2] assert custom_sampler.substitute_kwargs['num_sweeps'] == 200 - # def test_sample_with_default_annealing_time(default_sampler, sample_bqm): # sampleset = default_sampler.sample(sample_bqm) From 3d7dedb45d3e36064707cdbe3db2a00a10d20be3 Mon Sep 17 00:00:00 2001 From: Kate Culver Date: Thu, 9 Jan 2025 13:39:36 -0800 Subject: [PATCH 148/170] Replace test_mock_kz_sampler with placeholder tests --- tests/test_mock_kz_sampler.py | 84 ++--------------------------------- 1 file changed, 4 insertions(+), 80 deletions(-) diff --git a/tests/test_mock_kz_sampler.py b/tests/test_mock_kz_sampler.py index 91c692b..554c460 100644 --- a/tests/test_mock_kz_sampler.py +++ b/tests/test_mock_kz_sampler.py @@ -12,87 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import pytest -import sys -import os - -from dimod.testing import * - -sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/..") - -from dimod import BinaryQuadraticModel -from dwave.samplers import SimulatedAnnealingSampler -from mock_kz_sampler import MockKibbleZurekSampler - -@pytest.fixture -def default_sampler(): - nodelist = ['a', 'b'] - edgelist = [('a', 'b')] - return MockKibbleZurekSampler(nodelist=nodelist, edgelist=edgelist) - -@pytest.fixture -def custom_sampler(): - custom_nodelist = [0, 1, 2] - custom_edgelist = [(0, 1), (1, 2)] - substitute_sampler = SimulatedAnnealingSampler() - substitute_kwargs = { - 'beta_range': [1, 2], - 'num_sweeps': 200 - } - return MockKibbleZurekSampler( - nodelist=custom_nodelist, - edgelist=custom_edgelist, - topology_type='chimera', - topology_shape=[4, 4, 4], - substitute_sampler=substitute_sampler, - substitute_kwargs=substitute_kwargs - ) - -@pytest.fixture -def sample_bqm(): - return BinaryQuadraticModel({'a': 1.0, 'b': -1.0}, {('a', 'b'): 0.5}, 0.0, 'BINARY') - - -def test_initialization(default_sampler, custom_sampler): - # assert default_sampler.topology_type == 'pegasus' - # assert default_sampler.topology_shape == [16] - # assert isinstance(default_sampler.substitute_sampler, SimulatedAnnealingSampler) - # assert default_sampler.substitute_kwargs['beta_range'] == [0, 3] - # assert default_sampler.substitute_kwargs['beta_schedule_type'] == 'linear' - # assert default_sampler.substitute_kwargs['num_sweeps'] == 100 - # assert default_sampler.substitute_kwargs['randomize_order'] is True - # assert default_sampler.substitute_kwargs['proposal_acceptance_criteria'] == 'Gibbs' - # assert default_sampler.sampler_type == 'mock' - - assert custom_sampler.topology_type == 'chimera' - assert custom_sampler.topology_shape == [4, 4, 4] - assert custom_sampler.nodelist == [0, 1, 2] - assert custom_sampler.edgelist == [(0, 1), (1, 2)] - - assert isinstance(custom_sampler.substitute_sampler, SimulatedAnnealingSampler) - assert custom_sampler.substitute_kwargs['beta_range'] == [1, 2] - assert custom_sampler.substitute_kwargs['num_sweeps'] == 200 - - -# def test_sample_with_default_annealing_time(default_sampler, sample_bqm): -# sampleset = default_sampler.sample(sample_bqm) - -# # default anneal _time should be 20 -# expected_num_sweeps = int(20 * 1000) -# assert default_sampler.kwargs['num_sweeps']== expected_num_sweeps - -def test_sample_with_custom_annealing_time(default_sampler, sample_bqm): - pass - - -def test_sample_preserves_vartype(default_sampler, sample_bqm): +def test_init(): + """Test initialization of MockKibbleZurekSampler""" pass -def test_bqm_vartype_conversion(default_sampler, sample_bqm): - pass - -def test_substitute_sampler_call_parameters(default_sampler, sample_bqm): +def test_sample(): + """Test the MockKibbleZurekSampler ``sample`` method""" pass - - From 1ef718976a25302b2f608d62fd0a28d6e701c9e1 Mon Sep 17 00:00:00 2001 From: Kate Culver Date: Fri, 10 Jan 2025 16:58:33 -0800 Subject: [PATCH 149/170] Fix race condition, clean up Dash structure and UI --- README.md | 3 + app.py | 224 +++++++++++++++++------------- assets/custom.css | 5 + helpers/layouts_cards.py | 81 ++++------- helpers/layouts_components.py | 44 ------ helpers/tooltips.py | 34 +---- tests/test_cb_cache_embeddings.py | 6 +- 7 files changed, 173 insertions(+), 224 deletions(-) diff --git a/README.md b/README.md index 379aabd..41a713a 100644 --- a/README.md +++ b/README.md @@ -146,6 +146,9 @@ to the Kibble-Zurek predictions. 
*Hover over an input field to see a description of the input and its range of* *supported values.* + + ## Model Overview Quantum simulation is valuable for demonstrating and understanding the diff --git a/app.py b/app.py index 6f0e806..2b8bead 100644 --- a/app.py +++ b/app.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Union +from typing import NamedTuple, Union import dash import dash_bootstrap_components as dbc from dash import ALL, ctx, dcc, html, Input, Output, State @@ -130,6 +130,17 @@ def tooltips(problem_type: Union[ProblemType, int]) -> list[dbc.Tooltip]: dcc.Store(id="initial_warning", data=False), dcc.Store(id="kz_data", data={}), dcc.Store(id="selected-problem"), + dcc.Store(id="job_submit_time"), + dcc.Store(id="job_id"), + dcc.Store(id="embeddings_cached", data={}), + dcc.Store(id="embeddings_found", data={}), + dcc.Interval( + id="wd_job", + interval=500, + n_intervals=0, + disabled=True, + max_intervals=1, + ), navbar, # Includes the Navbar at the top html.Div( [ @@ -313,7 +324,7 @@ def set_schedule(qpu_name): @app.callback( Output("embeddings_cached", "data"), - Output("embedding_is_cached", "value"), + Output("embedding_is_cached", "children"), inputs=[ Input("qpu_selection", "value"), Input("embeddings_found", "data"), @@ -331,9 +342,10 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached): file for file in os.listdir("helpers") if ".json" in file and "emb_" in file ]: - _qpu_name = "Advantage_system6.4" if qpu_name == "Diffusion [Classical]" else qpu_name + if qpu_name == "Diffusion [Classical]": + qpu_name = "Advantage_system6.4" - if _qpu_name in filename: + if qpu_name.split('.')[0] in filename: with open(f"helpers/{filename}", "r") as fp: embeddings_cached = json.load(fp) @@ -341,9 +353,8 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached): # Validate that loaded embeddings' edges are still available on the selected QPU for length in list(embeddings_cached.keys()): - source_graph = dimod.to_networkx_graph(create_bqm(num_spins=length)).edges - target_graph = qpus[_qpu_name].edges + target_graph = qpus[qpu_name].edges emb = embeddings_cached[length] if not is_valid_embedding(emb, source_graph, target_graph): @@ -358,7 +369,7 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached): new_embedding = list(embeddings_found.keys())[0] embeddings_cached[new_embedding] = embeddings_found[new_embedding] - return embeddings_cached, list(embeddings_cached.keys()) + return embeddings_cached, ", ".join(str(embedding) for embedding in embeddings_cached.keys()) @app.callback( @@ -373,7 +384,7 @@ def cache_embeddings(qpu_name, embeddings_found, embeddings_cached): Input("coupling_strength", "value"), # previously input Input("quench_schedule_filename", "children"), Input("job_submit_state", "children"), - Input("job_id", "children"), + Input("job_id", "data"), Input("anneal_duration", "value"), Input("spins", "value"), Input("selected-problem", "data"), @@ -447,7 +458,7 @@ def display_graphics_kink_density( if job_submit_state != "COMPLETED": raise PreventUpdate - embeddings_cached = embeddings_cached = json_to_dict(embeddings_cached) + embeddings_cached = json_to_dict(embeddings_cached) sampleset_unembedded = get_samples( client, job_id, spins, J, embeddings_cached[spins] @@ -522,7 +533,7 @@ def display_graphics_kink_density( if job_submit_state != "COMPLETED": raise PreventUpdate - embeddings_cached = embeddings_cached = 
json_to_dict(embeddings_cached) + embeddings_cached = json_to_dict(embeddings_cached) sampleset_unembedded = get_samples( client, job_id, spins, J, embeddings_cached[spins] @@ -554,7 +565,7 @@ def display_graphics_kink_density( inputs=[ Input("spins", "value"), Input("job_submit_state", "children"), - State("job_id", "children"), + State("job_id", "data"), State("coupling_strength", "value"), State("embeddings_cached", "data"), ] @@ -566,7 +577,7 @@ def display_graphics_spin_ring(spins, job_submit_state, job_id, J, embeddings_ca if job_submit_state != "COMPLETED": raise PreventUpdate - embeddings_cached = embeddings_cached = json_to_dict(embeddings_cached) + embeddings_cached = json_to_dict(embeddings_cached) sampleset_unembedded = get_samples( client, job_id, spins, J, embeddings_cached[spins] ) @@ -582,11 +593,12 @@ def display_graphics_spin_ring(spins, job_submit_state, job_id, J, embeddings_ca @app.callback( - Output("job_id", "children"), + Output("job_id", "data"), Output("initial_warning", "data"), Output("warning-modal", "is_open"), + Output("wd_job", "n_intervals"), inputs=[ - Input("job_submit_time", "children"), + Input("job_submit_time", "data"), State("qpu_selection", "value"), State("spins", "value"), State("coupling_strength", "value"), @@ -630,7 +642,7 @@ def submit_job( bqm_embedded, annealing_time=annealing_time ) - return json.dumps(sampleset.to_serializable()), True, not initial_warning + return json.dumps(sampleset.to_serializable()), True, not initial_warning, 0 bqm_embedded = embed_bqm(bqm, embedding, DWaveSampler(solver=solver.name).adjacency) @@ -654,24 +666,78 @@ def submit_job( label=f"Examples - Kibble-Zurek Simulation, submitted: {job_submit_time}", ) - return computation.wait_id(), False, False + return computation.wait_id(), False, False, 0 @app.callback( Output("btn_simulate", "disabled"), Output("wd_job", "disabled"), - Output("wd_job", "interval"), - Output("wd_job", "n_intervals"), + Output("wd_job", "n_intervals", allow_duplicate=True), Output("job_submit_state", "children"), - Output("job_submit_time", "children"), + Output("job_submit_time", "data"), Output("embeddings_found", "data"), inputs=[ Input("btn_simulate", "n_clicks"), + State("job_submit_state", "children"), + State("embedding_is_cached", "children"), + State("spins", "value"), + State("qpu_selection", "value"), + ], + prevent_initial_call=True, +) +def run_button_click( + run_btn_click, + job_submit_state, + cached_embeddings, + spins, + qpu_name, +): + """Manage simulation: embedding, job submission.""" + if str(spins) in cached_embeddings.split(", ") or qpu_name == "Diffusion [Classical]": + submit_time = datetime.datetime.now().strftime("%c") + if qpu_name == "Diffusion [Classical]": # Hack to fix switch from SA to QPU + submit_time = "SA" + job_submit_state = "SUBMITTED" + embedding = dash.no_update + + else: + submit_time = dash.no_update + job_submit_state = "EMBEDDING" + embedding = "needed" + + return ( + True, + False, + 0, + job_submit_state, + submit_time, + embedding, + ) + +class SimulateReturn(NamedTuple): + """Return type for the ``simulate`` callback function.""" + + btn_simulate_disabled: bool = dash.no_update + wd_job_disabled: bool = dash.no_update + wd_job_interval: int = dash.no_update + wd_job_n_intervals: int = dash.no_update + job_submit_state: str = dash.no_update + job_submit_time: datetime = dash.no_update + embeddings_found: dict|str = dash.no_update + +@app.callback( + Output("btn_simulate", "disabled", allow_duplicate=True), + Output("wd_job", "disabled", 
allow_duplicate=True), + Output("wd_job", "interval"), + Output("wd_job", "n_intervals", allow_duplicate=True), + Output("job_submit_state", "children", allow_duplicate=True), + Output("job_submit_time", "data", allow_duplicate=True), + Output("embeddings_found", "data", allow_duplicate=True), + inputs=[ Input("wd_job", "n_intervals"), - State("job_id", "children"), + State("job_id", "data"), State("job_submit_state", "children"), - State("job_submit_time", "children"), - State("embedding_is_cached", "value"), + State("job_submit_time", "data"), State("spins", "value"), State("qpu_selection", "value"), State("embeddings_found", "data"), @@ -679,96 +745,64 @@ def submit_job( prevent_initial_call=True, ) def simulate( - dummy1, - dummy2, + interval, job_id, job_submit_state, job_submit_time, - cached_embedding_lengths, spins, qpu_name, embeddings_found, -): +) -> SimulateReturn: """Manage simulation: embedding, job submission.""" - if ctx.triggered_id == "btn_simulate": - - if spins in cached_embedding_lengths or qpu_name == "Diffusion [Classical]": - submit_time = datetime.datetime.now().strftime("%c") - if qpu_name == "Diffusion [Classical]": # Hack to fix switch from SA to QPU - submit_time = "SA" - job_submit_state = "SUBMITTED" - embedding = dash.no_update - - else: - submit_time = dash.no_update - job_submit_state = "EMBEDDING" - embedding = "needed" - - disable_btn = True - disable_watchdog = False - - return ( - disable_btn, - disable_watchdog, - 0.5 * 1000, - 0, - job_submit_state, - submit_time, - embedding, - ) - if job_submit_state == "EMBEDDING": - submit_time = dash.no_update - embedding = dash.no_update + if embeddings_found != "needed": + # Found embedding last WD, so is cached, so now can submit job + return SimulateReturn( + wd_job_interval=200, + job_submit_state="SUBMITTED", + job_submit_time=datetime.datetime.now().strftime("%c"), + ) - if embeddings_found == "needed": - try: - embedding = find_one_to_one_embedding(spins, qpus[qpu_name].edges) - if embedding: - job_submit_state = ( - "EMBEDDING" # Stay another WD to allow caching the embedding - ) - embedding = {spins: embedding} - else: - job_submit_state = "FAILED" - embedding = "not found" - except Exception: + try: + embedding = find_one_to_one_embedding(spins, qpus[qpu_name].edges) + if embedding: + job_submit_state = "EMBEDDING" # Stay another WD to allow caching the embedding + embedding = {spins: embedding} + + else: job_submit_state = "FAILED" embedding = "not found" - - else: # Found embedding last WD, so is cached, so now can submit job - submit_time = datetime.datetime.now().strftime("%c") - job_submit_state = "SUBMITTED" - - return True, False, 0.2 * 1000, 0, job_submit_state, submit_time, embedding + except Exception: + job_submit_state = "FAILED" + embedding = "not found" + + return SimulateReturn( + wd_job_interval=200, + wd_job_n_intervals=0, + job_submit_state=job_submit_state, + embeddings_found=embedding + ) if job_submit_state in ["SUBMITTED", "PENDING", "IN_PROGRESS"]: job_submit_state = get_job_status(client, job_id, job_submit_time) + wd_time = 1000 + if not job_submit_state: job_submit_state = "SUBMITTED" - wd_time = 0.2 * 1000 - else: - wd_time = 1 * 1000 - - return True, False, wd_time, 0, job_submit_state, dash.no_update, dash.no_update - - if job_submit_state in ["COMPLETED", "CANCELLED", "FAILED"]: - disable_btn = False - disable_watchdog = True - - return ( - disable_btn, - disable_watchdog, - 0.1 * 1000, - 0, - dash.no_update, - dash.no_update, - dash.no_update, + wd_time = 200 + + return 
SimulateReturn( + wd_job_interval=wd_time, + wd_job_n_intervals=0, + job_submit_state=job_submit_state, ) - # Exception state: should only ever happen in testing - return False, True, 0, 0, "ERROR", dash.no_update, dash.no_update + return SimulateReturn( + btn_simulate_disabled=False, + wd_job_disabled=True, + job_submit_state=dash.no_update if job_submit_state in ["COMPLETED", "CANCELLED", "FAILED"] else "ERROR", + ) @app.callback( @@ -791,9 +825,7 @@ def set_progress_bar(job_submit_state): State("error-modal", "is_open"), ) def toggle_modal(trigger, is_open): - if trigger: - return True - return is_open + return True if trigger else is_open if __name__ == "__main__": diff --git a/assets/custom.css b/assets/custom.css index 3b9f5d2..9c16a10 100644 --- a/assets/custom.css +++ b/assets/custom.css @@ -35,6 +35,7 @@ h3 { label { color: rgb(3, 184, 255); margin-top: 10px; + margin-bottom: 5px; font-weight: 600; font-size: 1rem; } @@ -43,3 +44,7 @@ p { color: white; font-size: 14px; } + +.progress { + background: rgba(255, 255, 255, 0.25); +} diff --git a/helpers/layouts_cards.py b/helpers/layouts_cards.py index 8a60dac..4a464e2 100644 --- a/helpers/layouts_cards.py +++ b/helpers/layouts_cards.py @@ -61,70 +61,45 @@ def control_card(solvers={}, init_job_status="READY"): ], style={"marginTop": "10px"}, ), - html.Label("Cached Embeddings"), - embeddings, - html.Label("Simulation"), + html.P(["Cached Embeddings: ", html.Span(id="embedding_is_cached")], style={"marginTop": 10}), dbc.Row( [ dbc.Col( dbc.Button( - "Run", + "Run Simulation", id="btn_simulate", color="primary", - className="me-2", # Adds spacing between buttons - style={ - "marginTop": "10px" - }, ), width="auto", ), - ], - justify="start", # Aligns buttons to the left - align="center", # Vertically centers buttons - ), - dbc.Progress( - id="bar_job_status", - value=0, - color="link", - className="mb-3", - style={"width": "60%"}, - ), - html.P( - [ - "Status: ", - html.Span( - id="job_submit_state", - children=f"{init_job_status}", - style={ - "color": job_status_color, - "fontSize": 12, - "marginTop": "10px", - }, + dbc.Col( + [ + dbc.Progress( + id="bar_job_status", + value=0, + color="link", + style={"width": "60%"}, + ), + html.P( + [ + "Status: ", + html.Span( + id="job_submit_state", + children=f"{init_job_status}", + style={ + "color": job_status_color, + "fontSize": 12, + }, + ), + ], + style={"margin": "0"}, + ), + ] ), ], - style={"marginTop": "5px"}, - ), - tooltips_activate, - # Non-displayed section - dcc.Interval( - id="wd_job", - interval=None, - n_intervals=0, - disabled=True, - max_intervals=1, - ), - # Used for storing job status. Can probably be replaced with dcc.Store. 
- html.P(id="job_submit_time", style={"display": "none"}), - html.P(id="job_id", style={"display": "none"}), - dcc.Store( - id="embeddings_cached", - storage_type="memory", - data={}, - ), - dcc.Store( - id="embeddings_found", - storage_type="memory", - data={}, + justify="start", # Aligns buttons to the left + align="end", + style={"marginTop": 40} ), ], body=True, diff --git a/helpers/layouts_components.py b/helpers/layouts_components.py index 36acd17..d06658b 100644 --- a/helpers/layouts_components.py +++ b/helpers/layouts_components.py @@ -24,10 +24,8 @@ "get_coupling_strength_slider", "config_qpu_selection", "dbc_modal", - "embeddings", "job_bar_display", "ring_lengths", - "tooltips_activate", ] ring_lengths = [512, 1024, 2048] @@ -241,45 +239,3 @@ def dbc_modal(name): ] ) ] - - -embeddings = dcc.Checklist( - options=[ - { - "label": html.Div( - [f"{length}"], - style={"color": "white", "font-size": 10, "marginRight": 10}, - ), - "value": length, - "disabled": True, - } - for length in ring_lengths - ], - value=[], - id=f"embedding_is_cached", - style={"color": "white"}, - inline=True, -) - -tooltips_activate = dcc.RadioItems( - id="tooltips_show", - options=[ - { - "label": "On", - "value": "on", - }, - { - "label": "Off", - "value": "off", - }, - ], - value="on", - inputStyle={"margin-right": "10px", "margin-bottom": "10px"}, - labelStyle={ - "color": "white", - "font-size": 12, - "display": "inline-block", - "marginLeft": 20, - }, - inline=True, -) diff --git a/helpers/tooltips.py b/helpers/tooltips.py index 8e4eb51..fc4326a 100644 --- a/helpers/tooltips.py +++ b/helpers/tooltips.py @@ -14,46 +14,24 @@ tool_tips_demo2 = { "anneal_duration": f"""Duration of the quantum anneal. Range of 5 to 320 nanoseconds.""", - "graph_display": f"""Plot selection: Defects vs anneal duration or defects vs noise level""", "spins": f"""Number of spins in the 1D ring.""", - "coupling_strength": f"""Coupling strength, J, between spins in the ferromagnetic ring. + "coupling_strength": f"""Coupling strength between spins in the ferromagnetic ring. Range of -1.8 to -0.6. """, "qpu_selection": f"""Selection from quantum computers available to your account/project token.""", - "embedding_is_cached": f"""Whether or not a minor-embedding is cached for the selected QPU, for each -of the available number of spins. If not available, an attempt is made to find -an embedding the first time you submit a problem. + "quench_schedule_filename": f"""The fast-anneal schedule for the selected quantum computer. +If none exists, one from a different quantum computer is used (expect inaccuracies). """, - "btn_simulate": f"""Click to (minor-embed if a cached embedding is unavailable) and -submit the problem to your selected QPU. -""", - "quench_schedule_filename": f"""CSV file with the fast-anneal schedule for the selected quantum computer. -If none exists, uses one from a different quantum computer (expect inaccuracies). -You can download schedules from -https://docs.dwavesys.com/docs/latest/doc_physical_properties.html -""", - "job_submit_state": f"""Status of the last submission to the quantum computer (or initial state).""", } tool_tips_demo1 = { "anneal_duration": f"""Duration of the quantum anneal. Range of 5 to 100 nanoseconds.""", - "graph_display": f"""Plot selection: Kibble-Zurek prediction and/or QPU energies (either separate or combined).""", "spins": f"""Number of spins in the 1D ring.""", - "coupling_strength": f"""Coupling strength, J, between spins in the ring. 
+ "coupling_strength": f"""Coupling strength between spins in the ring. Range of -2 (ferromagnetic) to +1 (anti-ferromagnetic). """, "qpu_selection": f"""Selection from quantum computers available to your account/project token.""", - "embedding_is_cached": f"""Whether or not a minor-embedding is cached for the selected QPU, for each -of the available number of spins. If not available, an attempt is made to find -an embedding the first time you submit a problem. -""", - "btn_simulate": f"""Click to (minor-embed if a cached embedding is unavailable) and -submit the problem to your selected QPU. -""", - "quench_schedule_filename": f"""CSV file with the fast-anneal schedule for the selected quantum computer. -If none exists, uses one from a different quantum computer (expect inaccuracies). -You can download schedules from -https://docs.dwavesys.com/docs/latest/doc_physical_properties.html + "quench_schedule_filename": f"""The fast-anneal schedule for the selected quantum computer. +If none exists, one from a different quantum computer is used (expect inaccuracies). """, - "job_submit_state": f"""Status of the last submission to the quantum computer (or initial state).""", } diff --git a/tests/test_cb_cache_embeddings.py b/tests/test_cb_cache_embeddings.py index ec4514e..009a9a8 100644 --- a/tests/test_cb_cache_embeddings.py +++ b/tests/test_cb_cache_embeddings.py @@ -42,7 +42,7 @@ class mock_qpu(object): def __init__(self): self.edges_per_qpu = { 'Advantage_system4.1': edges_5, - 'Advantage2_prototype2.3': edges_3_5 + 'Advantage2_prototype2.55': edges_3_5 } def __getitem__(self, indx): @@ -50,7 +50,7 @@ def __getitem__(self, indx): parametrize_vals = [ ('Advantage_system4.1', embedding_filenames, json_embeddings_file), - ('Advantage2_prototype2.3', embedding_filenames, json_embeddings_file), + ('Advantage2_prototype2.55', embedding_filenames, json_embeddings_file), ('Advantage88_prototype7.3', embedding_filenames, json_embeddings_file) ] @@ -75,7 +75,7 @@ def run_callback(): if qpu_name_val == 'Advantage_system4.1': assert output[1] == [5] - if qpu_name_val == 'Advantage2_prototype2.3': + if qpu_name_val == 'Advantage2_prototype2.55': assert output[1] == [3, 5] if qpu_name_val == 'Advantage88_prototype7.3': From 14cb97fbc1cfbc397ed5ec62cded744c988fdffa Mon Sep 17 00:00:00 2001 From: Kate Culver Date: Tue, 14 Jan 2025 14:57:24 -0800 Subject: [PATCH 150/170] Fix tests --- app.py | 32 ++++++----- tests/test_cb_cache_embeddings.py | 6 +-- tests/test_cb_simulate.py | 89 +++++++++++++++++-------------- tests/test_cb_submit_job.py | 2 +- 4 files changed, 71 insertions(+), 58 deletions(-) diff --git a/app.py b/app.py index 2b8bead..1b58ef1 100644 --- a/app.py +++ b/app.py @@ -767,22 +767,26 @@ def simulate( try: embedding = find_one_to_one_embedding(spins, qpus[qpu_name].edges) if embedding: - job_submit_state = "EMBEDDING" # Stay another WD to allow caching the embedding - embedding = {spins: embedding} + return SimulateReturn( + wd_job_interval=200, + wd_job_n_intervals=0, + job_submit_state="EMBEDDING", # Stay another WD to allow caching the embedding + embeddings_found={spins: embedding} + ) - else: - job_submit_state = "FAILED" - embedding = "not found" + return SimulateReturn( + btn_simulate_disabled=False, + wd_job_disabled=True, + job_submit_state="FAILED", + embeddings_found="not found" + ) except Exception: - job_submit_state = "FAILED" - embedding = "not found" - - return SimulateReturn( - wd_job_interval=200, - wd_job_n_intervals=0, - job_submit_state=job_submit_state, - 
embeddings_found=embedding - ) + return SimulateReturn( + btn_simulate_disabled=False, + wd_job_disabled=True, + job_submit_state="FAILED", + embeddings_found="not found" + ) if job_submit_state in ["SUBMITTED", "PENDING", "IN_PROGRESS"]: job_submit_state = get_job_status(client, job_id, job_submit_time) diff --git a/tests/test_cb_cache_embeddings.py b/tests/test_cb_cache_embeddings.py index 009a9a8..d3e40a5 100644 --- a/tests/test_cb_cache_embeddings.py +++ b/tests/test_cb_cache_embeddings.py @@ -73,13 +73,13 @@ def run_callback(): output = ctx.run(run_callback) if qpu_name_val == 'Advantage_system4.1': - assert output[1] == [5] + assert output[1] == "5" if qpu_name_val == 'Advantage2_prototype2.55': - assert output[1] == [3, 5] + assert output[1] == "3, 5" if qpu_name_val == 'Advantage88_prototype7.3': - assert output == ({}, []) + assert output == ({}, "") parametrize_vals = [ ('{"22": {"1": [11], "0": [10], "2": [12]}}', json_embeddings_file), diff --git a/tests/test_cb_simulate.py b/tests/test_cb_simulate.py index 8824f61..d3965f9 100644 --- a/tests/test_cb_simulate.py +++ b/tests/test_cb_simulate.py @@ -21,12 +21,13 @@ import datetime -from app import simulate +from app import SimulateReturn, run_button_click, simulate before_test = datetime.datetime.now().strftime('%c') parametrize_vals = [ - (512, [512, 1024], 'SUBMITTED', no_update), - (2048, [512, 1024], 'EMBEDDING', 'needed')] + (512, "512, 1024", 'SUBMITTED', no_update), + (2048, "512, 1024", 'EMBEDDING', 'needed') +] @pytest.mark.parametrize( 'spins_val, cached_embedding_lengths_val, submit_state_out, embedding_found', @@ -39,16 +40,15 @@ def run_callback(): context_value.set(AttributeDict( **{"triggered_inputs": [{"prop_id": "btn_simulate.n_clicks"}]})) - return simulate(1, 2, 'dummy_job_id', 'READY', before_test, - cached_embedding_lengths_val, spins_val, - 'Advantage_system4.3', 'dummy') + return run_button_click(1, 'READY', cached_embedding_lengths_val, spins_val, + 'Advantage_system4.3') ctx = copy_context() output = ctx.run(run_callback) - assert output[0:5] == (True, False, 0.5*1000, 0, submit_state_out) - assert output[6] == embedding_found + assert output[0:4] == (True, False, 0, submit_state_out) + assert output[5] == embedding_found def mock_get_status(client, job_id, job_submit_time): @@ -87,39 +87,40 @@ def mock_find_embedding(spins, dummy_edges): if spins == 'no': return {} -parametrize_names = 'job_id_val, job_submit_state_in, cached_embedding_lengths_val, ' + \ +parametrize_names = 'job_id_val, job_submit_state_in, ' + \ 'spins_val, embeddings_found_in, btn_simulate_disabled_out, wd_job_disabled_out, ' + \ 'wd_job_intervals_out, wd_job_n_out, job_submit_state_out, job_submit_time_out, ' + \ 'embedding_found_out' parametrize_vals = [ - (-1, 'READY', [512, 1024], 512, 'dummy embeddings found', False, True, - 0, 0, 'ERROR', no_update, no_update), - ('first few attempts', 'SUBMITTED', [512, 1024], 512, 'dummy embeddings found', True, False, - 0.2*1000, 0, 'SUBMITTED', no_update, no_update), - ('first returned status', 'SUBMITTED', [512, 1024], 512, 'dummy embeddings found', True, False, - 1*1000, 0, 'PENDING', no_update, no_update), - ('1', 'PENDING', [512, 1024], 512, 'dummy embeddings found', True, False, - 1*1000, 0, 'IN_PROGRESS', no_update, no_update), - ('1', 'IN_PROGRESS', [512, 1024], 512, 'dummy embeddings found', True, False, - 1*1000, 0, 'IN_PROGRESS', no_update, no_update), - ('2', 'IN_PROGRESS', [512, 1024], 512, 'dummy embeddings found', True, False, - 1*1000, 0, 'COMPLETED', no_update, no_update), - 
('2', 'COMPLETED', [512, 1024], 512, 'dummy embeddings found', False, True, - 0.1*1000, 0, no_update, no_update, no_update), - ('3', 'CANCELLED', [512, 1024], 512, 'dummy embeddings found', False, True, - 0.1*1000, 0, no_update, no_update, no_update), - ('4', 'FAILED', [512, 1024], 512, 'dummy embeddings found', False, True, - 0.1*1000, 0, no_update, no_update, no_update), - ('dummy', 'EMBEDDING', [512, 1024], 'yes', 'needed', True, False, - 0.2*1000, 0, 'EMBEDDING', before_test, {'yes': {1: [10], 2: [20]}}), - ('dummy', 'EMBEDDING', [512, 1024], 'no', 'needed', True, False, - 0.2*1000, 0, 'FAILED', before_test, 'not found'), - ('dummy', 'EMBEDDING', [512, 1024], 'no', 'not needed', True, False, - 0.2*1000, 0, 'SUBMITTED', before_test, no_update)] + (-1, 'READY', 512, 'dummy embeddings found', False, True, + no_update, no_update, 'ERROR', no_update, no_update), + ('first few attempts', 'SUBMITTED', 512, 'dummy embeddings found', no_update, no_update, + 200, 0, 'SUBMITTED', no_update, no_update), + ('first returned status', 'SUBMITTED', 512, 'dummy embeddings found', no_update, no_update, + 1000, 0, 'PENDING', no_update, no_update), + ('1', 'PENDING', 512, 'dummy embeddings found', no_update, no_update, + 1000, 0, 'IN_PROGRESS', no_update, no_update), + ('1', 'IN_PROGRESS', 512, 'dummy embeddings found', no_update, no_update, + 1000, 0, 'IN_PROGRESS', no_update, no_update), + ('2', 'IN_PROGRESS', 512, 'dummy embeddings found', no_update, no_update, + 1000, 0, 'COMPLETED', no_update, no_update), + ('2', 'COMPLETED', 512, 'dummy embeddings found', False, True, + no_update, no_update, no_update, no_update, no_update), + ('3', 'CANCELLED', 512, 'dummy embeddings found', False, True, + no_update, no_update, no_update, no_update, no_update), + ('4', 'FAILED', 512, 'dummy embeddings found', False, True, + no_update, no_update, no_update, no_update, no_update), + ('dummy', 'EMBEDDING', 'yes', 'needed', no_update, no_update, + 200, 0, 'EMBEDDING', no_update, {'yes': {1: [10], 2: [20]}}), + ('dummy', 'EMBEDDING', 'no', 'needed', False, True, + no_update, no_update, 'FAILED', no_update, 'not found'), + ('dummy', 'EMBEDDING', 'no', 'not needed', no_update, no_update, + 200, no_update, 'SUBMITTED', before_test, no_update) +] @pytest.mark.parametrize(parametrize_names, parametrize_vals) -def test_simulate_states(mocker, job_id_val, job_submit_state_in, cached_embedding_lengths_val, +def test_simulate_states(mocker, job_id_val, job_submit_state_in, spins_val, embeddings_found_in, btn_simulate_disabled_out, wd_job_disabled_out, wd_job_intervals_out, wd_job_n_out, job_submit_state_out, job_submit_time_out, embedding_found_out): @@ -133,15 +134,23 @@ def run_callback(): context_value.set(AttributeDict( **{"triggered_inputs": [{"prop_id": "wd_job.n_intervals"}]})) - return simulate(1, 1, job_id_val, job_submit_state_in, before_test, - cached_embedding_lengths_val, spins_val, 'Advantage_system4.3', - embeddings_found_in) + return simulate(1, job_id_val, job_submit_state_in, before_test, + spins_val, 'Advantage_system4.3', embeddings_found_in) ctx = copy_context() output = ctx.run(run_callback) - assert output[0:5] == (btn_simulate_disabled_out, wd_job_disabled_out, wd_job_intervals_out, - wd_job_n_out, job_submit_state_out) - assert output[6] == embedding_found_out + expected_output = SimulateReturn( + btn_simulate_disabled=btn_simulate_disabled_out, + wd_job_disabled=wd_job_disabled_out, + wd_job_interval=wd_job_intervals_out, + wd_job_n_intervals=wd_job_n_out, + job_submit_state=job_submit_state_out, + 
job_submit_time=job_submit_time_out, + embeddings_found=embedding_found_out, + ) + + assert output[0:5] == expected_output[0:5] + assert output[6] == expected_output[6] # One could test ``job_submit_time_out >= before_test`` to little gain, much complication \ No newline at end of file diff --git a/tests/test_cb_submit_job.py b/tests/test_cb_submit_job.py index 635eb7c..9d303ee 100644 --- a/tests/test_cb_submit_job.py +++ b/tests/test_cb_submit_job.py @@ -70,6 +70,6 @@ def run_callback(): ctx = copy_context() output = ctx.run(run_callback) - assert output == (1234, False, False) + assert output == (1234, False, False, 0) From a235b77aa6a4aa0c5b3ea5c6906feddf112f12a5 Mon Sep 17 00:00:00 2001 From: Kate Culver Date: Tue, 14 Jan 2025 15:46:17 -0800 Subject: [PATCH 151/170] Bound and upgrade requirements --- requirements.txt | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/requirements.txt b/requirements.txt index 0c25f04..3a8d491 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,10 +1,8 @@ -dwave-ocean-sdk>=6.9.0 - dash==2.14.1 dash-bootstrap-components==1.5.0 - -pandas +dwave-ocean-sdk>=8.1.0 +pandas>=2.2.3 # Needed only for unit testing -pytest -pytest-mock \ No newline at end of file +pytest>=8.3.4 +pytest-mock>=3.14.0 \ No newline at end of file From 2b0f4a24c649a08210f448c0da0d4593c2c09cf0 Mon Sep 17 00:00:00 2001 From: Kate Culver Date: Wed, 15 Jan 2025 15:40:38 -0800 Subject: [PATCH 152/170] Remove embeddings_found, clean up logic --- app.py | 246 ++++++++++++------------------ assets/custom.css | 29 +++- helpers/layouts_components.py | 48 ++---- tests/test_cb_cache_embeddings.py | 27 +--- tests/test_cb_schedule.py | 6 +- tests/test_cb_simulate.py | 74 +++++---- 6 files changed, 178 insertions(+), 252 deletions(-) diff --git a/app.py b/app.py index 1b58ef1..95a600e 100644 --- a/app.py +++ b/app.py @@ -78,7 +78,7 @@ html.Img( src=THUMBNAIL, height="30px", - style={"margin-right": "10px"}, + style={"marginRight": "10px"}, ), ], ), @@ -133,7 +133,6 @@ def tooltips(problem_type: Union[ProblemType, int]) -> list[dbc.Tooltip]: dcc.Store(id="job_submit_time"), dcc.Store(id="job_id"), dcc.Store(id="embeddings_cached", data={}), - dcc.Store(id="embeddings_found", data={}), dcc.Interval( id="wd_job", interval=500, @@ -245,19 +244,20 @@ def update_selected_problem_type( raise PreventUpdate nav_class_names = [""] * len(problem_options) - new_problem_type = ctx.triggered_id["index"] if ctx.triggered_id else ProblemType.KZ.value + problem_type_value = ctx.triggered_id["index"] if ctx.triggered_id else ProblemType.KZ.value + problem_type = ProblemType(problem_type_value) - nav_class_names[new_problem_type] = "active" + nav_class_names[problem_type_value] = "active" return ( nav_class_names, - new_problem_type, - get_kz_graph_radio_options(ProblemType(new_problem_type)), - tooltips(ProblemType(new_problem_type)), - get_anneal_duration_setting(ProblemType(new_problem_type)), - get_coupling_strength_slider(ProblemType(new_problem_type)), - MAIN_HEADER if new_problem_type is ProblemType.KZ.value else MAIN_HEADER_NM, - DESCRIPTION if new_problem_type is ProblemType.KZ.value else DESCRIPTION_NM, + problem_type_value, + get_kz_graph_radio_options(problem_type), + tooltips(problem_type), + get_anneal_duration_setting(problem_type), + get_coupling_strength_slider(problem_type), + MAIN_HEADER if problem_type is ProblemType.KZ else MAIN_HEADER_NM, + DESCRIPTION if problem_type is ProblemType.KZ else DESCRIPTION_NM, ) @@ -306,7 +306,7 @@ def set_schedule(qpu_name): """Set the 
schedule for the selected QPU.""" schedule_filename = "FALLBACK_SCHEDULE.csv" - schedule_filename_style = {"color": "red", "fontSize": 12} + schedule_filename_style = {"color": "#FFA143", "fontSize": 12} if ctx.triggered_id: for filename in [ @@ -327,47 +327,35 @@ def set_schedule(qpu_name): Output("embedding_is_cached", "children"), inputs=[ Input("qpu_selection", "value"), - Input("embeddings_found", "data"), - State("embeddings_cached", "data"), - ] + ], + prevent_initial_call=True, ) -def cache_embeddings(qpu_name, embeddings_found, embeddings_cached): +def load_cached_embeddings(qpu_name): """Cache embeddings for the selected QPU.""" - if ctx.triggered_id == "qpu_selection": - - embeddings_cached = {} # Wipe out previous QPU's embeddings + embeddings_cached = {} # Wipe out previous QPU's embeddings - for filename in [ - file for file in os.listdir("helpers") if ".json" in file and "emb_" in file - ]: - - if qpu_name == "Diffusion [Classical]": - qpu_name = "Advantage_system6.4" + for filename in [ + file for file in os.listdir("helpers") if ".json" in file and "emb_" in file + ]: - if qpu_name.split('.')[0] in filename: - with open(f"helpers/{filename}", "r") as fp: - embeddings_cached = json.load(fp) - - embeddings_cached = json_to_dict(embeddings_cached) + if qpu_name == "Diffusion [Classical]": + qpu_name = "Advantage_system6.4" - # Validate that loaded embeddings' edges are still available on the selected QPU - for length in list(embeddings_cached.keys()): - source_graph = dimod.to_networkx_graph(create_bqm(num_spins=length)).edges - target_graph = qpus[qpu_name].edges - emb = embeddings_cached[length] + if qpu_name.split('.')[0] in filename: + with open(f"helpers/{filename}", "r") as fp: + embeddings_cached = json.load(fp) - if not is_valid_embedding(emb, source_graph, target_graph): - del embeddings_cached[length] + embeddings_cached = json_to_dict(embeddings_cached) - if ctx.triggered_id == "embeddings_found": - if isinstance(embeddings_found, str): # embeddings_found = 'needed' or 'not found' - raise PreventUpdate + # Validate that loaded embeddings' edges are still available on the selected QPU + for length in list(embeddings_cached.keys()): + source_graph = dimod.to_networkx_graph(create_bqm(num_spins=length)).edges + target_graph = qpus[qpu_name].edges + emb = embeddings_cached[length] - embeddings_cached = json_to_dict(embeddings_cached) - embeddings_found = json_to_dict(embeddings_found) - new_embedding = list(embeddings_found.keys())[0] - embeddings_cached[new_embedding] = embeddings_found[new_embedding] + if not is_valid_embedding(emb, source_graph, target_graph): + del embeddings_cached[length] return embeddings_cached, ", ".join(str(embedding) for embedding in embeddings_cached.keys()) @@ -412,6 +400,8 @@ def display_graphics_kink_density( kz_data, ): """Generate graphics for kink density based on theory and QPU samples.""" + if ctx.triggered_id == "job_submit_state" and job_submit_state != "COMPLETED": + raise PreventUpdate ta_min = 2 ta_max = 350 @@ -421,43 +411,7 @@ def display_graphics_kink_density( # update the maximum anneal time for zne demo ta_max = 1500 - if ctx.triggered_id == "qpu_selection" or ctx.triggered_id == "spins": - coupling_data = {} - zne_estimates = {} - fig = plot_kink_densities_bg( - graph_display, - [ta_min, ta_max], - J_BASELINE, - schedule_filename, - coupling_data, - zne_estimates, - problem_type=problem_type, - ) - - return fig, coupling_data, zne_estimates, False, kz_data - - if ctx.triggered_id in ["zne_graph_display", 
"coupling_strength", "quench_schedule_filename"]: - fig = plot_kink_densities_bg( - graph_display, - [ta_min, ta_max], - J_BASELINE, - schedule_filename, - coupling_data, - zne_estimates, - problem_type=problem_type, - ) - - if graph_display == "coupling": - zne_estimates, modal_trigger = plot_zne_fitted_line( - fig, coupling_data, qpu_name, zne_estimates, graph_display, str(ta) - ) - - return fig, coupling_data, zne_estimates, False, kz_data - if ctx.triggered_id == "job_submit_state": - if job_submit_state != "COMPLETED": - raise PreventUpdate - embeddings_cached = json_to_dict(embeddings_cached) sampleset_unembedded = get_samples( @@ -503,6 +457,10 @@ def display_graphics_kink_density( return fig, coupling_data, zne_estimates, modal_trigger, kz_data + if ctx.triggered_id == "qpu_selection" or ctx.triggered_id == "spins": + coupling_data = {} + zne_estimates = {} + fig = plot_kink_densities_bg( graph_display, [ta_min, ta_max], @@ -512,27 +470,15 @@ def display_graphics_kink_density( zne_estimates, problem_type=problem_type, ) - return fig, coupling_data, zne_estimates, False, kz_data - if ctx.triggered_id in ["qpu_selection", "spins", "coupling_strength"]: - kz_data = {"k": []} - fig = plot_kink_densities_bg( - graph_display, - [ta_min, ta_max], - J, - schedule_filename, - coupling_data, - zne_estimates, - kz_data=kz_data, - problem_type=problem_type, - ) + if ctx.triggered_id in ["zne_graph_display", "coupling_strength", "quench_schedule_filename"] and graph_display == "coupling": + zne_estimates, modal_trigger = plot_zne_fitted_line( + fig, coupling_data, qpu_name, zne_estimates, graph_display, str(ta) + ) return fig, coupling_data, zne_estimates, False, kz_data if ctx.triggered_id == "job_submit_state": - if job_submit_state != "COMPLETED": - raise PreventUpdate - embeddings_cached = json_to_dict(embeddings_cached) sampleset_unembedded = get_samples( @@ -547,6 +493,9 @@ def display_graphics_kink_density( ) return fig, coupling_data, zne_estimates, False, kz_data + if ctx.triggered_id in ["qpu_selection", "spins", "coupling_strength"]: + kz_data = {"k": []} + fig = plot_kink_densities_bg( graph_display, [ta_min, ta_max], @@ -572,6 +521,7 @@ def display_graphics_kink_density( ) def display_graphics_spin_ring(spins, job_submit_state, job_id, J, embeddings_cached): """Generate graphics for spin-ring display.""" + best_sample = None if ctx.triggered_id == "job_submit_state": if job_submit_state != "COMPLETED": @@ -585,13 +535,18 @@ def display_graphics_spin_ring(spins, job_submit_state, job_id, J, embeddings_ca best_indx = np.abs(kinks_per_sample - kink_density).argmin() best_sample = sampleset_unembedded.record.sample[best_indx] - fig = plot_spin_orientation(num_spins=spins, sample=best_sample) - return fig - - fig = plot_spin_orientation(num_spins=spins, sample=None) + fig = plot_spin_orientation(num_spins=spins, sample=best_sample) return fig +class SubmitJobReturn(NamedTuple): + """Return type for the ``submit_job`` callback function.""" + + job_id: str = dash.no_update + initial_warning: bool = False + warning_modal_open: bool = False + wd_job_n_intervals: int = 0 + @app.callback( Output("job_id", "data"), Output("initial_warning", "data"), @@ -620,7 +575,7 @@ def submit_job( problem_type, filename, initial_warning, -): +) -> SubmitJobReturn: """Submit job and provide job ID.""" solver = qpus[qpu_name] @@ -632,17 +587,17 @@ def submit_job( annealing_time = ta_ns / 1000 if qpu_name == "Diffusion [Classical]": - bqm_embedded = embed_bqm( - bqm, - embedding, - qpus["Diffusion 
[Classical]"].adjacency, - ) + bqm_embedded = embed_bqm(bqm, embedding, qpus["Diffusion [Classical]"].adjacency) sampleset = qpus["Diffusion [Classical]"].sample( bqm_embedded, annealing_time=annealing_time ) - return json.dumps(sampleset.to_serializable()), True, not initial_warning, 0 + return SubmitJobReturn( + job_id=json.dumps(sampleset.to_serializable()), + initial_warning=True, + warning_modal_open=not initial_warning + ) bqm_embedded = embed_bqm(bqm, embedding, DWaveSampler(solver=solver.name).adjacency) @@ -666,19 +621,26 @@ def submit_job( label=f"Examples - Kibble-Zurek Simulation, submitted: {job_submit_time}", ) - return computation.wait_id(), False, False, 0 + return SubmitJobReturn(job_id=computation.wait_id()) +class RunButtonClickReturn(NamedTuple): + """Return type for the ``run_button_click`` callback function.""" + + btn_simulate_disabled: bool = True + wd_job_disabled: bool = False + wd_job_n_intervals: int = 0 + job_submit_state: str = dash.no_update + job_submit_time: datetime = dash.no_update + @app.callback( Output("btn_simulate", "disabled"), Output("wd_job", "disabled"), Output("wd_job", "n_intervals", allow_duplicate=True), Output("job_submit_state", "children"), Output("job_submit_time", "data"), - Output("embeddings_found", "data"), inputs=[ Input("btn_simulate", "n_clicks"), - State("job_submit_state", "children"), State("embedding_is_cached", "children"), State("spins", "value"), State("qpu_selection", "value"), @@ -687,32 +649,25 @@ def submit_job( ) def run_button_click( run_btn_click, - job_submit_state, cached_embeddings, spins, qpu_name, -): +) -> RunButtonClickReturn: """Manage simulation: embedding, job submission.""" - if str(spins) in cached_embeddings.split(", ") or qpu_name == "Diffusion [Classical]": - submit_time = datetime.datetime.now().strftime("%c") - if qpu_name == "Diffusion [Classical]": # Hack to fix switch from SA to QPU - submit_time = "SA" - job_submit_state = "SUBMITTED" - embedding = dash.no_update - - else: - submit_time = dash.no_update - job_submit_state = "EMBEDDING" - embedding = "needed" + if qpu_name == "Diffusion [Classical]": + return RunButtonClickReturn( + job_submit_state="SUBMITTED", + job_submit_time="SA", # Hack to fix switch from SA to QPU + ) + + if str(spins) in cached_embeddings.split(", "): # If we have a cached embedding + return RunButtonClickReturn( + job_submit_state="SUBMITTED", + job_submit_time=datetime.datetime.now().strftime("%c"), + ) + + return RunButtonClickReturn(job_submit_state="EMBEDDING") - return ( - True, - False, - 0, - job_submit_state, - submit_time, - embedding, - ) class SimulateReturn(NamedTuple): """Return type for the ``simulate`` callback function.""" @@ -723,7 +678,8 @@ class SimulateReturn(NamedTuple): wd_job_n_intervals: int = dash.no_update job_submit_state: str = dash.no_update job_submit_time: datetime = dash.no_update - embeddings_found: dict|str = dash.no_update + embeddings_cached: dict = dash.no_update + embedding_is_cached: str = dash.no_update @app.callback( Output("btn_simulate", "disabled", allow_duplicate=True), @@ -732,7 +688,8 @@ class SimulateReturn(NamedTuple): Output("wd_job", "n_intervals", allow_duplicate=True), Output("job_submit_state", "children", allow_duplicate=True), Output("job_submit_time", "data", allow_duplicate=True), - Output("embeddings_found", "data", allow_duplicate=True), + Output("embeddings_cached", "data", allow_duplicate=True), + Output("embedding_is_cached", "children", allow_duplicate=True), inputs=[ Input("wd_job", "n_intervals"), 
State("job_id", "data"), @@ -740,7 +697,7 @@ class SimulateReturn(NamedTuple): State("job_submit_time", "data"), State("spins", "value"), State("qpu_selection", "value"), - State("embeddings_found", "data"), + State("embeddings_cached", "data"), ], prevent_initial_call=True, ) @@ -751,41 +708,36 @@ def simulate( job_submit_time, spins, qpu_name, - embeddings_found, + embeddings_cached, ) -> SimulateReturn: """Manage simulation: embedding, job submission.""" if job_submit_state == "EMBEDDING": - if embeddings_found != "needed": - # Found embedding last WD, so is cached, so now can submit job - return SimulateReturn( - wd_job_interval=200, - job_submit_state="SUBMITTED", - job_submit_time=datetime.datetime.now().strftime("%c"), - ) try: embedding = find_one_to_one_embedding(spins, qpus[qpu_name].edges) if embedding: + embeddings_cached = json_to_dict(embeddings_cached) + embeddings_cached.update({spins: embedding}) + return SimulateReturn( wd_job_interval=200, - wd_job_n_intervals=0, - job_submit_state="EMBEDDING", # Stay another WD to allow caching the embedding - embeddings_found={spins: embedding} + job_submit_state="SUBMITTED", + job_submit_time=datetime.datetime.now().strftime("%c"), + embeddings_cached=embeddings_cached, + embedding_is_cached=", ".join(str(em) for em in embeddings_cached.keys()) ) return SimulateReturn( btn_simulate_disabled=False, wd_job_disabled=True, job_submit_state="FAILED", - embeddings_found="not found" ) except Exception: return SimulateReturn( btn_simulate_disabled=False, wd_job_disabled=True, job_submit_state="FAILED", - embeddings_found="not found" ) if job_submit_state in ["SUBMITTED", "PENDING", "IN_PROGRESS"]: diff --git a/assets/custom.css b/assets/custom.css index 9c16a10..e870dc4 100644 --- a/assets/custom.css +++ b/assets/custom.css @@ -34,10 +34,10 @@ h3 { label { color: rgb(3, 184, 255); - margin-top: 10px; + margin-top: 16px; margin-bottom: 5px; font-weight: 600; - font-size: 1rem; + font-size: 16px; } p { @@ -48,3 +48,28 @@ p { .progress { background: rgba(255, 255, 255, 0.25); } + +#coupling_strength { + padding-left: 5px !important; + padding-right: 5px !important; +} + +#spins label, +#graph_display label { + font-size: 13px; + margin: 0; +} + +#spins label { + color: white; +} + +#spins label:not(:first-child), +#graph_display label:not(:first-child) { + margin-left: 20px; +} + +#graph_display { + margin-top: 10px; + margin-left: 20px; +} diff --git a/helpers/layouts_components.py b/helpers/layouts_components.py index d06658b..4278afd 100644 --- a/helpers/layouts_components.py +++ b/helpers/layouts_components.py @@ -47,7 +47,7 @@ def get_anneal_duration_setting(problem_type): {"label": "1280 ns", "value": 1280}, ], value=80, # default value - style={"max-width": "95%"}, + style={"maxWidth": "95%"}, ) return dbc.Input( @@ -57,7 +57,7 @@ def get_anneal_duration_setting(problem_type): max=100, step=1, value=7, - style={"max-width": "95%"}, + style={"maxWidth": "95%"}, ) @@ -66,43 +66,23 @@ def get_kz_graph_radio_options(problem_type): return dcc.RadioItems( id="graph_display", options=[ - { - "label": "Kink density vs Anneal time", - "value": "kink_density", - "disabled": False, - }, - { - "label": "Kink density vs Noise level", - "value": "coupling", - "disabled": False, - }, + {"label": "Kink density vs Anneal time", "value": "kink_density"}, + {"label": "Kink density vs Noise level", "value": "coupling"}, ], value="coupling", - inputStyle={"margin-right": "10px", "margin-bottom": "5px"}, - labelStyle={ - "color": "rgb(3, 184, 255)", - 
"font-size": 12, - "display": "inline-block", - "marginLeft": 20, - }, + inputStyle={"marginRight": "10px"}, inline=True, ) return dcc.RadioItems( id="graph_display", options=[ - {"label": "Both", "value": "both", "disabled": False}, - {"label": "Kink density", "value": "kink_density", "disabled": False}, - {"label": "Schedule", "value": "schedule", "disabled": False}, + {"label": "Both", "value": "both"}, + {"label": "Kink density", "value": "kink_density"}, + {"label": "Schedule", "value": "schedule"}, ], value="both", - inputStyle={"margin-right": "10px", "margin-bottom": "5px"}, - labelStyle={ - "color": "rgb(3, 184, 255)", - "font-size": 12, - "display": "inline-block", - "marginLeft": 20, - }, + inputStyle={"marginRight": "10px"}, inline=True, ) @@ -110,17 +90,11 @@ def get_kz_graph_radio_options(problem_type): config_spins = dcc.RadioItems( id="spins", options=[ - {"label": f"{length}", "value": length, "disabled": False} + {"label": f"{length}", "value": length} for length in ring_lengths ], value=512, - inputStyle={"margin-right": "10px", "margin-bottom": "10px"}, - labelStyle={ - "color": "white", - "font-size": 12, - "display": "inline-block", - "marginLeft": 20, - }, + inputStyle={"marginRight": "10px"}, inline=True, ) diff --git a/tests/test_cb_cache_embeddings.py b/tests/test_cb_cache_embeddings.py index d3e40a5..d2ce5c8 100644 --- a/tests/test_cb_cache_embeddings.py +++ b/tests/test_cb_cache_embeddings.py @@ -18,9 +18,8 @@ from contextvars import copy_context from dash._callback_context import context_value from dash._utils import AttributeDict -from dash.exceptions import PreventUpdate -from app import cache_embeddings +from app import load_cached_embeddings embedding_filenames = [ 'emb_Advantage_system4.1.json', @@ -67,7 +66,7 @@ def run_callback(): AttributeDict(**{'triggered_inputs': [{'prop_id': 'qpu_selection.value'},]}) ) - return cache_embeddings(qpu_name_val, 'dummy', 'dummy') + return load_cached_embeddings(qpu_name_val) ctx = copy_context() output = ctx.run(run_callback) @@ -86,25 +85,3 @@ def run_callback(): ('needed', json_embeddings_file), ('not found', json_embeddings_file), ] - -@pytest.mark.parametrize(['embeddings_found_val', 'embeddings_cached_val'], -parametrize_vals) -def test_cache_embeddings_found_embedding(embeddings_found_val, embeddings_cached_val): - """Test the caching of embeddings: triggered by found embedding.""" - - def run_callback(): - context_value.set( - AttributeDict(**{'triggered_inputs': [{'prop_id': 'embeddings_found.data'},]}) - ) - - return cache_embeddings('dummy', embeddings_found_val, embeddings_cached_val) - - ctx = copy_context() - - if not isinstance(embeddings_found_val, dict): - with pytest.raises(PreventUpdate): - ctx.run(run_callback) - else: - output = ctx.run(run_callback) - - assert 22 in output[1] diff --git a/tests/test_cb_schedule.py b/tests/test_cb_schedule.py index 9e2ee3d..88c43d0 100644 --- a/tests/test_cb_schedule.py +++ b/tests/test_cb_schedule.py @@ -34,15 +34,15 @@ ('Advantage_system6.4', all_schedules, 1, - {'color': 'red', 'fontSize': 12}), + {'color': '#FFA143', 'fontSize': 12}), ('Advantage2_prototype2.3', all_schedules, 2, - {'color': 'red', 'fontSize': 12}), + {'color': '#FFA143', 'fontSize': 12}), ('Advantage25_system7.9', all_schedules, 3, - {'color': 'red', 'fontSize': 12}), + {'color': '#FFA143', 'fontSize': 12}), ] @pytest.mark.parametrize(['qpu_selection_val', 'schedule_name', 'indx', 'style'], parametrize_vals) diff --git a/tests/test_cb_simulate.py b/tests/test_cb_simulate.py index 
d3965f9..0ce20d5 100644 --- a/tests/test_cb_simulate.py +++ b/tests/test_cb_simulate.py @@ -25,22 +25,21 @@ before_test = datetime.datetime.now().strftime('%c') parametrize_vals = [ - (512, "512, 1024", 'SUBMITTED', no_update), - (2048, "512, 1024", 'EMBEDDING', 'needed') + (512, "512, 1024", 'SUBMITTED'), + (2048, "512, 1024", 'EMBEDDING') ] @pytest.mark.parametrize( - 'spins_val, cached_embedding_lengths_val, submit_state_out, embedding_found', + 'spins_val, cached_embedding_lengths_val, submit_state_out', parametrize_vals) -def test_simulate_button_press(spins_val, cached_embedding_lengths_val, submit_state_out, - embedding_found): +def test_simulate_button_press(spins_val, cached_embedding_lengths_val, submit_state_out): """Test pressing Simulate button initiates submission.""" def run_callback(): context_value.set(AttributeDict( **{"triggered_inputs": [{"prop_id": "btn_simulate.n_clicks"}]})) - return run_button_click(1, 'READY', cached_embedding_lengths_val, spins_val, + return run_button_click(1, cached_embedding_lengths_val, spins_val, 'Advantage_system4.3') ctx = copy_context() @@ -48,7 +47,6 @@ def run_callback(): output = ctx.run(run_callback) assert output[0:4] == (True, False, 0, submit_state_out) - assert output[5] == embedding_found def mock_get_status(client, job_id, job_submit_time): @@ -88,42 +86,41 @@ def mock_find_embedding(spins, dummy_edges): return {} parametrize_names = 'job_id_val, job_submit_state_in, ' + \ - 'spins_val, embeddings_found_in, btn_simulate_disabled_out, wd_job_disabled_out, ' + \ + 'spins_val, embeddings_cached_in, btn_simulate_disabled_out, wd_job_disabled_out, ' + \ 'wd_job_intervals_out, wd_job_n_out, job_submit_state_out, job_submit_time_out, ' + \ - 'embedding_found_out' + 'embeddings_cached_out, embedding_is_cached_out' parametrize_vals = [ - (-1, 'READY', 512, 'dummy embeddings found', False, True, - no_update, no_update, 'ERROR', no_update, no_update), - ('first few attempts', 'SUBMITTED', 512, 'dummy embeddings found', no_update, no_update, - 200, 0, 'SUBMITTED', no_update, no_update), - ('first returned status', 'SUBMITTED', 512, 'dummy embeddings found', no_update, no_update, - 1000, 0, 'PENDING', no_update, no_update), - ('1', 'PENDING', 512, 'dummy embeddings found', no_update, no_update, - 1000, 0, 'IN_PROGRESS', no_update, no_update), - ('1', 'IN_PROGRESS', 512, 'dummy embeddings found', no_update, no_update, - 1000, 0, 'IN_PROGRESS', no_update, no_update), - ('2', 'IN_PROGRESS', 512, 'dummy embeddings found', no_update, no_update, - 1000, 0, 'COMPLETED', no_update, no_update), - ('2', 'COMPLETED', 512, 'dummy embeddings found', False, True, - no_update, no_update, no_update, no_update, no_update), - ('3', 'CANCELLED', 512, 'dummy embeddings found', False, True, - no_update, no_update, no_update, no_update, no_update), - ('4', 'FAILED', 512, 'dummy embeddings found', False, True, - no_update, no_update, no_update, no_update, no_update), - ('dummy', 'EMBEDDING', 'yes', 'needed', no_update, no_update, - 200, 0, 'EMBEDDING', no_update, {'yes': {1: [10], 2: [20]}}), - ('dummy', 'EMBEDDING', 'no', 'needed', False, True, - no_update, no_update, 'FAILED', no_update, 'not found'), - ('dummy', 'EMBEDDING', 'no', 'not needed', no_update, no_update, - 200, no_update, 'SUBMITTED', before_test, no_update) + (-1, 'READY', 512, {}, False, True, + no_update, no_update, 'ERROR', no_update, no_update, no_update), + ('first few attempts', 'SUBMITTED', 512, {}, no_update, no_update, + 200, 0, 'SUBMITTED', no_update, no_update, no_update), + ('first 
returned status', 'SUBMITTED', 512, {}, no_update, no_update, + 1000, 0, 'PENDING', no_update, no_update, no_update), + ('1', 'PENDING', 512, {}, no_update, no_update, + 1000, 0, 'IN_PROGRESS', no_update, no_update, no_update), + ('1', 'IN_PROGRESS', 512, {}, no_update, no_update, + 1000, 0, 'IN_PROGRESS', no_update, no_update, no_update), + ('2', 'IN_PROGRESS', 512, {}, no_update, no_update, + 1000, 0, 'COMPLETED', no_update, no_update, no_update), + ('2', 'COMPLETED', 512, {}, False, True, + no_update, no_update, no_update, no_update, no_update, no_update), + ('3', 'CANCELLED', 512, {}, False, True, + no_update, no_update, no_update, no_update, no_update, no_update), + ('4', 'FAILED', 512, {}, False, True, + no_update, no_update, no_update, no_update, no_update, no_update), + ('dummy', 'EMBEDDING', 'yes', {}, no_update, no_update, + 200, no_update, 'SUBMITTED', no_update, {'yes': {1: [10], 2: [20]}}, "yes"), + ('dummy', 'EMBEDDING', 'no', {}, False, True, + no_update, no_update, 'FAILED', no_update, no_update, no_update), ] @pytest.mark.parametrize(parametrize_names, parametrize_vals) def test_simulate_states(mocker, job_id_val, job_submit_state_in, - spins_val, embeddings_found_in, btn_simulate_disabled_out, + spins_val, embeddings_cached_in, btn_simulate_disabled_out, wd_job_disabled_out, wd_job_intervals_out, wd_job_n_out, - job_submit_state_out, job_submit_time_out, embedding_found_out): + job_submit_state_out, job_submit_time_out, embeddings_cached_out, + embedding_is_cached_out): """Test transitions between states.""" mocker.patch('app.get_job_status', new=mock_get_status) @@ -135,7 +132,7 @@ def run_callback(): **{"triggered_inputs": [{"prop_id": "wd_job.n_intervals"}]})) return simulate(1, job_id_val, job_submit_state_in, before_test, - spins_val, 'Advantage_system4.3', embeddings_found_in) + spins_val, 'Advantage_system4.3', embeddings_cached_in) ctx = copy_context() @@ -148,9 +145,10 @@ def run_callback(): wd_job_n_intervals=wd_job_n_out, job_submit_state=job_submit_state_out, job_submit_time=job_submit_time_out, - embeddings_found=embedding_found_out, + embeddings_cached=embeddings_cached_out, + embedding_is_cached=embedding_is_cached_out, ) assert output[0:5] == expected_output[0:5] - assert output[6] == expected_output[6] + assert output[6:] == expected_output[6:] # One could test ``job_submit_time_out >= before_test`` to little gain, much complication \ No newline at end of file From a6e9554a6f0603a991c6adca45264e1ddf9dc07a Mon Sep 17 00:00:00 2001 From: Kate Culver Date: Wed, 15 Jan 2025 15:48:01 -0800 Subject: [PATCH 153/170] Run black and isort --- app.py | 89 ++++---- helpers/kz_calcs.py | 2 +- helpers/layouts_cards.py | 17 +- helpers/layouts_components.py | 10 +- helpers/plots.py | 10 +- helpers/qa.py | 18 +- mock_kz_sampler.py | 1 - src/demo_enums.py | 1 - tests/test_cb_alert_no_solver.py | 19 +- tests/test_cb_cache_embeddings.py | 72 ++++--- tests/test_cb_disable_buttons.py | 63 ++++-- tests/test_cb_graph_kink_density.py | 97 +++++---- tests/test_cb_graph_spins.py | 73 ++++--- tests/test_cb_progress_bar.py | 23 ++- tests/test_cb_schedule.py | 49 ++--- tests/test_cb_simulate.py | 310 +++++++++++++++++++++------- tests/test_cb_submit_job.py | 59 ++++-- tests/test_kz_calcs.py | 44 ++-- tests/test_mock_kz_sampler.py | 1 + tests/test_qa.py | 17 +- 20 files changed, 618 insertions(+), 357 deletions(-) diff --git a/app.py b/app.py index 95a600e..a9efa5a 100644 --- a/app.py +++ b/app.py @@ -12,30 +12,37 @@ # See the License for the specific language governing permissions and 
# limitations under the License. -from typing import NamedTuple, Union -import dash -import dash_bootstrap_components as dbc -from dash import ALL, ctx, dcc, html, Input, Output, State -from dash.exceptions import PreventUpdate import datetime import json -from demo_configs import DESCRIPTION, DESCRIPTION_NM, J_BASELINE, MAIN_HEADER, MAIN_HEADER_NM, THUMBNAIL, USE_CLASSICAL -import numpy as np import os +from typing import NamedTuple, Union +import dash +import dash_bootstrap_components as dbc import dimod +import numpy as np +from dash import ALL, Input, Output, State, ctx, dcc, html +from dash.exceptions import PreventUpdate from dwave.cloud import Client from dwave.embedding import embed_bqm, is_valid_embedding from dwave.system import DWaveSampler -from mock_kz_sampler import MockKibbleZurekSampler +from demo_configs import ( + DESCRIPTION, + DESCRIPTION_NM, + J_BASELINE, + MAIN_HEADER, + MAIN_HEADER_NM, + THUMBNAIL, + USE_CLASSICAL, +) from helpers.kz_calcs import * from helpers.layouts_cards import * from helpers.layouts_components import * from helpers.plots import * from helpers.qa import * from helpers.tooltips import tool_tips_demo1, tool_tips_demo2 - +from mock_kz_sampler import MockKibbleZurekSampler from src.demo_enums import ProblemType app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP]) @@ -45,8 +52,7 @@ try: client = Client.from_config(client="qpu") qpus = { - qpu.name: qpu - for qpu in client.get_solvers(fast_anneal_time_range__covers=[0.005, 0.1]) + qpu.name: qpu for qpu in client.get_solvers(fast_anneal_time_range__covers=[0.005, 0.1]) } if len(qpus) < 1: raise Exception @@ -89,9 +95,10 @@ dbc.NavLink( problem_type.label, id={"type": "problem-type", "index": index}, - active="exact" + active="exact", ) - ) for index, problem_type in enumerate(ProblemType) + ) + for index, problem_type in enumerate(ProblemType) ], pills=True, ), @@ -121,6 +128,7 @@ def tooltips(problem_type: Union[ProblemType, int]) -> list[dbc.Tooltip]: for target, message in tool_tips.items() ] + app.layout = html.Div( [ dcc.Store(id="coupling_data", data={}), @@ -154,7 +162,7 @@ def tooltips(problem_type: Union[ProblemType, int]) -> list[dbc.Tooltip]: init_job_status=init_job_status, ), *dbc_modal("modal_solver"), - html.Div(tooltips(ProblemType.KZ), id="tooltips") + html.Div(tooltips(ProblemType.KZ), id="tooltips"), ], width=4, style={"minWidth": "30rem"}, @@ -195,7 +203,7 @@ def tooltips(problem_type: Union[ProblemType, int]) -> list[dbc.Tooltip]: fluid=True, ) ], - style={"paddingTop": "20px"} + style={"paddingTop": "20px"}, ), ], ) @@ -309,9 +317,7 @@ def set_schedule(qpu_name): schedule_filename_style = {"color": "#FFA143", "fontSize": 12} if ctx.triggered_id: - for filename in [ - file for file in os.listdir("helpers") if "schedule.csv" in file.lower() - ]: + for filename in [file for file in os.listdir("helpers") if "schedule.csv" in file.lower()]: if qpu_name.split(".")[0] in filename: # Accepts & reddens older versions schedule_filename = filename @@ -335,14 +341,12 @@ def load_cached_embeddings(qpu_name): embeddings_cached = {} # Wipe out previous QPU's embeddings - for filename in [ - file for file in os.listdir("helpers") if ".json" in file and "emb_" in file - ]: + for filename in [file for file in os.listdir("helpers") if ".json" in file and "emb_" in file]: if qpu_name == "Diffusion [Classical]": qpu_name = "Advantage_system6.4" - if qpu_name.split('.')[0] in filename: + if qpu_name.split(".")[0] in filename: with open(f"helpers/{filename}", "r") as fp: 
embeddings_cached = json.load(fp) @@ -381,7 +385,7 @@ def load_cached_embeddings(qpu_name): State("coupling_data", "data"), # access previously stored data State("zne_estimates", "data"), # Access ZNE estimates State("kz_data", "data"), # get kibble zurek data point - ] + ], ) def display_graphics_kink_density( qpu_name, @@ -414,19 +418,17 @@ def display_graphics_kink_density( if ctx.triggered_id == "job_submit_state": embeddings_cached = json_to_dict(embeddings_cached) - sampleset_unembedded = get_samples( - client, job_id, spins, J, embeddings_cached[spins] - ) + sampleset_unembedded = get_samples(client, job_id, spins, J, embeddings_cached[spins]) _, kink_density = kink_stats(sampleset_unembedded, J) # Calculate lambda (previously kappa) # Added _ to avoid keyword restriction - lambda_ = calclambda_(J=J, qpu_name=qpu_name, schedule_name=schedule_filename, J_baseline=J_BASELINE) - - fig = plot_kink_density( - graph_display, figure, kink_density, ta, J, lambda_ + lambda_ = calclambda_( + J=J, qpu_name=qpu_name, schedule_name=schedule_filename, J_baseline=J_BASELINE ) + fig = plot_kink_density(graph_display, figure, kink_density, ta, J, lambda_) + # Initialize the list for this anneal_time if not present ta_str = str(ta) if ta_str not in coupling_data: @@ -471,7 +473,11 @@ def display_graphics_kink_density( problem_type=problem_type, ) - if ctx.triggered_id in ["zne_graph_display", "coupling_strength", "quench_schedule_filename"] and graph_display == "coupling": + if ( + ctx.triggered_id + in ["zne_graph_display", "coupling_strength", "quench_schedule_filename"] + and graph_display == "coupling" + ): zne_estimates, modal_trigger = plot_zne_fitted_line( fig, coupling_data, qpu_name, zne_estimates, graph_display, str(ta) ) @@ -481,9 +487,7 @@ def display_graphics_kink_density( if ctx.triggered_id == "job_submit_state": embeddings_cached = json_to_dict(embeddings_cached) - sampleset_unembedded = get_samples( - client, job_id, spins, J, embeddings_cached[spins] - ) + sampleset_unembedded = get_samples(client, job_id, spins, J, embeddings_cached[spins]) _, kink_density = kink_stats(sampleset_unembedded, J) # Append the new data point @@ -517,7 +521,7 @@ def display_graphics_kink_density( State("job_id", "data"), State("coupling_strength", "value"), State("embeddings_cached", "data"), - ] + ], ) def display_graphics_spin_ring(spins, job_submit_state, job_id, J, embeddings_cached): """Generate graphics for spin-ring display.""" @@ -528,9 +532,7 @@ def display_graphics_spin_ring(spins, job_submit_state, job_id, J, embeddings_ca raise PreventUpdate embeddings_cached = json_to_dict(embeddings_cached) - sampleset_unembedded = get_samples( - client, job_id, spins, J, embeddings_cached[spins] - ) + sampleset_unembedded = get_samples(client, job_id, spins, J, embeddings_cached[spins]) kinks_per_sample, kink_density = kink_stats(sampleset_unembedded, J) best_indx = np.abs(kinks_per_sample - kink_density).argmin() best_sample = sampleset_unembedded.record.sample[best_indx] @@ -547,6 +549,7 @@ class SubmitJobReturn(NamedTuple): warning_modal_open: bool = False wd_job_n_intervals: int = 0 + @app.callback( Output("job_id", "data"), Output("initial_warning", "data"), @@ -596,7 +599,7 @@ def submit_job( return SubmitJobReturn( job_id=json.dumps(sampleset.to_serializable()), initial_warning=True, - warning_modal_open=not initial_warning + warning_modal_open=not initial_warning, ) bqm_embedded = embed_bqm(bqm, embedding, DWaveSampler(solver=solver.name).adjacency) @@ -633,6 +636,7 @@ class 
RunButtonClickReturn(NamedTuple): job_submit_state: str = dash.no_update job_submit_time: datetime = dash.no_update + @app.callback( Output("btn_simulate", "disabled"), Output("wd_job", "disabled"), @@ -681,6 +685,7 @@ class SimulateReturn(NamedTuple): embeddings_cached: dict = dash.no_update embedding_is_cached: str = dash.no_update + @app.callback( Output("btn_simulate", "disabled", allow_duplicate=True), Output("wd_job", "disabled", allow_duplicate=True), @@ -725,7 +730,7 @@ def simulate( job_submit_state="SUBMITTED", job_submit_time=datetime.datetime.now().strftime("%c"), embeddings_cached=embeddings_cached, - embedding_is_cached=", ".join(str(em) for em in embeddings_cached.keys()) + embedding_is_cached=", ".join(str(em) for em in embeddings_cached.keys()), ) return SimulateReturn( @@ -757,7 +762,9 @@ def simulate( return SimulateReturn( btn_simulate_disabled=False, wd_job_disabled=True, - job_submit_state=dash.no_update if job_submit_state in ["COMPLETED", "CANCELLED", "FAILED"] else "ERROR", + job_submit_state=( + dash.no_update if job_submit_state in ["COMPLETED", "CANCELLED", "FAILED"] else "ERROR" + ), ) diff --git a/helpers/kz_calcs.py b/helpers/kz_calcs.py index aeddb4e..1c6142a 100644 --- a/helpers/kz_calcs.py +++ b/helpers/kz_calcs.py @@ -80,7 +80,7 @@ def theoretical_kink_density(annealing_times_ns, J=None, schedule_name=None, b=N """ if b is None: b = theoretical_kink_density_prefactor(J, schedule_name) - + return np.power([1e-9 * t * b for t in annealing_times_ns], -0.5) / (2 * np.pi * np.sqrt(2)) diff --git a/helpers/layouts_cards.py b/helpers/layouts_cards.py index 4a464e2..d410cf5 100644 --- a/helpers/layouts_cards.py +++ b/helpers/layouts_cards.py @@ -12,20 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License. -from dash import dcc, html import dash_bootstrap_components as dbc - -from demo_configs import DESCRIPTION, MAIN_HEADER -from src.demo_enums import ProblemType import plotly.graph_objects as go +from dash import dcc, html +from demo_configs import DESCRIPTION, MAIN_HEADER from helpers.layouts_components import * +from src.demo_enums import ProblemType __all__ = [ "control_card", "graphs_card", ] + def control_card(solvers={}, init_job_status="READY"): """Lay out the configuration and job-submission card. @@ -61,7 +61,10 @@ def control_card(solvers={}, init_job_status="READY"): ], style={"marginTop": "10px"}, ), - html.P(["Cached Embeddings: ", html.Span(id="embedding_is_cached")], style={"marginTop": 10}), + html.P( + ["Cached Embeddings: ", html.Span(id="embedding_is_cached")], + style={"marginTop": 10}, + ), dbc.Row( [ dbc.Col( @@ -99,7 +102,7 @@ def control_card(solvers={}, init_job_status="READY"): ], justify="start", # Aligns buttons to the left align="end", - style={"marginTop": 40} + style={"marginTop": 40}, ), ], body=True, @@ -123,7 +126,7 @@ def graphs_card(problem_type=ProblemType.KZ): id="sample_vs_theory", figure=go.Figure(), style={"height": "40vh", "minHeight": "20rem"}, - ) + ), ], color="white", style={"height": "100%", "minHeight": "50rem"}, diff --git a/helpers/layouts_components.py b/helpers/layouts_components.py index 4278afd..8adb616 100644 --- a/helpers/layouts_components.py +++ b/helpers/layouts_components.py @@ -13,8 +13,10 @@ # limitations under the License. 
from itertools import chain + import dash_bootstrap_components as dbc -from dash import html, dcc +from dash import dcc, html + from src.demo_enums import ProblemType __all__ = [ @@ -89,10 +91,7 @@ def get_kz_graph_radio_options(problem_type): config_spins = dcc.RadioItems( id="spins", - options=[ - {"label": f"{length}", "value": length} - for length in ring_lengths - ], + options=[{"label": f"{length}", "value": length} for length in ring_lengths], value=512, inputStyle={"marginRight": "10px"}, inline=True, @@ -107,6 +106,7 @@ def get_kz_graph_radio_options(problem_type): for val in chain(range(-20, 0, 2), range(2, 12, 2)) } + def get_coupling_strength_slider(problem_type): if problem_type is ProblemType.KZ_NM: return html.Div( diff --git a/helpers/plots.py b/helpers/plots.py index 509433f..55be7e4 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -12,14 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from src.demo_enums import ProblemType import numpy as np import pandas as pd import plotly.graph_objects as go -from helpers.qa import fitted_function from dash.exceptions import PreventUpdate from helpers.kz_calcs import theoretical_kink_density +from helpers.qa import fitted_function +from src.demo_enums import ProblemType __all__ = [ "plot_kink_densities_bg", @@ -336,7 +336,7 @@ def plot_kink_densities_bg( ) fig_data = [predicted_plus, predicted_minus, energy_transverse, energy_problem] - + # Add previously computed kz_data points if kz_data and "k" in kz_data: for pair in kz_data["k"]: @@ -596,9 +596,7 @@ def plot_spin_orientation(num_spins=512, sample=None): return fig -def plot_zne_fitted_line( - fig, coupling_data, qpu_name, zne_estimates, zne_graph_display, ta_str -): +def plot_zne_fitted_line(fig, coupling_data, qpu_name, zne_estimates, zne_graph_display, ta_str): """ Fit a curve to the coupling data and plot the Zero-Noise Extrapolation (ZNE) estimate. diff --git a/helpers/qa.py b/helpers/qa.py index 7242c16..4fdfbf0 100644 --- a/helpers/qa.py +++ b/helpers/qa.py @@ -13,14 +13,14 @@ # limitations under the License. import json -import numpy as np -from numpy.polynomial.polynomial import Polynomial -import scipy import dimod -from dwave.cloud.api import exceptions, Problems -from dwave.embedding import unembed_sampleset import minorminer +import numpy as np +import scipy +from dwave.cloud.api import Problems, exceptions +from dwave.embedding import unembed_sampleset +from numpy.polynomial.polynomial import Polynomial __all__ = [ "create_bqm", @@ -216,9 +216,7 @@ def y_func_x(x): def sigmoidal_crossover(x, p_0, p_1, p_2, p_3): # Strictly positive form. # TODO: Change to force saturation. Large x should go sigmoidally towards 0.5 - return np.exp(p_3) * ( - 1 + np.exp(p_2) * np.tanh(np.exp(p_1) * (x - np.exp(p_0))) - ) + return np.exp(p_3) * (1 + np.exp(p_2) * np.tanh(np.exp(p_1) * (x - np.exp(p_0)))) # Small lp1 << lp0, and lp0= (maxx-minxx)/2; We can linearize: # lp3*(1 + lp2( lp1 x - lp0)) = lp0*lp2*lp3 + lp1*lp2*lp3 x # WIP @@ -238,9 +236,7 @@ def sigmoidal_crossover(x, p_0, p_1, p_2, p_3): np.log(np.sqrt(lp2lp3)), ) try: - p, _ = scipy.optimize.curve_fit( - f=sigmoidal_crossover, xdata=xdata, ydata=ydata, p0=p0 - ) + p, _ = scipy.optimize.curve_fit(f=sigmoidal_crossover, xdata=xdata, ydata=ydata, p0=p0) except: return None diff --git a/mock_kz_sampler.py b/mock_kz_sampler.py index 2e3398d..e4b6ee9 100644 --- a/mock_kz_sampler.py +++ b/mock_kz_sampler.py @@ -13,7 +13,6 @@ # limitations under the License. 
import numpy as np - from dimod import SampleSet from dwave.samplers import SimulatedAnnealingSampler from dwave.system.testing import MockDWaveSampler diff --git a/src/demo_enums.py b/src/demo_enums.py index 47e9682..e9d73ea 100644 --- a/src/demo_enums.py +++ b/src/demo_enums.py @@ -25,4 +25,3 @@ def label(self): ProblemType.KZ: "Kibble-Zurek Mechanism", ProblemType.KZ_NM: "Kibble-Zurek Mechanism with Noise Mitigation", }[self] - diff --git a/tests/test_cb_alert_no_solver.py b/tests/test_cb_alert_no_solver.py index 40f8f2d..9cbd3ee 100644 --- a/tests/test_cb_alert_no_solver.py +++ b/tests/test_cb_alert_no_solver.py @@ -12,31 +12,32 @@ # See the License for the specific language governing permissions and # limitations under the License. -import pytest - from contextvars import copy_context + +import pytest from dash._callback_context import context_value from dash._utils import AttributeDict from app import alert_no_solver -@pytest.mark.parametrize('input_val, output_val', - [(0, True), (1, True), (0, False), (1, False)]) + +@pytest.mark.parametrize("input_val, output_val", [(0, True), (1, True), (0, False), (1, False)]) def test_alert_no_solver(mocker, input_val, output_val): """Test that a failed cloud-client client is identified.""" if output_val: - mocker.patch('app.client', None) + mocker.patch("app.client", None) else: - mocker.patch('app.client', 'dummy') + mocker.patch("app.client", "dummy") def run_callback(): - context_value.set(AttributeDict(**{'triggered_inputs': - [{'prop_id': 'btn_simulate.n_clicks'}]})) + context_value.set( + AttributeDict(**{"triggered_inputs": [{"prop_id": "btn_simulate.n_clicks"}]}) + ) return alert_no_solver(input_val) ctx = copy_context() output = ctx.run(run_callback) - assert output == output_val \ No newline at end of file + assert output == output_val diff --git a/tests/test_cb_cache_embeddings.py b/tests/test_cb_cache_embeddings.py index d2ce5c8..7057e97 100644 --- a/tests/test_cb_cache_embeddings.py +++ b/tests/test_cb_cache_embeddings.py @@ -12,19 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import pytest +from contextvars import copy_context from io import StringIO -from contextvars import copy_context +import pytest from dash._callback_context import context_value from dash._utils import AttributeDict from app import load_cached_embeddings embedding_filenames = [ - 'emb_Advantage_system4.1.json', - 'emb_Advantage_system5.4.json', - 'emb_Advantage2_prototype2.3.json', ] + "emb_Advantage_system4.1.json", + "emb_Advantage_system5.4.json", + "emb_Advantage2_prototype2.3.json", +] json_embeddings_file = '{ \ "3": {"1": [11], "0": [10], "2": [12]}, \ @@ -32,38 +33,52 @@ edges_5 = [(10, 11), (11, 12), (12, 13), (13, 14), (14, 10)] edges_3_5 = [(10, 11), (10, 12), (11, 12), (12, 13), (13, 14), (14, 10)] - -class mock_qpu_edges(): + + +class mock_qpu_edges: def __init__(self, edges): self.edges = edges + class mock_qpu(object): def __init__(self): - self.edges_per_qpu = { - 'Advantage_system4.1': edges_5, - 'Advantage2_prototype2.55': edges_3_5 - } - + self.edges_per_qpu = {"Advantage_system4.1": edges_5, "Advantage2_prototype2.55": edges_3_5} + def __getitem__(self, indx): return mock_qpu_edges(self.edges_per_qpu[indx]) - + + parametrize_vals = [ - ('Advantage_system4.1', embedding_filenames, json_embeddings_file), - ('Advantage2_prototype2.55', embedding_filenames, json_embeddings_file), - ('Advantage88_prototype7.3', embedding_filenames, json_embeddings_file) + ("Advantage_system4.1", embedding_filenames, json_embeddings_file), + ("Advantage2_prototype2.55", embedding_filenames, json_embeddings_file), + ("Advantage88_prototype7.3", embedding_filenames, json_embeddings_file), ] -@pytest.mark.parametrize(['qpu_name_val', 'embeddings', 'json_emb_file',], parametrize_vals) + +@pytest.mark.parametrize( + [ + "qpu_name_val", + "embeddings", + "json_emb_file", + ], + parametrize_vals, +) def test_cache_embeddings_qpu_selection(mocker, qpu_name_val, embeddings, json_emb_file): """Test the caching of embeddings: triggered by QPU selection.""" - mocker.patch('app.os.listdir', return_value=embeddings) - mocker.patch('builtins.open', return_value=StringIO(json_emb_file)) - mocker.patch('app.qpus', new=mock_qpu()) + mocker.patch("app.os.listdir", return_value=embeddings) + mocker.patch("builtins.open", return_value=StringIO(json_emb_file)) + mocker.patch("app.qpus", new=mock_qpu()) def run_callback(): context_value.set( - AttributeDict(**{'triggered_inputs': [{'prop_id': 'qpu_selection.value'},]}) + AttributeDict( + **{ + "triggered_inputs": [ + {"prop_id": "qpu_selection.value"}, + ] + } + ) ) return load_cached_embeddings(qpu_name_val) @@ -71,17 +86,18 @@ def run_callback(): ctx = copy_context() output = ctx.run(run_callback) - if qpu_name_val == 'Advantage_system4.1': + if qpu_name_val == "Advantage_system4.1": assert output[1] == "5" - - if qpu_name_val == 'Advantage2_prototype2.55': + + if qpu_name_val == "Advantage2_prototype2.55": assert output[1] == "3, 5" - if qpu_name_val == 'Advantage88_prototype7.3': + if qpu_name_val == "Advantage88_prototype7.3": assert output == ({}, "") + parametrize_vals = [ - ('{"22": {"1": [11], "0": [10], "2": [12]}}', json_embeddings_file), - ('needed', json_embeddings_file), - ('not found', json_embeddings_file), + ('{"22": {"1": [11], "0": [10], "2": [12]}}', json_embeddings_file), + ("needed", json_embeddings_file), + ("not found", json_embeddings_file), ] diff --git a/tests/test_cb_disable_buttons.py b/tests/test_cb_disable_buttons.py index 4c22a9f..d9db1a7 100644 --- a/tests/test_cb_disable_buttons.py +++ b/tests/test_cb_disable_buttons.py @@ -12,9 
+12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import pytest - from contextvars import copy_context + +import pytest from dash._callback_context import context_value from dash._utils import AttributeDict from dash.exceptions import PreventUpdate @@ -22,31 +22,49 @@ from app import disable_buttons from helpers.layouts_components import ring_lengths -parametrize_names = ['job_submit_state_val', 'spins_val_in', 'anneal_duration_val', - 'coupling_strength_val', 'spins_val_out', 'qpu_selection_val'] +parametrize_names = [ + "job_submit_state_val", + "spins_val_in", + "anneal_duration_val", + "coupling_strength_val", + "spins_val_out", + "qpu_selection_val", +] -spins_disabled = [{'disabled': True} for _ in ring_lengths] -spins_enabled = [{'disabled': False} for _ in ring_lengths] +spins_disabled = [{"disabled": True} for _ in ring_lengths] +spins_enabled = [{"disabled": False} for _ in ring_lengths] parametrize_vals = [ - ('EMBEDDING', spins_disabled, True, True, spins_disabled, True), - ('SUBMITTED', spins_disabled, True, True, spins_disabled, True), - ('PENDING', spins_disabled, True, True, spins_disabled, True), - ('IN_PROGRESS', spins_disabled, True, True, spins_disabled, True), - ('COMPLETED', spins_enabled, False, False, spins_enabled, False), - ('CANCELLED', spins_enabled, False, False, spins_enabled, False), - ('FAILED', spins_enabled, False, False, spins_enabled, False), - ('FAKE', spins_enabled, False, False, spins_enabled, False) + ("EMBEDDING", spins_disabled, True, True, spins_disabled, True), + ("SUBMITTED", spins_disabled, True, True, spins_disabled, True), + ("PENDING", spins_disabled, True, True, spins_disabled, True), + ("IN_PROGRESS", spins_disabled, True, True, spins_disabled, True), + ("COMPLETED", spins_enabled, False, False, spins_enabled, False), + ("CANCELLED", spins_enabled, False, False, spins_enabled, False), + ("FAILED", spins_enabled, False, False, spins_enabled, False), + ("FAKE", spins_enabled, False, False, spins_enabled, False), ] + @pytest.mark.parametrize(parametrize_names, parametrize_vals) -def test_disable_buttons(job_submit_state_val, spins_val_in, anneal_duration_val, - coupling_strength_val, spins_val_out, qpu_selection_val): +def test_disable_buttons( + job_submit_state_val, + spins_val_in, + anneal_duration_val, + coupling_strength_val, + spins_val_out, + qpu_selection_val, +): """Test disabling buttons used during job submission.""" def run_callback(): - context_value.set(AttributeDict(** - {'triggered_inputs': [{'prop_id': 'job_submit_state.children'}], - 'state_values': [{'prop_id': 'spins.options'}]})) + context_value.set( + AttributeDict( + **{ + "triggered_inputs": [{"prop_id": "job_submit_state.children"}], + "state_values": [{"prop_id": "spins.options"}], + } + ) + ) return disable_buttons(job_submit_state_val, spins_val_in) @@ -58,5 +76,8 @@ def run_callback(): else: output = ctx.run(run_callback) assert output == ( - anneal_duration_val, coupling_strength_val, spins_val_out, qpu_selection_val - ) \ No newline at end of file + anneal_duration_val, + coupling_strength_val, + spins_val_out, + qpu_selection_val, + ) diff --git a/tests/test_cb_graph_kink_density.py b/tests/test_cb_graph_kink_density.py index b27f379..6bf4030 100644 --- a/tests/test_cb_graph_kink_density.py +++ b/tests/test_cb_graph_kink_density.py @@ -12,34 +12,40 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import pytest - from contextvars import copy_context -from dash.exceptions import PreventUpdate -from dash._callback_context import context_value -from dash._utils import AttributeDict -import numpy as np -import plotly import dimod +import numpy as np +import plotly +import pytest +from dash._callback_context import context_value +from dash._utils import AttributeDict +from dash.exceptions import PreventUpdate from app import display_graphics_kink_density -json_embeddings_file = { \ - "512": {"1": [11], "0": [10], "2": [12]}, \ - "5": {"1": [11], "0": [10], "2": [12], "3": [13], "4": [14]} } +json_embeddings_file = { + "512": {"1": [11], "0": [10], "2": [12]}, + "5": {"1": [11], "0": [10], "2": [12], "3": [13], "4": [14]}, +} -sample_vs_theory = plotly.graph_objects.Figure({ - 'data': [{ - 'type': 'scatter', - 'x': np.array([1, 2, 3], dtype=np.int64), - 'xaxis': 'x', - 'y': np.array([1, 2, 3], dtype=np.int64), - 'yaxis': 'y'}], - 'layout': { - 'xaxis': {'anchor': 'y', 'domain': [0.0, 1.0], 'title': {'text': 'x'}}, - 'yaxis': {'anchor': 'x', 'domain': [0.0, 1.0], 'title': {'text': 'y'}}} -}) +sample_vs_theory = plotly.graph_objects.Figure( + { + "data": [ + { + "type": "scatter", + "x": np.array([1, 2, 3], dtype=np.int64), + "xaxis": "x", + "y": np.array([1, 2, 3], dtype=np.int64), + "yaxis": "y", + } + ], + "layout": { + "xaxis": {"anchor": "y", "domain": [0.0, 1.0], "title": {"text": "x"}}, + "yaxis": {"anchor": "x", "domain": [0.0, 1.0], "title": {"text": "y"}}, + }, + } +) samples = dimod.as_samples( [ @@ -48,28 +54,37 @@ [-1, -1, -1, +1, +1], ] ) -sampleset = dimod.SampleSet.from_samples(samples, 'SPIN', 0) +sampleset = dimod.SampleSet.from_samples(samples, "SPIN", 0) parametrize_vals = [ - ('kz_graph_display.value', 'both', 'dummy'), - ('kz_graph_display.value', 'kink_density', 'dummy'), - ('kz_graph_display.value', 'schedule', 'dummy'), - ('coupling_strength.value', 'schedule', 'dummy'), - ('quench_schedule_filename.children', 'schedule', 'dummy'), - ('job_submit_state.children', 'dummy', 'SUBMITTED'), - ('job_submit_state.children', 'dummy', 'PENDING'), - ('job_submit_state.children', 'dummy', 'COMPLETED') + ("kz_graph_display.value", "both", "dummy"), + ("kz_graph_display.value", "kink_density", "dummy"), + ("kz_graph_display.value", "schedule", "dummy"), + ("coupling_strength.value", "schedule", "dummy"), + ("quench_schedule_filename.children", "schedule", "dummy"), + ("job_submit_state.children", "dummy", "SUBMITTED"), + ("job_submit_state.children", "dummy", "PENDING"), + ("job_submit_state.children", "dummy", "COMPLETED"), ] -@pytest.mark.parametrize('trigger_val, kz_graph_display_val, job_submit_state_val', parametrize_vals) + +@pytest.mark.parametrize( + "trigger_val, kz_graph_display_val, job_submit_state_val", parametrize_vals +) def test_graph_kink_density(mocker, trigger_val, kz_graph_display_val, job_submit_state_val): """Test graph of kink density.""" - mocker.patch('app.get_samples', return_value=sampleset) + mocker.patch("app.get_samples", return_value=sampleset) def run_callback(): context_value.set( - AttributeDict(**{'triggered_inputs': [{'prop_id': trigger_val},]}) + AttributeDict( + **{ + "triggered_inputs": [ + {"prop_id": trigger_val}, + ] + } + ) ) return display_graphics_kink_density( @@ -78,7 +93,7 @@ def run_callback(): 2.5, "FALLBACK_SCHEDULE.csv", job_submit_state_val, - '1234', + "1234", 7, 5, 0, @@ -91,11 +106,15 @@ def run_callback(): ctx = copy_context() - if trigger_val in [ - 'kz_graph_display.value', - 'coupling_strength.value', - 
'quench_schedule_filename.children' - ] or job_submit_state_val == "COMPLETED": + if ( + trigger_val + in [ + "kz_graph_display.value", + "coupling_strength.value", + "quench_schedule_filename.children", + ] + or job_submit_state_val == "COMPLETED" + ): output = ctx.run(run_callback) assert type(output[0]) == plotly.graph_objects.Figure diff --git a/tests/test_cb_graph_spins.py b/tests/test_cb_graph_spins.py index 5c29145..4e0ddff 100644 --- a/tests/test_cb_graph_spins.py +++ b/tests/test_cb_graph_spins.py @@ -12,63 +12,86 @@ # See the License for the specific language governing permissions and # limitations under the License. -import pytest - from contextvars import copy_context -from dash.exceptions import PreventUpdate -from dash._callback_context import context_value -from dash._utils import AttributeDict -import plotly import dimod +import plotly +import pytest +from dash._callback_context import context_value +from dash._utils import AttributeDict +from dash.exceptions import PreventUpdate from app import display_graphics_spin_ring -json_embeddings_file = { \ - "512": {"1": [11], "0": [10], "2": [12]}, \ - "5": {"1": [11], "0": [10], "2": [12], "3": [13], "4": [14]} } +json_embeddings_file = { + "512": {"1": [11], "0": [10], "2": [12]}, + "5": {"1": [11], "0": [10], "2": [12], "3": [13], "4": [14]}, +} -samples = dimod.as_samples([ - [-1, -1, -1, +1, +1], - [-1, -1, +1, +1, +1], - [-1, -1, -1, +1, +1],]) -sampleset = dimod.SampleSet.from_samples(samples, 'SPIN', 0) +samples = dimod.as_samples( + [ + [-1, -1, -1, +1, +1], + [-1, -1, +1, +1, +1], + [-1, -1, -1, +1, +1], + ] +) +sampleset = dimod.SampleSet.from_samples(samples, "SPIN", 0) parametrize_vals = [ -(512, 'SUBMITTED', json_embeddings_file), (5, 'COMPLETED', json_embeddings_file)] + (512, "SUBMITTED", json_embeddings_file), + (5, "COMPLETED", json_embeddings_file), +] + -@pytest.mark.parametrize('spins_val, job_submit_state_val, embeddings_cached_val', parametrize_vals) +@pytest.mark.parametrize("spins_val, job_submit_state_val, embeddings_cached_val", parametrize_vals) def test_graph_spins_spin_trigger(spins_val, job_submit_state_val, embeddings_cached_val): """Test graph of spin ring: spins trigger.""" def run_callback(): - context_value.set(AttributeDict(** - {'triggered_inputs': [{'prop_id': 'spins.value'},]})) + context_value.set( + AttributeDict( + **{ + "triggered_inputs": [ + {"prop_id": "spins.value"}, + ] + } + ) + ) return display_graphics_spin_ring( - spins_val, job_submit_state_val, '1234', 2.5, embeddings_cached_val) + spins_val, job_submit_state_val, "1234", 2.5, embeddings_cached_val + ) ctx = copy_context() output = ctx.run(run_callback) assert type(output) == plotly.graph_objects.Figure -@pytest.mark.parametrize('spins_val, job_submit_state_val, embeddings_cached_val', parametrize_vals) + +@pytest.mark.parametrize("spins_val, job_submit_state_val, embeddings_cached_val", parametrize_vals) def test_graph_spins_job_trigger(mocker, spins_val, job_submit_state_val, embeddings_cached_val): """Test graph of spin ring: job-state trigger.""" - mocker.patch('app.get_samples', return_value=sampleset) + mocker.patch("app.get_samples", return_value=sampleset) def run_callback(): - context_value.set(AttributeDict(** - {'triggered_inputs': [{'prop_id': 'job_submit_state.children'},]})) + context_value.set( + AttributeDict( + **{ + "triggered_inputs": [ + {"prop_id": "job_submit_state.children"}, + ] + } + ) + ) return display_graphics_spin_ring( - spins_val, job_submit_state_val, '1234', 2.5, embeddings_cached_val) + 
spins_val, job_submit_state_val, "1234", 2.5, embeddings_cached_val + ) ctx = copy_context() - if job_submit_state_val == 'COMPLETED': + if job_submit_state_val == "COMPLETED": output = ctx.run(run_callback) assert type(output) == plotly.graph_objects.Figure else: diff --git a/tests/test_cb_progress_bar.py b/tests/test_cb_progress_bar.py index b2288af..a2efad9 100644 --- a/tests/test_cb_progress_bar.py +++ b/tests/test_cb_progress_bar.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import pytest - from contextvars import copy_context + +import pytest from dash._callback_context import context_value from dash._utils import AttributeDict @@ -22,17 +22,22 @@ from helpers.layouts_components import job_bar_display parametrize_vals = [ -(f'{status}', job_bar_display[status][0], job_bar_display[status][1]) for status in job_bar_display.keys()] -parametrize_vals.extend([tuple(['BREAK FUNCTION', 'exception', 'exception'])]) + (f"{status}", job_bar_display[status][0], job_bar_display[status][1]) + for status in job_bar_display.keys() +] +parametrize_vals.extend([tuple(["BREAK FUNCTION", "exception", "exception"])]) + -@pytest.mark.parametrize('job_submit_state_val, bar_job_status_value, bar_job_status_color', -parametrize_vals) +@pytest.mark.parametrize( + "job_submit_state_val, bar_job_status_value, bar_job_status_color", parametrize_vals +) def test_set_progress_bar(job_submit_state_val, bar_job_status_value, bar_job_status_color): """Test job-submission progress bar.""" def run_callback(): - context_value.set(AttributeDict(** - {'triggered_inputs': [{'prop_id': 'job_submit_state.children'}]})) + context_value.set( + AttributeDict(**{"triggered_inputs": [{"prop_id": "job_submit_state.children"}]}) + ) return set_progress_bar(job_submit_state_val) @@ -42,4 +47,4 @@ def run_callback(): output = ctx.run(run_callback) assert output == (bar_job_status_value, bar_job_status_color) except KeyError: - assert job_submit_state_val == 'BREAK FUNCTION' \ No newline at end of file + assert job_submit_state_val == "BREAK FUNCTION" diff --git a/tests/test_cb_schedule.py b/tests/test_cb_schedule.py index 88c43d0..2c62e81 100644 --- a/tests/test_cb_schedule.py +++ b/tests/test_cb_schedule.py @@ -12,48 +12,39 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import pytest - from contextvars import copy_context + +import pytest from dash._callback_context import context_value from dash._utils import AttributeDict from app import set_schedule -all_schedules = ['09-1263A-B_Advantage_system4.1_fast_annealing_schedule.csv', - '09-1273A-E_Advantage_system6.3_fast_annealing_schedule.csv', - '09-1302A-F_Advantage2_prototype2.5_fast_annealing_schedule.csv', - 'FALLBACK_SCHEDULE.csv', - ] +all_schedules = [ + "09-1263A-B_Advantage_system4.1_fast_annealing_schedule.csv", + "09-1273A-E_Advantage_system6.3_fast_annealing_schedule.csv", + "09-1302A-F_Advantage2_prototype2.5_fast_annealing_schedule.csv", + "FALLBACK_SCHEDULE.csv", +] parametrize_vals = [ - ('Advantage_system4.1', - all_schedules, - 0, - {'color': 'white', 'fontSize': 12}), - ('Advantage_system6.4', - all_schedules, - 1, - {'color': '#FFA143', 'fontSize': 12}), - ('Advantage2_prototype2.3', - all_schedules, - 2, - {'color': '#FFA143', 'fontSize': 12}), - ('Advantage25_system7.9', - all_schedules, - 3, - {'color': '#FFA143', 'fontSize': 12}), - ] - -@pytest.mark.parametrize(['qpu_selection_val', 'schedule_name', 'indx', 'style'], parametrize_vals) + ("Advantage_system4.1", all_schedules, 0, {"color": "white", "fontSize": 12}), + ("Advantage_system6.4", all_schedules, 1, {"color": "#FFA143", "fontSize": 12}), + ("Advantage2_prototype2.3", all_schedules, 2, {"color": "#FFA143", "fontSize": 12}), + ("Advantage25_system7.9", all_schedules, 3, {"color": "#FFA143", "fontSize": 12}), +] + + +@pytest.mark.parametrize(["qpu_selection_val", "schedule_name", "indx", "style"], parametrize_vals) def test_schedule_selection(mocker, qpu_selection_val, schedule_name, indx, style): """Test schedule selection.""" - mocker.patch('app.os.listdir', return_value=schedule_name) + mocker.patch("app.os.listdir", return_value=schedule_name) def run_callback(): - context_value.set(AttributeDict(** - {'triggered_inputs': [{'prop_id': 'qpu_selection.value'}]})) + context_value.set( + AttributeDict(**{"triggered_inputs": [{"prop_id": "qpu_selection.value"}]}) + ) return set_schedule(qpu_selection_val) diff --git a/tests/test_cb_simulate.py b/tests/test_cb_simulate.py index 0ce20d5..222ceb3 100644 --- a/tests/test_cb_simulate.py +++ b/tests/test_cb_simulate.py @@ -12,127 +12,283 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import pytest - +import datetime from contextvars import copy_context + +import pytest from dash import no_update from dash._callback_context import context_value from dash._utils import AttributeDict -import datetime - from app import SimulateReturn, run_button_click, simulate -before_test = datetime.datetime.now().strftime('%c') -parametrize_vals = [ - (512, "512, 1024", 'SUBMITTED'), - (2048, "512, 1024", 'EMBEDDING') -] +before_test = datetime.datetime.now().strftime("%c") +parametrize_vals = [(512, "512, 1024", "SUBMITTED"), (2048, "512, 1024", "EMBEDDING")] + @pytest.mark.parametrize( - 'spins_val, cached_embedding_lengths_val, submit_state_out', - parametrize_vals) + "spins_val, cached_embedding_lengths_val, submit_state_out", parametrize_vals +) def test_simulate_button_press(spins_val, cached_embedding_lengths_val, submit_state_out): """Test pressing Simulate button initiates submission.""" def run_callback(): - context_value.set(AttributeDict( - **{"triggered_inputs": [{"prop_id": "btn_simulate.n_clicks"}]})) + context_value.set( + AttributeDict(**{"triggered_inputs": [{"prop_id": "btn_simulate.n_clicks"}]}) + ) + + return run_button_click(1, cached_embedding_lengths_val, spins_val, "Advantage_system4.3") - return run_button_click(1, cached_embedding_lengths_val, spins_val, - 'Advantage_system4.3') - ctx = copy_context() output = ctx.run(run_callback) assert output[0:4] == (True, False, 0, submit_state_out) + def mock_get_status(client, job_id, job_submit_time): - if job_id == 'first few attempts': + if job_id == "first few attempts": return None - if job_id == 'first returned status': - return 'PENDING' - if job_id == 'early returning statuses': - return 'PENDING' - if job_id == '-1': - return 'ERROR' - if job_id == '0': - return 'EMBEDDING' - if job_id == '1': - return 'IN_PROGRESS' - if job_id == '2': - return 'COMPLETED' - if job_id == '3': - return 'CANCELLED' - if job_id == '4': - return 'FAILED' - -class mock_qpu_edges(): + if job_id == "first returned status": + return "PENDING" + if job_id == "early returning statuses": + return "PENDING" + if job_id == "-1": + return "ERROR" + if job_id == "0": + return "EMBEDDING" + if job_id == "1": + return "IN_PROGRESS" + if job_id == "2": + return "COMPLETED" + if job_id == "3": + return "CANCELLED" + if job_id == "4": + return "FAILED" + + +class mock_qpu_edges: def __init__(self, edges): self.edges = edges + class mock_qpu(object): def __init__(self): - self.edges_per_qpu = {'Advantage_system4.3': "dummy"} + self.edges_per_qpu = {"Advantage_system4.3": "dummy"} + def __getitem__(self, indx): return mock_qpu_edges(self.edges_per_qpu[indx]) + def mock_find_embedding(spins, dummy_edges): - if spins == 'yes': + if spins == "yes": return {1: [10], 2: [20]} - if spins == 'no': + if spins == "no": return {} -parametrize_names = 'job_id_val, job_submit_state_in, ' + \ - 'spins_val, embeddings_cached_in, btn_simulate_disabled_out, wd_job_disabled_out, ' + \ - 'wd_job_intervals_out, wd_job_n_out, job_submit_state_out, job_submit_time_out, ' + \ - 'embeddings_cached_out, embedding_is_cached_out' + +parametrize_names = ( + "job_id_val, job_submit_state_in, " + + "spins_val, embeddings_cached_in, btn_simulate_disabled_out, wd_job_disabled_out, " + + "wd_job_intervals_out, wd_job_n_out, job_submit_state_out, job_submit_time_out, " + + "embeddings_cached_out, embedding_is_cached_out" +) parametrize_vals = [ - (-1, 'READY', 512, {}, False, True, - no_update, no_update, 'ERROR', no_update, no_update, no_update), - ('first few attempts', 
'SUBMITTED', 512, {}, no_update, no_update, - 200, 0, 'SUBMITTED', no_update, no_update, no_update), - ('first returned status', 'SUBMITTED', 512, {}, no_update, no_update, - 1000, 0, 'PENDING', no_update, no_update, no_update), - ('1', 'PENDING', 512, {}, no_update, no_update, - 1000, 0, 'IN_PROGRESS', no_update, no_update, no_update), - ('1', 'IN_PROGRESS', 512, {}, no_update, no_update, - 1000, 0, 'IN_PROGRESS', no_update, no_update, no_update), - ('2', 'IN_PROGRESS', 512, {}, no_update, no_update, - 1000, 0, 'COMPLETED', no_update, no_update, no_update), - ('2', 'COMPLETED', 512, {}, False, True, - no_update, no_update, no_update, no_update, no_update, no_update), - ('3', 'CANCELLED', 512, {}, False, True, - no_update, no_update, no_update, no_update, no_update, no_update), - ('4', 'FAILED', 512, {}, False, True, - no_update, no_update, no_update, no_update, no_update, no_update), - ('dummy', 'EMBEDDING', 'yes', {}, no_update, no_update, - 200, no_update, 'SUBMITTED', no_update, {'yes': {1: [10], 2: [20]}}, "yes"), - ('dummy', 'EMBEDDING', 'no', {}, False, True, - no_update, no_update, 'FAILED', no_update, no_update, no_update), + ( + -1, + "READY", + 512, + {}, + False, + True, + no_update, + no_update, + "ERROR", + no_update, + no_update, + no_update, + ), + ( + "first few attempts", + "SUBMITTED", + 512, + {}, + no_update, + no_update, + 200, + 0, + "SUBMITTED", + no_update, + no_update, + no_update, + ), + ( + "first returned status", + "SUBMITTED", + 512, + {}, + no_update, + no_update, + 1000, + 0, + "PENDING", + no_update, + no_update, + no_update, + ), + ( + "1", + "PENDING", + 512, + {}, + no_update, + no_update, + 1000, + 0, + "IN_PROGRESS", + no_update, + no_update, + no_update, + ), + ( + "1", + "IN_PROGRESS", + 512, + {}, + no_update, + no_update, + 1000, + 0, + "IN_PROGRESS", + no_update, + no_update, + no_update, + ), + ( + "2", + "IN_PROGRESS", + 512, + {}, + no_update, + no_update, + 1000, + 0, + "COMPLETED", + no_update, + no_update, + no_update, + ), + ( + "2", + "COMPLETED", + 512, + {}, + False, + True, + no_update, + no_update, + no_update, + no_update, + no_update, + no_update, + ), + ( + "3", + "CANCELLED", + 512, + {}, + False, + True, + no_update, + no_update, + no_update, + no_update, + no_update, + no_update, + ), + ( + "4", + "FAILED", + 512, + {}, + False, + True, + no_update, + no_update, + no_update, + no_update, + no_update, + no_update, + ), + ( + "dummy", + "EMBEDDING", + "yes", + {}, + no_update, + no_update, + 200, + no_update, + "SUBMITTED", + no_update, + {"yes": {1: [10], 2: [20]}}, + "yes", + ), + ( + "dummy", + "EMBEDDING", + "no", + {}, + False, + True, + no_update, + no_update, + "FAILED", + no_update, + no_update, + no_update, + ), ] + @pytest.mark.parametrize(parametrize_names, parametrize_vals) -def test_simulate_states(mocker, job_id_val, job_submit_state_in, - spins_val, embeddings_cached_in, btn_simulate_disabled_out, - wd_job_disabled_out, wd_job_intervals_out, wd_job_n_out, - job_submit_state_out, job_submit_time_out, embeddings_cached_out, - embedding_is_cached_out): +def test_simulate_states( + mocker, + job_id_val, + job_submit_state_in, + spins_val, + embeddings_cached_in, + btn_simulate_disabled_out, + wd_job_disabled_out, + wd_job_intervals_out, + wd_job_n_out, + job_submit_state_out, + job_submit_time_out, + embeddings_cached_out, + embedding_is_cached_out, +): """Test transitions between states.""" - mocker.patch('app.get_job_status', new=mock_get_status) - mocker.patch('app.qpus', new=mock_qpu()) - 
mocker.patch('app.find_one_to_one_embedding', new=mock_find_embedding) + mocker.patch("app.get_job_status", new=mock_get_status) + mocker.patch("app.qpus", new=mock_qpu()) + mocker.patch("app.find_one_to_one_embedding", new=mock_find_embedding) def run_callback(): - context_value.set(AttributeDict( - **{"triggered_inputs": [{"prop_id": "wd_job.n_intervals"}]})) + context_value.set( + AttributeDict(**{"triggered_inputs": [{"prop_id": "wd_job.n_intervals"}]}) + ) - return simulate(1, job_id_val, job_submit_state_in, before_test, - spins_val, 'Advantage_system4.3', embeddings_cached_in) + return simulate( + 1, + job_id_val, + job_submit_state_in, + before_test, + spins_val, + "Advantage_system4.3", + embeddings_cached_in, + ) ctx = copy_context() @@ -151,4 +307,4 @@ def run_callback(): assert output[0:5] == expected_output[0:5] assert output[6:] == expected_output[6:] - # One could test ``job_submit_time_out >= before_test`` to little gain, much complication \ No newline at end of file + # One could test ``job_submit_time_out >= before_test`` to little gain, much complication diff --git a/tests/test_cb_submit_job.py b/tests/test_cb_submit_job.py index 9d303ee..941674c 100644 --- a/tests/test_cb_submit_job.py +++ b/tests/test_cb_submit_job.py @@ -13,51 +13,70 @@ # limitations under the License. from contextvars import copy_context + from dash._callback_context import context_value from dash._utils import AttributeDict from app import submit_job -json_embeddings_file = { \ - "3": {"1": [11], "0": [10], "2": [12]}, \ - "5": {"1": [11], "0": [10], "2": [12], "3": [13], "4": [14]} } +json_embeddings_file = { + "3": {"1": [11], "0": [10], "2": [12]}, + "5": {"1": [11], "0": [10], "2": [12], "3": [13], "4": [14]}, +} + -class mock_computation(): - def wait_id(self): +class mock_computation: + def wait_id(self): return 1234 -class mock_solver(): + +class mock_solver: def __init__(self): self.name = "dummy" + def sample_bqm(self, **kwargs): return mock_computation() -class mock_qpus(): + +class mock_qpus: def __init__(self): - self.solvers = {'Advantage_system88.4': mock_solver()} + self.solvers = {"Advantage_system88.4": mock_solver()} + def __getitem__(self, indx): return self.solvers[indx] -class dwave_sampler(): + +class dwave_sampler: def __init__(self, solver): self.adjacency = { 10: {11, 12, 13, 14}, 11: {10, 12, 13, 14}, - 12: {10, 11, 13, 14},} - -def test_job_submission(mocker,): + 12: {10, 11, 13, 14}, + } + + +def test_job_submission( + mocker, +): """Test job submission.""" - mocker.patch('app.qpus', new=mock_qpus()) - mocker.patch('app.DWaveSampler', new=dwave_sampler) + mocker.patch("app.qpus", new=mock_qpus()) + mocker.patch("app.DWaveSampler", new=dwave_sampler) def run_callback(): - context_value.set(AttributeDict(** - {'triggered_inputs': [{'prop_id': 'job_submit_time.children'},]})) + context_value.set( + AttributeDict( + **{ + "triggered_inputs": [ + {"prop_id": "job_submit_time.children"}, + ] + } + ) + ) return submit_job( - '11:45AM', - 'Advantage_system88.4', + "11:45AM", + "Advantage_system88.4", 3, 2.3, 7, @@ -69,7 +88,5 @@ def run_callback(): ctx = copy_context() output = ctx.run(run_callback) - - assert output == (1234, False, False, 0) - + assert output == (1234, False, False, 0) diff --git a/tests/test_kz_calcs.py b/tests/test_kz_calcs.py index 5063a04..286c0e9 100644 --- a/tests/test_kz_calcs.py +++ b/tests/test_kz_calcs.py @@ -13,47 +13,57 @@ # limitations under the License. 
import os -import pytest -import pandas as pd import dimod +import pandas as pd +import pytest from helpers.kz_calcs import * project_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) -schedule = pd.read_csv(project_dir + '/helpers/FALLBACK_SCHEDULE.csv') +schedule = pd.read_csv(project_dir + "/helpers/FALLBACK_SCHEDULE.csv") -@pytest.mark.parametrize('t_a_ns, J1, J2', - [([7, 25], -1.0, -0.3), ([10, 30], 1.0, 0.3), ]) + +@pytest.mark.parametrize( + "t_a_ns, J1, J2", + [ + ([7, 25], -1.0, -0.3), + ([10, 30], 1.0, 0.3), + ], +) def test_kz_theory(t_a_ns, J1, J2): """Test predicted kink density.""" output1 = theoretical_kink_density( - annealing_times_ns=t_a_ns, - J=J1, - schedule_name='FALLBACK_SCHEDULE.csv', + annealing_times_ns=t_a_ns, + J=J1, + schedule_name="FALLBACK_SCHEDULE.csv", ) - + output2 = theoretical_kink_density( - annealing_times_ns=t_a_ns, - J=J2, - schedule_name='FALLBACK_SCHEDULE.csv', + annealing_times_ns=t_a_ns, + J=J2, + schedule_name="FALLBACK_SCHEDULE.csv", ) assert output1[0] > output1[1] assert output1[0] < output2[0] assert output1[1] < output2[1] + def test_kz_stats(): """Test kink density statistics.""" - samples = dimod.as_samples([ - [-1, -1, -1, +1, +1], - [-1, -1, +1, +1, +1], - [-1, -1, -1, +1, +1],]) + samples = dimod.as_samples( + [ + [-1, -1, -1, +1, +1], + [-1, -1, +1, +1, +1], + [-1, -1, -1, +1, +1], + ] + ) - sampleset = dimod.SampleSet.from_samples(samples, 'SPIN', 0) + sampleset = dimod.SampleSet.from_samples(samples, "SPIN", 0) output = kink_stats(sampleset, J=-1.0) diff --git a/tests/test_mock_kz_sampler.py b/tests/test_mock_kz_sampler.py index 554c460..c6924a3 100644 --- a/tests/test_mock_kz_sampler.py +++ b/tests/test_mock_kz_sampler.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. + def test_init(): """Test initialization of MockKibbleZurekSampler""" pass diff --git a/tests/test_qa.py b/tests/test_qa.py index f8b0c8f..7ddc9df 100644 --- a/tests/test_qa.py +++ b/tests/test_qa.py @@ -12,13 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
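# Illustrative aside (assumed names and values, not repo code): for the rings used
# in test_embedding below, a one-to-one embedding maps each logical spin to a single
# qubit, e.g. {0: [10], 1: [11]} for two spins, so every chain has length 1. A
# stricter check the test could make, assuming the returned dict is named output:
#
#     assert all(len(chain) == 1 for chain in output.values())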
-import pytest -import pandas as pd - import dimod +import pandas as pd +import pytest from helpers.qa import * + def test_create_bqm(): """Test BQM creation.""" @@ -32,10 +32,11 @@ def test_create_bqm(): assert output.linear == {0: 0.0, 1: 0.0} assert output.quadratic == {(1, 0): 1.0} + def test_embedding(): """Test embedder.""" - edges = [(0, 1), (1, 2), (0, 2)] + edges = [(0, 1), (1, 2), (0, 2)] output = find_one_to_one_embedding(spins=2, sampler_edgelist=edges) assert len(output) == 2 @@ -43,14 +44,12 @@ def test_embedding(): output = find_one_to_one_embedding(spins=3, sampler_edgelist=edges) assert len(output) == 3 + def test_format_converter(): """Test embedder.""" - json_embedding = { \ - "512": {"1": [11], "0": [10], "2": [12]}, \ - "5": {"1": [11], "0": [10]} } - + json_embedding = {"512": {"1": [11], "0": [10], "2": [12]}, "5": {"1": [11], "0": [10]}} + output = json_to_dict(json_embedding) assert output == {5: {0: [10], 1: [11]}, 512: {0: [10], 1: [11], 2: [12]}} - From 84c75ae11fc533b506cc78e32100c0547dc97ce3 Mon Sep 17 00:00:00 2001 From: Kate Culver Date: Fri, 17 Jan 2025 09:01:45 -0800 Subject: [PATCH 154/170] Update date type Co-authored-by: Theodor Isacsson --- README.md | 1 - app.py | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 41a713a..031c98b 100644 --- a/README.md +++ b/README.md @@ -213,7 +213,6 @@ In [this paper](https://arxiv.org/abs/2311.01306), we demonstrate a practical im of zero-noise extrapolation as a method of quantum error mitigation specifically used for quantum annealing. - For various coupling strengths at the same annealing time, we used a fitting function—quadratic for the Advantage solver and a multi-polynomial for the MockDwaveSampler to calculate the theoretical zero-noise point. As the experiment runs for a longer time, we expect this zero-noise diff --git a/app.py b/app.py index a9efa5a..b4d6b77 100644 --- a/app.py +++ b/app.py @@ -634,7 +634,7 @@ class RunButtonClickReturn(NamedTuple): wd_job_disabled: bool = False wd_job_n_intervals: int = 0 job_submit_state: str = dash.no_update - job_submit_time: datetime = dash.no_update + job_submit_time: str = dash.no_update @app.callback( @@ -681,7 +681,7 @@ class SimulateReturn(NamedTuple): wd_job_interval: int = dash.no_update wd_job_n_intervals: int = dash.no_update job_submit_state: str = dash.no_update - job_submit_time: datetime = dash.no_update + job_submit_time: str = dash.no_update embeddings_cached: dict = dash.no_update embedding_is_cached: str = dash.no_update From 22a690ed4989ade6da97a270f2dfa9d921c87b1e Mon Sep 17 00:00:00 2001 From: Kate Culver Date: Fri, 17 Jan 2025 09:32:24 -0800 Subject: [PATCH 155/170] Fix anneal duration tooltip --- helpers/layouts_components.py | 1 + helpers/tooltips.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/helpers/layouts_components.py b/helpers/layouts_components.py index 8adb616..7ceee64 100644 --- a/helpers/layouts_components.py +++ b/helpers/layouts_components.py @@ -50,6 +50,7 @@ def get_anneal_duration_setting(problem_type): ], value=80, # default value style={"maxWidth": "95%"}, + clearable=False, ) return dbc.Input( diff --git a/helpers/tooltips.py b/helpers/tooltips.py index fc4326a..858004f 100644 --- a/helpers/tooltips.py +++ b/helpers/tooltips.py @@ -13,7 +13,7 @@ # limitations under the License. tool_tips_demo2 = { - "anneal_duration": f"""Duration of the quantum anneal. Range of 5 to 320 nanoseconds.""", + "anneal-duration-dropdown": f"""Duration of the quantum anneal. 
Range of 5 to 1280 nanoseconds.""", "spins": f"""Number of spins in the 1D ring.""", "coupling_strength": f"""Coupling strength between spins in the ferromagnetic ring. Range of -1.8 to -0.6. @@ -25,7 +25,7 @@ } tool_tips_demo1 = { - "anneal_duration": f"""Duration of the quantum anneal. Range of 5 to 100 nanoseconds.""", + "anneal-duration-dropdown": f"""Duration of the quantum anneal. Range of 5 to 100 nanoseconds.""", "spins": f"""Number of spins in the 1D ring.""", "coupling_strength": f"""Coupling strength between spins in the ring. Range of -2 (ferromagnetic) to +1 (anti-ferromagnetic). From 8a88a8408cb38288ed0aab1fa6e6cca24ef72b82 Mon Sep 17 00:00:00 2001 From: Kate Culver Date: Fri, 17 Jan 2025 10:22:19 -0800 Subject: [PATCH 156/170] Update README, remove navbar Container --- README.md | 61 ++++++++++++++++++----------------------------- app.py | 52 +++++++++++++++++++--------------------- assets/custom.css | 7 ++++++ 3 files changed, 55 insertions(+), 65 deletions(-) diff --git a/README.md b/README.md index 031c98b..4002368 100644 --- a/README.md +++ b/README.md @@ -98,40 +98,27 @@ average length increases as a function of the square root of the anneal time. ## Installation -You can run this example without installation in cloud-based IDEs that support -the [Development Containers specification](https://containers.dev/supporting) -(aka "devcontainers"). +You can run this example without installation in cloud-based IDEs that support the +[Development Containers specification](https://containers.dev/supporting) (aka "devcontainers") +such as GitHub Codespaces. -For development environments that do not support ``devcontainers``, install -requirements: +For development environments that do not support `devcontainers`, install requirements: - pip install -r requirements.txt +```bash +pip install -r requirements.txt +``` -If you are cloning the repo to your local system, working in a -[virtual environment](https://docs.python.org/3/library/venv.html) is -recommended. +If you are cloning the repo to your local system, working in a +[virtual environment](https://docs.python.org/3/library/venv.html) is recommended. ## Usage -Your development environment should be configured to -[access Leap’s Solvers](https://docs.ocean.dwavesys.com/en/stable/overview/sapi.html). -You can see information about supported IDEs and authorizing access to your -Leap account [here](https://docs.dwavesys.com/docs/latest/doc_leap_dev_env.html). +Your development environment should be configured to access the +[Leap™ quantum cloud service](https://docs.ocean.dwavesys.com/en/stable/overview/sapi.html). +You can see information about supported IDEs and authorizing access to your Leap account +[here](https://docs.dwavesys.com/docs/latest/doc_leap_dev_env.html). -The default configuration uses `DWaveSampler` with specific models accessed through the Leap API. -To run experiments using `MockDKibbleZurekSampler` locally, set the environment variable in your -terminal before running the application. - -**Windows terminal**: -``` -set ZNE=YES -``` -**Unix terminal**: -``` -export ZNE=YES -``` - -To run the demo: +Run the following terminal command to start the Dash application: ```bash python app.py @@ -139,9 +126,10 @@ python app.py Access the user interface with your browser at http://127.0.0.1:8050/. -The demo program opens an interface where you can configure -problems, submit these problems to a quantum computer, and compare the results -to the Kibble-Zurek predictions. 
+The demo program opens an interface where you can configure problems and submit these problems to +a solver. + +Configuration options can be found in the [demo_configs.py](demo_configs.py) file. *Hover over an input field to see a description of the input and its range of* *supported values.* @@ -208,15 +196,12 @@ the kink density away from the predicted value. ## Zero-Noise Extrapolation -Another feature showcased in this demo is the result achieved in Quantum Error Mitigation. -In [this paper](https://arxiv.org/abs/2311.01306), we demonstrate a practical implementation -of zero-noise extrapolation as a method of quantum error mitigation specifically used for quantum -annealing. +Zero-Noise Extrapolation (ZNE) is a quantum error mitigation method used for quantum annealing +as described in this [paper](https://arxiv.org/abs/2311.01306). -For various coupling strengths at the same annealing time, we used a fitting function—quadratic -for the Advantage solver and a multi-polynomial for the MockDwaveSampler to calculate the -theoretical zero-noise point. As the experiment runs for a longer time, we expect this zero-noise -point to follow the same trend as the other data points. +A fitting function—quadratic for the Advantage solver and a multi-polynomial for the +MockDwaveSampler can be used to calculate the theoretical zero-noise point for various coupling +strengths at the same annealing time. Experimental results diff --git a/app.py b/app.py index b4d6b77..eaedbe3 100644 --- a/app.py +++ b/app.py @@ -76,34 +76,32 @@ # Define the Navbar with two tabs navbar = dbc.Navbar( - dbc.Container( - [ - # Navbar Brand/Logo - dbc.NavbarBrand( - [ - html.Img( - src=THUMBNAIL, - height="30px", - style={"marginRight": "10px"}, - ), - ], - ), - # Navbar Tabs - dbc.Nav( - [ - dbc.NavItem( - dbc.NavLink( - problem_type.label, - id={"type": "problem-type", "index": index}, - active="exact", - ) + [ + # Navbar Brand/Logo + dbc.NavbarBrand( + [ + html.Img( + src=THUMBNAIL, + height="30px", + style={"marginRight": "10px"}, + ), + ], + ), + # Navbar Tabs + dbc.Nav( + [ + dbc.NavItem( + dbc.NavLink( + problem_type.label, + id={"type": "problem-type", "index": index}, + active="exact", ) - for index, problem_type in enumerate(ProblemType) - ], - pills=True, - ), - ] - ), + ) + for index, problem_type in enumerate(ProblemType) + ], + pills=True, + ), + ], color="dark", dark=True, sticky="top", diff --git a/assets/custom.css b/assets/custom.css index e870dc4..dcacef3 100644 --- a/assets/custom.css +++ b/assets/custom.css @@ -73,3 +73,10 @@ p { margin-top: 10px; margin-left: 20px; } + +.navbar { + display: flex; + justify-content: space-between; + padding-left: 20px; + padding-right: 20px; +} From 7423be0c5106f83885b48b985fe0f878b2f687b4 Mon Sep 17 00:00:00 2001 From: Kate Culver Date: Fri, 17 Jan 2025 13:12:11 -0800 Subject: [PATCH 157/170] Fix plot bugs --- helpers/plots.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/helpers/plots.py b/helpers/plots.py index 55be7e4..6886dd1 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -639,7 +639,7 @@ def plot_zne_fitted_line(fig, coupling_data, qpu_name, zne_estimates, zne_graph_ y_func_x = fitted_function( x, y, - method="mixture_of_exponentials" if len(np.unique(x)) > 1 else "pure_quadratic", + method="mixture_of_exponentials" if qpu_name == "Diffusion [Classical]" else "pure_quadratic", ) if y_func_x is not None: @@ -653,8 +653,8 @@ def plot_zne_fitted_line(fig, coupling_data, qpu_name, zne_estimates, zne_graph_ fig.data = [ trace for 
trace in fig.data - if ( - trace.name not in ["Fitting Curve", "ZNE Estimate"] + if not ( + trace.name in ["Fitting Curve", "ZNE Estimate"] and trace.legendgroup == f"ta_{ta_str}" ) ] From 024e290cd0d2a2672d34df8871294d70b884beef Mon Sep 17 00:00:00 2001 From: Kate Culver Date: Fri, 17 Jan 2025 14:36:27 -0800 Subject: [PATCH 158/170] Fix typos, code clean up --- helpers/plots.py | 27 ++++++++++----------------- mock_kz_sampler.py | 2 +- requirements.txt | 2 +- tests/test_cb_graph_kink_density.py | 28 ++++++++++++++-------------- tests/test_cb_submit_job.py | 18 +++++++++--------- 5 files changed, 35 insertions(+), 42 deletions(-) diff --git a/helpers/plots.py b/helpers/plots.py index 6886dd1..edce6c5 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -118,7 +118,7 @@ def plot_kink_densities_bg( x=np.asarray(time_range), y=np.asarray(1.1 * n), mode="lines", - name="Predicted (±10%)", + name="Predicted (±10%)", xaxis="x1", yaxis="y1", line_color="black", @@ -169,7 +169,7 @@ def plot_kink_densities_bg( ) x_axis1 = dict( - title="Quench Duration [ns]", + title="Quench Duration [ns]", type="log", range=[np.log10(time_range[0] - 1), np.log10(time_range[1] + 10)], ) @@ -178,7 +178,7 @@ def plot_kink_densities_bg( y_max = (1.1 * n).max() y_axis1 = dict( - title="Kink Density", + title="Kink Density", type="log", range=[np.log10(y_min), np.log10(y_max)], ) @@ -201,7 +201,7 @@ def plot_kink_densities_bg( type="linear", ) - x_axis3 = dict(title="Noise Level (-1.8/J)", type="linear", range=[-1, 4]) + x_axis3 = dict(title="Noise Level (-1.8/J)", type="linear", range=[-1, 4]) if display == "kink_density": fig_layout = go.Layout( @@ -224,11 +224,8 @@ def plot_kink_densities_bg( _J = point["coupling_strength"] color = coupling_color_theme[_J] - if not _coupling_label[_J]: - legend = True - _coupling_label[_J] = True - else: - legend = False + legend = not _coupling_label[_J] + _coupling_label[_J] = True kink_density = point["kink_density"] @@ -440,7 +437,6 @@ def plot_kink_density( if display == "coupling": color = ta_color_theme[ta_value] - # kappa = -1.8 / J fig.add_trace( go.Scatter( x=[lambda_], @@ -463,11 +459,8 @@ def plot_kink_density( color = coupling_color_theme[J] if display == "kink_density" else "black" - if not coupling_label[J]: - legend = True - coupling_label[J] = True - else: - legend = False + legend = not coupling_label[J] + coupling_label[J] = True fig.add_trace( go.Scatter( @@ -642,7 +635,7 @@ def plot_zne_fitted_line(fig, coupling_data, qpu_name, zne_estimates, zne_graph_ method="mixture_of_exponentials" if qpu_name == "Diffusion [Classical]" else "pure_quadratic", ) - if y_func_x is not None: + if y_func_x: zne_estimates[ta_str] = y_func_x(0) x_fit = np.linspace(0, max(x), 100) y_fit = y_func_x(x_fit) @@ -659,7 +652,7 @@ def plot_zne_fitted_line(fig, coupling_data, qpu_name, zne_estimates, zne_graph_ ) ] - if zne_graph_display == "coupling" and y_func_x is not None: + if zne_graph_display == "coupling" and y_func_x: x_axis = "x3" y_axis = "y1" x_zne = 0 diff --git a/mock_kz_sampler.py b/mock_kz_sampler.py index e4b6ee9..1654a3d 100644 --- a/mock_kz_sampler.py +++ b/mock_kz_sampler.py @@ -59,7 +59,7 @@ def __init__( self.parameters.update({"num_sweeps": []}) def sample(self, bqm, **kwargs): - # TODO: corrupt bqsm with noise proportional to annealing_time + # TODO: corrupt bqm with noise proportional to annealing_time _bqm = bqm.change_vartype("SPIN", inplace=False) # Extract annealing_time from kwargs (if provided) diff --git a/requirements.txt b/requirements.txt index 
3a8d491..fa85859 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,4 +5,4 @@ pandas>=2.2.3 # Needed only for unit testing pytest>=8.3.4 -pytest-mock>=3.14.0 \ No newline at end of file +pytest-mock>=3.14.0 diff --git a/tests/test_cb_graph_kink_density.py b/tests/test_cb_graph_kink_density.py index 6bf4030..fa0f44b 100644 --- a/tests/test_cb_graph_kink_density.py +++ b/tests/test_cb_graph_kink_density.py @@ -88,20 +88,20 @@ def run_callback(): ) return display_graphics_kink_density( - None, - kz_graph_display_val, - 2.5, - "FALLBACK_SCHEDULE.csv", - job_submit_state_val, - "1234", - 7, - 5, - 0, - json_embeddings_file, - sample_vs_theory, - {}, - {}, - {"k": []}, + qpu_name=None, + graph_display=kz_graph_display_val, + J=2.5, + schedule_filename="FALLBACK_SCHEDULE.csv", + job_submit_state=job_submit_state_val, + job_id="1234", + ta=7, + spins=5, + problem_type=0, + embeddings_cached=json_embeddings_file, + figure=sample_vs_theory, + coupling_data={}, + zne_estimates={}, + kz_data={"k": []}, ) ctx = copy_context() diff --git a/tests/test_cb_submit_job.py b/tests/test_cb_submit_job.py index 941674c..95bd379 100644 --- a/tests/test_cb_submit_job.py +++ b/tests/test_cb_submit_job.py @@ -75,15 +75,15 @@ def run_callback(): ) return submit_job( - "11:45AM", - "Advantage_system88.4", - 3, - 2.3, - 7, - json_embeddings_file, - 0, - "FALLBACK_SCHEDULE.csv", - False, + job_submit_time="11:45AM", + qpu_name="Advantage_system88.4", + spins=3, + J=2.3, + ta_ns=7, + embeddings_cached=json_embeddings_file, + problem_type=0, + filename="FALLBACK_SCHEDULE.csv", + initial_warning=False, ) ctx = copy_context() From b990c25564711035eb66c6f15166a882e230c84c Mon Sep 17 00:00:00 2001 From: Kate Culver Date: Mon, 20 Jan 2025 11:24:06 -0800 Subject: [PATCH 159/170] Add QPU default --- app.py | 36 +++++++++++++++++------------------ demo_configs.py | 2 ++ helpers/layouts_components.py | 3 +++ 3 files changed, 22 insertions(+), 19 deletions(-) diff --git a/app.py b/app.py index eaedbe3..6bcce2e 100644 --- a/app.py +++ b/app.py @@ -314,7 +314,7 @@ def set_schedule(qpu_name): schedule_filename = "FALLBACK_SCHEDULE.csv" schedule_filename_style = {"color": "#FFA143", "fontSize": 12} - if ctx.triggered_id: + if qpu_name: for filename in [file for file in os.listdir("helpers") if "schedule.csv" in file.lower()]: if qpu_name.split(".")[0] in filename: # Accepts & reddens older versions @@ -329,35 +329,33 @@ def set_schedule(qpu_name): @app.callback( Output("embeddings_cached", "data"), Output("embedding_is_cached", "children"), - inputs=[ - Input("qpu_selection", "value"), - ], - prevent_initial_call=True, + Input("qpu_selection", "value"), ) def load_cached_embeddings(qpu_name): """Cache embeddings for the selected QPU.""" embeddings_cached = {} # Wipe out previous QPU's embeddings - for filename in [file for file in os.listdir("helpers") if ".json" in file and "emb_" in file]: + if qpu_name: + for filename in [file for file in os.listdir("helpers") if ".json" in file and "emb_" in file]: - if qpu_name == "Diffusion [Classical]": - qpu_name = "Advantage_system6.4" + if qpu_name == "Diffusion [Classical]": + qpu_name = "Advantage_system6.4" - if qpu_name.split(".")[0] in filename: - with open(f"helpers/{filename}", "r") as fp: - embeddings_cached = json.load(fp) + if qpu_name.split(".")[0] in filename: + with open(f"helpers/{filename}", "r") as fp: + embeddings_cached = json.load(fp) - embeddings_cached = json_to_dict(embeddings_cached) + embeddings_cached = json_to_dict(embeddings_cached) - # Validate that loaded 
embeddings' edges are still available on the selected QPU - for length in list(embeddings_cached.keys()): - source_graph = dimod.to_networkx_graph(create_bqm(num_spins=length)).edges - target_graph = qpus[qpu_name].edges - emb = embeddings_cached[length] + # Validate that loaded embeddings' edges are still available on the selected QPU + for length in list(embeddings_cached.keys()): + source_graph = dimod.to_networkx_graph(create_bqm(num_spins=length)).edges + target_graph = qpus[qpu_name].edges + emb = embeddings_cached[length] - if not is_valid_embedding(emb, source_graph, target_graph): - del embeddings_cached[length] + if not is_valid_embedding(emb, source_graph, target_graph): + del embeddings_cached[length] return embeddings_cached, ", ".join(str(embedding) for embedding in embeddings_cached.keys()) diff --git a/demo_configs.py b/demo_configs.py index 880a3e5..ba57616 100644 --- a/demo_configs.py +++ b/demo_configs.py @@ -32,3 +32,5 @@ USE_CLASSICAL = True J_BASELINE = -1.8 + +DEFAULT_QPU = "Advantage2_prototype2.6" # If not available, the first returned will be default diff --git a/helpers/layouts_components.py b/helpers/layouts_components.py index 7ceee64..865f302 100644 --- a/helpers/layouts_components.py +++ b/helpers/layouts_components.py @@ -17,6 +17,7 @@ import dash_bootstrap_components as dbc from dash import dcc, html +from demo_configs import DEFAULT_QPU from src.demo_enums import ProblemType __all__ = [ @@ -142,6 +143,8 @@ def config_qpu_selection(solvers): id="qpu_selection", options=[{"label": qpu_name, "value": qpu_name} for qpu_name in solvers], placeholder="Select a quantum computer", + value=DEFAULT_QPU if DEFAULT_QPU in solvers else list(solvers.keys())[0], + clearable=False, ) From 5c005c9d38a85f83c1d890fb389736234ed54c68 Mon Sep 17 00:00:00 2001 From: Kate Culver Date: Mon, 20 Jan 2025 15:10:37 -0800 Subject: [PATCH 160/170] Update find_one_to_one_embedding to use find_subgraph --- app.py | 2 +- helpers/qa.py | 16 ++++++---------- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/app.py b/app.py index 6bcce2e..23afed8 100644 --- a/app.py +++ b/app.py @@ -134,7 +134,7 @@ def tooltips(problem_type: Union[ProblemType, int]) -> list[dbc.Tooltip]: dcc.Store(id="zne_estimates", data={}), dcc.Store(id="modal_trigger", data=False), dcc.Store(id="initial_warning", data=False), - dcc.Store(id="kz_data", data={}), + dcc.Store(id="kz_data", data={"k": []}), dcc.Store(id="selected-problem"), dcc.Store(id="job_submit_time"), dcc.Store(id="job_id"), diff --git a/helpers/qa.py b/helpers/qa.py index 4fdfbf0..46f0ad2 100644 --- a/helpers/qa.py +++ b/helpers/qa.py @@ -15,12 +15,12 @@ import json import dimod -import minorminer import numpy as np import scipy from dwave.cloud.api import Problems, exceptions from dwave.embedding import unembed_sampleset from numpy.polynomial.polynomial import Polynomial +from minorminer.subgraph import find_subgraph __all__ = [ "create_bqm", @@ -51,26 +51,22 @@ def create_bqm(num_spins=512, coupling_strength=-1.4): return bqm -def find_one_to_one_embedding(spins, sampler_edgelist): +def find_one_to_one_embedding(spins, sampler_edgelist, timeout=60): """ Find an embedding with chains of length one for the ring of spins. Args: spins: Number of spins. sampler_edgelist: Edges (couplers) of the QPU. + timeout: Maximum time allowed for search. Returns: Embedding, as a dict of format {spin: [qubit]}. 
""" - bqm = create_bqm(spins) + ring_edges = {(i, (i+1) % spins) for i in range(spins)} + emb_1to1 = find_subgraph(ring_edges, sampler_edgelist, timeout=timeout) - for _ in range(5): # 4 out of 5 times will find an embedding - embedding = minorminer.find_embedding(bqm.quadratic, sampler_edgelist) - - if max(len(val) for val in embedding.values()) == 1: - return embedding - - return {} + return {k: (v,) for k, v in emb_1to1.items()} def get_job_status(client, job_id, job_submit_time): From a3fd6f4d5520f8e280c1ab644715117a8ba2a886 Mon Sep 17 00:00:00 2001 From: Kate Culver Date: Tue, 21 Jan 2025 10:02:49 -0800 Subject: [PATCH 161/170] Add fit line for 2 points, start x axis at 0 --- app.py | 9 ---- helpers/plots.py | 133 +++++++++++++++++++++++------------------------ 2 files changed, 66 insertions(+), 76 deletions(-) diff --git a/app.py b/app.py index 23afed8..a0ac599 100644 --- a/app.py +++ b/app.py @@ -469,15 +469,6 @@ def display_graphics_kink_density( problem_type=problem_type, ) - if ( - ctx.triggered_id - in ["zne_graph_display", "coupling_strength", "quench_schedule_filename"] - and graph_display == "coupling" - ): - zne_estimates, modal_trigger = plot_zne_fitted_line( - fig, coupling_data, qpu_name, zne_estimates, graph_display, str(ta) - ) - return fig, coupling_data, zne_estimates, False, kz_data if ctx.triggered_id == "job_submit_state": diff --git a/helpers/plots.py b/helpers/plots.py index edce6c5..225d0bd 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -201,7 +201,7 @@ def plot_kink_densities_bg( type="linear", ) - x_axis3 = dict(title="Noise Level (-1.8/J)", type="linear", range=[-1, 4]) + x_axis3 = dict(title="Noise Level (-1.8/J)", type="linear", range=[0, 4]) if display == "kink_density": fig_layout = go.Layout( @@ -619,75 +619,74 @@ def plot_zne_fitted_line(fig, coupling_data, qpu_name, zne_estimates, zne_graph_ due to ill conditioned data for fitting. 
""" modal_trigger = False + data_points = coupling_data[ta_str] + x = np.array([point["lambda"] for point in data_points]) + y = np.array([point["kink_density"] for point in data_points]) + + if len(np.unique(x)) < 2: + return zne_estimates, modal_trigger + + # Fit a 1st degree polynomial (linear fit) + # Fancy non-linear function or pure quadratic (see paper) # y = a + b x^2 + y_func_x = fitted_function( + x, + y, + method="mixture_of_exponentials" if qpu_name == "Diffusion [Classical]" else "pure_quadratic", + ) - if ta_str in coupling_data.keys() and len(coupling_data[ta_str]) > 2: - data_points = coupling_data[ta_str] - x = np.array([point["lambda"] for point in data_points]) - y = np.array([point["kink_density"] for point in data_points]) - - # Ensure there are enough unique x values for fitting - if len(np.unique(x)) > 1: - # Fit a 1st degree polynomial (linear fit) - # Fancy non-linear function or pure quadratic (see paper) # y = a + b x^2 - y_func_x = fitted_function( - x, - y, - method="mixture_of_exponentials" if qpu_name == "Diffusion [Classical]" else "pure_quadratic", - ) + if y_func_x: + zne_estimates[ta_str] = y_func_x(0) + x_fit = np.linspace(0, max(x), 100) + y_fit = y_func_x(x_fit) + else: + modal_trigger = True + + # Remove existing fitting curve traces and ZNE Estimate traces to prevent duplication + fig.data = [ + trace + for trace in fig.data + if not ( + trace.name in ["Fitting Curve", "ZNE Estimate"] + and trace.legendgroup == f"ta_{ta_str}" + ) + ] - if y_func_x: - zne_estimates[ta_str] = y_func_x(0) - x_fit = np.linspace(0, max(x), 100) - y_fit = y_func_x(x_fit) - else: - modal_trigger = True - - # Remove existing fitting curve traces and ZNE Estimate traces to prevent duplication - fig.data = [ - trace - for trace in fig.data - if not ( - trace.name in ["Fitting Curve", "ZNE Estimate"] - and trace.legendgroup == f"ta_{ta_str}" - ) - ] - - if zne_graph_display == "coupling" and y_func_x: - x_axis = "x3" - y_axis = "y1" - x_zne = 0 - # Add the new fitting curve - fig.add_trace( - go.Scatter( - x=x_fit, - y=y_fit, - mode="lines", - name="Fitting Curve", - legendgroup=f"ta_{ta_str}", - line=dict(color="green", dash="dash"), - showlegend=True, - xaxis=x_axis, - yaxis=y_axis, - ) - ) + if zne_graph_display == "coupling" and y_func_x: + x_axis = "x3" + y_axis = "y1" + x_zne = 0 + # Add the new fitting curve + fig.add_trace( + go.Scatter( + x=x_fit, + y=y_fit, + mode="lines", + name="Fitting Curve", + legendgroup=f"ta_{ta_str}", + line=dict(color="green", dash="dash"), + showlegend=True, + xaxis=x_axis, + yaxis=y_axis, + ) + ) - fig.add_trace( - go.Scatter( - x=[x_zne], - y=[zne_estimates[ta_str]], - mode="markers", - name="ZNE Estimate", - legendgroup=f"ta_{ta_str}", - marker=dict(size=12, color="purple", symbol="diamond"), - showlegend=False, - xaxis=x_axis, - yaxis=y_axis, - ) - ) + fig.add_trace( + go.Scatter( + x=[x_zne], + y=[zne_estimates[ta_str]], + mode="markers", + name="ZNE Estimate", + legendgroup=f"ta_{ta_str}", + marker=dict(size=12, color="purple", symbol="diamond"), + showlegend=False, + xaxis=x_axis, + yaxis=y_axis, + ) + ) - else: - x_axis = "x1" - y_axis = "y1" - x_zne = float(ta_str) + else: + x_axis = "x1" + y_axis = "y1" + x_zne = float(ta_str) return zne_estimates, modal_trigger From 3b1c836d5a31c56cc05beeecc760220d0f2ace19 Mon Sep 17 00:00:00 2001 From: Kate Culver Date: Fri, 24 Jan 2025 15:28:58 -0800 Subject: [PATCH 162/170] Refactor graphing functions, separate noise mitigation graphs --- app.py | 301 +++++++------ assets/custom.css | 17 +- 
demo_configs.py | 3 + ..._prototype2.6_fast_annealing_schedule.csv} | 0 helpers/kz_calcs.py | 15 +- helpers/layouts_cards.py | 36 +- helpers/layouts_components.py | 143 +++--- helpers/plots.py | 423 +++++++++--------- helpers/tooltips.py | 31 +- tests/test_cb_graph_kink_density.py | 172 +++++-- 10 files changed, 635 insertions(+), 506 deletions(-) rename helpers/{09-1302A-G_Advantage2_prototype2_6_fast_annealing_schedule.csv => 09-1302A-G_Advantage2_prototype2.6_fast_annealing_schedule.csv} (100%) diff --git a/app.py b/app.py index a0ac599..e8440ea 100644 --- a/app.py +++ b/app.py @@ -28,6 +28,7 @@ from dwave.system import DWaveSampler from demo_configs import ( + DEBUG, DESCRIPTION, DESCRIPTION_NM, J_BASELINE, @@ -41,7 +42,7 @@ from helpers.layouts_components import * from helpers.plots import * from helpers.qa import * -from helpers.tooltips import tool_tips_demo1, tool_tips_demo2 +from helpers.tooltips import tool_tips_kz, tool_tips_kz_nm from mock_kz_sampler import MockKibbleZurekSampler from src.demo_enums import ProblemType @@ -68,12 +69,6 @@ topology_type="pegasus", topology_shape=[16] ) -init_job_status = "READY" - -if not client: - client = "dummy" - - # Define the Navbar with two tabs navbar = dbc.Navbar( [ @@ -114,7 +109,7 @@ def tooltips(problem_type: Union[ProblemType, int]) -> list[dbc.Tooltip]: Args: problem_type: Either ProblemType.KZ or ProblemType.KZ_NM. """ - tool_tips = tool_tips_demo1 if problem_type is ProblemType.KZ else tool_tips_demo2 + tool_tips = tool_tips_kz if problem_type is ProblemType.KZ else tool_tips_kz_nm return [ dbc.Tooltip( @@ -129,12 +124,11 @@ def tooltips(problem_type: Union[ProblemType, int]) -> list[dbc.Tooltip]: app.layout = html.Div( [ - dcc.Store(id="coupling_data", data={}), - # store zero noise extrapolation - dcc.Store(id="zne_estimates", data={}), + dcc.Store(id="coupling_data", data={}), # KZ NM plot points + dcc.Store(id="zne_estimates", data={}), # store zero noise extrapolation points dcc.Store(id="modal_trigger", data=False), dcc.Store(id="initial_warning", data=False), - dcc.Store(id="kz_data", data={"k": []}), + dcc.Store(id="kz_data", data=[]), # KZ plot point dcc.Store(id="selected-problem"), dcc.Store(id="job_submit_time"), dcc.Store(id="job_id"), @@ -159,16 +153,14 @@ def tooltips(problem_type: Union[ProblemType, int]) -> list[dbc.Tooltip]: solvers=qpus, init_job_status=init_job_status, ), - *dbc_modal("modal_solver"), + *dbc_modal(), html.Div(tooltips(ProblemType.KZ), id="tooltips"), ], width=4, - style={"minWidth": "30rem"}, ), dbc.Col( # Right: display area - graphs_card(problem_type=ProblemType.KZ), + graphs_card(), width=8, - style={"minWidth": "60rem"}, ), ] ), @@ -213,7 +205,8 @@ def tooltips(problem_type: Union[ProblemType, int]) -> list[dbc.Tooltip]: @dash.callback( Output({"type": "problem-type", "index": ALL}, "className"), Output("selected-problem", "data"), - Output("graph-radio-options", "children"), + Output("kz-graphs", "className"), + Output("kz-nm-graphs", "className"), Output("tooltips", "children"), Output("anneal-duration-dropdown", "children"), Output("coupling-strength-slider", "children"), @@ -252,18 +245,20 @@ def update_selected_problem_type( nav_class_names = [""] * len(problem_options) problem_type_value = ctx.triggered_id["index"] if ctx.triggered_id else ProblemType.KZ.value problem_type = ProblemType(problem_type_value) + isKZ = problem_type is ProblemType.KZ nav_class_names[problem_type_value] = "active" return ( nav_class_names, problem_type_value, - get_kz_graph_radio_options(problem_type), + "" 
if isKZ else "display-none", + "display-none" if isKZ else "", tooltips(problem_type), get_anneal_duration_setting(problem_type), get_coupling_strength_slider(problem_type), - MAIN_HEADER if problem_type is ProblemType.KZ else MAIN_HEADER_NM, - DESCRIPTION if problem_type is ProblemType.KZ else DESCRIPTION_NM, + MAIN_HEADER if isKZ else MAIN_HEADER_NM, + DESCRIPTION if isKZ else DESCRIPTION_NM, ) @@ -361,147 +356,196 @@ def load_cached_embeddings(qpu_name): @app.callback( - Output("sample_vs_theory", "figure"), - Output("coupling_data", "data"), # store data using dcc - Output("zne_estimates", "data"), # update zne_estimates - Output("modal_trigger", "data"), - Output("kz_data", "data"), + Output("sample-v-theory-graph", "figure", allow_duplicate=True), + Output("kz_data", "data", allow_duplicate=True), inputs=[ - Input("qpu_selection", "value"), - Input("graph_display", "value"), - Input("coupling_strength", "value"), # previously input - Input("quench_schedule_filename", "children"), Input("job_submit_state", "children"), - Input("job_id", "data"), - Input("anneal_duration", "value"), - Input("spins", "value"), - Input("selected-problem", "data"), + State("graph-selection-radio", "value"), + State("coupling_strength", "value"), # previously input + State("job_id", "data"), + State("anneal_duration", "value"), + State("spins", "value"), + State("selected-problem", "data"), State("embeddings_cached", "data"), - State("sample_vs_theory", "figure"), - State("coupling_data", "data"), # access previously stored data - State("zne_estimates", "data"), # Access ZNE estimates + State("sample-v-theory-graph", "figure"), State("kz_data", "data"), # get kibble zurek data point ], + prevent_initial_call=True, ) -def display_graphics_kink_density( - qpu_name, - graph_display, - J, - schedule_filename, +def add_graph_point_kz( job_submit_state, + graph_selection, + J, job_id, ta, spins, problem_type, embeddings_cached, figure, - coupling_data, - zne_estimates, kz_data, ): - """Generate graphics for kink density based on theory and QPU samples.""" - if ctx.triggered_id == "job_submit_state" and job_submit_state != "COMPLETED": + """Add new point to kink density graph when KZ job finishes.""" + if job_submit_state != "COMPLETED" or problem_type is ProblemType.KZ_NM.value: raise PreventUpdate - ta_min = 2 - ta_max = 350 - problem_type = ProblemType(problem_type) - - if problem_type is ProblemType.KZ_NM: - # update the maximum anneal time for zne demo - ta_max = 1500 - - if ctx.triggered_id == "job_submit_state": - embeddings_cached = json_to_dict(embeddings_cached) - - sampleset_unembedded = get_samples(client, job_id, spins, J, embeddings_cached[spins]) - _, kink_density = kink_stats(sampleset_unembedded, J) - - # Calculate lambda (previously kappa) - # Added _ to avoid keyword restriction - lambda_ = calclambda_( - J=J, qpu_name=qpu_name, schedule_name=schedule_filename, J_baseline=J_BASELINE - ) - - fig = plot_kink_density(graph_display, figure, kink_density, ta, J, lambda_) - - # Initialize the list for this anneal_time if not present - ta_str = str(ta) - if ta_str not in coupling_data: - coupling_data[ta_str] = [] - # Append the new data point - coupling_data[ta_str].append( - { - "lambda": lambda_, - "kink_density": kink_density, - "coupling_strength": J, - } - ) + embeddings_cached = json_to_dict(embeddings_cached) + sampleset_unembedded = get_samples(client, job_id, spins, J, embeddings_cached[spins]) + _, kink_density = kink_stats(sampleset_unembedded, J) - zne_estimates, modal_trigger = 
plot_zne_fitted_line( - fig, coupling_data, qpu_name, zne_estimates, graph_display, ta_str - ) + # Append the new data point + kz_data.append((kink_density, ta)) - if graph_display == "kink_density": - fig = plot_kink_densities_bg( - graph_display, - [ta_min, ta_max], - J_BASELINE, - schedule_filename, - coupling_data, - zne_estimates, - problem_type=problem_type, - ) + fig = dash.no_update if graph_selection == "schedule" else plot_kink_density( + graph_selection, figure, kink_density, ta, J, problem_type=ProblemType.KZ + ) + return fig, kz_data - return fig, coupling_data, zne_estimates, modal_trigger, kz_data - if ctx.triggered_id == "qpu_selection" or ctx.triggered_id == "spins": - coupling_data = {} - zne_estimates = {} +@app.callback( + Output("kink-v-noise-graph", "figure", allow_duplicate=True), + Output("kink-v-anneal-graph", "figure", allow_duplicate=True), + Output("coupling_data", "data", allow_duplicate=True), # store data using dcc + Output("zne_estimates", "data", allow_duplicate=True), # update zne_estimates + Output("modal_trigger", "data"), + inputs=[ + Input("job_submit_state", "children"), + State("qpu_selection", "value"), + State("coupling_strength", "value"), # previously input + State("quench_schedule_filename", "children"), + State("job_id", "data"), + State("anneal_duration", "value"), + State("spins", "value"), + State("selected-problem", "data"), + State("embeddings_cached", "data"), + State("kink-v-noise-graph", "figure"), + State("kink-v-anneal-graph", "figure"), + State("coupling_data", "data"), # access previously stored data + State("zne_estimates", "data"), # Access ZNE estimates + ], + prevent_initial_call=True, +) +def add_graph_point_kz_nm( + job_submit_state, + qpu_name, + J, + schedule_filename, + job_id, + ta, + spins, + problem_type, + embeddings_cached, + figure_noise, + figure_anneal, + coupling_data, + zne_estimates, +): + """Add new point to Noise Ratio and Annealing Duration graphs when KZ Noise Mitigation job + finishes.""" + if job_submit_state != "COMPLETED" or problem_type is ProblemType.KZ.value: + raise PreventUpdate - fig = plot_kink_densities_bg( - graph_display, - [ta_min, ta_max], - J_BASELINE, - schedule_filename, - coupling_data, - zne_estimates, - problem_type=problem_type, - ) + embeddings_cached = json_to_dict(embeddings_cached) + sampleset_unembedded = get_samples(client, job_id, spins, J, embeddings_cached[spins]) + _, kink_density = kink_stats(sampleset_unembedded, J) + + # Calculate lambda (previously kappa) + # Added _ to avoid keyword restriction + lambda_ = calclambda_(J=J, qpu_name=qpu_name, schedule_name=schedule_filename) + + fig_noise = plot_kink_density("coupling", figure_noise, kink_density, ta, J, lambda_) + fig_anneal = plot_kink_density("kink_density", figure_anneal, kink_density, ta, J, lambda_) + + # Initialize the list for this anneal_time if not present + ta_str = str(ta) + if ta_str not in coupling_data: + coupling_data[ta_str] = [] + + # Append the new data point + coupling_data[ta_str].append( + { + "lambda": lambda_, + "kink_density": kink_density, + "coupling_strength": J, + } + ) - return fig, coupling_data, zne_estimates, False, kz_data + zne_estimates, modal_trigger = plot_zne_fitted_line( + fig_noise, coupling_data, qpu_name, zne_estimates, ta_str + ) + fig_anneal = plot_ze_estimates(fig_anneal, zne_estimates) - if ctx.triggered_id == "job_submit_state": - embeddings_cached = json_to_dict(embeddings_cached) + return fig_noise, fig_anneal, coupling_data, zne_estimates, modal_trigger - 
sampleset_unembedded = get_samples(client, job_id, spins, J, embeddings_cached[spins]) - _, kink_density = kink_stats(sampleset_unembedded, J) - # Append the new data point - kz_data["k"].append((kink_density, ta)) - fig = plot_kink_density( - graph_display, figure, kink_density, ta, J, problem_type=problem_type - ) - return fig, coupling_data, zne_estimates, False, kz_data +@app.callback( + Output("sample-v-theory-graph", "figure"), + Output("kz_data", "data"), + inputs=[ + Input("selected-problem", "data"), + Input("graph-selection-radio", "value"), + Input("qpu_selection", "value"), + Input("coupling_strength", "value"), # previously input + Input("spins", "value"), + Input("anneal_duration", "value"), + State("quench_schedule_filename", "children"), + State("kz_data", "data"), # get kibble zurek data point + ], +) +def load_new_graph_kz( + problem_type, + graph_selection, + qpu_name, + J, + spins, + ta, + schedule_filename, + kz_data, +): + """Initiates graphics for kink density based on theory and QPU samples on page load and when + when settings change.""" + if problem_type is ProblemType.KZ_NM.value: + raise PreventUpdate if ctx.triggered_id in ["qpu_selection", "spins", "coupling_strength"]: - kz_data = {"k": []} + kz_data = [] fig = plot_kink_densities_bg( - graph_display, - [ta_min, ta_max], + graph_selection, + [2, 350], J, schedule_filename, - coupling_data, - zne_estimates, kz_data, - problem_type=problem_type, ) - return fig, coupling_data, zne_estimates, False, kz_data + return fig, kz_data + + +@app.callback( + Output("kink-v-noise-graph", "figure"), + Output("kink-v-anneal-graph", "figure"), + Output("coupling_data", "data"), # store data using dcc + Output("zne_estimates", "data"), # update zne_estimates + inputs=[ + Input("quench_schedule_filename", "children"), + Input("spins", "value"), + ], +) +def load_new_graphs_kz_nm(schedule_filename, spins): + """Initiates KZ Noise Mitigation graphs on page load and when settings change.""" + time_range = [2, 1500] + + if not schedule_filename: + schedule_filename = "FALLBACK_SCHEDULE.csv" + + n = theoretical_kink_density(time_range, J_BASELINE, schedule_filename) + + fig_noise = kink_v_noise_init_graph(n) + fig_anneal = kink_v_anneal_init_graph(time_range, n) + + return fig_noise, fig_anneal, {}, {} @app.callback( - Output("spin_orientation", "figure"), + Output("spin-orientation-graph", "figure"), inputs=[ Input("spins", "value"), Input("job_submit_state", "children"), @@ -593,13 +637,13 @@ def submit_job( # ta_multiplier should be 1, unless (withNoiseMitigation and [J or schedule]) changes, # shouldn't change for MockSampler. In which case recalculate as - # ta_multiplier=calclambda_(coupling_strength, schedule, J_baseline=-1.8) as a function of the + # ta_multiplier=calclambda_(coupling_strength, schedule) as a function of the # correct schedule # State("ta_multiplier", "value") ? Should recalculate when J or schedule changes IFF noise mitigation tab? 
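    # Descriptive sketch (hypothetical values): calclambda_ returns the time-rescaling
    # factor lambda = b(J_BASELINE) / b(J) computed from the anneal schedule, so the
    # noise-mitigation tab stretches the requested quench whenever |J| < |J_BASELINE|
    # (slower effective passage through the critical region, lambda > 1), e.g.
    #
    #     ta_multiplier = calclambda_(J, schedule_name=filename)  # exactly 1 when J == J_BASELINE
    #     # a hypothetical J = -0.9 with J_BASELINE = -1.8 gives a multiplier > 1,
    #     # lengthening the effective anneal duration accordingly.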
ta_multiplier = 1 if problem_type is ProblemType.KZ_NM.value: - ta_multiplier = calclambda_(J, schedule_name=filename, J_baseline=J_BASELINE) + ta_multiplier = calclambda_(J, schedule_name=filename) computation = solver.sample_bqm( bqm=bqm_embedded, @@ -644,7 +688,7 @@ def run_button_click( spins, qpu_name, ) -> RunButtonClickReturn: - """Manage simulation: embedding, job submission.""" + """Start simulation run when button is clicked.""" if qpu_name == "Diffusion [Classical]": return RunButtonClickReturn( job_submit_state="SUBMITTED", @@ -705,7 +749,6 @@ def simulate( """Manage simulation: embedding, job submission.""" if job_submit_state == "EMBEDDING": - try: embedding = find_one_to_one_embedding(spins, qpus[qpu_name].edges) if embedding: @@ -779,4 +822,4 @@ def toggle_modal(trigger, is_open): if __name__ == "__main__": - app.run_server(debug=True) + app.run_server(debug=DEBUG) diff --git a/assets/custom.css b/assets/custom.css index dcacef3..05cbdb9 100644 --- a/assets/custom.css +++ b/assets/custom.css @@ -55,9 +55,10 @@ p { } #spins label, -#graph_display label { +#graph-selection-radio label { font-size: 13px; - margin: 0; + margin: 0 0 16px; + color: #2A7DE1; } #spins label { @@ -65,11 +66,11 @@ p { } #spins label:not(:first-child), -#graph_display label:not(:first-child) { +#graph-selection-radio label:not(:first-child) { margin-left: 20px; } -#graph_display { +#graph-selection-radio { margin-top: 10px; margin-left: 20px; } @@ -80,3 +81,11 @@ p { padding-left: 20px; padding-right: 20px; } + +.form-control { + width: auto; +} + +.display-none { + display: none; +} diff --git a/demo_configs.py b/demo_configs.py index ba57616..c6c21c4 100644 --- a/demo_configs.py +++ b/demo_configs.py @@ -16,6 +16,8 @@ THUMBNAIL = "assets/dwave_logo.png" +DEBUG = True + APP_TITLE = "Coherent Annealing" MAIN_HEADER = "Coherent Annealing: KZ Simulation" DESCRIPTION = """\ @@ -32,5 +34,6 @@ USE_CLASSICAL = True J_BASELINE = -1.8 +J_OPTIONS = [-1.8, -1.6, -1.4, -1.2, -1, -0.9, -0.8, -0.7] DEFAULT_QPU = "Advantage2_prototype2.6" # If not available, the first returned will be default diff --git a/helpers/09-1302A-G_Advantage2_prototype2_6_fast_annealing_schedule.csv b/helpers/09-1302A-G_Advantage2_prototype2.6_fast_annealing_schedule.csv similarity index 100% rename from helpers/09-1302A-G_Advantage2_prototype2_6_fast_annealing_schedule.csv rename to helpers/09-1302A-G_Advantage2_prototype2.6_fast_annealing_schedule.csv diff --git a/helpers/kz_calcs.py b/helpers/kz_calcs.py index 1c6142a..562194a 100644 --- a/helpers/kz_calcs.py +++ b/helpers/kz_calcs.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from demo_configs import J_BASELINE import numpy as np import pandas as pd @@ -84,27 +85,27 @@ def theoretical_kink_density(annealing_times_ns, J=None, schedule_name=None, b=N return np.power([1e-9 * t * b for t in annealing_times_ns], -0.5) / (2 * np.pi * np.sqrt(2)) -def calc_kappa(J, J_baseline=-1.8): +def calc_kappa(J): """Coupling ratio See "Quantum error mitigation in quantum annealing" usage.""" - return abs(J_baseline / J) + return abs(J_BASELINE / J) -def calclambda_(J, *, qpu_name=None, schedule_name=None, J_baseline=-1.8): - """Time rescaling factor (relative to J_baseline) +def calclambda_(J, *, qpu_name=None, schedule_name=None): + """Time rescaling factor (relative to J_BASELINE) Rate through the transition is modified non-linearly by the - rescaling of J. 
If |J| is smaller than |J_baseline| we effectively move + rescaling of J. If |J| is smaller than |J_BASELINE| we effectively move more slowly through the critical region, the ratio of timescales is > 1. See "Quantum error mitigation in quantum annealing" usage. """ if qpu_name == "Diffusion [Classical]": # Fallback, assume ideal linear schedule - kappa = calc_kappa(J, J_baseline) + kappa = calc_kappa(J, J_BASELINE) return kappa - b_ref = theoretical_kink_density_prefactor(J_baseline, schedule_name) + b_ref = theoretical_kink_density_prefactor(J_BASELINE, schedule_name) b = theoretical_kink_density_prefactor(J, schedule_name) return b_ref / b diff --git a/helpers/layouts_cards.py b/helpers/layouts_cards.py index d410cf5..7b70ccc 100644 --- a/helpers/layouts_cards.py +++ b/helpers/layouts_cards.py @@ -47,7 +47,7 @@ def control_card(solvers={}, init_job_status="READY"): html.Div(config_spins), html.Label("Coupling Strength (J)"), html.Div(get_coupling_strength_slider(ProblemType.KZ), id="coupling-strength-slider"), - html.Label("Quench Duration [ns]"), + html.Label("Quench/Anneal Duration [ns]"), html.Div(get_anneal_duration_setting(ProblemType.KZ), id="anneal-duration-dropdown"), html.Label("QPU"), html.Div(config_qpu_selection(solvers)), @@ -110,23 +110,29 @@ def control_card(solvers={}, init_job_status="READY"): style={"height": "100%", "minHeight": "50rem"}, ) +def default_graph(title, id, load_radio=False): + return [ + html.H3(title), + html.Div(get_graph_radio_options(), id="graph-radio-options") if load_radio else "", + dcc.Graph( + id=f"{id}-graph", + figure=go.Figure(), + style={"height": "40vh", "minHeight": "20rem"}, + ), + ] -def graphs_card(problem_type=ProblemType.KZ): + +def graphs_card(): return dbc.Card( [ - html.H3("Spin States of Qubits in a 1D Ring"), - dcc.Graph( - id="spin_orientation", - figure=go.Figure(), - style={"height": "40vh", "minHeight": "20rem"}, - ), - html.H3("QPU Samples Vs. 
Kibble-Zurek Prediction"), - html.Div(get_kz_graph_radio_options(problem_type), id="graph-radio-options"), - dcc.Graph( - id="sample_vs_theory", - figure=go.Figure(), - style={"height": "40vh", "minHeight": "20rem"}, - ), + html.Div([ + *default_graph("Kink Density vs Noise Ratio", "kink-v-noise"), + *default_graph("Kink Density vs Annealing Duration", "kink-v-anneal"), + ], id="kz-nm-graphs", className="display-none"), + html.Div([ + *default_graph("Spin States of Qubits in a 1D Ring", "spin-orientation"), + *default_graph("QPU Samples vs Kibble-Zurek Prediction", "sample-v-theory", True), + ], id="kz-graphs"), ], color="white", style={"height": "100%", "minHeight": "50rem"}, diff --git a/helpers/layouts_components.py b/helpers/layouts_components.py index 865f302..a9808e1 100644 --- a/helpers/layouts_components.py +++ b/helpers/layouts_components.py @@ -17,12 +17,12 @@ import dash_bootstrap_components as dbc from dash import dcc, html -from demo_configs import DEFAULT_QPU +from demo_configs import DEFAULT_QPU, J_OPTIONS from src.demo_enums import ProblemType __all__ = [ "get_anneal_duration_setting", - "get_kz_graph_radio_options", + "get_graph_radio_options", "config_spins", "get_coupling_strength_slider", "config_qpu_selection", @@ -50,7 +50,6 @@ def get_anneal_duration_setting(problem_type): {"label": "1280 ns", "value": 1280}, ], value=80, # default value - style={"maxWidth": "95%"}, clearable=False, ) @@ -61,28 +60,16 @@ def get_anneal_duration_setting(problem_type): max=100, step=1, value=7, - style={"maxWidth": "95%"}, ) -def get_kz_graph_radio_options(problem_type): - if problem_type is ProblemType.KZ_NM: - return dcc.RadioItems( - id="graph_display", - options=[ - {"label": "Kink density vs Anneal time", "value": "kink_density"}, - {"label": "Kink density vs Noise level", "value": "coupling"}, - ], - value="coupling", - inputStyle={"marginRight": "10px"}, - inline=True, - ) +def get_graph_radio_options(): return dcc.RadioItems( - id="graph_display", + id="graph-selection-radio", options=[ {"label": "Both", "value": "both"}, - {"label": "Kink density", "value": "kink_density"}, + {"label": "Kink Density", "value": "kink_density"}, {"label": "Schedule", "value": "schedule"}, ], value="both", @@ -99,38 +86,24 @@ def get_kz_graph_radio_options(problem_type): inline=True, ) -j_marks = { - round(0.1 * val) if val % 10 == 0 else round(0.1 * val, 1): ( - {"label": f"{round(0.1*val)}", "style": {"color": "white"}} - if val % 10 == 0 - else {"label": f"{round(0.1*val, 1)}", "style": {"color": "white"}} - ) - for val in chain(range(-20, 0, 2), range(2, 12, 2)) -} - - def get_coupling_strength_slider(problem_type): + if problem_type is ProblemType.KZ_NM: - return html.Div( - [ - dcc.Slider( - id="coupling_strength", - value=-1.8, - marks=j_marks, - min=-1.8, - max=-0.6, - step=None, - tooltip={"placement": "bottom", "always_visible": True}, - ) - ] - ) + marks = J_OPTIONS + value = -1.8 + else: + marks = [ + round(0.1 * val) if val % 10 == 0 else round(0.1 * val, 1) + for val in chain(range(-20, 0, 2), range(2, 12, 2)) + ] + value = -1.4 return html.Div( [ dcc.Slider( id="coupling_strength", - value=-1.4, - marks=j_marks, + value=value, + marks={mark: f"{mark}" for mark in marks}, step=None, tooltip={"placement": "bottom", "always_visible": True}, ) @@ -154,64 +127,60 @@ def config_qpu_selection(solvers): "NO SOLVER": [100, "danger"], "SUBMITTED": [40, "info"], "PENDING": [60, "primary"], - "IN_PROGRESS": [85, "dark"], + "IN_PROGRESS": [85, "#2A7DE1"], "COMPLETED": [100, "success"], 
"CANCELLED": [100, "light"], "FAILED": [100, "danger"], } -modal_texts = { - "solver": [ - "Leap's Quantum Computers Inaccessible", - [ - html.Div( - [ - html.Div("Could not connect to a Leap quantum computer."), - html.Div( - [ - """ - If you are running locally, set environment variables or a - dwave-cloud-client configuration file as described in the - """, - dcc.Link( - children=[html.Div(" Ocean")], - href="https://docs.ocean.dwavesys.com/en/stable/overview/sapi.html", - style={"display": "inline-block"}, - ), - "documentation.", - ], - style={"display": "inline-block"}, - ), - html.Div( - [ - "If you are running in an online IDE, see the ", - dcc.Link( - children=[html.Div("system documentation")], - href="https://docs.dwavesys.com/docs/latest/doc_leap_dev_env.html", - style={"display": "inline-block"}, - ), - " on supported IDEs.", - ], - style={"display": "inline-block"}, - ), - ] - ) - ], +model_contents = [ + "Leap's Quantum Computers Inaccessible", + [ + html.Div( + [ + html.Div("Could not connect to a Leap quantum computer."), + html.Div( + [ + """ + If you are running locally, set environment variables or a + dwave-cloud-client configuration file as described in the + """, + dcc.Link( + children=[html.Div(" Ocean")], + href="https://docs.ocean.dwavesys.com/en/stable/overview/sapi.html", + style={"display": "inline-block"}, + ), + "documentation.", + ], + style={"display": "inline-block"}, + ), + html.Div( + [ + "If you are running in an online IDE, see the ", + dcc.Link( + children=[html.Div("system documentation")], + href="https://docs.dwavesys.com/docs/latest/doc_leap_dev_env.html", + style={"display": "inline-block"}, + ), + " on supported IDEs.", + ], + style={"display": "inline-block"}, + ), + ] + ) ], -} +] -def dbc_modal(name): - name = name.split("_")[1] +def dbc_modal(): return [ html.Div( [ dbc.Modal( [ - dbc.ModalHeader(dbc.ModalTitle(modal_texts[name][0])), - dbc.ModalBody(modal_texts[name][1]), + dbc.ModalHeader(dbc.ModalTitle(model_contents[0])), + dbc.ModalBody(model_contents[1]), ], - id=f"{name}_modal", size="sm", ) ] diff --git a/helpers/plots.py b/helpers/plots.py index 225d0bd..bbdeb8f 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -12,51 +12,144 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from demo_configs import J_OPTIONS import numpy as np import pandas as pd import plotly.graph_objects as go -from dash.exceptions import PreventUpdate from helpers.kz_calcs import theoretical_kink_density from helpers.qa import fitted_function from src.demo_enums import ProblemType __all__ = [ + "kink_v_anneal_init_graph", + "kink_v_noise_init_graph", "plot_kink_densities_bg", "plot_kink_density", "plot_spin_orientation", "plot_zne_fitted_line", + "plot_ze_estimates", ] ta_color_theme = { 5: "#1F77B4", # Dark Blue 10: "#FF7F0E", # Dark Orange 20: "#2CA02C", # Dark Green - 40: "#D62728", # Dark Red + 40: "#949494", # Grey 80: "#9467BD", # Dark Purple 160: "#8C564B", # Brown 320: "#E377C2", # Dark Pink 640: "#17BECF", # Teal 1280: "#BCBD22", # Olive Green } -coupling_color_theme = { - -1.8: "#1F77B4", # Dark Blue - -1.6: "#FF7F0E", # Dark Orange - -1.4: "#E377C2", # Dark Pink - -1.2: "#2CA02C", # Dark Green - -1: "#D62728", # Dark Red - -0.8: "#9467BD", # Dark Purple - -0.6: "#8C564B", # Brown -} -coupling_label = { - -1.8: False, - -1.6: False, - -1.4: False, - -1.2: False, - -1: False, - -0.8: False, - -0.6: False, -} + +palette = [ + "#1F77B4", # Dark Blue + "#FF7F0E", # Dark Orange + "#E377C2", # Dark Pink + "#2CA02C", # Dark Green + "#949494", # Grey + "#9467BD", # Dark Purple + "#8C564B", # Brown + "#17BECF", # Teal +] * (len(J_OPTIONS) // 8 + 1) + +coupling_color_theme = {j: palette[i] for i, j in enumerate(J_OPTIONS)} +coupling_label = {j: False for j in J_OPTIONS} + +def add_conherent_thermalized_labels(fig, time_range, n): + """Adds Conherent and Thermalized annotations to a Plotly fig.""" + fig.add_annotation( + xref="x", + yref="y", + x=np.log10(0.25 * (time_range[1])), + y=np.log10(1.0 * n.min()), + text="Coherent", + axref="x", + ayref="y", + ax=np.log10(0.50 * (time_range[1])), + ay=np.log10(1.0 * n.min()), + arrowhead=5, + ) + + fig.add_annotation( + xref="x", + yref="y", + x=np.log10(0.5 * (time_range[1])), + y=np.log10(1.2 * n.min()), + text="Thermalized", + axref="x", + ayref="y", + ax=np.log10(0.3 * (time_range[1])), + ay=np.log10(1.2 * n.min()), + arrowhead=5, + ) + + return fig + +def plot_predicted_area(time_range, n): + """Returns predicted area scatter plots.""" + predicted_plus = go.Scatter( + x=np.asarray(time_range), + y=np.asarray(1.1 * n), + mode="lines", + name="Predicted (±10%)", + xaxis="x1", + yaxis="y1", + line_color="black", + line_width=1, + ) + + predicted_minus = go.Scatter( + x=np.asarray(time_range), + y=np.asarray(0.90 * n), + mode="lines", + xaxis="x1", + yaxis="y1", + line_color="black", + line_width=1, + fill="tonexty", + fillcolor="white", + showlegend=False, + ) + return [predicted_plus, predicted_minus] + + +def get_kink_density_axis(n): + """Returns Kink Density y axis label.""" + y_min = (0.9 * n).min() + y_max = (1.1 * n).max() + + return dict( + title="Kink Density", + type="log", + range=[np.log10(y_min), np.log10(y_max)], + ) + + +def plot_ze_estimates(fig, zne_estimates): + """Plots zero noise estimate points.""" + # Remove existing estimates + fig["data"] = tuple( + trace for trace in fig["data"] + if "name" not in trace or trace["name"] != "ZNE Estimate" + ) + + for ta_str, a in zne_estimates.items(): + fig.add_trace( + go.Scatter( + x=[ta_str], + y=[a], + mode="markers", + name="ZNE Estimate", + marker=dict(size=12, color="purple", symbol="diamond"), + showlegend=False, + xaxis="x1", + yaxis="y1", + ) + ) + + return fig def plot_kink_densities_bg( @@ -64,10 +157,7 @@ def plot_kink_densities_bg( time_range, J_base, schedule_name, - 
coupling_data, - zne_estimates, - kz_data=None, - problem_type=None, + kz_data, ): """ Plot the background of theoretical kink density and QPU energy scales. @@ -75,7 +165,7 @@ def plot_kink_densities_bg( This function generates a Plotly figure that displays the theoretical predictions for kink densities along with QPU energy scales based on the provided anneal schedule. It supports different display modes - such as "both", "kink_density", "schedule", and "coupling". + such as "both", "kink_density", and "schedule". Args: display (str): The type of plot to display. Options are: @@ -88,11 +178,6 @@ def plot_kink_densities_bg( J_base (float): The base coupling strength between spins in the ring. schedule_name (str): The filename of the anneal schedule CSV file. If not provided, a fallback schedule is used. - coupling_data (dict): A dictionary containing coupling-related data - structured as {ta_str: [data_points]}, where each data point - includes "coupling_strength" and "kink_density". - zne_estimates (dict): A dictionary to store Zero-Noise Extrapolation - (ZNE) estimates structured as {ta_str: estimate}. Returns: plotly.graph_objs.Figure: A Plotly figure object containing the @@ -114,30 +199,6 @@ def plot_kink_densities_bg( n = theoretical_kink_density(time_range, J_base, schedule_name) - predicted_plus = go.Scatter( - x=np.asarray(time_range), - y=np.asarray(1.1 * n), - mode="lines", - name="Predicted (±10%)", - xaxis="x1", - yaxis="y1", - line_color="black", - line_width=1, - ) - - predicted_minus = go.Scatter( - x=np.asarray(time_range), - y=np.asarray(0.90 * n), - mode="lines", - xaxis="x1", - yaxis="y1", - line_color="black", - line_width=1, - fill="tonexty", - fillcolor="white", - showlegend=False, - ) - x_axis = "x2" y_axis = "y2" opacity = 0.15 @@ -174,14 +235,7 @@ def plot_kink_densities_bg( range=[np.log10(time_range[0] - 1), np.log10(time_range[1] + 10)], ) - y_min = (0.9 * n).min() - y_max = (1.1 * n).max() - - y_axis1 = dict( - title="Kink Density", - type="log", - range=[np.log10(y_min), np.log10(y_max)], - ) + y_axis1 = get_kink_density_axis(n) x_axis2 = dict( title={ @@ -201,75 +255,13 @@ def plot_kink_densities_bg( type="linear", ) - x_axis3 = dict(title="Noise Level (-1.8/J)", type="linear", range=[0, 4]) + fig_data = plot_predicted_area(time_range, n) if display == "kink_density": fig_layout = go.Layout( xaxis=x_axis1, yaxis=y_axis1, ) - if problem_type is ProblemType.KZ_NM: - _coupling_label = { - -1.8: False, - -1.6: False, - -1.4: False, - -1.2: False, - -1: False, - -0.8: False, - -0.6: False, - } - fig_data = [predicted_plus, predicted_minus] - for ta_str, data_points in coupling_data.items(): - for point in data_points: - _J = point["coupling_strength"] - color = coupling_color_theme[_J] - - legend = not _coupling_label[_J] - _coupling_label[_J] = True - - kink_density = point["kink_density"] - - fig_data.append( - go.Scatter( - x=[ta_str], - y=[kink_density], - xaxis="x1", - yaxis="y1", - mode="markers", - name=f"Coupling Strength: {_J}", - showlegend=legend, - marker=dict(size=10, color=color, symbol="x"), - ) - ) - # Plot ZNE estimates - for ta_str, a in zne_estimates.items(): - fig_data.append( - go.Scatter( - x=[ta_str], - y=[a], - mode="markers", - name="ZNE Estimate", - marker=dict(size=12, color="purple", symbol="diamond"), - showlegend=False, - xaxis="x1", - yaxis="y1", - ) - ) - else: - fig_data = [predicted_plus, predicted_minus] - if "k" in kz_data: - for pair in kz_data["k"]: - fig_data.append( - go.Scatter( - x=[pair[1]], - y=[pair[0]], - 
mode="markers", - marker=dict(size=10, color="black", symbol="x"), - xaxis="x1", - yaxis="y1", - showlegend=False, - ) - ) elif display == "schedule": fig_layout = go.Layout( @@ -279,48 +271,6 @@ def plot_kink_densities_bg( fig_data = [energy_transverse, energy_problem] - elif display == "coupling": - fig_layout = go.Layout( - xaxis3=x_axis3, - yaxis1=y_axis1, - ) - - fig_data = [] - - # Plot data points from 'coupling_data' - for ta_str, data_points in coupling_data.items(): - label = False - ta_value = float(ta_str) - color = ta_color_theme[ta_value] - for point in data_points: - lambda_ = point["lambda"] - kink_density = point["kink_density"] - if not label: - fig_data.append( - go.Scatter( - x=[lambda_], - y=[kink_density], - xaxis="x3", - yaxis="y1", - mode="markers", - name=f"Anneal Time: {ta_value} ns", - showlegend=True, - marker=dict(size=10, color=color, symbol="x"), - ) - ) - label = True - else: - fig_data.append( - go.Scatter( - x=[lambda_], - y=[kink_density], - xaxis="x3", - yaxis="y1", - showlegend=False, - marker=dict(size=10, color=color, symbol="x"), - ) - ) - else: # Display both plots together x_axis2.update({"overlaying": "x1"}) y_axis2.update({"overlaying": "y1"}) @@ -332,53 +282,89 @@ def plot_kink_densities_bg( yaxis2=y_axis2, ) - fig_data = [predicted_plus, predicted_minus, energy_transverse, energy_problem] + fig_data.extend([energy_transverse, energy_problem]) + if display != "schedule": # Add previously computed kz_data points - if kz_data and "k" in kz_data: - for pair in kz_data["k"]: - fig_data.append( - go.Scatter( - x=[pair[1]], - y=[pair[0]], - mode="markers", - marker=dict(size=10, color="black", symbol="x"), - xaxis="x1", - yaxis="y1", - showlegend=False, - ) + for pair in kz_data: + fig_data.append( + go.Scatter( + x=[pair[1]], + y=[pair[0]], + mode="markers", + marker=dict(size=10, color="black", symbol="x"), + xaxis="x1", + yaxis="y1", + showlegend=False, ) + ) fig = go.Figure(data=fig_data, layout=fig_layout) fig.update_layout(legend=dict(x=0.1, y=0.1), margin=dict(b=5, l=5, r=20, t=10)) - if display != "schedule" and display != "coupling": - fig.add_annotation( - xref="x", - yref="y", - x=np.log10(0.25 * (time_range[1])), - y=np.log10(1.0 * n.min()), - text="Coherent", - axref="x", - ayref="y", - ax=np.log10(0.50 * (time_range[1])), - ay=np.log10(1.0 * n.min()), - arrowhead=5, - ) + if display != "schedule": + add_conherent_thermalized_labels(fig, time_range, n) - fig.add_annotation( - xref="x", - yref="y", - x=np.log10(0.5 * (time_range[1])), - y=np.log10(1.2 * n.min()), - text="Thermalized", - axref="x", - ayref="y", - ax=np.log10(0.3 * (time_range[1])), - ay=np.log10(1.2 * n.min()), - arrowhead=5, - ) + return fig + + +def kink_v_noise_init_graph(n): + """Initiates plot for Kink Density vs Noise Ratio. + + Args: + n: TODO + + Returns: + plotly.graph_objs.Figure: A Plotly figure object. + """ + fig_layout = go.Layout( + xaxis3=dict(title="Noise Ratio (-1.8/J)", type="linear", range=[0, 3]), + yaxis1=get_kink_density_axis(n), + ) + + fig = go.Figure(data=[], layout=fig_layout) + + fig.update_layout( + legend=dict( + yanchor="bottom", + y=0.05, + xanchor="right", + x=0.97 + ), + margin=dict(b=5, l=5, r=20, t=10) + ) + + return fig + + +def kink_v_anneal_init_graph(time_range, n): + """Initiates plot for Kink Density vs Anneal Duration. + + Args: + time_range (list of float): A list containing the minimum and maximum + quench times [min_quench_time, max_quench_time] in nanoseconds. 
+ n: TODO + + Returns: + plotly.graph_objs.Figure: A Plotly figure object. + """ + fig_layout = go.Layout( + xaxis=dict( + title="Quench Duration [ns]", + type="log", + range=[np.log10(time_range[0] - 1), np.log10(time_range[1] + 10)], + ), + yaxis=get_kink_density_axis(n), + ) + + fig_data = plot_predicted_area(time_range, n) + + fig = go.Figure(data=fig_data, layout=fig_layout) + + fig.update_layout(legend=dict(x=0.1, y=0.1), margin=dict(b=5, l=5, r=20, t=10)) + + add_conherent_thermalized_labels(fig, time_range, n) return fig @@ -410,9 +396,6 @@ def plot_kink_density( - If display is "schedule", returns `no_update` indicating no changes. - Otherwise, returns the updated Plotly figure with the new kink density marker. """ - if display == "schedule": - raise PreventUpdate - fig = go.Figure(fig_dict) if problem_type is ProblemType.KZ: @@ -433,17 +416,17 @@ def plot_kink_density( return fig - ta_value = float(anneal_time) - if display == "coupling": - color = ta_color_theme[ta_value] + color = ta_color_theme[anneal_time] fig.add_trace( go.Scatter( x=[lambda_], y=[kink_density], xaxis="x3", yaxis="y1", - showlegend=False, + mode="markers", + name=f"Anneal Time: {anneal_time} ns", + showlegend=True, marker=dict( size=10, color=color, @@ -451,6 +434,14 @@ def plot_kink_density( ), ) ) + + # Remove duplicate legend values + names = set() + fig.for_each_trace( + lambda trace: + trace.update(showlegend=False) + if (trace.name in names) else names.add(trace.name)) + fig.update_layout( xaxis3=fig.layout.xaxis3, yaxis1=fig.layout.yaxis1, @@ -589,7 +580,7 @@ def plot_spin_orientation(num_spins=512, sample=None): return fig -def plot_zne_fitted_line(fig, coupling_data, qpu_name, zne_estimates, zne_graph_display, ta_str): +def plot_zne_fitted_line(fig, coupling_data, qpu_name, zne_estimates, ta_str): """ Fit a curve to the coupling data and plot the Zero-Noise Extrapolation (ZNE) estimate. @@ -651,7 +642,7 @@ def plot_zne_fitted_line(fig, coupling_data, qpu_name, zne_estimates, zne_graph_ ) ] - if zne_graph_display == "coupling" and y_func_x: + if y_func_x: x_axis = "x3" y_axis = "y1" x_zne = 0 @@ -663,8 +654,8 @@ def plot_zne_fitted_line(fig, coupling_data, qpu_name, zne_estimates, zne_graph_ mode="lines", name="Fitting Curve", legendgroup=f"ta_{ta_str}", - line=dict(color="green", dash="dash"), - showlegend=True, + line=dict(color=ta_color_theme[int(ta_str)], dash="dash"), + showlegend=False, xaxis=x_axis, yaxis=y_axis, ) diff --git a/helpers/tooltips.py b/helpers/tooltips.py index 858004f..6c3ef23 100644 --- a/helpers/tooltips.py +++ b/helpers/tooltips.py @@ -12,26 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -tool_tips_demo2 = { - "anneal-duration-dropdown": f"""Duration of the quantum anneal. Range of 5 to 1280 nanoseconds.""", - "spins": f"""Number of spins in the 1D ring.""", - "coupling_strength": f"""Coupling strength between spins in the ferromagnetic ring. -Range of -1.8 to -0.6. -""", - "qpu_selection": f"""Selection from quantum computers available to your account/project token.""", - "quench_schedule_filename": f"""The fast-anneal schedule for the selected quantum computer. -If none exists, one from a different quantum computer is used (expect inaccuracies). 
-""", +tool_tips_kz_nm = { + "coupling_strength": "Coupling strength between spins in the ferromagnetic ring.", + "qpu_selection": "Quantum computers available to your account/project token.", + "quench_schedule_filename": """The fast-anneal schedule for the selected quantum computer. + If none exists, one from a different quantum computer is used (expect inaccuracies).""", } -tool_tips_demo1 = { - "anneal-duration-dropdown": f"""Duration of the quantum anneal. Range of 5 to 100 nanoseconds.""", - "spins": f"""Number of spins in the 1D ring.""", - "coupling_strength": f"""Coupling strength between spins in the ring. -Range of -2 (ferromagnetic) to +1 (anti-ferromagnetic). -""", - "qpu_selection": f"""Selection from quantum computers available to your account/project token.""", - "quench_schedule_filename": f"""The fast-anneal schedule for the selected quantum computer. -If none exists, one from a different quantum computer is used (expect inaccuracies). -""", +tool_tips_kz = { + "coupling_strength": """Coupling strength between spins in the ring. + Range of -2 (ferromagnetic) to +1 (anti-ferromagnetic).""", + "qpu_selection": "Quantum computers available to your account/project token.", + "quench_schedule_filename": """The fast-anneal schedule for the selected quantum computer. + If none exists, one from a different quantum computer is used (expect inaccuracies).""", } diff --git a/tests/test_cb_graph_kink_density.py b/tests/test_cb_graph_kink_density.py index fa0f44b..27c52eb 100644 --- a/tests/test_cb_graph_kink_density.py +++ b/tests/test_cb_graph_kink_density.py @@ -22,7 +22,7 @@ from dash._utils import AttributeDict from dash.exceptions import PreventUpdate -from app import display_graphics_kink_density +from app import add_graph_point_kz, add_graph_point_kz_nm, load_new_graph_kz, load_new_graphs_kz_nm json_embeddings_file = { "512": {"1": [11], "0": [10], "2": [12]}, @@ -42,6 +42,7 @@ ], "layout": { "xaxis": {"anchor": "y", "domain": [0.0, 1.0], "title": {"text": "x"}}, + "xaxis3": {"anchor": "y", "domain": [0.0, 1.0], "title": {"text": "x"}}, "yaxis": {"anchor": "x", "domain": [0.0, 1.0], "title": {"text": "y"}}, }, } @@ -57,21 +58,22 @@ sampleset = dimod.SampleSet.from_samples(samples, "SPIN", 0) parametrize_vals = [ - ("kz_graph_display.value", "both", "dummy"), - ("kz_graph_display.value", "kink_density", "dummy"), - ("kz_graph_display.value", "schedule", "dummy"), - ("coupling_strength.value", "schedule", "dummy"), - ("quench_schedule_filename.children", "schedule", "dummy"), - ("job_submit_state.children", "dummy", "SUBMITTED"), - ("job_submit_state.children", "dummy", "PENDING"), - ("job_submit_state.children", "dummy", "COMPLETED"), + ("kz_graph_display", "both", "", 0), + ("kz_graph_display", "kink_density", "", 0), + ("kz_graph_display", "schedule", "", 0), + ("coupling_strength", "schedule", "", 0), + ("quench_schedule_filename", "schedule", "", 0), + ("job_submit_state", "", "SUBMITTED", 1), + ("job_submit_state", "", "PENDING", 1), + ("job_submit_state", "", "COMPLETED", 0), + ("job_submit_state", "", "COMPLETED", 1), ] @pytest.mark.parametrize( - "trigger_val, kz_graph_display_val, job_submit_state_val", parametrize_vals + "trigger_val, kz_graph_display_val, job_submit_state_val, problem_type", parametrize_vals ) -def test_graph_kink_density(mocker, trigger_val, kz_graph_display_val, job_submit_state_val): +def test_add_graph_point_kz(mocker, trigger_val, kz_graph_display_val, job_submit_state_val, problem_type): """Test graph of kink density.""" 
mocker.patch("app.get_samples", return_value=sampleset) @@ -87,37 +89,151 @@ def run_callback(): ) ) - return display_graphics_kink_density( - qpu_name=None, - graph_display=kz_graph_display_val, - J=2.5, - schedule_filename="FALLBACK_SCHEDULE.csv", + return add_graph_point_kz( job_submit_state=job_submit_state_val, + graph_selection=kz_graph_display_val, + J=-1.4, job_id="1234", - ta=7, + ta=10, spins=5, - problem_type=0, + problem_type=problem_type, embeddings_cached=json_embeddings_file, figure=sample_vs_theory, + kz_data=[], + ) + + ctx = copy_context() + + if job_submit_state_val == "COMPLETED" and problem_type == 0: + output = ctx.run(run_callback) + + assert type(output[0]) == plotly.graph_objects.Figure + assert output[1][0][1] == 10 + else: + with pytest.raises(PreventUpdate): + ctx.run(run_callback) + + +@pytest.mark.parametrize( + "trigger_val, kz_graph_display_val, job_submit_state_val, problem_type", parametrize_vals +) +def test_add_graph_point_kz_nm(mocker, trigger_val, kz_graph_display_val, job_submit_state_val, problem_type): + """Test graph of kink density.""" + + mocker.patch("app.get_samples", return_value=sampleset) + + def run_callback(): + context_value.set( + AttributeDict( + **{ + "triggered_inputs": [ + {"prop_id": trigger_val}, + ] + } + ) + ) + + return add_graph_point_kz_nm( + job_submit_state=job_submit_state_val, + qpu_name=None, + J=-1.4, + schedule_filename="FALLBACK_SCHEDULE.csv", + job_id="1234", + ta=10, + spins=5, + problem_type=problem_type, + embeddings_cached=json_embeddings_file, + figure_noise=sample_vs_theory, + figure_anneal=sample_vs_theory, coupling_data={}, zne_estimates={}, - kz_data={"k": []}, ) ctx = copy_context() - if ( - trigger_val - in [ - "kz_graph_display.value", - "coupling_strength.value", - "quench_schedule_filename.children", - ] - or job_submit_state_val == "COMPLETED" - ): + if job_submit_state_val == "COMPLETED" and problem_type == 1: + output = ctx.run(run_callback) + + assert type(output[0]) == plotly.graph_objects.Figure + assert type(output[1]) == plotly.graph_objects.Figure + assert "10" in output[2] + assert output[3] == {} + assert output[4] == False + else: + with pytest.raises(PreventUpdate): + ctx.run(run_callback) + + +@pytest.mark.parametrize( + "trigger_val, kz_graph_display_val, job_submit_state_val, problem_type", parametrize_vals +) +def test_load_new_graph_kz(mocker, trigger_val, kz_graph_display_val, job_submit_state_val, problem_type): + """Test graph of kink density.""" + + mocker.patch("app.get_samples", return_value=sampleset) + + def run_callback(): + context_value.set( + AttributeDict( + **{ + "triggered_inputs": [ + {"prop_id": trigger_val}, + ] + } + ) + ) + + return load_new_graph_kz( + problem_type=problem_type, + graph_selection=kz_graph_display_val, + qpu_name=None, + J=-1.4, + spins=5, + ta=10, + schedule_filename="FALLBACK_SCHEDULE.csv", + kz_data=[], + ) + + ctx = copy_context() + + if problem_type == 0: output = ctx.run(run_callback) assert type(output[0]) == plotly.graph_objects.Figure + assert output[1] == [] else: with pytest.raises(PreventUpdate): ctx.run(run_callback) + + +@pytest.mark.parametrize( + "trigger_val, kz_graph_display_val, job_submit_state_val, problem_type", parametrize_vals +) +def test_load_new_graphs_kz_nm(mocker, trigger_val, kz_graph_display_val, job_submit_state_val, problem_type): + """Test graph of kink density.""" + + mocker.patch("app.get_samples", return_value=sampleset) + + def run_callback(): + context_value.set( + AttributeDict( + **{ + "triggered_inputs": [ + 
{"prop_id": trigger_val}, + ] + } + ) + ) + + return load_new_graphs_kz_nm( + schedule_filename="FALLBACK_SCHEDULE.csv", + spins=5, + ) + + ctx = copy_context() + output = ctx.run(run_callback) + + assert type(output[0]) == plotly.graph_objects.Figure + assert type(output[1]) == plotly.graph_objects.Figure + assert output[2] == {} + assert output[3] == {} From b02fe2208ac2e9c8ecc5598298cb20af0583b93e Mon Sep 17 00:00:00 2001 From: Kate Culver Date: Tue, 28 Jan 2025 11:28:53 -0800 Subject: [PATCH 163/170] Update colors and legend order, fix coupling strength legend bug --- demo_configs.py | 2 +- helpers/plots.py | 49 ++++++++++++++++++++++++------------------------ 2 files changed, 25 insertions(+), 26 deletions(-) diff --git a/demo_configs.py b/demo_configs.py index c6c21c4..8c7cca4 100644 --- a/demo_configs.py +++ b/demo_configs.py @@ -16,7 +16,7 @@ THUMBNAIL = "assets/dwave_logo.png" -DEBUG = True +DEBUG = False APP_TITLE = "Coherent Annealing" MAIN_HEADER = "Coherent Annealing: KZ Simulation" diff --git a/helpers/plots.py b/helpers/plots.py index bbdeb8f..bf0a322 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -15,6 +15,7 @@ from demo_configs import J_OPTIONS import numpy as np import pandas as pd +import plotly.express as px import plotly.graph_objects as go from helpers.kz_calcs import theoretical_kink_density @@ -31,31 +32,22 @@ "plot_ze_estimates", ] +ta_values = [5, 10, 20, 40, 80, 160, 320, 640, 1280] + +colorscale = "Portland" +colors_ta = px.colors.sample_colorscale( + colorscale, [n / (len(ta_values) - 1) for n in range(len(ta_values))] +) + ta_color_theme = { - 5: "#1F77B4", # Dark Blue - 10: "#FF7F0E", # Dark Orange - 20: "#2CA02C", # Dark Green - 40: "#949494", # Grey - 80: "#9467BD", # Dark Purple - 160: "#8C564B", # Brown - 320: "#E377C2", # Dark Pink - 640: "#17BECF", # Teal - 1280: "#BCBD22", # Olive Green + ta_value: colors_ta[len(ta_values) - i - 1] for i, ta_value in enumerate(ta_values) } -palette = [ - "#1F77B4", # Dark Blue - "#FF7F0E", # Dark Orange - "#E377C2", # Dark Pink - "#2CA02C", # Dark Green - "#949494", # Grey - "#9467BD", # Dark Purple - "#8C564B", # Brown - "#17BECF", # Teal -] * (len(J_OPTIONS) // 8 + 1) +colors_coupling = px.colors.sample_colorscale( + colorscale, [n / (len(J_OPTIONS) - 1) for n in range(len(J_OPTIONS))] +) -coupling_color_theme = {j: palette[i] for i, j in enumerate(J_OPTIONS)} -coupling_label = {j: False for j in J_OPTIONS} +coupling_color_theme = {j: colors_coupling[i] for i, j in enumerate(J_OPTIONS)} def add_conherent_thermalized_labels(fig, time_range, n): """Adds Conherent and Thermalized annotations to a Plotly fig.""" @@ -98,6 +90,7 @@ def plot_predicted_area(time_range, n): yaxis="y1", line_color="black", line_width=1, + legendrank=-1, ) predicted_minus = go.Scatter( @@ -427,6 +420,7 @@ def plot_kink_density( mode="markers", name=f"Anneal Time: {anneal_time} ns", showlegend=True, + legendrank=anneal_time, marker=dict( size=10, color=color, @@ -450,9 +444,6 @@ def plot_kink_density( color = coupling_color_theme[J] if display == "kink_density" else "black" - legend = not coupling_label[J] - coupling_label[J] = True - fig.add_trace( go.Scatter( x=[anneal_time], @@ -461,7 +452,8 @@ def plot_kink_density( yaxis="y1", mode="markers", name=f"Coupling Strength: {J}", - showlegend=legend, + showlegend=True, + legendrank=J_OPTIONS.index(J), marker=dict( size=10, color=color, @@ -470,6 +462,13 @@ def plot_kink_density( ) ) + # Remove duplicate legend values + names = set() + fig.for_each_trace( + lambda trace: + 
trace.update(showlegend=False) + if (trace.name in names) else names.add(trace.name)) + return fig From 8b61930f92295383140ba119ed20739023428cd4 Mon Sep 17 00:00:00 2001 From: Kate Culver Date: Wed, 29 Jan 2025 10:55:14 -0800 Subject: [PATCH 164/170] Remove classical mock sampler --- app.py | 56 +----------------------- demo_configs.py | 1 - helpers/kz_calcs.py | 7 +-- helpers/plots.py | 2 +- helpers/qa.py | 4 -- mock_kz_sampler.py | 81 ----------------------------------- tests/test_cb_simulate.py | 2 +- tests/test_cb_submit_job.py | 3 +- tests/test_mock_kz_sampler.py | 23 ---------- 9 files changed, 5 insertions(+), 174 deletions(-) delete mode 100644 mock_kz_sampler.py delete mode 100644 tests/test_mock_kz_sampler.py diff --git a/app.py b/app.py index e8440ea..bd1c5eb 100644 --- a/app.py +++ b/app.py @@ -35,7 +35,6 @@ MAIN_HEADER, MAIN_HEADER_NM, THUMBNAIL, - USE_CLASSICAL, ) from helpers.kz_calcs import * from helpers.layouts_cards import * @@ -43,7 +42,6 @@ from helpers.plots import * from helpers.qa import * from helpers.tooltips import tool_tips_kz, tool_tips_kz_nm -from mock_kz_sampler import MockKibbleZurekSampler from src.demo_enums import ProblemType app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP]) @@ -63,11 +61,6 @@ client = None init_job_status = "NO SOLVER" -# Load base coupling strength and user configuration for mock sampler -if USE_CLASSICAL: - qpus["Diffusion [Classical]"] = MockKibbleZurekSampler( - topology_type="pegasus", topology_shape=[16] - ) # Define the Navbar with two tabs navbar = dbc.Navbar( @@ -127,7 +120,6 @@ def tooltips(problem_type: Union[ProblemType, int]) -> list[dbc.Tooltip]: dcc.Store(id="coupling_data", data={}), # KZ NM plot points dcc.Store(id="zne_estimates", data={}), # store zero noise extrapolation points dcc.Store(id="modal_trigger", data=False), - dcc.Store(id="initial_warning", data=False), dcc.Store(id="kz_data", data=[]), # KZ plot point dcc.Store(id="selected-problem"), dcc.Store(id="job_submit_time"), @@ -174,21 +166,6 @@ def tooltips(problem_type: Union[ProblemType, int]) -> list[dbc.Tooltip]: id="error-modal", is_open=False, ), - dbc.Modal( - [ - dbc.ModalHeader( - dbc.ModalTitle( - "Warning", style={"color": "orange", "fontWeight": "bold"} - ) - ), - dbc.ModalBody( - "The Classical [diffusion] option executes a Markov Chain method locally for purposes of testing the demo interface. Kinks diffuse to annihilate, but are also created/destroyed by thermal fluctuations. The number of updates performed is set proportional to the annealing time. In the limit of no thermal noise, kinks diffuse to eliminate producing a power law, this process produces a power-law but for reasons independent of the Kibble-Zurek mechanism. 
In the noise mitigation demo we fit the impact of thermal fluctuations with a mixture of exponentials, by contrast with the quadratic fit appropriate to quantum dynamics.", - style={"color": "black", "fontSize": "16px"}, - ), - ], - id="warning-modal", - is_open=False, - ), ], fluid=True, ) @@ -333,10 +310,6 @@ def load_cached_embeddings(qpu_name): if qpu_name: for filename in [file for file in os.listdir("helpers") if ".json" in file and "emb_" in file]: - - if qpu_name == "Diffusion [Classical]": - qpu_name = "Advantage_system6.4" - if qpu_name.split(".")[0] in filename: with open(f"helpers/{filename}", "r") as fp: embeddings_cached = json.load(fp) @@ -450,7 +423,7 @@ def add_graph_point_kz_nm( # Calculate lambda (previously kappa) # Added _ to avoid keyword restriction - lambda_ = calclambda_(J=J, qpu_name=qpu_name, schedule_name=schedule_filename) + lambda_ = calclambda_(J=J, schedule_name=schedule_filename) fig_noise = plot_kink_density("coupling", figure_noise, kink_density, ta, J, lambda_) fig_anneal = plot_kink_density("kink_density", figure_anneal, kink_density, ta, J, lambda_) @@ -576,15 +549,11 @@ class SubmitJobReturn(NamedTuple): """Return type for the ``submit_job`` callback function.""" job_id: str = dash.no_update - initial_warning: bool = False - warning_modal_open: bool = False wd_job_n_intervals: int = 0 @app.callback( Output("job_id", "data"), - Output("initial_warning", "data"), - Output("warning-modal", "is_open"), Output("wd_job", "n_intervals"), inputs=[ Input("job_submit_time", "data"), @@ -595,7 +564,6 @@ class SubmitJobReturn(NamedTuple): State("embeddings_cached", "data"), State("selected-problem", "data"), State("quench_schedule_filename", "children"), - State("initial_warning", "data"), ], prevent_initial_call=True, ) @@ -608,7 +576,6 @@ def submit_job( embeddings_cached, problem_type, filename, - initial_warning, ) -> SubmitJobReturn: """Submit job and provide job ID.""" @@ -620,19 +587,6 @@ def submit_job( embedding = embeddings_cached[spins] annealing_time = ta_ns / 1000 - if qpu_name == "Diffusion [Classical]": - bqm_embedded = embed_bqm(bqm, embedding, qpus["Diffusion [Classical]"].adjacency) - - sampleset = qpus["Diffusion [Classical]"].sample( - bqm_embedded, annealing_time=annealing_time - ) - - return SubmitJobReturn( - job_id=json.dumps(sampleset.to_serializable()), - initial_warning=True, - warning_modal_open=not initial_warning, - ) - bqm_embedded = embed_bqm(bqm, embedding, DWaveSampler(solver=solver.name).adjacency) # ta_multiplier should be 1, unless (withNoiseMitigation and [J or schedule]) changes, @@ -678,7 +632,6 @@ class RunButtonClickReturn(NamedTuple): Input("btn_simulate", "n_clicks"), State("embedding_is_cached", "children"), State("spins", "value"), - State("qpu_selection", "value"), ], prevent_initial_call=True, ) @@ -686,15 +639,8 @@ def run_button_click( run_btn_click, cached_embeddings, spins, - qpu_name, ) -> RunButtonClickReturn: """Start simulation run when button is clicked.""" - if qpu_name == "Diffusion [Classical]": - return RunButtonClickReturn( - job_submit_state="SUBMITTED", - job_submit_time="SA", # Hack to fix switch from SA to QPU - ) - if str(spins) in cached_embeddings.split(", "): # If we have a cached embedding return RunButtonClickReturn( job_submit_state="SUBMITTED", diff --git a/demo_configs.py b/demo_configs.py index 8c7cca4..f2dc25e 100644 --- a/demo_configs.py +++ b/demo_configs.py @@ -32,7 +32,6 @@ lowest noise level. 
""" -USE_CLASSICAL = True J_BASELINE = -1.8 J_OPTIONS = [-1.8, -1.6, -1.4, -1.2, -1, -0.9, -0.8, -0.7] diff --git a/helpers/kz_calcs.py b/helpers/kz_calcs.py index 562194a..d5753b1 100644 --- a/helpers/kz_calcs.py +++ b/helpers/kz_calcs.py @@ -92,7 +92,7 @@ def calc_kappa(J): return abs(J_BASELINE / J) -def calclambda_(J, *, qpu_name=None, schedule_name=None): +def calclambda_(J, *, schedule_name=None): """Time rescaling factor (relative to J_BASELINE) Rate through the transition is modified non-linearly by the @@ -100,11 +100,6 @@ def calclambda_(J, *, qpu_name=None, schedule_name=None): more slowly through the critical region, the ratio of timescales is > 1. See "Quantum error mitigation in quantum annealing" usage. """ - if qpu_name == "Diffusion [Classical]": - # Fallback, assume ideal linear schedule - kappa = calc_kappa(J, J_BASELINE) - return kappa - b_ref = theoretical_kink_density_prefactor(J_BASELINE, schedule_name) b = theoretical_kink_density_prefactor(J, schedule_name) diff --git a/helpers/plots.py b/helpers/plots.py index bf0a322..f08ef9e 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -621,7 +621,7 @@ def plot_zne_fitted_line(fig, coupling_data, qpu_name, zne_estimates, ta_str): y_func_x = fitted_function( x, y, - method="mixture_of_exponentials" if qpu_name == "Diffusion [Classical]" else "pure_quadratic", + method="pure_quadratic", ) if y_func_x: diff --git a/helpers/qa.py b/helpers/qa.py index 46f0ad2..ffb668d 100644 --- a/helpers/qa.py +++ b/helpers/qa.py @@ -80,10 +80,6 @@ def get_job_status(client, job_id, job_submit_time): Returns: Embedding, as a dict of format ``{spin: [qubit]}``. """ - - if '"type": "SampleSet"' in job_id and job_submit_time == "SA": - return "COMPLETED" - p = Problems.from_config(client.config) try: diff --git a/mock_kz_sampler.py b/mock_kz_sampler.py deleted file mode 100644 index 1654a3d..0000000 --- a/mock_kz_sampler.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright 2025 D-Wave -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import numpy as np -from dimod import SampleSet -from dwave.samplers import SimulatedAnnealingSampler -from dwave.system.testing import MockDWaveSampler - - -class MockKibbleZurekSampler(MockDWaveSampler): - """Perform a quench (fixed beta = 1/temperature) evolution. - - The MockSampler is configured to use standard Markov Chain Monte Carlo - with Gibbs acceptance criteria from a random initial condition. - Defects diffuse (power law 1/2) and eliminate, but are also - created by thermal excitations. We will seek to take a limit of high - coupling strength where thermal excitations are removed, leaving only the - diffusion. - """ - - def __init__( - self, - topology_type="pegasus", - topology_shape=[16], - kink_density_limit_absJ1=0.04, - ): - substitute_sampler = SimulatedAnnealingSampler() - # At equilibrium = (t^{L-1} + t)/(1 + t^L), t = -tanh(beta J) - # At large time (equilibrium) for long chains - # lessthansimilarto t, - # At J=-1 we want a kink density to bottom out. 
Therefore: - beta = np.arctanh(1 - 2 * kink_density_limit_absJ1) - substitute_kwargs = { - "beta_range": [beta, beta], # Quench - "randomize_order": True, - "num_reads": 1000, - "proposal_acceptance_criteria": "Gibbs", - } - super().__init__( - topology_type=topology_type, - topology_shape=topology_shape, - substitute_sampler=substitute_sampler, - substitute_kwargs=substitute_kwargs, - ) - self.sampler_type = "mock" - self.mocked_parameters.add("annealing_time") - self.mocked_parameters.add("num_sweeps") - self.parameters.update({"num_sweeps": []}) - - def sample(self, bqm, **kwargs): - # TODO: corrupt bqm with noise proportional to annealing_time - _bqm = bqm.change_vartype("SPIN", inplace=False) - - # Extract annealing_time from kwargs (if provided) - annealing_time = kwargs.pop("annealing_time", 20) # 20us default. - num_sweeps = int(annealing_time * 1000) # 1000 sweeps per microsecond - - ss = super().sample(bqm=_bqm, num_sweeps=num_sweeps, **kwargs) - - ss.change_vartype(bqm.vartype) # Not required but safe - - ss = SampleSet.from_samples_bqm(ss, bqm) - - return ss - - def get_sampler(self): - """ - Return the sampler instance. - """ - return self diff --git a/tests/test_cb_simulate.py b/tests/test_cb_simulate.py index 222ceb3..d75b998 100644 --- a/tests/test_cb_simulate.py +++ b/tests/test_cb_simulate.py @@ -37,7 +37,7 @@ def run_callback(): AttributeDict(**{"triggered_inputs": [{"prop_id": "btn_simulate.n_clicks"}]}) ) - return run_button_click(1, cached_embedding_lengths_val, spins_val, "Advantage_system4.3") + return run_button_click(1, cached_embedding_lengths_val, spins_val) ctx = copy_context() diff --git a/tests/test_cb_submit_job.py b/tests/test_cb_submit_job.py index 95bd379..85d85ed 100644 --- a/tests/test_cb_submit_job.py +++ b/tests/test_cb_submit_job.py @@ -83,10 +83,9 @@ def run_callback(): embeddings_cached=json_embeddings_file, problem_type=0, filename="FALLBACK_SCHEDULE.csv", - initial_warning=False, ) ctx = copy_context() output = ctx.run(run_callback) - assert output == (1234, False, False, 0) + assert output == (1234, 0) diff --git a/tests/test_mock_kz_sampler.py b/tests/test_mock_kz_sampler.py deleted file mode 100644 index c6924a3..0000000 --- a/tests/test_mock_kz_sampler.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2025 D-Wave -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-
-
-def test_init():
-    """Test initialization of MockKibbleZurekSampler"""
-    pass
-
-
-def test_sample():
-    """Test the MockKibbleZurekSampler ``sample`` method"""
-    pass

From 5647e3513f91c1b4f01316bd0206a1f6e9cda65a Mon Sep 17 00:00:00 2001
From: Kate Culver
Date: Wed, 29 Jan 2025 10:57:38 -0800
Subject: [PATCH 165/170] Update NM description

---
 demo_configs.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/demo_configs.py b/demo_configs.py
index f2dc25e..712284c 100644
--- a/demo_configs.py
+++ b/demo_configs.py
@@ -26,10 +26,11 @@
 """
 MAIN_HEADER_NM = "Coherent Annealing: Zero-Noise Extrapolation"
 DESCRIPTION_NM = """\
-Simulate zero-temperature and zero-time extrapolations on a quantum computer using
-the Kibble-Zurek mechanism. Fitting occurs once three or more data points are
-plotted, with -1.8 representing the highest energy scale corresponding to the
-lowest noise level.
+Owing to thermal noise, coupled chains depart from closed system dynamics
+(Kibble-Zurek power law 1/2 scaling) for longer quench duration. Experiments at smaller
+coupling strength allow for the modelling of higher noise environments. We can model at
+a range of coupilng strengths (noise levels), and extrapolate towards the noise-free regime;
+thereby improving agreement with theory (closed system dynamics) to larger quench durations.
 """
 
 J_BASELINE = -1.8

From d18c3187168efa9e386db0c12ad18b73ba7ca8d0 Mon Sep 17 00:00:00 2001
From: Kate Culver
Date: Wed, 5 Feb 2025 15:17:45 -0800
Subject: [PATCH 166/170] Update titles and README cite

---
 README.md                | 13 ++++++++-----
 demo_configs.py          |  4 ++--
 helpers/layouts_cards.py |  4 ++--
 src/demo_enums.py        |  4 ++--
 4 files changed, 14 insertions(+), 11 deletions(-)

diff --git a/README.md b/README.md
index 4002368..c230118 100644
--- a/README.md
+++ b/README.md
@@ -196,12 +196,11 @@ the kink density away from the predicted value.
 
 ## Zero-Noise Extrapolation
 
-Zero-Noise Extrapolation (ZNE) is a quantum error mitigation method used for quantum annealing
-as described in this [paper](https://arxiv.org/abs/2311.01306).
+Zero-Noise Extrapolation (ZNE) is a quantum error mitigation method used for quantum
+annealing, as described in [[3]](#3).
 
-A fitting function—quadratic for the Advantage solver and a multi-polynomial for the
-MockDwaveSampler can be used to calculate the theoretical zero-noise point for various coupling
-strengths at the same annealing time.
+A quadratic fitting function can be used with the Advantage solver to calculate the theoretical
+zero-noise point for various coupling strengths at the same annealing time.
 
 Experimental results
 
@@ -274,6 +273,10 @@ Nat. Phys. 18, 1324–1328 (2022). https://doi.org/10.1038/s41567-022-01741-6
 Computational supremacy in quantum simulation.
 https://arxiv.org/abs/2403.00910
 
+[3] Amin, M.H., King, A.D., Raymond, J. et al.
+Quantum error mitigation in quantum annealing.
+https://arxiv.org/abs/2311.01306
+
 ## License
 
 Released under the Apache License 2.0. See [LICENSE](LICENSE) file.
diff --git a/demo_configs.py b/demo_configs.py
index 712284c..cc4ca67 100644
--- a/demo_configs.py
+++ b/demo_configs.py
@@ -19,12 +19,12 @@
 
 DEBUG = False
 
 APP_TITLE = "Coherent Annealing"
-MAIN_HEADER = "Coherent Annealing: KZ Simulation"
+MAIN_HEADER = "Kibble-Zurek Simulation"
 DESCRIPTION = """\
 Use a quantum computer to simulate the formation of topological defects in a 1D ring
 of spins undergoing a phase transition, described by the Kibble-Zurek mechanism.
""" -MAIN_HEADER_NM = "Coherent Annealing: Zero-Noise Extrapolation" +MAIN_HEADER_NM = "Zero-Noise Extrapolation" DESCRIPTION_NM = """\ Owing to thermal noise, coupled chains depart from closed system dynamics (Kibble-Zurek power law 1/2 scaling) for longer quench duration. Experiments at smaller diff --git a/helpers/layouts_cards.py b/helpers/layouts_cards.py index 7b70ccc..ba2596d 100644 --- a/helpers/layouts_cards.py +++ b/helpers/layouts_cards.py @@ -126,8 +126,8 @@ def graphs_card(): return dbc.Card( [ html.Div([ - *default_graph("Kink Density vs Noise Ratio", "kink-v-noise"), - *default_graph("Kink Density vs Annealing Duration", "kink-v-anneal"), + *default_graph("Extrapolating Zero-Noise Density", "kink-v-noise"), + *default_graph("Measured and Extrapolated Kink Densities", "kink-v-anneal"), ], id="kz-nm-graphs", className="display-none"), html.Div([ *default_graph("Spin States of Qubits in a 1D Ring", "spin-orientation"), diff --git a/src/demo_enums.py b/src/demo_enums.py index e9d73ea..f697951 100644 --- a/src/demo_enums.py +++ b/src/demo_enums.py @@ -22,6 +22,6 @@ class ProblemType(Enum): @property def label(self): return { - ProblemType.KZ: "Kibble-Zurek Mechanism", - ProblemType.KZ_NM: "Kibble-Zurek Mechanism with Noise Mitigation", + ProblemType.KZ: "Kibble-Zurek Simulation", + ProblemType.KZ_NM: "Zero-Noise Extrapolation", }[self] From 6a3a94e9e2c0902a316d79d1590bf7e6c27a3405 Mon Sep 17 00:00:00 2001 From: Kate Culver Date: Tue, 11 Feb 2025 11:06:17 -0800 Subject: [PATCH 167/170] Add SHOW_TOOLTIPS config, update anneal to quench --- app.py | 10 +++++++--- demo_configs.py | 2 ++ helpers/layouts_cards.py | 4 ++-- helpers/layouts_components.py | 4 ++-- 4 files changed, 13 insertions(+), 7 deletions(-) diff --git a/app.py b/app.py index bd1c5eb..4840164 100644 --- a/app.py +++ b/app.py @@ -34,6 +34,7 @@ J_BASELINE, MAIN_HEADER, MAIN_HEADER_NM, + SHOW_TOOLTIPS, THUMBNAIL, ) from helpers.kz_calcs import * @@ -102,6 +103,9 @@ def tooltips(problem_type: Union[ProblemType, int]) -> list[dbc.Tooltip]: Args: problem_type: Either ProblemType.KZ or ProblemType.KZ_NM. """ + if not SHOW_TOOLTIPS: + return [] + tool_tips = tool_tips_kz if problem_type is ProblemType.KZ else tool_tips_kz_nm return [ @@ -185,7 +189,7 @@ def tooltips(problem_type: Union[ProblemType, int]) -> list[dbc.Tooltip]: Output("kz-graphs", "className"), Output("kz-nm-graphs", "className"), Output("tooltips", "children"), - Output("anneal-duration-dropdown", "children"), + Output("quench-duration-dropdown", "children"), Output("coupling-strength-slider", "children"), Output("main-header", "children"), Output("main-description", "children"), @@ -211,7 +215,7 @@ def update_selected_problem_type( KZ_NM (``1`` or ``ProblemType.KZ_NM``). graph-radio-options: The radio options for the graph. tooltips: The tooltips for the settings form. - anneal-duration-dropdown: The duration dropdown setting. + quench-duration-dropdown: The duration dropdown setting. coupling-strength-slider: The coupling strength slider setting. main-header: The main header of the problem in the left column. main-description: The description of the problem in the left column. 
@@ -232,7 +236,7 @@ def update_selected_problem_type( "" if isKZ else "display-none", "display-none" if isKZ else "", tooltips(problem_type), - get_anneal_duration_setting(problem_type), + get_quench_duration_setting(problem_type), get_coupling_strength_slider(problem_type), MAIN_HEADER if isKZ else MAIN_HEADER_NM, DESCRIPTION if isKZ else DESCRIPTION_NM, diff --git a/demo_configs.py b/demo_configs.py index cc4ca67..7205336 100644 --- a/demo_configs.py +++ b/demo_configs.py @@ -37,3 +37,5 @@ J_OPTIONS = [-1.8, -1.6, -1.4, -1.2, -1, -0.9, -0.8, -0.7] DEFAULT_QPU = "Advantage2_prototype2.6" # If not available, the first returned will be default + +SHOW_TOOLTIPS = False # Determines whether tooltips are on or off diff --git a/helpers/layouts_cards.py b/helpers/layouts_cards.py index ba2596d..8996366 100644 --- a/helpers/layouts_cards.py +++ b/helpers/layouts_cards.py @@ -47,8 +47,8 @@ def control_card(solvers={}, init_job_status="READY"): html.Div(config_spins), html.Label("Coupling Strength (J)"), html.Div(get_coupling_strength_slider(ProblemType.KZ), id="coupling-strength-slider"), - html.Label("Quench/Anneal Duration [ns]"), - html.Div(get_anneal_duration_setting(ProblemType.KZ), id="anneal-duration-dropdown"), + html.Label("Quench Duration [ns]"), + html.Div(get_quench_duration_setting(ProblemType.KZ), id="quench-duration-dropdown"), html.Label("QPU"), html.Div(config_qpu_selection(solvers)), html.P( diff --git a/helpers/layouts_components.py b/helpers/layouts_components.py index a9808e1..66e4d56 100644 --- a/helpers/layouts_components.py +++ b/helpers/layouts_components.py @@ -21,7 +21,7 @@ from src.demo_enums import ProblemType __all__ = [ - "get_anneal_duration_setting", + "get_quench_duration_setting", "get_graph_radio_options", "config_spins", "get_coupling_strength_slider", @@ -34,7 +34,7 @@ ring_lengths = [512, 1024, 2048] -def get_anneal_duration_setting(problem_type): +def get_quench_duration_setting(problem_type): if problem_type is ProblemType.KZ_NM: return dcc.Dropdown( id="anneal_duration", From d9c9374a3f71ff2abbf39e101f983afd6832ccc1 Mon Sep 17 00:00:00 2001 From: Kate Culver Date: Fri, 14 Feb 2025 09:59:26 -0800 Subject: [PATCH 168/170] Update copy --- app.py | 3 +++ demo_configs.py | 11 ++++++----- helpers/layouts_cards.py | 4 ++-- helpers/plots.py | 4 ++-- 4 files changed, 13 insertions(+), 9 deletions(-) diff --git a/app.py b/app.py index 4840164..5030955 100644 --- a/app.py +++ b/app.py @@ -193,6 +193,7 @@ def tooltips(problem_type: Union[ProblemType, int]) -> list[dbc.Tooltip]: Output("coupling-strength-slider", "children"), Output("main-header", "children"), Output("main-description", "children"), + Output("quench-duration-label", "children"), inputs=[ Input({"type": "problem-type", "index": ALL}, "n_clicks"), State("selected-problem", "data"), @@ -219,6 +220,7 @@ def update_selected_problem_type( coupling-strength-slider: The coupling strength slider setting. main-header: The main header of the problem in the left column. main-description: The description of the problem in the left column. + quench-duration-label: The label for the Quench Duration setting. 
""" if ctx.triggered_id and selected_problem == ctx.triggered_id["index"]: raise PreventUpdate @@ -240,6 +242,7 @@ def update_selected_problem_type( get_coupling_strength_slider(problem_type), MAIN_HEADER if isKZ else MAIN_HEADER_NM, DESCRIPTION if isKZ else DESCRIPTION_NM, + "Quench Duration [ns]" if isKZ else "Target Quench Duration [ns]", ) diff --git a/demo_configs.py b/demo_configs.py index 7205336..09fdb09 100644 --- a/demo_configs.py +++ b/demo_configs.py @@ -26,11 +26,12 @@ """ MAIN_HEADER_NM = "Zero-Noise Extrapolation" DESCRIPTION_NM = """\ -Owing to thermal noise, coupled chains depart from closed system dynamics -(Kibble-Zurek power law 1/2 scaling) for longer quench duration. Experiments at smaller -coupling strength allow for the modelling of higher noise environments. We can model at -a range of coupilng strengths (noise levels), and extrapolate towards the noise-free regime; -thereby improving agreement with theory (closed system dynamics) to larger quench durations. +Statistics of a (target) J=-1.8 chain at quench duration t_target, can be inferred by running at +weaker coupling and longer quench duration (t_programmed). Longer programmed times (at weaker +coupling) are subject to more noise. When collecting data at several noise levels, an extrapolation +to a denoised result is possible. At short target time scales, there is weak environmental coupling +and denoising has little impact. At long target time scales, there is strong environmental coupling +and denoising improves agreement with Kibble-Zurek theory. """ J_BASELINE = -1.8 diff --git a/helpers/layouts_cards.py b/helpers/layouts_cards.py index 8996366..3231194 100644 --- a/helpers/layouts_cards.py +++ b/helpers/layouts_cards.py @@ -47,7 +47,7 @@ def control_card(solvers={}, init_job_status="READY"): html.Div(config_spins), html.Label("Coupling Strength (J)"), html.Div(get_coupling_strength_slider(ProblemType.KZ), id="coupling-strength-slider"), - html.Label("Quench Duration [ns]"), + html.Label("Quench Duration [ns]", id="quench-duration-label"), html.Div(get_quench_duration_setting(ProblemType.KZ), id="quench-duration-dropdown"), html.Label("QPU"), html.Div(config_qpu_selection(solvers)), @@ -126,7 +126,7 @@ def graphs_card(): return dbc.Card( [ html.Div([ - *default_graph("Extrapolating Zero-Noise Density", "kink-v-noise"), + *default_graph("Zero-noise Extrapolation of Kink Density", "kink-v-noise"), *default_graph("Measured and Extrapolated Kink Densities", "kink-v-anneal"), ], id="kz-nm-graphs", className="display-none"), html.Div([ diff --git a/helpers/plots.py b/helpers/plots.py index f08ef9e..9efaf67 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -312,7 +312,7 @@ def kink_v_noise_init_graph(n): plotly.graph_objs.Figure: A Plotly figure object. 
""" fig_layout = go.Layout( - xaxis3=dict(title="Noise Ratio (-1.8/J)", type="linear", range=[0, 3]), + xaxis3=dict(title="Noise ratio (t_{programmed}/t_{target})", type="linear", range=[0, 3]), yaxis1=get_kink_density_axis(n), ) @@ -344,7 +344,7 @@ def kink_v_anneal_init_graph(time_range, n): """ fig_layout = go.Layout( xaxis=dict( - title="Quench Duration [ns]", + title="Target Quench Duration [ns]", type="log", range=[np.log10(time_range[0] - 1), np.log10(time_range[1] + 10)], ), From 833b08fdf76342fc2c9294130881b76961ace63a Mon Sep 17 00:00:00 2001 From: Kate Culver Date: Fri, 14 Feb 2025 11:42:50 -0800 Subject: [PATCH 169/170] Apply suggestions from code review Co-authored-by: Theodor Isacsson --- demo_configs.py | 2 ++ helpers/plots.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/demo_configs.py b/demo_configs.py index 09fdb09..c362e28 100644 --- a/demo_configs.py +++ b/demo_configs.py @@ -24,6 +24,8 @@ Use a quantum computer to simulate the formation of topological defects in a 1D ring of spins undergoing a phase transition, described by the Kibble-Zurek mechanism. """ + +# config settings for ZNE tab MAIN_HEADER_NM = "Zero-Noise Extrapolation" DESCRIPTION_NM = """\ Statistics of a (target) J=-1.8 chain at quench duration t_target, can be inferred by running at diff --git a/helpers/plots.py b/helpers/plots.py index 9efaf67..75152a3 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -312,7 +312,7 @@ def kink_v_noise_init_graph(n): plotly.graph_objs.Figure: A Plotly figure object. """ fig_layout = go.Layout( - xaxis3=dict(title="Noise ratio (t_{programmed}/t_{target})", type="linear", range=[0, 3]), + xaxis3=dict(title="Noise ratio (t_programmed/t_target)", type="linear", range=[0, 3]), yaxis1=get_kink_density_axis(n), ) From a8e6d14c08754976a1ded5e1300c60751df154ad Mon Sep 17 00:00:00 2001 From: Kate Culver Date: Fri, 14 Feb 2025 11:47:46 -0800 Subject: [PATCH 170/170] Remove TODOs --- helpers/plots.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/helpers/plots.py b/helpers/plots.py index 75152a3..a3db32d 100644 --- a/helpers/plots.py +++ b/helpers/plots.py @@ -306,7 +306,7 @@ def kink_v_noise_init_graph(n): """Initiates plot for Kink Density vs Noise Ratio. Args: - n: TODO + n: Kink density per anneal time, as a NumPy array. Returns: plotly.graph_objs.Figure: A Plotly figure object. @@ -337,7 +337,7 @@ def kink_v_anneal_init_graph(time_range, n): Args: time_range (list of float): A list containing the minimum and maximum quench times [min_quench_time, max_quench_time] in nanoseconds. - n: TODO + n: Kink density per anneal time, as a NumPy array. Returns: plotly.graph_objs.Figure: A Plotly figure object.