From da7c4ecad9b32d1c7f73b6a471a4004365ef566b Mon Sep 17 00:00:00 2001
From: Code7G <111247923+Code7G@users.noreply.github.com>
Date: Thu, 23 Nov 2023 19:37:03 +0100
Subject: [PATCH] Major changes to the code and documentation
Updated: (DAIA.py, run.py, thinking.py, OS_control dir, utils dir, README.md, Design dir)
Added: (setup.py, GPT4V, os_controller.py, Evaluator dir)
Removed: (openaicaller.py, OS_control dir files, Actions dir)
And added much more documentation.
---
DAIA.py | 13 +-
.../Actions/DVAI.py | 1 -
.../Actions/action_runner.py | 1 -
.../Actions/action_viewer.py | 1 -
.../OS_control/DAIA_client_control.py | 27 -
.../OS_control/Server_files/python-installer | 7 -
.../OS_control/Server_files/python_install.sh | 53 -
.../OS_control/Server_files/server.py | 36 -
.../Server_files/server_activate.sh | 3 -
.../OS_control/Server_files/server_setup.md | 5 -
.../Thinker/thinking.py | 458 ---------
DAIA_(GPT-4-Turbo-with-Vision)/run.py | 300 ------
DAIA_GPT4V/DVAI/GPT_4_with_Vision.py | 74 ++
.../Memory/memory.py | 0
DAIA_GPT4V/OS_control/os_controller.py | 39 +
.../Optimizer/optimization.py | 0
DAIA_GPT4V/Thinker/thinking.py | 593 ++++++++++++
DAIA_GPT4V/run.py | 170 ++++
Design/.$DAIA (GPT Vision).drawio.bkp | 800 +++++++++++++++
Design/DAIA (GPT Vision) progress.drawio | 914 ++++++++++++++++++
Design/DAIA (GPT Vision) progress.png | Bin 0 -> 498336 bytes
Design/DAIA (GPT Vision).drawio | 16 +-
Design/DAIA.drawio | 376 +++----
README.md | 49 +-
requirements.txt | 3 +-
utils/openaicaller.py | 219 -----
utils/openaicaller.py.md | 137 ---
utils/setup.py | 10 +
utils/tokens.py | 39 -
29 files changed, 2837 insertions(+), 1507 deletions(-)
delete mode 100644 DAIA_(GPT-4-Turbo-with-Vision)/Actions/DVAI.py
delete mode 100644 DAIA_(GPT-4-Turbo-with-Vision)/Actions/action_runner.py
delete mode 100644 DAIA_(GPT-4-Turbo-with-Vision)/Actions/action_viewer.py
delete mode 100644 DAIA_(GPT-4-Turbo-with-Vision)/OS_control/DAIA_client_control.py
delete mode 100644 DAIA_(GPT-4-Turbo-with-Vision)/OS_control/Server_files/python-installer
delete mode 100644 DAIA_(GPT-4-Turbo-with-Vision)/OS_control/Server_files/python_install.sh
delete mode 100644 DAIA_(GPT-4-Turbo-with-Vision)/OS_control/Server_files/server.py
delete mode 100644 DAIA_(GPT-4-Turbo-with-Vision)/OS_control/Server_files/server_activate.sh
delete mode 100644 DAIA_(GPT-4-Turbo-with-Vision)/OS_control/Server_files/server_setup.md
delete mode 100644 DAIA_(GPT-4-Turbo-with-Vision)/Thinker/thinking.py
delete mode 100644 DAIA_(GPT-4-Turbo-with-Vision)/run.py
create mode 100644 DAIA_GPT4V/DVAI/GPT_4_with_Vision.py
rename {DAIA_(GPT-4-Turbo-with-Vision) => DAIA_GPT4V}/Memory/memory.py (100%)
create mode 100644 DAIA_GPT4V/OS_control/os_controller.py
rename {DAIA_(GPT-4-Turbo-with-Vision) => DAIA_GPT4V}/Optimizer/optimization.py (100%)
create mode 100644 DAIA_GPT4V/Thinker/thinking.py
create mode 100644 DAIA_GPT4V/run.py
create mode 100644 Design/.$DAIA (GPT Vision).drawio.bkp
create mode 100644 Design/DAIA (GPT Vision) progress.drawio
create mode 100644 Design/DAIA (GPT Vision) progress.png
delete mode 100644 utils/openaicaller.py
delete mode 100644 utils/openaicaller.py.md
create mode 100644 utils/setup.py
delete mode 100644 utils/tokens.py
diff --git a/DAIA.py b/DAIA.py
index 4de2f6b..0240ce0 100644
--- a/DAIA.py
+++ b/DAIA.py
@@ -1,9 +1,8 @@
-import asyncio
-#from
+from DAIA_GPT4V.run import run
from config import openai_api_key
-async def main(key: str = None) -> None:
+def main(key: str = None) -> None:
# Checking API Key
if not key or len(key) <= 0 or key == "":
input_api_key = ""
@@ -21,7 +20,9 @@ async def main(key: str = None) -> None:
# User Interaction
try:
option = int(
- input("\nOptions\n[1] DAIA_GoalTimed\n[2] DAIA_Constant\n\nSelect Option: ")
+ input(
+ "\nOptions\n[1] DAIA_GPT-4-with-Vision\n[2] DAIA_Continues\n\nSelect Option: "
+ )
)
except ValueError:
@@ -30,7 +31,7 @@ async def main(key: str = None) -> None:
match option:
case 1:
- return await run(api_key=key)
+ return run(api_key=key)
case 2:
return print("Currently Unavaiable.")
@@ -40,4 +41,4 @@ async def main(key: str = None) -> None:
if __name__ == "__main__":
- asyncio.run(main(openai_api_key))
+ main(openai_api_key)
diff --git a/DAIA_(GPT-4-Turbo-with-Vision)/Actions/DVAI.py b/DAIA_(GPT-4-Turbo-with-Vision)/Actions/DVAI.py
deleted file mode 100644
index 6451ea7..0000000
--- a/DAIA_(GPT-4-Turbo-with-Vision)/Actions/DVAI.py
+++ /dev/null
@@ -1 +0,0 @@
-# Digital Vision Artificial Intelligence
diff --git a/DAIA_(GPT-4-Turbo-with-Vision)/Actions/action_runner.py b/DAIA_(GPT-4-Turbo-with-Vision)/Actions/action_runner.py
deleted file mode 100644
index b5a3956..0000000
--- a/DAIA_(GPT-4-Turbo-with-Vision)/Actions/action_runner.py
+++ /dev/null
@@ -1 +0,0 @@
-# Functions for runing actions of the DAIA
diff --git a/DAIA_(GPT-4-Turbo-with-Vision)/Actions/action_viewer.py b/DAIA_(GPT-4-Turbo-with-Vision)/Actions/action_viewer.py
deleted file mode 100644
index 2c75e1e..0000000
--- a/DAIA_(GPT-4-Turbo-with-Vision)/Actions/action_viewer.py
+++ /dev/null
@@ -1 +0,0 @@
-# Functions for viewing the resoult of the runned functions
diff --git a/DAIA_(GPT-4-Turbo-with-Vision)/OS_control/DAIA_client_control.py b/DAIA_(GPT-4-Turbo-with-Vision)/OS_control/DAIA_client_control.py
deleted file mode 100644
index 8aa79e6..0000000
--- a/DAIA_(GPT-4-Turbo-with-Vision)/OS_control/DAIA_client_control.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import socket
-
-
-def send_instruction(host_ip, port, instruction):
- HOST = host_ip # Server IP address
- PORT = port # Port number
-
- # Create a socket object
- client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-
- # Connect to the server
- client_socket.connect((HOST, PORT))
-
- # Send instructions to the server
- client_socket.sendall(instruction.encode())
-
- # Receive feedback from the server
- feedback = client_socket.recv(1024).decode()
-
- # Close the socket
- client_socket.close()
-
- return feedback
-
-
-feedback = send_instruction("IP address", "port as int", "the first test")
-print(feedback)
diff --git a/DAIA_(GPT-4-Turbo-with-Vision)/OS_control/Server_files/python-installer b/DAIA_(GPT-4-Turbo-with-Vision)/OS_control/Server_files/python-installer
deleted file mode 100644
index 99d83c0..0000000
--- a/DAIA_(GPT-4-Turbo-with-Vision)/OS_control/Server_files/python-installer
+++ /dev/null
@@ -1,7 +0,0 @@
-
-
404 Not Found
-
-404 Not Found
-
nginx
-
-
diff --git a/DAIA_(GPT-4-Turbo-with-Vision)/OS_control/Server_files/python_install.sh b/DAIA_(GPT-4-Turbo-with-Vision)/OS_control/Server_files/python_install.sh
deleted file mode 100644
index bd47357..0000000
--- a/DAIA_(GPT-4-Turbo-with-Vision)/OS_control/Server_files/python_install.sh
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/bin/bash
-
-# Check the operating system
-unameOut="$(uname -s)"
-case "${unameOut}" in
- Linux*) os="linux";;
- Darwin*) os="mac";;
- CYGWIN*|MINGW*|MSYS*) os="windows";;
- *) os="unknown";;
-esac
-
-# Determine the download URL based on the operating system
-if [ "${os}" = "linux" ]; then
- url="https://www.python.org/ftp/python/3.x.x/Python-3.x.x.tgz" # Replace 3.x.x with the desired version
-
-elif [ "${os}" = "mac" ]; then
- url="https://www.python.org/ftp/python/3.x.x/python-3.x.x-macosx10.x.pkg" # Replace 3.x.x with the desired version
-
-elif [ "${os}" = "windows" ]; then
- url="https://www.python.org/ftp/python/3.x.x/python-3.x.x.exe" # Replace 3.x.x with the desired version
-
-else
- echo "Unsupported operating system."
- exit 1
-fi
-
-# Download and install Python
-echo "Downloading Python..."
-curl -o python-installer.${ext} ${url} # Replace python-installer.${ext} with the desired output file name and extension
-
-echo "Installing Python..."
-
-if [ "${os}" = "linux" ]; then
- tar -xzvf python-installer.${ext} # Extract the downloaded tarball
- cd Python-3.x.x/ # Replace Python-3.x.x with the extracted directory name
- ./configure --enable-optimizations
- make -j8
- sudo make altinstall
-
-elif [ "${os}" = "mac" ]; then
- sudo installer -pkg python-installer.${ext} -target / # Replace python-installer.${ext} with the downloaded package file name
-
-elif [ "${os}" = "windows" ]; then
- python-installer.${ext} # Replace python-installer.${ext} with the downloaded installer file name
-
-else
- echo "Unsupported operating system."
- exit 1
-fi
-
-# Verify the installation
-echo "Python installation completed."
-python3 --version
\ No newline at end of file
diff --git a/DAIA_(GPT-4-Turbo-with-Vision)/OS_control/Server_files/server.py b/DAIA_(GPT-4-Turbo-with-Vision)/OS_control/Server_files/server.py
deleted file mode 100644
index b4fc3dc..0000000
--- a/DAIA_(GPT-4-Turbo-with-Vision)/OS_control/Server_files/server.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import socket
-
-
-HOST = "0.0.0.0" # Client IP address
-PORT = 1234 # Port number
-
-# Create a socket object
-server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-
-# Bind the socket to a specific address and port
-server_socket.bind((HOST, PORT))
-
-# Listen for incoming connections
-server_socket.listen(1)
-
-# Accept a connection from a client
-client_socket, client_address = server_socket.accept()
-
-# Receive instructions from the client
-instructions = client_socket.recv(1024).decode()
-
-# Example: Process instructions and perform actions
-feedback = ""
-if instructions == "do_action":
- print("Do action (test)")
-# ... add more instructions and corresponding feedback as needed
-
-# Take a screen_shot and save it as feedback
-feedback = "screen-shot*"
-
-# Send feedback back to the client
-client_socket.sendall(feedback.encode())
-
-# Close the connection
-client_socket.close()
-server_socket.close()
diff --git a/DAIA_(GPT-4-Turbo-with-Vision)/OS_control/Server_files/server_activate.sh b/DAIA_(GPT-4-Turbo-with-Vision)/OS_control/Server_files/server_activate.sh
deleted file mode 100644
index bf8a75c..0000000
--- a/DAIA_(GPT-4-Turbo-with-Vision)/OS_control/Server_files/server_activate.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/Server_files
-
-python3 server.py
\ No newline at end of file
diff --git a/DAIA_(GPT-4-Turbo-with-Vision)/OS_control/Server_files/server_setup.md b/DAIA_(GPT-4-Turbo-with-Vision)/OS_control/Server_files/server_setup.md
deleted file mode 100644
index e07f036..0000000
--- a/DAIA_(GPT-4-Turbo-with-Vision)/OS_control/Server_files/server_setup.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# This will be needed for the DAIA client connection to function
-
-Please add the current directory `Server_filesΒ΄ to the machine that you want to be the server.
-After that run the python_install.sh file to install python if python is not yet installed.
-Then run the `server_activate.shΒ΄ file on the server machine.
\ No newline at end of file
diff --git a/DAIA_(GPT-4-Turbo-with-Vision)/Thinker/thinking.py b/DAIA_(GPT-4-Turbo-with-Vision)/Thinker/thinking.py
deleted file mode 100644
index 9164d9e..0000000
--- a/DAIA_(GPT-4-Turbo-with-Vision)/Thinker/thinking.py
+++ /dev/null
@@ -1,458 +0,0 @@
-from utils.openaicaller import caller, models_max_tokens
-from DAIA_GoalTimed.Memory.memory import Memory
-from utils.tokens import num_tokens_from_messages
-
-
-class Think:
- def __init__(self, key: str, goal: str, goal_id: int):
- self.openai_api_key = key
- self.goal = goal
- self.goal_id = goal_id
-
- async def goal_completer(self, suggestions: str):
- real_suggestions = self.get_suggestions(suggestions)
- for suggestion in real_suggestions:
- sub_suggestions = await self.suggestion_splitter(suggestion)
-
- # Process the first level of suggestions
- # for suggestion in self.suggestions:
- # explanation = self.suggestion_explainer(suggestion, ...)
- # action = self.action(explanation, ...)
- #
- # # Continue processing sub-suggestions as long as the action is False
- # while action == False:
- # sub_suggestions = self.suggestion_splitter(explanation, ...)
- #
- # # Process each sub-suggestion
- # for sub_suggestion in sub_suggestions:
- # sub_explanation = self.suggestion_explainer(sub_suggestion, ...)
- # sub_action = self.action(sub_explanation, ...)
- #
- # # Continue processing sub-sub-suggestions as long as the sub-action is False
- # while sub_action == False:
- # sub_sub_suggestions = self.suggestion_splitter(sub_explanation, ...)
- #
- # # Process each sub-sub-suggestion
- # for sub_sub_suggestion in sub_sub_suggestions:
- # sub_sub_explanation = self.suggestion_explainer(sub_sub_suggestion, ...)
- # sub_sub_action = self.action(sub_sub_explanation)
- #
- # if sub_sub_action == False:
- # # Continue processing sub-sub-suggestions
- # continue
- # else:
- # # Do the action for sub-sub-suggestion
- # pass
- #
- # # Check if there are more sub-sub-suggestions
- # if not sub_sub_suggestions:
- # sub_action = True # Exit the sub-action loop if there are no more sub-sub-suggestions
- #
- # # Do the action for sub-suggestion
- # pass
- #
- # # Check if there are more sub-suggestions
- # if not sub_suggestions:
- # action = True # Exit the action loop if there are no more sub-suggestions
- #
- # print(f'Action {suggestion} done')
- # pass
-
- # OR
-
- # for suggestion in self.suggestions:
- # explanation = self.suggestion_explainer(suggestion, ...)
- # action = self.action(explanation, ...)
- #
- # if action == False:
- # sub_suggestions = self.suggestion_splitter(explanation, ...)
- #
- # for sub_suggestion in sub_suggestions:
- # sub_explanation = self.suggestion_explainer(sub_suggestion, ...)
- # sub_action = self.action(explanation, ...)
- #
- # if sub_action == False:
- # sub_sub_suggestions = self.suggestion_splitter(sub_explanation, ...)
- #
- # for sub_sub_suggestion in sub_sub_suggestions:
- # sub_sub_explanation = self.suggestion_explainer(sub_sub_suggestion, ...)
- # sub_sub_action = self.action(sub_sub_explanation)
- #
- # if sub_sub_action == False:
- # # Do the same....
- #
- # else:
- # pass
- # # Do the action
- #
- # else:
- # pass
- # # Do the action
- #
- # else:
- # pass
- # # Do the action
-
- def action_compleation():
- pass
- # Compleate actions
-
- async def action(
- self,
- suggestion: str,
- os: str,
- commands: str,
- screen_data: str,
- previous_data: str,
- ):
- str_commands = ""
- for command in commands:
- str_commands = str(command) + "\n"
-
- while True:
- executable = await caller.generate_response(
- api_key=self.openai_api_key,
- model="gpt-3.5-turbo",
- messages=[
- {
- "role": "user",
- "content": f"""
-Can you determine if the provided suggestion, along with the given commands and current screen data, is specific enough to be executed on the {os}? Please provide the first command with its expected outcome to complete the suggestion if it is possible. Consider the following information:
-
-Given commands:
-{str_commands}
-
-Previous data:
-{previous_data}
-
-Current screen information:
-{screen_data}
-
-Suggestion:
-{suggestion}
-
-If the suggestion is sufficiently specific and can be carried out on the {os} using the provided commands and current screen data, please type the first command along with its expected outcome, like this:
-1. [command][perameter of command or none] (expected outcome)
-
-If the suggestion is not specific enough, please state "Not specific"
-""",
- }
- ],
- )
-
- if executable == "Not specific" or executable == '"Not specific"':
- return False
-
- else:
- return executable
-
- async def suggestion_explainer(self, suggestion: str):
- previous_info = await self.short_remember(
- f"""
-You have a goal you want to achieve.
-You have already gotten some information on the steps to achieving your goal.
-So, based on the previous steps and information you must ask someone a question that will give you the information to complete your current step to progress toward achieving your goal.
-
-your goal = {self.goal}
-your previous steps and information = >>previous context missing<<
-your current step = {suggestion}
-
-What would that question be? (respond only with the question)
-"""
- )
-
- prompt = f"""
-You have a goal you want to achieve.
-You have already gotten some information on the steps to achieving your goal.
-So, based on the previous steps and information you must ask someone a question that will give you the information to complete your current step to progress toward achieving your goal.
-
-your goal = {self.goal}
-your previous steps and information = {previous_info}
-your current step = {suggestion}
-
-What would that question be? (respond only with the question)
-"""
-
- # print(f'Previous info for {suggestion}: {previous_info}\n \n')
-
- question = await caller.generate_response(
- api_key=self.openai_api_key,
- model="gpt-3.5-turbo",
- messages=[
- {
- "role": "user",
- "content": prompt,
- }
- ],
- )
-
- question = question["choices"][0]["message"]["content"]
-
- await self.save_action(action1=prompt, action2=question, category=0)
-
- # print(f'Prompt for {suggestion}: {previous_info}\n \n')
-
- suggestion_suggestions = await caller.generate_response(
- api_key=self.openai_api_key,
- model="gpt-3.5-turbo",
- messages=[
- {
- "role": "user",
- "content": f"""
-{question}
-""",
- }
- ],
- )
-
- await self.save_action(
- action1=question,
- action2=suggestion_suggestions["choices"][0]["message"]["content"],
- category=0,
- )
-
- # print(f'Explanation for {suggestion}: {suggestion_suggestions}\n \n')
-
- return suggestion_suggestions["choices"][0]["message"]["content"]
-
- async def suggestion_splitter(self, suggestion: str):
- explanation = await self.suggestion_explainer(suggestion)
- previous_data = await self.short_remember(
- f"""
-What are the suggestions in the response based on the given response and previous data?
-
-Previous data: >>previous data missing<<
-Response: {explanation}
-
-Please provide the suggestions sequentially, without any additional text. For instance:
-1. Suggestion
-2. Suggestion
-Additional suggestions mentioned in the response...
-
-If the response explicitly rejects providing suggestions, please type "Rejected" on the first line of your response, followed by an explanation of why no suggestions or advice were given.
-
-If the response does not include any suggestions or provides information other than suggestions, please generate your own suggestions based on the provided response and previous data. For example:
-1. Suggestion
-2. Suggestion
-Additional suggestions based on the provided response and previous data...
-"""
- )
-
- prompt = f"""
-What are the suggestions in the response based on the given response and previous data?
-
-Previous data: {previous_data}
-Response: {explanation}
-
-Please provide the suggestions sequentially, without any additional text. For instance:
-1. Suggestion
-2. Suggestion
-Additional suggestions mentioned in the response...
-
-If the response explicitly rejects providing suggestions, please type "Rejected" on the first line of your response, followed by an explanation of why no suggestions or advice were given.
-
-If the response does not include any suggestions or provides information other than suggestions, please generate your own suggestions based on the provided response and previous data. For example:
-1. Suggestion
-2. Suggestion
-Additional suggestions based on the provided response and previous data...
-"""
-
- while True:
- sub_suggestions = await caller.generate_response(
- api_key=self.openai_api_key,
- model="gpt-3.5-turbo",
- messages=[
- {
- "role": "user",
- "content": prompt,
- }
- ],
- )
-
- sub_suggestions = sub_suggestions["choices"][0]["message"]["content"]
-
- await self.save_action(action1=prompt, action2=sub_suggestions, category=0)
-
- if sub_suggestions[0:5].lower() in "reject":
- print(
- f"""
-Sub-suggestion {sub_suggestions}\n \n
-"""
- )
-
- else:
- print(
- f"""
-General '{suggestion}' steps:
-{sub_suggestions}\n \n
-"""
- )
-
- return sub_suggestions
-
- async def gpt_memory_capacity(self, data, gpt_role, gpt):
- max_tokens = models_max_tokens[gpt]
- message = {"role": gpt_role, "content": data}
- message_list = [message]
- tokens = await num_tokens_from_messages(message_list)
-
- if tokens < max_tokens:
- return True
-
- else:
- return False
-
- async def save_goal(self):
- goal_summary = await self.generate_title(self.goal, "goal")
-
- memory = Memory()
- new_goal = memory.create_goal_object(goal_summary)
- memory.save_objects_in_db([new_goal])
-
- return new_goal.goal_id
-
- async def save_goal_in_goal(self):
- memory = Memory()
-
- goal_action = memory.create_action_object(
- goal_id=self.goal_id,
- title="Final Goal",
- category="Goal",
- full_data=self.goal,
- important_data=f"The Final Goal is: {self.goal}",
- )
- memory.save_objects_in_db([goal_action])
-
- async def save_action(self, action1: str, action2: str, category: int):
- """
- Category:
- "question=>response" = int 0
- "response=>action" = int 1
- "action=>result" = int 2
- "result=>action" = int 3
- """
-
- categories = [
- "question=>response",
- "response=>action",
- "action=>result",
- "result=>action",
- ]
-
- first = categories[category].split("=")[0]
- second = categories[category].split(">")[-1]
-
- memory = Memory()
-
- data = f'[1. {first}]: "{action1}",\n[2. {second}]: "{action2}"'
- title = await self.generate_title(data, f'"{first} with its {second}"')
- previous_important_data = await self.short_remember(
- f"""
-Given the following context:
->>previous context missing<<
-
-And the input data:
-"{data}"
-
-Please use the provided context to extract and present the most important data from the input.
-""",
- )
-
- important_data = await self.get_important_data(data, previous_important_data)
-
- new_action = memory.create_action_object(
- self.goal_id, title, categories[category], data, important_data
- )
- memory.save_objects_in_db([new_action])
-
- async def short_remember(self, need: str):
- memory = Memory()
-
- previous_important_data = ""
- for action in memory.get_ordered_actions_of_goal(self.goal_id, 100):
- previous_important_data = previous_important_data + "".join(
- f'[{getattr(action, "action_id")}. Action: (Title of action: "{getattr(action, "title")}", Important data of action: "{getattr(action, "important_data")}")]\n'
- )
-
- if len(previous_important_data) <= 0:
- return "Nothing has hppened yet."
-
- previous_data = await caller.generate_response(
- api_key=self.openai_api_key,
- model="gpt-3.5-turbo",
- messages=[
- {
- "role": "user",
- "content": f"""
-Given the following context:
-"{previous_important_data}"
-
-And the input data:
-"{need}"
-
-Please input the necessary, relevant, and concise context from the following context to complete the input. Your input should address the gaps in the provided input data, ensuring accurate responses without any future errors or issues.
-
-Please avoid addressing the prompt directly. Only provide the crucial context. Keep your response minimal and to the point.
-""",
- }
- ],
- )
-
- # print(
- # f'Previous_info: {previous_data["choices"][0]["message"]["content"]}\n \n')
-
- return previous_data["choices"][0]["message"]["content"]
-
- async def get_important_data(self, data: str, previous_data: str):
- important_data = await caller.generate_response(
- api_key=self.openai_api_key,
- model="gpt-3.5-turbo",
- messages=[
- {
- "role": "user",
- "content": f"""
-Given the following context:
-"{previous_data}"
-
-And the input data:
-"{data}"
-
-Please use the provided context to extract and present the most important data from the input.
-""",
- }
- ],
- )
-
- return important_data["choices"][0]["message"]["content"]
-
- async def generate_title(self, data: str, item_category: str):
- title = await caller.generate_response(
- api_key=self.openai_api_key,
- model="gpt-3.5-turbo",
- messages=[
- {
- "role": "user",
- "content": f"""
-Provide a concise title (<75 chars) describing the {item_category}.
-
-{item_category} = "{data}"
-""",
- }
- ],
- )
-
- return title["choices"][0]["message"]["content"]
-
- def get_suggestions(self, suggestions: str):
- suggestions_ = suggestions
- for n in range(0, 40):
- number = 40 - n
- suggestions_ = suggestions_.replace(f"{number}.", "%_%")
-
- suggestions_ = suggestions_.replace("\n", "")
- real_suggestions = suggestions_.split("%_%")
-
- if real_suggestions.count("") > 0:
- real_suggestions.remove("")
-
- return real_suggestions
diff --git a/DAIA_(GPT-4-Turbo-with-Vision)/run.py b/DAIA_(GPT-4-Turbo-with-Vision)/run.py
deleted file mode 100644
index cce71f9..0000000
--- a/DAIA_(GPT-4-Turbo-with-Vision)/run.py
+++ /dev/null
@@ -1,300 +0,0 @@
-import asyncio
-
-from pathlib import Path
-from DAIA_GoalTimed.Thinker.thinking import Think
-from utils.openaicaller import caller
-
-
-async def run(api_key):
- goal = input("Please enter your goal for the DAIA here: ")
-
- while len(goal) <= 0:
- print("Goal is empty!")
- goal = input("Please enter your goal for the DAIA here: ")
-
- think_ = Think(key=api_key, goal=goal, goal_id=-1)
- goal_id = await think_.save_goal()
-
- think = Think(key=api_key, goal=goal, goal_id=goal_id)
- await think.save_goal_in_goal()
-
- # personal = input('''
- # (Press ENTER to skip)
- # Would you like to add some personal information to the DAIA? (this is optional, but could be helpful when dealing with goals involving payment and account info)
- # (Y/N) ''')
- #
- # if personal.upper() == 'Y':
- # crypto_wallet_addres = input(
- # 'Enter your crypto wallet address here: ')
- # # Credit card option
- # # ...
- # print('When the DAIA thinks it needs to deposit anything into your accounts, it will first ask you before doing so')
-
- while True:
- prompt = f"""
-You have a goal you want to achieve.
-As your first step you want to know how to achieve your goal. So you must ask someone a question that will give you that information.
-
-your goal = {goal}
-
-What would that question be? (respond only with the question)
-"""
-
- question = await caller.generate_response(
- api_key=api_key,
- model="gpt-3.5-turbo",
- messages=[{"role": "user", "content": prompt}],
- )
-
- question = question["choices"][0]["message"]["content"]
-
- print(f"Goal question: {question}")
- q_agree = input("Do you agree with the question for goal completion? (Y/N)")
-
- if q_agree.upper() == "N":
- print("Retrying...")
-
- else:
- break
-
- goal_help = await caller.generate_response(
- api_key=api_key,
- model="gpt-3.5-turbo",
- messages=[{"role": "user", "content": question}],
- )
-
- goal_help = goal_help["choices"][0]["message"]["content"]
-
- await think.save_action(action1=question, action2=goal_help, category=0)
- previous_data = await think.short_remember(
- f"""
-What are the suggestions in the response, based on the given response and previous data?
-
-Previous data: >> previous context missing <<
-Response: {goal_help}
-
-Please provide the suggestions sequentially, without any additional text. For instance:
-1. Suggestion
-2. Suggestion
-Additional suggestions mentioned in the response...
-
-If the response explicitly rejects providing suggestions, please type "Rejected" on the first line of your response, followed by an explanation of why no suggestions or advice were given.
-
-If the response does not include any suggestions or provides information other than suggestions, please generate your own suggestions based on the provided response and previous data. For example:
-1. Suggestion
-2. Suggestion
-Additional suggestions based on the provided response and previous data...
-"""
- )
-
- prompt = f"""
-What are the suggestions in the response, based on the given response and previous data?
-
-Previous data: {previous_data}
-Response: {goal_help}
-
-Please provide the suggestions sequentially, without any additional text. For instance:
-1. Suggestion
-2. Suggestion
-Additional suggestions mentioned in the response...
-
-If the response explicitly rejects providing suggestions, please type "Rejected" on the first line of your response, followed by an explanation of why no suggestions or advice were given.
-
-If the response does not include any suggestions or provides information other than suggestions, please generate your own suggestions based on the provided response and previous data. For example:
-1. Suggestion
-2. Suggestion
-Additional suggestions based on the provided response and previous data...
-"""
-
- suggestions = await caller.generate_response(
- api_key=api_key,
- model="gpt-3.5-turbo",
- messages=[{"role": "user", "content": prompt}],
- )
-
- suggestions = suggestions["choices"][0]["message"]["content"]
-
- await think.save_action(action1=prompt, action2=suggestions, category=0)
-
- if suggestions[0:5].lower() in "reject":
- print(
- f"""
-Goal {suggestions}
-"""
- )
-
- else:
- print("Goal accepted!")
- print(
- f"""
-General goal steps:
-{suggestions}
-"""
- )
-
- second_time = False
- while True:
- print(
- "(Press ENTER to skip) WARNING: if you skip, the suggestions will be accepted. This is the LAST MANUAL step, everything from here on is automated"
- )
- agree = input(
- "Do you agree with the current suggestions/processes for your goal? (Y/N)"
- )
-
- if second_time:
- if agree.upper() == "N":
- await think.save_action(
- action1=f'"Sorry, but I dissagree with the current suggestions because: \n{explanation}\nCan you update the suggestions?"',
- action2=f'"Yes, here are the new suggestions: \n{suggestions}"',
- category=0,
- )
-
- explanation = input("Why do you not agree? ")
-
- prompt = f"""
-Please try to correct your suggestions.
-
-Here is why I don't agree with them:
-{explanation}
-"""
-
- suggestion_correction = await caller.generate_response(
- api_key=api_key,
- model="gpt-3.5-turbo",
- messages=[
- {"role": "user", "content": suggestion_correction},
- {"role": "user", "content": prompt},
- ],
- )
-
- suggestion_correction = suggestion_correction["choices"][0][
- "message"
- ]["content"]
-
- prompt = f"""
-Please provide the suggestions mentioned in the following response:
-
-Response: {suggestion_correction}
-
-List only the suggestions in a sequential manner, without any additional text. For example:
-1. Suggestion
-2. Suggestion
-Additional suggestions mentioned in the response...
-
-If the response explicitly rejects providing suggestions, please type "Rejected" on the first line of your response, followed by an explanation of why no suggestions or advice were given.
-
-If the response does not include any suggestions or provides information other than suggestions, please generate your own suggestions based on the provided response. For example:
-1. Suggestion
-2. Suggestion
-Additional suggestions based on the provided response...
-"""
-
- suggestions = await caller.generate_response(
- api_key=api_key,
- model="gpt-3.5-turbo",
- messages=[{"role": "user", "content": prompt}],
- )
-
- suggestions = suggestions["choices"][0]["message"]["content"]
-
- if suggestions[0:5].lower() in "reject":
- print(
- f"""
-Goal {suggestions}
-"""
- )
-
- else:
- print("Goal accepted!")
- print(
- f"""
-General goal steps:
-{suggestions}
-"""
- )
-
- else:
- await think.save_action(
- action1=f'"Sorry, but I dissagree with the current suggestions because: \n{explanation}\nCan you update the suggestions?"',
- action2=f'"Yes, here are the new suggestions: \n{suggestions}"',
- category=0,
- )
-
- print("/) Ok then, let's go!")
- break
-
- else:
- if agree.upper() == "N":
- second_time = True
- explanation = input("Why do you not agree? ")
-
- prompt = f"""
-Please try to correct your suggestions.
-
-Here is why I don't agree with them:
-{explanation}
-"""
-
- suggestion_correction = await caller.generate_response(
- api_key=api_key,
- model="gpt-3.5-turbo",
- messages=[
- {"role": "user", "content": suggestions},
- {"role": "user", "content": prompt},
- ],
- )
-
- suggestion_correction = suggestion_correction["choices"][0][
- "message"
- ]["content"]
-
- prompt = f"""
-Please provide the suggestions mentioned in the following response:
-
-Response: {suggestion_correction}
-
-List only the suggestions in a sequential manner, without any additional text. For example:
-1. Suggestion
-2. Suggestion
-Additional suggestions mentioned in the response...
-
-If the response explicitly rejects providing suggestions, please type "Rejected" on the first line of your response, followed by an explanation of why no suggestions or advice were given.
-
-If the response does not include any suggestions or provides information other than suggestions, please generate your own suggestions based on the provided response. For example:
-1. Suggestion
-2. Suggestion
-Additional suggestions based on the provided response...
-"""
-
- suggestions = await caller.generate_response(
- api_key=api_key,
- model="gpt-3.5-turbo",
- messages=[{"role": "user", "content": prompt}],
- )
-
- suggestions = suggestions["choices"][0]["message"]["content"]
-
- if suggestions[0:5].lower() in "reject":
- print(
- f"""
-Goal {suggestions}
-"""
- )
-
- else:
- print("Goal accepted!")
- print(
- f"""
-General goal steps:
-{suggestions}
-"""
- )
-
- else:
- print("/) Ok then, let's go!")
- break
-
- await think.goal_completer(suggestions)
-
-
-# think.goal_completer(suggestions)
diff --git a/DAIA_GPT4V/DVAI/GPT_4_with_Vision.py b/DAIA_GPT4V/DVAI/GPT_4_with_Vision.py
new file mode 100644
index 0000000..9f08020
--- /dev/null
+++ b/DAIA_GPT4V/DVAI/GPT_4_with_Vision.py
@@ -0,0 +1,74 @@
+from openai import OpenAI
+import base64
+import requests
+
+
class DVAI:
    """
    Digital Vision Artificial Intelligence.

    Thin wrapper around the OpenAI GPT-4 Vision API that answers a text
    prompt (``context``) about an image supplied either as a public URL
    or as a local file inlined as base64.
    """

    def __init__(self, key: str):
        # Kept separately because gpt_with_vision_by_base64() builds a raw
        # HTTP request and needs the bearer token itself.
        self.api_key = key

        self.client = OpenAI(
            api_key=key,
        )

    def gpt_with_vision_by_url(self, image_url, context: str):
        """
        Ask GPT-4 Vision about an image reachable at `image_url`.

        Returns the model's text answer.
        """
        response = self.client.chat.completions.create(
            model="gpt-4-vision-preview",
            messages=[
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": context},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": image_url,
                            },
                        },
                    ],
                }
            ],
        )

        return response.choices[0].message.content

    def gpt_with_vision_by_base64(self, image_path: str, context: str):
        """
        Ask GPT-4 Vision about a local image file.

        The file is read, base64-encoded and inlined into the request as a
        data URL, so the image does not need to be publicly hosted.
        Returns the model's text answer.

        Raises requests.HTTPError if the API responds with an error status,
        and requests.Timeout if the API does not answer in time.
        """
        base64_image = self.encode_image(image_path)

        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.api_key}",
        }

        payload = {
            "model": "gpt-4-vision-preview",
            "messages": [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": context},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": f"data:image/jpeg;base64,{base64_image}"
                            },
                        },
                    ],
                }
            ],
            "max_tokens": 1000,
        }

        # Fix: the original call had no timeout (a stalled API would hang the
        # process forever) and indexed into the JSON without checking the HTTP
        # status, so API errors surfaced as an opaque KeyError on "choices".
        response = requests.post(
            "https://api.openai.com/v1/chat/completions",
            headers=headers,
            json=payload,
            timeout=120,
        )
        response.raise_for_status()

        return response.json()["choices"][0]["message"]["content"]

    def encode_image(self, image_path):
        """Return the contents of the file at `image_path` base64-encoded as str."""
        with open(image_path, "rb") as image_file:
            return base64.b64encode(image_file.read()).decode("utf-8")
diff --git a/DAIA_(GPT-4-Turbo-with-Vision)/Memory/memory.py b/DAIA_GPT4V/Memory/memory.py
similarity index 100%
rename from DAIA_(GPT-4-Turbo-with-Vision)/Memory/memory.py
rename to DAIA_GPT4V/Memory/memory.py
diff --git a/DAIA_GPT4V/OS_control/os_controller.py b/DAIA_GPT4V/OS_control/os_controller.py
new file mode 100644
index 0000000..dbf2cde
--- /dev/null
+++ b/DAIA_GPT4V/OS_control/os_controller.py
@@ -0,0 +1,39 @@
+import pyautogui
+import platform
+
+
class OSController:
    """
    Control the OS with pyautogui, and read host metadata via the
    platform module.
    """

    def __init__(self):
        # Cursor position sampled once at construction time; it is NOT kept
        # up to date afterwards.
        self.x = pyautogui.position()[0]
        self.y = pyautogui.position()[1]

    def click(self, x, y):
        """Left-click at screen coordinates (x, y)."""
        pyautogui.click(x, y)

    def move_cursor_to(self, x, y):
        """Glide the cursor to (x, y) over 0.2 seconds."""
        pyautogui.moveTo(x, y, duration=0.2)

    def keyboard(self, string):
        """Type `string` on the keyboard."""
        pyautogui.typewrite(string)

    def scroll(self, direction, amount):
        """
        Scroll the mouse wheel `amount` clicks in `direction` ("up"/"down").

        Any other `direction` value is silently ignored.

        Bug fix: pyautogui.scroll() scrolls up for POSITIVE values and down
        for negative ones; the original mapping passed -amount for "up" and
        +amount for "down", i.e. it scrolled the wrong way.
        """
        if direction == "up":
            pyautogui.scroll(amount)
        elif direction == "down":
            pyautogui.scroll(-amount)

    def screenshot(self, path):
        """Capture the full screen and save the image to `path`."""
        screenshot = pyautogui.screenshot()
        screenshot.save(path)

    def get_system_info(self):
        """Return a dict with the host's OS name, release, CPU and hostname."""
        return {
            "OS": platform.system(),
            "Version": platform.release(),
            "Architecture": platform.processor(),
            "Hostname": platform.node(),
        }
diff --git a/DAIA_(GPT-4-Turbo-with-Vision)/Optimizer/optimization.py b/DAIA_GPT4V/Optimizer/optimization.py
similarity index 100%
rename from DAIA_(GPT-4-Turbo-with-Vision)/Optimizer/optimization.py
rename to DAIA_GPT4V/Optimizer/optimization.py
diff --git a/DAIA_GPT4V/Thinker/thinking.py b/DAIA_GPT4V/Thinker/thinking.py
new file mode 100644
index 0000000..86adfd6
--- /dev/null
+++ b/DAIA_GPT4V/Thinker/thinking.py
@@ -0,0 +1,593 @@
+from DAIA_GPT4V.Memory.memory import Memory
+from DAIA_GPT4V.OS_control.os_controller import OSController
+from DAIA_GPT4V.DVAI.GPT_4_with_Vision import DVAI
+from utils.setup import setup
+from openai import OpenAI
+from pathlib import Path
+from random import randint
+
+
class Think:
    """
    The main class for operations involving the GPT for the DAIA.

    Wraps a single user goal (identified by ``goal_id`` in the DAIA
    MemoryDB) and provides the prompt plumbing used to split the goal
    into suggestions, refine them, and persist every question/response
    exchange as "actions" in the MemoryDB.
    """

    def __init__(self, key: str, goal: str, goal_id: int):
        # The key is kept on the instance so it can be handed to DVAI too.
        self.openai_api_key = key
        self.goal = goal
        self.goal_id = goal_id

        self.client = OpenAI(
            api_key=key,
        )

    def goal_completer(self, suggestions: str):
        """
        Drive completion of the goal from a numbered-suggestions response.

        For each extracted suggestion: screenshot the screen, have GPT-4V
        describe what on screen relates to the suggestion, then ask
        action() whether the suggestion can be turned into executable
        commands.

        NOTE(review): the trailing `break` means only the FIRST suggestion
        is ever processed — this is work in progress.
        """
        setup()

        # Set up system info and the command vocabulary offered to the GPT
        os_controller = OSController()
        system_info = os_controller.get_system_info()
        commands = [
            "click[x,y possition]",
            "move_cursor_to[x,y possition]",
            "keyboard[string]",
        ]

        dvai = DVAI(self.openai_api_key)

        first_suggestions = self.get_suggestions(suggestions)
        for suggestion in first_suggestions:
            # Take a screenshot and save it under a pseudo-random file name
            # (10 digits, each the loop index plus a random 1..9 offset)
            screenshot_savepath = Path(
                f'DAIA/Screenshots/screenshot{"".join([str(e + randint(1, 9)) for e in range(10)])}.png'
            )
            os_controller.screenshot(screenshot_savepath)

            # Ask GPT-4V to describe what on screen relates to this
            # suggestion (the prompt needs improvements)
            prompt = f"""
Please state what is in the provided screenshot of the {str(system_info.get('OS'))} OS that relates to {suggestion} of the goal {self.goal}.
"""
            screenshot_description = dvai.gpt_with_vision_by_base64(
                screenshot_savepath, prompt
            )
            print(f"Screenshot description: {screenshot_description}")

            executable_commands = self.action(
                suggestion,
                str(system_info.get("OS")),
                commands,
                screenshot_description,
                suggestion,
            )
            print(executable_commands)
            break

    def action_compleation():
        # NOTE(review): unimplemented stub — also missing the `self`
        # parameter, so calling it on an instance would raise TypeError.
        pass
        # Complete an action (not yet implemented)

    def action(
        self,
        suggestion: str,
        os: str,
        commands: list,
        screen_data: str,
        previous_data: str,
    ):
        """
        Check whether a suggestion is specific enough to be carried out on
        the OS with the provided commands.

        Returns the GPT's command listing (str) when it deems the
        suggestion executable, or False when the GPT answers exactly
        "Not specific" (with or without quotes).

        (Current state: the prompt needs improvements so that the GPT can
        know more about the current screen data regarding the suggestion
        instead of just general data)
        """

        # Flatten the command vocabulary into one newline-separated string
        str_commands = ""
        for command in commands:
            str_commands += str(command) + "\n"

        # The main prompt
        executable = self.client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {
                    "role": "user",
                    "content": f"""
Can you determine if the provided suggestion, along with the given commands and current screen data, is specific enough to be executed on the {os} OS? Please provide the commands with thair expected outcome to complete the suggestion if it is possible. Consider the following information:

Given commands:
{str_commands}

Previous data:
{previous_data}

Current screen information:
{screen_data}

Suggestion:
{suggestion}

If the suggestion is sufficiently specific and can be carried out on the {os} OS using the provided commands, please type the commands along with thair expected outcomes, like this:
1. command[perameter of command or none] (expected outcome)
2. command[perameter of command or none] (expected outcome)
3. command[perameter of command or none] (expected outcome)
Additional commands with outcomes...

If the suggestion is not specific enough, please state "Not specific"
""",
                }
            ],
        )
        executable = executable.choices[0].message.content

        # Exact-match check only — any extra prose around "Not specific"
        # makes the response count as executable commands
        if executable == "Not specific" or executable == '"Not specific"':
            return False

        else:
            return executable

    def suggestion_explainer(self, suggestion: str):
        """
        Explain a suggestion by making the GPT ask — and then answer — a
        question about it.

        Suggestion ("Create an account") -> explanation ("To create an account do...")
        """

        # Recall relevant history; short_remember fills the
        # ">>previous context missing<<" placeholder in this template
        previous_info = self.short_remember(
            f"""
You have a goal you want to achieve.
You have already gotten some information on the steps to achieving your goal.
So, based on the previous steps and information you must ask someone a question that will give you the information to complete your current step to progress toward achieving your goal.

your goal = {self.goal}
your previous steps and information = >>previous context missing<<
your current step = {suggestion}

What would that question be? (respond only with the question)
"""
        )

        # Build the real prompt with the recalled history inserted
        prompt = f"""
You have a goal you want to achieve.
You have already gotten some information on the steps to achieving your goal.
So, based on the previous steps and information you must ask someone a question that will give you the information to complete your current step to progress toward achieving your goal.

your goal = {self.goal}
your previous steps and information = {previous_info}
your current step = {suggestion}

What would that question be? (respond only with the question)
"""
        # Make the GPT generate a question about the suggestion
        question = self.client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {
                    "role": "user",
                    "content": prompt,
                }
            ],
        )
        question = question.choices[0].message.content
        self.save_action(action1=prompt, action2=question, category=0)

        # Make the GPT answer its own question to produce the explanation
        suggestion_suggestions = self.client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {
                    "role": "user",
                    "content": f"""
{question}
""",
                }
            ],
        )
        self.save_action(
            action1=question,
            action2=suggestion_suggestions.choices[0].message.content,
            category=0,
        )

        return suggestion_suggestions.choices[0].message.content

    def suggestion_splitter(self, suggestion: str):
        """
        Split a suggestion (or step) into its sub-suggestions (or steps).

        Suggestion ("Create an account") -> suggestions (['Visit the website..', 'Create account...', 'Access services..'])

        Returns the numbered sub-suggestions as one string, or the string
        "Rejected" when the GPT declines to provide any.
        """

        # Explain the suggestion first; the explanation is what gets split
        explanation = self.suggestion_explainer(suggestion)

        # Recall relevant history; short_remember fills the
        # ">>previous data missing<<" placeholder in this template
        previous_data = self.short_remember(
            f"""
What are the suggestions in the response based on the given response and previous data?

Previous data: >>previous data missing<<
Response: {explanation}

Please provide the suggestions sequentially, without any additional text. For instance:
1. Suggestion
2. Suggestion
Additional suggestions mentioned in the response...

If the response explicitly rejects providing suggestions, please type "Rejected" on the first line of your response, followed by an explanation of why no suggestions or advice were given.

If the response does not include any suggestions or provides information other than suggestions, please generate your own suggestions based on the provided response and previous data. For example:
1. Suggestion
2. Suggestion
Additional suggestions based on the provided response and previous data...
"""
        )

        # Build the real prompt with the recalled history inserted
        prompt = f"""
What are the suggestions in the response based on the given response and previous data?

Previous data: {previous_data}
Response: {explanation}

Please provide the suggestions sequentially, without any additional text. For instance:
1. Suggestion
2. Suggestion
Additional suggestions mentioned in the response...

If the response explicitly rejects providing suggestions, please type "Rejected" on the first line of your response, followed by an explanation of why no suggestions or advice were given.

If the response does not include any suggestions or provides information other than suggestions, please generate your own suggestions based on the provided response and previous data. For example:
1. Suggestion
2. Suggestion
Additional suggestions based on the provided response and previous data...
"""
        # Use the prompt to extract the suggestions from the response
        sub_suggestions = self.client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {
                    "role": "user",
                    "content": prompt,
                }
            ],
        )
        sub_suggestions = sub_suggestions.choices[0].message.content
        self.save_action(action1=prompt, action2=sub_suggestions, category=0)

        # NOTE(review): this is a substring test — it matches because the
        # first five chars "rejec" are contained in "reject"; an EMPTY
        # response also matches ("" in "reject" is True). A
        # startswith("reject") check is probably what was intended.
        if sub_suggestions[0:5].lower() in "reject":
            print(
                f"""
Sub-suggestion {sub_suggestions}\n \n
"""
            )
            return "Rejected"

        else:
            print(
                f"""
General '{suggestion}' steps:
{sub_suggestions}\n \n
"""
            )
            return sub_suggestions

    def explanation_to_suggestions(self, explanation: str, prev_data: bool):
        """
        Split an explanation into suggestions.

        Explanation ("To create an account do..") -> suggestions (['Visit the website..', 'Create account...', 'Access services..'])

        When `prev_data` is True the prompt is enriched with history
        recalled from the MemoryDB; otherwise the explanation alone is
        used. Returns the numbered suggestions as one string, or
        "Rejected" when the GPT declines (only in the prev_data branch —
        see NOTE below).
        """

        if prev_data:
            # Recall relevant history; short_remember fills the
            # ">>previous data missing<<" placeholder in this template
            previous_data = self.short_remember(
                f"""
What are the suggestions in the response based on the given response and previous data?

Previous data: >>previous data missing<<
Response: {explanation}

Please provide the suggestions sequentially, without any additional text. For instance:
1. Suggestion
2. Suggestion
Additional suggestions mentioned in the response...

If the response explicitly rejects providing suggestions, please type "Rejected" on the first line of your response, followed by an explanation of why no suggestions or advice were given.

If the response does not include any suggestions or provides information other than suggestions, please generate your own suggestions based on the provided response and previous data. For example:
1. Suggestion
2. Suggestion
Additional suggestions based on the provided response and previous data...
"""
            )

            # Build the real prompt with the recalled history inserted
            prompt = f"""
What are the suggestions in the response based on the given response and previous data?

Previous data: {previous_data}
Response: {explanation}

Please provide the suggestions sequentially, without any additional text. For instance:
1. Suggestion
2. Suggestion
Additional suggestions mentioned in the response...

If the response explicitly rejects providing suggestions, please type "Rejected" on the first line of your response, followed by an explanation of why no suggestions or advice were given.

If the response does not include any suggestions or provides information other than suggestions, please generate your own suggestions based on the provided response and previous data. For example:
1. Suggestion
2. Suggestion
Additional suggestions based on the provided response and previous data...
"""

            # Use the prompt to extract the suggestions from the response
            suggestions = self.client.chat.completions.create(
                model="gpt-3.5-turbo",
                messages=[
                    {
                        "role": "user",
                        "content": prompt,
                    }
                ],
            )
            suggestions = suggestions.choices[0].message.content
            self.save_action(action1=prompt, action2=suggestions, category=0)

            # NOTE(review): substring test — see suggestion_splitter();
            # an empty response also counts as a rejection here.
            if suggestions[0:5].lower() in "reject":
                print(
                    f"""
Sub-suggestion {suggestions}\n \n
"""
                )
                return "Rejected"

            else:
                print(
                    f"""
General '{explanation}' steps:
{suggestions}\n \n
"""
                )

        else:
            # Extract the suggestions from the response (without previous data)
            prompt = f"""
Please provide the suggestions mentioned in the following response:

Response: {explanation}

List only the suggestions in a sequential manner, without any additional text. For example:
1. Suggestion
2. Suggestion
Additional suggestions mentioned in the response...

If the response explicitly rejects providing suggestions, please type "Rejected" on the first line of your response, followed by an explanation of why no suggestions or advice were given.

If the response does not include any suggestions or provides information other than suggestions, please generate your own suggestions based on the provided response. For example:
1. Suggestion
2. Suggestion
Additional suggestions based on the provided response...
"""

            # Use the prompt to extract the suggestions from the response
            suggestions = self.client.chat.completions.create(
                model="gpt-3.5-turbo",
                messages=[
                    {
                        "role": "user",
                        "content": prompt,
                    }
                ],
            )
            suggestions = suggestions.choices[0].message.content
            self.save_action(action1=prompt, action2=suggestions, category=0)

            # NOTE(review): substring test — see suggestion_splitter()
            if suggestions[0:5].lower() in "reject":
                print(
                    f"""
Sub-suggestion {suggestions}\n \n
"""
                )
                return "Rejected"

            else:
                print(
                    f"""
General '{explanation}' steps:
{suggestions}\n \n
"""
                )

        # Reached by the non-rejected path of BOTH branches above
        return suggestions

    def short_remember(self, need: str):
        """
        Recall recent history from the DAIA MemoryDB and distill the parts
        relevant to the current `need` prompt.

        The `need` prompt must contain a '>>previous data missing<<'
        marker where the GPT should splice in the recalled data; the
        return value is the text meant to replace that marker.

        (the main prompt needs major improvements)
        """

        memory = Memory()

        # Collect and format all previous actions of this goal (max 100)
        previous_important_data = ""
        for action in memory.get_ordered_actions_of_goal(self.goal_id, 100):
            previous_important_data = previous_important_data + "".join(
                f'[{getattr(action, "action_id")}. Action: (Title of action: "{getattr(action, "title")}", Important data of action: "{getattr(action, "important_data")}")]\n'
            )

        # If there is no history yet, short-circuit without an API call
        if len(previous_important_data) <= 0:
            return "Nothing has hppened yet."

        # The main prompt
        previous_data = self.client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {
                    "role": "user",
                    "content": f"""
Given the following context:
"{previous_important_data}"

And the input data:
"{need}"

Please input the necessary, relevant, and concise context from the given following context to complete the input data's ">>previous data missing<<" area.

Please avoid addressing the prompt directly. Only input the data that needs to be in the ">>previous data missing<<" area. Keep your response minimal and to the point.
""",
                }
            ],
        )

        return previous_data.choices[0].message.content

    def get_important_data(self, data: str, previous_data: str):
        """
        Extract the important part of `data`, judged against `previous_data`.

        Returns the GPT's extraction as a string.
        """

        important_data = self.client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {
                    "role": "user",
                    "content": f"""
Given the following context:
"{previous_data}"

And the current input data:
"{data}"

Please use the provided context to extract and present the most important data from the input.
""",
                }
            ],
        )

        return important_data.choices[0].message.content

    def generate_title(self, data: str, item_category: str):
        """
        Generate a title (<75 chars requested) for `data`, described to the
        GPT as an `item_category` (e.g. "goal").

        NOTE(review): the length limit is only requested in the prompt,
        not enforced on the returned string.
        """

        title = self.client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {
                    "role": "user",
                    "content": f"""
Provide a concise title (<75 chars) describing the {item_category}.

{item_category} = "{data}"
""",
                }
            ],
        )

        return title.choices[0].message.content

    def save_goal(self):
        """Save this instance's goal into the DAIA MemoryDB and return its new goal_id."""

        goal_summary = self.generate_title(self.goal, "goal")

        # Save the goal
        memory = Memory()
        new_goal = memory.create_goal_object(goal_summary)
        memory.save_objects_in_db([new_goal])

        return new_goal.goal_id

    def save_goal_in_goal(self):
        """
        Save the goal as the first action ("Final Goal") under its own
        goal_id in the DAIA MemoryDB, so it shows up in recalled history.
        """

        memory = Memory()

        # Save the goal
        goal_action = memory.create_action_object(
            goal_id=self.goal_id,
            title="Final Goal",
            category="Goal",
            full_data=self.goal,
            important_data=f"The Final Goal is: {self.goal}",
        )
        memory.save_objects_in_db([goal_action])

    def save_action(self, action1: str, action2: str, category: int):
        """
        Save an action pair (action1 -> action2) under its category.

        Category/action types:
        "question=>response" = int 0
        "response=>action" = int 1
        "action=>result" = int 2
        "result=>action" = int 3
        """

        categories = [
            "question=>response",
            "response=>action",
            "action=>result",
            "result=>action",
        ]

        # Translate the int parameter into the two halves of its label,
        # e.g. 0 -> ("question", "response")
        first = categories[category].split("=")[0]
        second = categories[category].split(">")[-1]

        memory = Memory()

        # Generate the full_data, title and important data for the action
        full_data = f'[1. {first}]: "{action1}",\n[2. {second}]: "{action2}"'
        title = self.generate_title(full_data, f'"{first} with its {second}"')
        previous_important_data = self.short_remember(
            f"""
Given the following context:
>>previous context missing<<

And the input data:
"{full_data}"

Please use the provided context to extract and present the most important data from the input.
""",
        )
        important_data = self.get_important_data(full_data, previous_important_data)

        # Save the action
        new_action = memory.create_action_object(
            self.goal_id, title, categories[category], full_data, important_data
        )
        memory.save_objects_in_db([new_action])

    def get_suggestions(self, suggestions: str):
        """
        Extract the numbered suggestions from a GPT suggestions response
        into a list of strings.

        Works by replacing every "N." marker (N = 40 down to 1) with a
        "%_%" sentinel and splitting on it.

        NOTE(review): any "digits-dot" sequence is replaced, so decimals
        inside a suggestion (e.g. "3.5") would also be split on.
        """

        suggestions_ = suggestions
        # Replace from 40 down to 1 so "12." is consumed before "1." / "2."
        for n in range(0, 40):
            number = 40 - n
            suggestions_ = suggestions_.replace(f"{number}.", "%_%")

        suggestions_ = suggestions_.replace("\n", "")
        real_suggestions = suggestions_.split("%_%")

        # Drop the (single) empty fragment left before the first marker
        if real_suggestions.count("") > 0:
            real_suggestions.remove("")

        return real_suggestions
diff --git a/DAIA_GPT4V/run.py b/DAIA_GPT4V/run.py
new file mode 100644
index 0000000..8c0ca8c
--- /dev/null
+++ b/DAIA_GPT4V/run.py
@@ -0,0 +1,170 @@
+from DAIA_GPT4V.Thinker.thinking import Think
+from openai import OpenAI
+
+
def run(api_key):
    """
    Interactive entry point for the DAIA.

    Prompts the user for a goal, persists it, has the GPT propose a
    question and then step-by-step suggestions for achieving it, lets the
    user veto/refine the suggestions (at most two disagreement rounds —
    see the `second_time` flag), and finally hands the accepted
    suggestions to Think.goal_completer().
    """
    goal = input("Please enter your goal for the DAIA here: ")

    # Re-prompt until the goal is non-empty
    while len(goal) <= 0:
        print("Goal is empty!")
        goal = input("Please enter your goal for the DAIA here: ")

    # Save the goal and get its id. goal_id=-1 is a throwaway placeholder:
    # the real id only exists after save_goal(), so a second Think
    # instance is built with it just below.
    think_ = Think(key=api_key, goal=goal, goal_id=-1)
    goal_id = think_.save_goal()

    # Rebuild Think with the real goal id and record the goal as its own
    # first action
    think = Think(key=api_key, goal=goal, goal_id=goal_id)
    think.save_goal_in_goal()

    # Loop until the user accepts a goal-completion question
    while True:
        prompt = f"""
You have a goal you want to achieve.
As your first step you want to know how to achieve your goal. So you must ask someone a question that will give you that information.

your goal = {goal}

What would that question be? (respond only with the question)
"""

        # Set the OpenAI client (note: recreated each iteration; also used
        # after the loop)
        client = OpenAI(
            api_key=api_key,
        )

        question = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "user", "content": prompt},
            ],
        )
        question = question.choices[0].message.content

        print(f"Goal question: {question}")
        q_agree = input("Do you agree with the question for goal completion? (Y/N)")

        # Anything other than "n"/"N" (including just ENTER) accepts
        if q_agree.upper() == "N":
            print("Retrying...")

        else:
            break

    # Have the GPT answer its own question about the goal
    goal_help = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "user", "content": question},
        ],
    )
    goal_help = goal_help.choices[0].message.content
    think.save_action(action1=question, action2=goal_help, category=0)

    # Turn the free-text answer into numbered suggestions
    suggestions = think.explanation_to_suggestions(goal_help, prev_data=False)
    print(
        f"""
General goal steps:
{suggestions}
"""
    )

    if suggestions == "Rejected":
        print("The GPT does not want to respond to the question of your goal")

    else:
        # `second_time` distinguishes the first disagreement round from the
        # (final) second one; `explanation` and
        # `corrected_suggestions_response` are deliberately carried over
        # from one loop iteration to the next.
        second_time = False
        while True:
            print(
                "(Press ENTER to skip) WARNING: if you skip, the suggestions will be accepted. This is the LAST MANUAL step, everything from here on is automated"
            )
            agree = input(
                "Do you agree with the current suggestions/processes for your goal? (Y/N)"
            )

            # Second (and last) disagreement round
            if second_time:
                if agree.upper() == "N":
                    # Record the PREVIOUS round's disagreement together with
                    # the suggestions it produced
                    think.save_action(
                        action1=f'"Sorry, but I dissagree with the current suggestions because: \n{explanation}\nCan you update the suggestions?"',
                        action2=f'"Yes, here are the new suggestions: \n{suggestions}"',
                        category=0,
                    )

                    # Ask the GPT to correct the suggestions to the user's
                    # new objection. NOTE(review): the first message here is
                    # the previous `corrected_suggestions_response`, not the
                    # current `suggestions` — possibly intentional (keeps the
                    # raw response as context), but inconsistent with the
                    # first-round branch below; confirm.
                    explanation = input("Why do you not agree? ")
                    prompt = f"""
Please try to correct your suggestions.

Here is why I don't agree with them:
{explanation}
"""
                    corrected_suggestions_response = client.chat.completions.create(
                        model="gpt-3.5-turbo",
                        messages=[
                            {"role": "user", "content": corrected_suggestions_response},
                            {"role": "user", "content": prompt},
                        ],
                    )
                    corrected_suggestions_response = (
                        corrected_suggestions_response.choices[0].message.content
                    )

                    suggestions = think.explanation_to_suggestions(
                        corrected_suggestions_response, prev_data=False
                    )
                    print(
                        f"""
General goal steps:
{suggestions}
"""
                    )

                else:
                    # User accepts: record the last disagreement round and
                    # proceed
                    think.save_action(
                        action1=f'"Sorry, but I dissagree with the current suggestions because: \n{explanation}\nCan you update the suggestions?"',
                        action2=f'"Yes, here are the new suggestions: \n{suggestions}"',
                        category=0,
                    )

                    print("/) Ok then, let's go!")
                    break

            else:
                # First round
                if agree.upper() == "N":
                    second_time = True

                    # Ask the GPT to correct the suggestions to the user's
                    # objection
                    explanation = input("Why do you not agree? ")
                    prompt = f"""
Please try to correct your suggestions.

Here is why I don't agree with them:
{explanation}
"""
                    corrected_suggestions_response = client.chat.completions.create(
                        model="gpt-3.5-turbo",
                        messages=[
                            {"role": "user", "content": suggestions},
                            {"role": "user", "content": prompt},
                        ],
                    )
                    corrected_suggestions_response = (
                        corrected_suggestions_response.choices[0].message.content
                    )

                    suggestions = think.explanation_to_suggestions(
                        corrected_suggestions_response, prev_data=False
                    )
                    print(
                        f"""
General goal steps:
{suggestions}
"""
                    )

                else:
                    print("/) Ok then, let's go!")
                    break

    # Hand the accepted suggestions over to the automated completion stage.
    # NOTE(review): also reached on the "Rejected" path above, passing the
    # literal string "Rejected" — confirm whether that is intended.
    think.goal_completer(suggestions)
diff --git a/Design/.$DAIA (GPT Vision).drawio.bkp b/Design/.$DAIA (GPT Vision).drawio.bkp
new file mode 100644
index 0000000..f84192b
--- /dev/null
+++ b/Design/.$DAIA (GPT Vision).drawio.bkp
@@ -0,0 +1,800 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/Design/DAIA (GPT Vision) progress.drawio b/Design/DAIA (GPT Vision) progress.drawio
new file mode 100644
index 0000000..5d29b2a
--- /dev/null
+++ b/Design/DAIA (GPT Vision) progress.drawio
@@ -0,0 +1,914 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/Design/DAIA (GPT Vision) progress.png b/Design/DAIA (GPT Vision) progress.png
new file mode 100644
index 0000000000000000000000000000000000000000..a8fdc0e11ec90d8bdb5c992d48a90fc76bee759b
GIT binary patch
literal 498336
zcmeEP30zEF8@FW1UiK}Nl%>qpG*Mc$phSginQE%hGMSlnM6V@lS+bRoefjJ=Axl}a
zlP!^@C_+eCzH{!q=gw_rC|bDRb++X0y_u}(B@!uFU22+nt*JCi88FX!?xdHr(!PGZ2
zVvNMk=X&!3h#T1p!ufuFTy2K6fj$k~N_RLvz*7(+0v{cm!C!P5ILx#FpTG}n7XC4m
z&FT)0m@rLu<2lB=6dZwX1lMN2*4zzW)AEeE&1fTu*zC7@c4bQ_@059Sg7%~m?87wCFMe>r?3t#0L|GO&=N@UMvm^dBJ=S
z@FWavW`Av(Irt6!VOo0fx!yvqKll>B1y73I!4b@%9(W*l2ROKiIs+bX^5FW(9v#m26#L-kau|mAmm4AeB-olxBc>}v!X)G|9}0sQ5$ACUzVs$<4IjEZv;*f5r2p~bLp>|S
zA#%wA#)U&u$YR3_;0d{6sOW$qrA-LpiNsLVfUoX?AfOv*VjuXUw}1=zPwc~m$GmxB
zsAaKY@rRgg*k^NaK3=@PRH0@CKiuZiVAfs1Aq%*qusR5wHcmJVe|8K>*-@
zQuHPcasBw-Pz8Dbt_8y}Eg*^cKzhxIAN={Ao{+iZ@{)wxaFYNZtBTnMk{Awy*oG;~
zHXPy*m21#xio-R8TBlHv=Lr-iA<6=w*hkQaZ1_=Z}v@w|;&l8Ko
z2rIx10?bp2EqI}P@d$7xu&Uq(_|28L4t}0h9lxtzM1mk8rmMgiaz%}WUn=Gbfv|wPeeXO%U%?s0
z&-ztuCFJ>W#r$CEU4S$yjOroggQ9xy38HY7gl81pB1nn0LHQ&bxkHLW6v98F5XA*o
z<+!^4dv!Nb!caK?K4lRTMX~s7rBK}34&Flrv0(g
zhWniO-_U_`6!1Z0D{(swjP#8R7)%Zg`-&`h0X0Vjyz<{!oE~KP81O
zE+AYJip1OiPw@13RKN`og-Bd~5Do~zPt+I!I~Tb8LN18c(N&@#Z*M%z06~PcNX!HA
zy`1QQxE&FBcmbpZob`8y6o^d-{$ir5e2;_v~{rz!<2@$7k%LLQ`
z)ChlsWimke4e^@NS|&t4iV9R>(}dmzIS@!ql%+aQY=g-8`z;flaD^yzu?D~(fSE|U
z0u2Skup~!-g@GbQ(Kmk)5+y@>*>FZKfGO={Dgf;XRY{?}6A$8(e6LdEhDV@NAqwPq
zfb^0F_{a+oc>92VF_h9BUIX$77)%WP8Q^+j=p}}HdJqt(E_T;2kdwe%7xYt@_6v^@C#_AoV8#spX&|~QuyZn{@^EBp{R(f
z&14!GRUasi^cT`+hJ;5!)n^Pjt~LV+g90furi`yGqpf%Wp5`#c1@H(DKdwjwSh^Hk
zDj^&MLj4877^QKb6)+(`ocdiL2J8em0skGO@jw<*@}1|&r+)Plc=%#@8hB?S_eMQi
z0=lwkECjN;f_p>Q6=rcO5q2%pmtqs~q|*3O2uC9Lm@S<&!tsw^2<##A1B%H37NmwW
zMBkA_D6hvR3o@5dmwXXWjQ|n@nhJ0w3`k4chahMmq}&9^NQ1cUAk)pXpb8Qc21l89qzwN*&>OP
z_wxzTYW2=JNIekhg8W{VcTR?(NDU}J$SI42%UB_gAYm|B%Jg%tx}?s5W=O4cfX*`=
zbEm&w?f`y~E?B@S9Pknd7L$p5Z&&E)gZmkx1JzRl0mj4JsWQEGIuX28+-s*W7(%Y7
zKsY*9ZqKJe)|!I=6haIP(Rak%m2aszQiht1-WUz}%g%k-hPwQ7h3+Mj^duDpnla?=
zl-C77cLwL<9z}T`ipXl(p!&QYUsL%xP{jTezv26cCe;SXC?e&t5U8q_Ahdy;R4Fnd
zQ~_4NOetbf3bqXOm{Zl($9*@HDYIY#Kr-di%BWtc;##?S)GeKfI0i8l`d(q>_q#O&
zq8UX)3=INchBYsg=MjXJ?@9sE7>+#HsZuqB5%{m#5MJ4s6IoA+jVJv@s*XVCA*%m=
z9}>zN^yMk!lOfDcx{VUl74sV5S6ATWL0IZXRj&B3RP?F5+Ca&HReIF}t8^ODH-!G7
zqwk0lf7$u^?fQp6BPk4Q;1xdxUzYAYQ
zBKa4C9Sj04uU^Lm1-w{dh$!oq6!}X6QyBI_If5GPyIQMkXNSPFl*Wf4>u3HvA4V3=
zcjjVCEaA!{JaaAb0juE{Gy%-!<4Oc&&}q8lC%_coXXx9|xL$Cr2KX1{;eCLr1*jXI
z1Sgy$Pmo`i(mEB1ld9JXlJRMgS5EbUP<3;G*VB>B01OVqN@|okm?F#034$>ZPDoKBh=x=xqJXg!
zfye(IuSMdixI#=FMqA&-=PTf~5XzxUErJxk(zrK2)FMpuI?As_2m(^#rSJGUO6!=A
z&Cz7t0yWB_Gdd3mvjWgrwsIVwLW(V^NGca$a%9aT
zv=KCY;IaQ0ltUpg%=yx=94#3abJ!>;EI%@amrCb=zsnY7M$1SL%CtLlz|N4PD{glv
zT!d1URAqsVRg;rTL*o%j%|NIKS*mqh#oxP+P?$IF2H!koladHp_-2
zvwGaVa*H22vEt%aZn=WIRCco=a89K*tN|q~n4Y0nW*Lo5Nt_U^Vm!GbAF>>WQltxy
zJ95PUGJ=g{fH43atC$ve1DfGPGat}EOR9N^q@)I^aa^r4CG|BD-3(0t30q=UN^57t
zQlK6OFODa5JtpbKdwQS-{RioX%Jj3P
zE4xY7phyi0v8Wyr5>LcI3gBiFK!5d`>I!(h1TI~T*7U~}X1L#myf>L5Il2Mj144_i
zrBOQN)grPL^ra$kG6#f5-V${}jmo{S)xhMbm0n6+z45n9LJ$c&MK8$3Srjn-mc`cX
z@eCkdOaPXp!pam9cagJ@mKMSXy*3dr<1GNr99-^-h%IGLDw5{=y;LSeS_!FKj)M6r
z_t}9HjaR|rPCD|jtI@w^R$9Rng$0niNOb_vv2urU6*N8w68WC-sRFmz0O>O1s!JDh
zDX+nkO%zCsnq14J7(ViMy{*c0i49OhhFA@Kr$_|i45~yR3#m9RF>=r-(b11^jB+gT
zCs7G0u(>R(`MZQG69-@s2%Pe`$x;D6NSjp0T3M#_uqft7?5MQLiY1#rL5>#<^g#FU
zL7h^7M<&O2V{`ojfdLM17r}pI`TsYqv`4b}7gvHJ(Wa;;djXyRgt!FCH
z(XQTlItGnECy-4+?i0nPAn$@|QxKORA^!a^3*2V{l7T`r8JVg`Hj{Lr4Gh?X-|PzW
znLq7AOH37L!)*jFCs!-zvp5iN#}51qi2hwGR%VJ5v3i;24z(GpxVfWn7GgFkXQ5-&
z*s7;WwrJ
zEvF?9U3vg=Ib>B0K%&b(HvpDpP@aXgEZL-ljEbZ{5#?~ZzpHpEt|qEW{#g<)7_l=_
z3COrrg$j@zw(=TJA<3;;Ex`D}EBx7FB)f7W#0SG&x!A5ml0+X|e1Rjl5X<%Bft_W*
z>Mtp05sJ$1XsLy?fkHU?PGAF6tt~Mil|~t2gGL8!;($m$Pnglv#18hgD0fZ5W$p4JgdN*
z%8dEvfK0OKBNAlt6!2tPIAXvPFgtkhwH(+{$l=6|{C#POmn3r{Sds
zfzr*HrP!<@)~R|^&n)EqBlU$;Nm5a5gcl
z46O>F>EjLF5cowVZY0;0svgDds+?9GWn*0BT|S1|%2YgfrSJ-(!EeO4SW-D^4*HHb
zwF)dUxh@8Y5z0)WF%=x+l74=2&dRnFRCEi4;<6x`G*QTIct1CQl0i^l2Wx4RK7bcO
zF*{OrLow3(loD7V&L~+YM;BFQx}PZ>CLxrVqFj(rQI!ii4GEe8m?1z^NY|<|L!?PT
zxf54tY|6jT44I;Gl|}u!9YTE#%g(hB^Yw?>Ar7hmBTNE`>LX6Ad^5!15Mc%y66#~f
zifBm1wn7}K%4P#3+kdu=keA72gMDMb76ZV1u)X0Y@Go}63oM}k^z1_zOr7>l&b!G_&nGaf$n#SW|l
z@dI0T^8&y=%78z?2k>6_JAVHeDNh8snzF>;*A1r#YD(EN5E5;wj>rGqz8)9uOb-
z8;XPS2%(oBwjqlT*mxYE;nM5IzN^>RGh*=F;f35lAGlvVU|VM{276c%ITEIwge1!!8pkP-`d?QY1T1gcypBBmjmeZwV%qWpwD
z0)Ka~Q3{1WQqqhrWdQ2?bRo1{|kxknAi2zx;8
z7o>bsUye+ZVzE8KP0`dNUsn01UN!}z;9M^?^(^I`df60-VoyDthOkwF?d2u6kz!YD
zWsk6>QXZ(3JEc$>ht8Iodp6oKP3cq)TNWl&aK2|Frbc|Pv^s|kcpS1hh3^N;rbnd&
z;n&0M@^6(G;=>p7oIrFAcPb45@zd{#vB^MzpftH6n<8b%<)vyk6oV*^JIk&tpb2KE
zJs~tjVowCngl$9o!w6O`&{Qgd#V`L6&h_2lNvLTUUxY{1Kfs*IddyM}RcY+6%xDwK
zpmZS{-B3!e3697Fj^R60Y})l_=v4B9`u?35(|PgFc*S?+Zo
zP>id;Xb;T@#XGbiHg*A;2yWdV+1rK2br%G|>=a&uCs`MO9pb`YK=mG6BoHPD0)Kn>
z2}Icb4On3x*TaX;3x=5(28|ESmDa_Apa4&@t)D#Kkb-l1@%{WP$rKrr&9-1v+s;)+
zsQwqJX;cwKQ59rYU8Iq%J%4(
z;;_A;EcR>{5C}9I6Qt24O$;r;7iw?tN>y0Z+2dQfWd))Lg>a+;Po*3}%oK-m99gG+
zC{G*&8opvx=Fs$j)dYcp01+rUP+EFPH^!mMvoMH=RDz-N6o_E*A^|)ymrce02oo>_
zMHn97W-^cjR{KJ;=lKhSVNhVplV+C!DWn+X*Rd}p4*od^&LS);oz5mK6je(gl}}|l
zQgtX0*Zr0l2-i4rs%Xos=;siAST#l)zqUXX?HXZ#wkbp0!a$WthzWkV5b<{}TtyX@
z)f+$mMZCj8y><}qFlhz|XH6Xj=?5pu53gfe@67A;C$t2zhwk`8tB$g^m+eKu@Ee{2M787
zfdZkJi!Ej$^gGep9q$GpXElCc6|T9jUY`kY06sMmu9oLBf#N#+G^*Si62}xsfyK#cRt?%VH10ue1J{*5`_ObE*p@v3ojAXOKxVJ{pKNX2v
z$u1B2hWJjAsQgK?PcS?p31G2raB7h%!neR-8B+UqgMB9rSVSl+xs)3F%G+lWE5(xD
z$`XIuU&&{n$e_$JDie`urU|AyH5UASk8u^)H`-f|pmG
zNr63T?Hz|Kw2J|beUllT&2hc^b)Q3b1DU?K-unR2GUC>DDY{e
zjlq;gl{ln!|G6=gtjU^DIkI*Y@x%z#_%pk}Q)<@%Ritzy=nGY?o1PdE0NvlK8)c-=
zfRv1?)Q!>t$zsS1tLVB>8mVR&lCSV{I_3MikzBS+hJ})XkDnVu$@CVv3V?zjAUO1g
zwT?_4g9~I@y4KN&ODvzjZ~_TT3nsC2&o3#!@^4C>p~w6ZrE$kOELSqOMV<2$q3r)s&EXfD(|>0}y3#s^WEk
zl%NqgK`_+BAS(>OBI_}*Z%|C7;4CHwUWh_eeH#7e;jCXruIRM#`ciaM7gurYs@$Ly
zw6WE{M39^liXyU;P45f5AQF!47{v7x0%ZdQi>Ty6C0@FGxKcv^)(;B7zaavlrwB_C
zmMRwrEL{iC9h|8f1Uf_F1!BOTW6=DAJm5C*1dNUy!q+5%FhGcPnj|(I@ZjS6$NTgA
z-FZTAD=Cps9&nu|?c*BA(M@!8jB46QHw3xDzrPNXP9yTt6f9kVwNpj(2@$iG8Jp2*
zf6J0%Qgc%D30@QgwFVS#T#Hcj0GT2qgT{60UZax^>}$YxEFLQFuz0gRLsYbc1MEX
zid5?Wo}9XNW%5CQ0}3@S;w(c0!d<55E;kU~&-#t0cPMIvVvX9-Yb3o0z=T
zL6sd8DM&QvFB@P+wJdOuDeE$X<3gQRHX4CHA$m@5zVa+@prh`q6z_s;HW64Fqrj(w
z;sz^^O_%W&4N<8DMRw(x00Y1QkO>eMD}Y5uP6mZy70S2xfF&$?VxACe+dvcYa106C
zN`$PC2ORw`^#beWK^IjSfC|-lQW!}tKqajY6@ilo=B^}88YBqt{>Mm|#lTD!G6?dL
zFmwiTE>V<0_QO#|Mj`?vifyO{pcB{3zf!tO2k90}tyg1Oqk>KyWF}~cNOnQJJdtJk
zW&i58100AL6g6ay;Se(Z2UvsP!4vV_v2}RvF!jO(|BHBBA(koO1%UmKE4L?BH7}{;
zNEN@yOMjqqs38V&T@W)Uf^qpR3z;gS6O?ULlJsu3bh4i>Vgn`C2?Q}HIswm~;KB|T
za58B4Uh6mp8Gr$w5W9ldBat(O`jTRjlqXza$!L3oAyAwH1+cdR68aC!AqBm$%E4?;NT#mQZ_VBuYOku
zKHP7p`a*L;@iYFm^q2wiEs&s-2PlG$8YNKW962D!t|&mz0oE8}Q8I;`e;}GIWANFC
z6cmb~kdR6-L}^8$7=b}nuVSu<7A3GQWpNLUq`gBXO8FozP~Iq!LC}IC?haV-$sbi>hAP*@*+<=I$2ufSFeUl3;B%(QeFExcaG6lGc0)Z2S(qEEJPFS}m
z5f9#k4=c6_kf1a?K;y%Bfg8-_b8$owt7#I!LBVm52*aAKD=Ub=F_PgCCb9_>9x)=f
zKcgB7MXaVq-MA^jZpDLnOAPZRNGvIhm%@T*K_Yxz5Ed{hD^X>XIatYv+cFACkuyLg
zD5AU~iM=euhrbHN6#(RfO>&7Y4gsE^umS*mOe+sRu1Li90J2ZnFG+&$m0oO9EpUO78>)HJr)@50p|EYv()m$
zq3A9kVx{DEf1KbWvTT+-kl8BhO#e{qG+?_R>jR+(7&y#6Fnl;?P`HD$kwk#*BFqH)6XDGl7dYd6Dt8FRw>f-jab+a;Jf60@CzgjQn}LQpZE*@Qj(rQ_=RQT
zRY(#lu6(OUt8$PRj+mN-@D;?ZRiIVN9P^j8>Tj+*sinE|V)LDCP^FK8pYMliHA2GbLFk1vTN18-U4DjF>500`bZF
z4iS~Ch{y<)AR?_pZUSG0S_heCa!m9PXiVkRI!sxxnbbO9(c39(z?;WDtgIMDw;kWcl4{vF9%TpCN+PaUv32X8Y%>mb&>f(
z)~X8pax?@QlUyo~##CYpNq9&U{fe-9qBWSr*F$~4;17Dh6$gGRSb##~1Ol}J5&j85
z06{DkepKw8>T#ueX%p{(^PmG1Tn1!CZ83j%<*2pX8ehlET-4SMB!kVv~IgbglF
zh#}GYjsmc!26#zA>FW^z4Ps(^1`r1eyu3s_a5kKkvdcWsAio)2Lwye^%uB*+c<&B8
zs}v}`N)8p0eLsEmokjIjiH!0vlxxwH6{)CdZJt(A}&Az~KU3Mm89
z8K8))+$9bKPM|;{i}sdMn&}6<6r|7=GW|uFeP{roO`$SBI;Kdo51tV|rZUaGOno2&
zA-9zIR~GJ*#05d6J2$4yOzs*j
zLtu*d$}6~+-J3}P!g`i0Fh`IL?E+3CP%ms@JLWX;#Ud(Mv154R;}0M&4H;2HUeL!9
z^eM$$c?yBa$^@!{HdMTlI+iSKCnI~6Vr$F4q=J&kVBvfA!lDLOB9nm^GGJv6Fet~0
zh)Wkez;ETQDK4`j;ZNO)mnFT!8B?;9%SWP4>KcnT1x7@%wq*Dr`zz(Q0I;2W@cVAXb*n@-6t{Y8mG77Z?P!6pFdA99-u
zO5kEhC~|7tf*=b;A~$e5pyxr+DUL$EKNoIWDdmw=2s-F*&h8ImjPHtIWvshG*+t|o
z5cFY?u_l#1MB0K%AEaOvi9WK~mHv=cTrsAUb$R#~tScw5etrUhF9Z#6JO`=IudL#5mk?1X(QBfv(
zdkeuPzyNrl*27jOFMzIyrFFA%GXwE16y>Z;%upS=9^iw2lJv`h`~L`DdJk<-5&b6-
zTsAEr<9GcoB#2ncjX+}wLM$II{;sr_4FsXBe#!=d5K{4n0YOGaM6`fBR?=NTu1V$N
zKt@t95Q8r54kph7w9ts-pqDgb`5W@x4YX>@<|a6(AzzKTNJU
zs2>%{pvd1v5?1-H0G$C=I#Ll$xZNIp01c|(VomP*N6s3`to_+yjownh#2PiSL$#oo
zGk|M`@3zXQp7?>_^FK^!NH?ct(2y2E@>>N;gNc}rlsV}^68B1Q6t$+XOdw+xze{;!TDrD^K|@lG
zG>L)>H4PPqBB7=sSmO^DYS~0CD$fW6xjsp*9<31e1I^p@P&N$q`++DN-a-eY4@-3d
zqBFnK3J8r*-trT672ju=KnyEbh@>d&$U^RUi1LMn}GLA~+1B30+(j1rn&^tsl<|Z@L8skyBn-iS-h623ovE>P!^=Q8MVH
zWk5Ou>urlbSY#a(pn{1__yd%fC{QQ}6kr>haKUWiq6|J@D=LTr2pJCux)W90p_uT)
zs-^l&+Set`qCCo#e}NX$Qf6tE9^hEWv;;HhNiEh;%8q1g*|H27hN+x99i2n*nX#5r
ztRFhg8N2*OFcZR{Izw_$)@?zJ6dyWnhst2T_J1sIe`<7yZQa203`>BWwq!?@I-Rj4)K`<2h5{OwyR
zv8$`V>(_)7Z}wAR6h5%n5EpP{@v1YgqfEkA#dT8EoE!aP-QMpa
z)>SA&(!Ms`NXgt@>52{dUjnzF5>E=;0v#YMNk#LJ*Cy-FLR*+@0)r=%Kb_p?7fH)M
zwB-kVN>D`sg?%iAJT8`M#5vacsZ0Sr2Ygb0t}i^LR7nEX)gb`vw}w4GAMEHrBfsI@bOLyxu*Qgb
zyAZGg6*z=Fu4Fd2@HBij%JsnJUSh0tN9+Ty#HJ5>1p!ggLyib5qqsus(LpzuU|QgWk(WzkJl%%LDoTsG7`-jC2
zlD4b}I+95-{fi!c6g?2Y+uy}D473)6WEzCnARnmmm`19Ps2-j0{?SNES#f44o8tcq
zw2f?H_D6=`UtNe)U5KH){4QQF?E@enRJUBJXQbTi#0+^{HV;5#Xt4mgw*N3Et2}%Yz+(hT{5j896)??76gI=dH?{C?~Vz8qvho%2mvSg0JjY;DX^%17mXv;Do?(L
zhIBdALn8|wk(QC2-2$prS$*`03hm23q{9inKS~UqPUGngs5I4T5g0G
zToGJ;Sy1+`rloXOL_{(rq^vA%`d3qyPI{%tGe(wQ327?}ZvNG@rJJ85T|>`r8a0A8a!~
z=r(?Ef7CzcpH9Y6P&%Dsh%78AxX5rR9dZAOEof2%q!Oa(0OF<_FbUt7BrOtTr6a8@
z%7_XSepLHFqj`b1|_-PJn*X!jSFDi(j^;EA5wbE{CWw5RM$EM2Ru1aUL8R0i5xkF$;hPx
z3WI>kwGOiQr|wW*pa5%7NwYGyGnd`jui|7@8f2TrB#BNhjW
zF#fUz3dNHz@(>6;^}W5pmk~lQj{Eo#_AYF|b!63573&8dIt;*U+af#nkm1-?AWFAOU~Hc;PaYgw7{DmmR#%Tsd!
z>nv<|0{_yx&vB5hK<v;Yns?rlxp35oaj$41sxu^lGaB89(~ur#)G
zwuEd2sC2d!Eo-2p!v12y%A~=VXnvHBu(G8Ie=lKK(yAD(h*CbX{sWmLnN69F2+Ha<
z(f{EQ6spps%`07*Jtvfq_-A23ErN8zaym=W3rG2M@jqZfBDDr^zegCrlhKaSLKF0V
z*o1_xGW`%393mD}K6(9zPDtp=lxmqz2P+m)4WRN-_8&YUp)6f0O-E;==tt@3imQj)&SMqDIZ<`ffEvWrOBk~l-etXP9XFH|H&+nS(=fQS1i9Q7N0T2&Hy0)
z{vTZ#434bGk`0fPPZ?KdxMOY2p#eMg6UB(u))q{#s;rud5z*2J!p17GiLK6HN5QEI
zx_OnZSvNqgl=2B{bw)Z0PF7hh%tE!qN=H|ucdEs@DmY!~3?>mDAT2CsEg}jH729b-
zFY=G942BV+B0*KMuafc!YjuV=iY~0=HuD5o4H21@kF5V{{UxXbL#7!c3*m~&N7#Sq
zbqQfvhGZ2YA+bz00a7R*VgH4$62g+rQVFus4V2a!X8kj;6uKh4i|7g#XX1dN(#b2U
zIxVciJ5DHsL3Aa1C@3FYtJARv8cI}cGXgqG(rc2SGPx53MJpo%Oa-41f~PqB2+&4ER3b~0
zw$j$FE4Wssv!v>sm?$$#AuBS-RI;LD3Pe_UDF8OtKM>@|0^CLLAFT5i%?|Fy1olt$
z7J{8w|8Ai+LZxj;sO5lE9Jr?lCLdJ#E1p=RwJW8>y43!264i}bp25~HdLn)}XeR?V
zd=75?S&G`wjHo5pn~4>;;}Yzf$L5zk2ey-a@WF@dI%IXe0y}2HtLaB)hOijm|af
ztjwK5$L6h64RyJ>FYVj%=PK%qDbvD+E@-=a$jG9UD9(d>P
zi;P%x^xfr=5&6*x`iCxM^{>^{tkfT_ZRe;cokreqmYOxyJ5&D9>N^$$c8RT_O8qlU
zuV#b9ohwH+nX6(uh59FIrf;e5Dz=^dR2>VZ<=+sFlNzyw@#xx8hqwc$MCtf#TvA($
z@&MprR%bnGM|~#nx&EkHEsVK+_jPDf>cL`b)M}DHOtn%izH?3Rp;nl`0)$2yjc7j(jZoWF}&BbUv;_W&=sXwu3
z$8=kJ`;@C5X$w{?v`A?#WZKTOo1(H!(=5fKrQO{w#3Q#?A2%SrvCedTFx1|jV48x@
zTuLWyvw9Mki@m)CAFOM29cK!j;zNA%a4UiiOVn0p?P{dm7+g0^7hKnDCFig`@mppy
z@HjKn?*ST2V}joT6UWS-ht2!yQ{YK+3kFW9VN3al(ct9rDXs08wtQ)ONnEL9A(}Kj7F&Ra=
zH*T_$4z8|i^Kl9H#&aqj&FHX6!rCM91OA+QS>V
zk2}J;!d6j#taWvtZ=<@6=bl;iZqb{}&(lNm7Np(Xz#OSvQ#}yOjs6fv2v0b9Q=C
z)SrIPa8}Xs50_NL1N6?=8-7-)Q)hZgP;~Mlvo?Sps@dnBUcqU8cgd9Km4)4T%fdWN
zpA;61J^G~6_R~qXKduQ4+v1kwopN$IM`+yf!jh%?@@w#uFR2b%xp4OVfHMat-rB}~
zJ*Q)=nT@tt%Jm`MiP_yS2CE-EW!2iF&<+x{ayFSTxOD$xV3{o{wWkQi*uj$YP6B)CTW@nEbt)IQ)d_j+S
zm*Q*|X6}F1yFcep=Tkj5t~X05>D24QeQyWGyh}aZ*Ic!|cJEbk-}ZZYCnrpAWZ`i>
zhZhalRdtu@+9S9erRa3NnQ{11_cK8;jhbXEH#I(5JMH<#Fqh=~{e1(@-1)C}+QOI%
zwO=i*o7aC_@tN*LY-X
zq`&nUfBRVe!IEQ^8uxaY`>2OL+nm_u#%tq&D_@7-Qkkm}xoTkirAzjkw)V6AQOUETMQJKr8K{G;clyRr-BUkb~!3IkLjw7o2IkatEYHk}@VdDFymQzlj?mF(`I-3QF6ZY5KU07Ii+o!ymH*T+V<*XfetJ&P;MZQ-3
z&$J8P?-#0S^S0lFL1qCv=Ft3Z_(w-a`x$gMaM`%Ccu$F=>x7fJoP^iT9^__5&Vx5wsBB6iK3sitvHUv*#yU*8mwSDz6okF)!(@%^aU
zhE+6kzOI9ZW#e1hC$>4WF}Pj!i@xL7=^evYz7ctNbnNH$-Z!B~MsVt~_hR;*=vi~;
zo;*wcxZ3yQo773dNTJa1v`+AZ9`nW7EsU4++R?;+TGU3%!KU>FW2!r=>C|y9{Iywc
zy!Tr?PyEo@*lo|b^tSV^Hh*Q~={fh!nnbT9cQ-dp9Xz?it_hn{LrOmOI-UMM!Twsl
z4&HskimA=w&JtRi2GztAY2w{g9X#mhNSX$zm;Tr*xArofadTDXoE0n7v&ZfkH*|VaKrfGz(?tswUmLn6@b&gjMdM5EwJ`2-
zBr5+Ur%ueI=Q9$aU8|v*vCK>yZqn!S`Lu-idY^(cViJxszug?8>$>c}8IPB+GzYh?
z6Qg6X=F6m=buPOeT0|G-J*a!SxVXo*QJ*>wm>t~y8++rv+{az}u+Q0sy}WVg_|;pZ
zgY1vr9z7#G=j^B}doG_zos_O=^VUeqcun&5{4A3?F&(|8_QTzyXK53ZpYz3yb@;=smdqrOd87(i~E<4YtJkwed??+pyuDEV>PU_MBrUpZDx!F=>*)
zwId00CM?o0cNQPLz9+meCpNK%&Z2c2Paf*CXl9g7*QI;@m#`sg*G=)2wwQx9)lsW+
zk`8Yuy<7U(_dSyBuVoC=>lgJlF1O#Tj*qS_iF%%X??}?oB-MAB#{5q$29;R1OPyz*
z@;GL|PX4>wCH;E6wq5v*7WIqVxmacdqn3j5}9Zv!^%<2DTe9
zZ`#-(pZS|@ny+Zq{oB?UbCL<+xOdi`KkAtjfW4b-MOe=sZ++a>xdihDQyJelsF^(!GyKn9g9G&oRR`Z~oy9WDje)(!OYvRhBaSb#!
zFYeazy72Hy!9jlqlSP*=#@+AwKmXUmzn$K+>tmZ-zi00vo@kg9XE$!@U2n6u(
z{<@(SK8>R@+8R!7^dbMj;FRVT@2rPIAzf`!fA7XDwFzC4#=ToIVCE)A3r&4}_1C?7
zSS+3EGib%K8F5=j4oxlGYoHx!wX-m{<8zk*@kU#cPTP(hJFiRB>DJy~Z698{T)?$U
zjnqtcXlbx_0KfStp~<($s+Pu^gEb#N8@*(zeSFW@C9USAuUy->=IyJR#<^|1J%G;d
zFf(RF!={_w|8L_yyY9Ky7JfN8!?uis}}zxcF!-lr#a1A6PglZ^D1HLXMa_OknZ
zzHPB?!-lM!UD>wwc5AYBUA=#$B@}|4wWoe)VQiT`kYP11@6e$~#eVC$+Vs)XsWbik
z{N#Rzbyb}Uay0Fd`n+I7>VIqX?bbHEQ)kuAIeyL2?BCk>?Yq3?Z8P>KUyFXR$u|9!
z$<5T`+FQB~+4St0S6aWd^EE@ej$d@$zgK$Ow;o-)dbK-ysZPVfeF6vH>&F+hIAG%T
z^}(AV!_svVOdpwNC#+m@%6gUA*EjDSI%%7zd-ekoo7Uv`B#V3X8fdgn@8opXkJDW|
z3u;C`v(-RjtXsQT%-EAP#KJswMEB5jtF~QiV{^R0p--Z?eh0qJX<%8P8)>YnVYl^q
z@3{FJV=PHNUE2f8oE|#=$86eU-ia$i4y^WmEof_>
zkbmjwy!emBIge*WrNuAzXdeQ6ZNG#4z+aLlIH4b9)8E;7oz+)8zIXUEo7UYkH|+h?
z{?Qxjf`eN#x_$YO-&}BU=eqspCa;{Y8)}u;;r_=i!2uBsFWEQNA3iZ=?8DPLuO8S}
z{M9*yJ@MEJH5U6;yH)W`Tbd9SYoq3XyYX8O<>pRkVazUi+WfJ;V?jn>t?tr~;J7K-
zs8~beUBC9pD-C&@wS{Z*X5~1R1nhskNqA&t+M=wh{^v9_T-um_sGV=>_+M`8kko}S
z1LvC9Y0c2__!`mQIwi{N{K%3$qwgN}naW&c_2kB_#j6XRc;(%Dr*VE@(+IJN(V_OK
zgO|@KDiYh=>lp8rHg(dRj^AG0npDeresVoEwFR;L*xMu8rNwLy_8qlDedDTgmMPg2
z24tM5T@#Q^Uk&`_&|nM}5Vvo(b57RPtx225dq>$M^-+JnU`v2a%*`&zqP<<_S{a|!
zXm`lig)bU5?~+p-8=T^@c;KM9elA=G7k=}2;NG{Z)3&gMiKsa7uv?D{`)(TbAJ5-6
zJ!@CW?)443HWLHK+^fKQtyTr6eBQgAO+lf01EZc_|Xi7aO{qg>I8nqoyRVgoin0zJk6$WS_^Dzp9$i7
z9{>YIvHH8ZExc)l(OMrRA?m5HGHs6_?HEXV~#UKYgO{aFv)UMBc
zR$QMIXS24SX%qW>`C2Wy4Qey<=GNOI*>?tidFHyV(V-Rtn=*H2_q@S3*)rrIC22F3T&a$erV&LNm;bYWa%syMZJyZJ<^)NeYZic=xN7~Bd(n()_eKp
z?fhQW!Jk)mUywMeUhAyS+~*||32gZwwof>Fdes`f&
z*Q};BM>R++oa3MsW~P@qX=BfHjg?b@tIJHib^rd{9mjM}-fcQ~wfo=>vrTiKTrPT$
zspW1{eCTECkB{#>ylC)wr$u3#Z%uvsuXy@3xW#{$2kD<#J^6Xi;zz;+NY$*)a9Fxy
zL8#a1R$Z@Lt)DTlz{)@?!a=QgzTN4)Tf_Ub?DI@j-F$ap@0mNlz3Y*Xu&O32vHSeb
z%bnFY!dV$ZmbxVAG)oaZKPx=7^10y#&gT`k_SDTje3Sd;Yp7q3==a_S^Mhkr9vfP)
z_nvr{7GQyRwfY;fhIkj`)C)&1HZtn1I%J5M
zi_4QVX48sBojaJd
zd-STBksmVE@`^(0G|2QdI%gDnVR^=`p@ohGD&5lax`pnJ6gNKnVB(yY+PCk|P2JMt
z#>o_pe@p!2c8ootSzZW-{zB+&HyupPl+g3E5EvkP;t
z&wK6LwA0DV_{p=2PcpaO{p4#J`+WAtg*8-F96B$HdOqXjo&^g#%zEx-T+^;*{NxGV
zy*?d0-R8#Jfqgvk4+|}I?}tSUUp$<(X5G4!W+ns7qC=On=#6_0k9&L~Wu&F2^Kip<
zAD(C&dg86t>qWm$gQhuWt{EA9aAB~j>W~?sEBa_(+H5+h%dK>keokLq0dy48{d)HnIhJy8-dO4DL^qfM!?7sIXkv(|3uqW(H>z=>nmug+g{_TyU7$W;9g>h4)S
zYa?}TKD&{uwfhC_NMz&3Nh?NuQk$=)5ff$O*5cA7-7$OGg?)S&v*AULr5Dz}+bdX;
z_FvO34E^mc@lLOkXKIO7iNjVMh)Q!S$=oK`55&o*Z>?Z8oH@>gURW#<1EyC6IA>F%V@<3DGg
z8N>M$Y~Z~En}jX%St@7upKNpVh)zw-*^2|)Cc4Btat$gfwC%7&WBiyJ7OJfe2}I_N
zws&y$&uCdP=~}x+HTNCreCy4aAd{Uwkx^H-ukNA#ZojsRJ14s9m%b~PTLmtjq8?Fvyua@9<03B`tLt-*
z8H|W9{#<8pcJ9!;FP|3GA%t?w<9kAOGdNEY;J;4qR@255l&Mr-BH`phxl+
z*O4Pxb(`1*S_Rx!)reEyczwd~F82A4CTNEqePgr!G5zFqw(r7WcAZ<))*so4Jy2
zdcpkLnw7P^_J{bYF8QkeaAUIZWxJ_S--eA7b@N;7ls+xiG-;zA@c%YFpsiE?(#N1p
zz#Q8Jx^oi4H>6&_qN25BQLEec2OP{<)Or7I_4twNcXnUE+-Kvz_4P%~0qav{ncQ6Z
zxkI{|#-6OJu3lrpv=2WB@2iRO!({l0am7f|x4?U2N|u6q_l+z
zjBdNVZf5rMQP$Oj#t#R~;Sn`i^aGhtTP}-Y)F(nfGe=*~qQD+UqtBoVBv#by&gN
zP4l-#TuRD0x3QCG@s1BSR!!^_ouyWDq5Ab3TL#46{cmlO;rV)MCJRUIKXUMCol!BV
zIg?kc<-9ufuG`5ypFCMJybU?mp7#+>v+-^lw`BI$UQwKfct>{Cvi)RPUQ5AGxmw3(OA02beFtpznKV$ENxvk&_#4`Iv9waR{4|*O!XD
z+5;QCU#r=m6fI4gp(mYREFZvMbag`SY-IG3W^Hl4C>$Cym}_POL+hurar#Y3(p@>@sNZu;%TkclM==PX#fEIK~m
z^P!ESYO5P-atgn&2i!QD*|xjO{~Bt3O24V0!CxAG&1c=g+>*(KEKb{K(}>U|oo4y&
zbF(de5qEc-)uJaaHi0Pqh`#NL@#6z?Za2*?+F1Z?`pWk%eFKdq$u0*~}hMbU89B
zM|3&qdY#s*pRbM4VsxqL6Tse*yt3%Q)_x5W&GI8g-}eJP#FY&yQQbrDipQ}6nvIKN
z>#L~yw6E21RKd}&qnwBBtDk725m|qr>4z}|6W@(`cw$GV&pWd02AsAx-21;Q`qzaU
zT4ufEk?r^AZac5TDPn@{8R
z-`85O;o9BkCVHX5?cY=nX9ljN8`|&N$N8MGR@b#ykam6Lds9=h1FIg+j^3oJwbDLr
z4clg>r`zRYtG=|*Tv2$zX_}5>iT^v_=wZOyI|?xLY`x7_{A^O1uXty@BY*ITR#7_3
zy8CY#VZOS%|J&go-Nw&cd^oHq%dseA%{goZdahR>oj5
zW_hprZ*uQ1t%j_;)@W~!mG%=HH2T$=lsKu$kjW#vsE>O7a-)d4t&)#taQvM0Bhxdlc66VX_Bxjy(8wia(d2X5XOllpPJ6Vr*vR;t
zmQB*-J(sn5+&NHNbjkbeL3QI{cbfV=3pG@YHCqj4sg}0S>k{xDCZ>h@V$}@mg8CMB
zE-*0A`lZ1}(~`VKj?Zl8eHx@OJSwe6$<#|7UW24f
z{ly@{Sgn#j$+S*P^7EOQ)AfOOs-d;dpLD;&gY;wa*0>fgwaoYy*3u#Ua*cM$_r7KE
z&nL{%=y|#cKr7!S9zU=r>rI}?T+P;J1}@1e@Q7J`XtU}NKEF*tar@}k*V7I@xRcr8
zLBQ5z&ZSG7H+Nshe*4gAUbl3W!A@gV@;)TD>(m|^HW-JoEqxoFYGJ&8S=~NrGgR`l
zL%%lUd`Y@)tKm3j!o%Fzdu+TpE&FMTj}<4?_cb-1I@@G|?$>pTo?c%&N5kb<@zRj{
z%>zy67O2diEv^$&KVi4a*4U3(JFYm7;Z5-g+N3)!dX44r$!E2@iuM_N8KmJIrIVW=
zI@WJ5%S>iIj^B*Z>DQ(9q7D7#Ogp`8SnY4_mz+2g{Pw+Zy}hT~>enY+izYs`+_Ymy
zaDz>vRI^4M!+PHO-^BW=v&M8Vz3trkkjjhmug>n$dp)H6ZIcHt#=bu^am3i*HW9}@
zA91=zr>nMM<_^hwn0$pk!_3>|&bIA+f@ZiK_Frk~VHtNS#;H#H+*Q|^>8Z0ke4Dj?
zc;|nSnY0i?2kz7P3*O#|4-PjKt{U0U(mL~nOQ)&31v^UoyN7xfpxCg_?ao_$8H|Y2
zT3xi;-F(_u*Ld{2sLumft<$m#4|l!MF-E`~s~J8crp=5?@%v9S*rhdp`@`KDy96fp^?Ts+GH<7oTi$^#)++$Z6-1rb
z;NQNWa8I30+xbSLR}3#|W*Z%I+9ZU>Jsmq_N#g;ki5*9M&1zQf?X`Zv*E}-bo*6h<
zweFYo4?Cr3^)C5u=9rlvyA*Z!;qp~W%nMG&s&~F7K6Up{lj9c;9By}{$aMVuyXT{vEC`L!w0%e+{(ZB}nKZukDv1fMPbolq*mda;*Fw6zzT#V->o8;+tC5F~=W9w{aQXvQ5%@QE_LOlRM?z(nQ$uOyILc`cWX!
z17ecSt9stsVrsHmb4ZWmc}3eaUDO{myL!bay8}p5S$s=xG~%v?>Fx#(ggX0YkKbDq
zWRm2u5G0=(O?b01Ha3<&cT)cBdUU_Se2%c7c_YsABXt*ngo59Q5>DO3h<#co*Q#uI
z^TR-^P{iOI5vx$Lm*M_<#IP(eE(@^HpC>`ZA^Dr760UhhIini$xnn!u5GxyG;GYKPs6i6%s8H5
zYnRUrytRCGhReyDg#J~c|mF=}#V@1*DRBVMQWcj|36o}p!uNL%9KoV|#*>e2K=
zmmTv$qt3hrp25i_W;-&I3%=HCNc*^a(IDT3Yh9O!_eZ==Pt6=-UT>YU)+#D2VfK3US#>_e
zcZgVf@gX3ptILNZ?cI)}b!5b6S-ixS|Y%W=W^G+
z3bgV)+R`|D{6fx-p}Rp)6Vm%qxbsQZKFbQ%%x<-A-Fu(+C;jKQ+BEM=;)he4SiS4G
zt)H@d`m7yMX?d%5@3`l3wUu3%)6#XqPnw~l=UwkL-g(Qi7tb>m#AWP%!>c!V%<9ca
zb*AeW?wkF1`E!T$m%7LIc<(sa{>G<)HwHG~=aig(GIn?!_Kndh#p3|>GJw|THK@2b
zW%o1|;FKq4Psw{~gx5k$zj?6F<8d1t?D_{Ba3A>bmG1mwd5_-coYZo+>ur|hKg}k&
z#?lt~7SHFJ|F6f2LtA=nxVCz9^6I{G3PORSJg#uD@uO!w9t_Sa=@BZf`KZ^)Jrf={
zwK4k8Z`0X1h6|eQ%I#Ih@k37N>;lffn0I?;ra4WsS-)ZVsxc321gYyAcAvG~b<&W6
z5%#P`c_6Wz(p6X=xLI6#vujCi@gM6tEM3pdjoh+4vsKg{J-*-dW6P&FY+ipbcgCu{
zc8B*Hy4)2n(2Tsf^uj$8)lj!d7h7U&Z+h`+ZWc1$E-AbspM9
z9GZCNRJL6~PRYo{F@2bO#=UVVIIE#G#&L5pZD?onp~o*q*|Z$Dvk!Q0+fG}1I84aN
ziX8doM%vb;>pyrkjk&wFYsqTQukTA%s?ztRi?80<7&$Pd$-V#A
zyh+U1_4x%;W3G13&Ku@(cb(7F(F+%LUAo1&+vcG|*Z6%*Kfc-qq#d5W&pFd90fflg
zhs7S>b@Ptlvw0tS@XouuXN0)i1zh%Cefw+QA$h0kg-tNs8>t%pVdbk$pErcQ@L0NO
z*4Cst+qay}&7J&q&lW44?q0%K4-fBj^6r-Vb@Q%6v)7$98Q|P!>$Pr^HcuMZqyLq-
ztgE*d9MiuDCi=~b^xng7W?Wn~DlWZSc2X99ksE)F|Kcc}z0YGZ7XgH$SFNFE$35FN
zkp3xZbIyZ}ebt+1_h~hySv!9h7riDLoY^f`>77cN-f6_h)hEU`N>9JA>u}S%4NYpT
zIjj$oW`h6iJCp{z-}%cr;;G%x&IQjpMaH=Fyq%SQ;H%ZJe6tguZwJlF*d)A|f5qg;
z&hAqKU0VY%e$2QO@f;9c6y5=zCrC)2Xc}2qdo=B7tgjrtj=rzcBAk?M{=A)*pWHnEH`mSGWIlcjjCf43zgvP=hxjjp5e|Fytj?VQ0XQ
zU7HD2#(sJ*Cu7$noyp(sw(s)1Z%1JHcj^G>JtBReV=|}-=rK9i0j6dFlS$u*@wFl;))!6E$aD2HAiai5`S
z{zvVu4eK6i5nh~Q3}<0lhpFdU4?4L+<5<({8^_F^F=IyTyH!oV*%|RQpj*6g%+dA}
zGY^G6-M2LGU5^#fz0Xo_d@p0>76$XD{fewtv2Fd(U_xO
z@5UG1UKkm2G(~h8yr{p!l&FoD)b1VsZ)|e7`?+gP9i5#1dvN5#xT0w8h$G=0XEf;)
zRa6w3-8GY-XNmvv6B1a}B0)a`B$_w7@Gh?oW8x+cG_*&mb_t?Lg3A;gNXv{sr$<
zkqww$lhblG?%u6#XJ-e1`4f-(cY3_^_`w60O1$eqdVpz;fwp9>MV;1>*?l?UoWQi&
zC)D7CGz1e8oZ|P*q3ii~SC6;u(xb=uM}8h2TKb=E4YCf&Go4fz3*X9O;fXoNCuz7%
z`V_YyJr%3sxH}uf4x<({Ha`-+c;?lW%@WKGgC{eq)x~Pu-2=yFt@`rW`eJc?F<~G3t!t7oV5DYrIvx1WX(9TFXL20c@7mxB9f6!2m43ycYKf5_>hvmk
za(({%sB9g8rh0%pkbfi2jf-1#WAYvxdYN8gacNTVKzxE~{hzY8j*7DT-iIF%Bot)m
z?vPdqX;5M)K^mk4L|Ph#?v9ZLDJ3KY5$Td{kS=MYW9Ww88T>roTJJxvYZi-zGxvR;
zbN1P1?`vPz9@Okt&u{Kr*{*K81p8DvMgw~tK^9QXAH3dbjLY*{uN+t!%a{vIn|66^
zmA3M??j$Ja``*;4%{$@hexeLfQ&U}3l>hC$(80a7|4G*SyRpE}AGolMI%=+Z
z1XlE|Db@l~m^pFp7K1J(!vl(t>&7d}Ou?LTvc)}3gQl(<&*@Y43+OnFar=X<`t{sA
zE_TPTOaL1%6zSNsT?={po~GARh-vQ5ZsVmf*WR7IyqhWHWItVUI(Kq5H$TzCm1T6m
zbm+Z(^5}Kl1VV2|iR>vsc}EnPC;^A=8cFOV%Z#_CXvf=k72w$W&|m?lYWZv+w(Oh_
z8&=%S-D$-{?-?mw67H9%Dpuv?^lXq%*!StM&9D)Qvgw^T*c%y!FQuq9V&|iF|6Rzx
zCAfFghVsC!W4*7+Bp!73c*D`DUtFiyR1sk8_`v9?gfUr42akftBi<|arl4zbRTk^}
zNbwwcE=Z+b4rl1jZf<7Z(?L|>!yBR))uvSu0UN!6@Hy}EO&zVnYa{lg8?Vy`VaT}P
zn}{zCBWw{}gT};+aIuCb^K%0vGO{luEKJ-{fxtpZ>a&L>orN=V<$CtZBz_!Xg)~3N
zD*XA5wt$SRqWD`$$&_PKoc5HDDa!iFk@Ow=rRG>%TZf-M9Jyp%5xtw+VuE6?O1X&9-`
z`0!^ga_L0as7m*g^9s!+?Ve*t?QpalI|Z8Mp@O!lj?9Gq94=4gR(=*EBR&;4xHj#0N<6FShbKSNP$OVXZ*1hHJs=>v3;X>)v8i
zI91M|YQCEo?>IsoHKdNlrDlx|Sl%D@oR0@&NBpSG`iN`=cIitWxqr#LDzpC~^Y*1P#FfaM
zOw6kKJD_LyLsvutSC%P?IJ;YYhs=~J5$;@=hq9JG10r5Xsx2=qxbL>~QyRFr<~sFx
zK!l;`?nENqN1q^r(TOkhtiX^~c-XJnUc)KY{h$Hj#}ByT*+uIKPa)nh8KYD+cLreI`vryyFP@(aW^f!r!uN_4xFvj
zu5w#Se67c7p;~R6Yitu!;{KsEq2We8V_p0|7UkPdcu_`I}GWq(=7)GOa^y;*vP
zJ2yXh^@V|(8(G@9E}bl>=2Q{z(!o+A4Kf9*U1qi)p*(NGpO@&LzRUD0-+|d
zKdkVZ%RqzA_TVl5?^KS)3RGB1a})&v|FWbX`OvwoG}hcV>8U-9pJg&yEv;6XA8;2$
zrx;BJmF(M6=J&SRAg1|YdOIz*N3-K+6+f;*iUfPtbo|RZjQw}QCEbguUiqQ#dVbQA
zf^)gUE<+I^T0>|-xfpP6Z(U5b&|jylu)0vhThANxulhq*p$Hfo?C?s~!p^;5XoaPD
zsUS1mbs&z7cBS{Bdg;LAU9%|E^=o-|z_T-5N_0Pz!9#=RG#;DvK14wl#^|y#5zEFL
zEcJEBqx4ffX293^buS4~X>Xy9LLNTNUySwJ;og6i*6JuSR-}d|W<75xd#4j9zD-mc
ziStUYjx2;!XbtJ+ixPjXlDulUkcrg~Wn&h=RjsHo03tuIlbdl;H86qHI-jT1b@-2$XN^=sZa
z74A>JYs%U++;@Hm&t~4Zh%>EsF^
zgBC=TxE5f)wI>{GV_ly5(#|T|4sXDftR$`*V>}CH`EFXICh(o!|1`G(HMRNf=cCeg
z{;tyvie0K1yPQ#ck{`}pEUaRm;4wxqJmo3Yp(IP8e{-#f$qZ4oj~|s!zk>Xh>(j+e0bw_%Z~IQ0xO2HKg{;1PW0-lR&S05I%^iOmb@U68z%frLLJIEFIAd;6SCTBv?-JfeYw<(UkST8
zu*r@h9k#HE6j(GY9$Hb2u=iQv4_CLEvwA|*bdU2V
z6hdwkCD+HiY`uf`kg@xz6b(C?(u&Ld$H(6vMAYOJ8oWA8;RBrf%~`e=3t}R~!XGZO
z$)-)j&d{zsXb=rj-xN0VgX5tyxe+r0-+$X`OZrQC2bqqNve?+mvZks9;5RXB;~Bld
zAP)_CG%c5>Hr_(}cGQ8r@OQyWh={jv#
zf98|f-yM8oco;dZwv1+98DCg3yutQ82ztS!Z60{bLZ-n5mH3MR
z#W(FvSC><@#_t}vyAs_PQb%3)3*9MmYI-wK-nIva2d?~O
z3~yXexa#-Gt){9-Z!S8-rf1a8p&(*dL2Xt+EMG1^Dt0%^v+Gyes~%W0=Y`D363_
zIj|GaJ8XO&`m^j_4worcXR+!>g>#D?ItsfS7lk2YsGs2oM=|eKw-5!#x^a(NNgyNt
z1gM?U~%un8(
zk{H;SxFa8g1}K#3j*f*rs$iJ2I^Q1u!&VoMH0>gKVA{-D>QfEBYq3whZUn3(Z1mOn
z$Uhbz0JwAx(k?6J&L`J2vw^O-Ek*;@9
zv9v>IzEE%U!p!kM?6JTP9geqmdXe&zI%2IK3iEEt{6&RP50XU;Hsn0K4XT;@03V#|
zZ~7KGo0hR;rBPYL`jhykyJOH*K>wv68eB7J>MI0IsrK_bC^AL%35{i%-?`Xn%^BvR
z$3~9s8=>zvLSei5!f3%i1S`LVJ4+5=%a>_qB|9%B_c`-KlIBOVl@YMO{24N3#1C={
z=UYBEZ0YMbVlO5~>+y}}5OR+0=s+-Zn&{(HnTY6}%&HgRjsCu9^EbdH6h6
zTGSDF5{iF%sjm%yM+{ciQGhJRvc2HbluNKVaNWe4o>E{G^
zjv$4vsW&&V!OkU?_hrkCb5*IsPq0RxpMR3TOs`0fIVKW5_swqOjSh(lu%j_Ew?=Ju
zDr{N2+M7_rb(dC?&g9wnsVrc2n5Ktm>x8?Wm+IE>Icw-kHWS|6Ve$y~jzcoTXyp@4x_b@Yzg(hAO}Jd?bi%6c
zBXn$~`RZy=IzIA)EIY4px8V7~Qht14wy1U^`xC(|vHBao0-5D_X#vUjklZ)h7l65L
zYd1`QJXDh~kh0V;-k!&HTY-;QS(&JB_XE(?Qr*w}QjZ3wir$k#$mm=Z+%oKz1{pth
zh4ji;q^57IC?G<_$-Fg0;^y((3(_#CTv(il=v*wr{BrAM%D{Deqh#jCrfsBOzb2z{
zU+#+H_lQacb{T*~5&5;ZIbiQjO6)LjT>PLz!)oC1Z(TE4YOVZ^@-~QT!Mm!yncYr_
zl{C%Mr+9^8Hw2nD1MpItU9S{ZsLO3g3MdX#_cbNmv8V`@2Ty(hN6Vm6R!u{ca^EOI
z?OFBP62K^e4)sa)hD3TYDoZ~>QD39^B_d29OvK6l@s?7Z0y)hnEat%>uis8EnCRuM
zxjk}Bf!ep!R{cliys|Q7evF1TwPD0^PiBIM2%XmQ1qjj2TnT^|ozXCkwRpclRhpIi
zBY(j2>(EMpTxwwF%ts?R9rx&WVo2+X{E03<+oB2=i!8Sk-8GX~g&|D~NQ8lDjRDhg
z(Ni+VZ)&*@^q{P_Ze$Zv_b`&B>biXsam7~reU_gCplTn)$pI2oipvT<_L$?bRFm-?
z$&a^h$M+qd^B|~;o^-NOm?XSF
zmjaV~hJ61|BvpDKqVD~3q%z3aT&8K5GieeN+!P&I*f}9bk8IfAx1=;1p$}8FSZd{PQ9RIaXDZoOzHHlq)m6
zmXnot`XOFoE@Jtb@?OETgL8kB`qjM+7n(K_K#<58xBwF*GjE9^b~*UacJ@P46@URl
z{$2_ok?_D_FnZeFn~$0ErRglUlj9?XO60{)?T`Vxw>egPvg$m0yNb_|avWMWZ&bLX
z{qoOOPM*=!mZ@1;bi(#gif$hG7UcAZ>seT*tHAl0kbOo?2SuAQAKwbp_)_~QB;4Y?
z33O+4V0Lg|axce#yBVVGlr+=WzNXZF5E^FIKDAW(!C$crDnP(1i1*$z`dUl`FBtfO
z_NM&W@GNZ32Hgn6>5Fh{-X!&-HOd7DLT+cE_>T%fM
z6(M&Z)Y_phrOBodMQ#!pp%}@P9GEfAOydXSsu3CZfW7)mxUe4=-9BsZzj
zA(->>oDc#Sn(escqeliOg&w$1a?T0oj^t0&+eg^Sn-IHdPT8~pQE^a#_H4MZ%)5Gt
z5f<|BV`k(Ilm6>;E@=_J?<*Pitb9G~$+%t_*sco(zNq6H&+@-q08~FY0KLDs1dr=5
ze%?R{5w7{?)nLjIv(b~MxwDYHSA*$)y^JLgkKk?kkGre$6yCTOkp>SQ{)1UNSzx#A<7v>Zo
z?(C^|4bb3{j)!JpwHR=V+LfdePz;yqPmJROhy?t1Rr88XK(6NNEc!GhNdkJ{P84q4
z35b)B5`Ea3Cj8BCd*2&17
z>>BfI{1qS?`bK!`v|BWrvNUyK3*NfRWe#$e4?Yt7RjHa~hXXH2_@XkyaJ~RS!Ut07
zda)N#2~b**&y%>X&;|onyaoN@&MhH!l7}+kQ4VaC1hb
z54&dcXt3jDic0f~iHWf#q3Qq=`Rn=ZHTDRE`RTE;`-h(-7FXcb%Bun-|9P?}FqZ&)
zU*#<5;)zY2Uq}ds)$9*K@DH~4tWnC@nW&%qDTz(LJ6mc{Pbqk>BQA1P6nXxSOI>?~
z3KGeS@Hr`a`=?=HVILC4Fh@t#LqbEJdwPmA9BfZ^ibcThK?)o;M39Vj6@AZq-z
zfWdBXM(w!$(SKvZ!fNJwU&s~$dBFL;0&)-Ig)KBz*s@iM-ONVB_Ypn3j}&4yKr%5U
zFK{=vP89ID%P*ZcfAv_n`<^fLToDL2{4&x(uBsU=VgSv%4F7;;zm4~2JTkw;9
zy}RN6s?~lix5+P~{H^uBi?uYnZ}icb7q1Z07n5Y7MkJgW4ruLB$dQ@Ho(7~;f{
zRD9axBb+9hxvrzjc7m+5vN%4*yb8khHxADuXmywIq6eA3-ifUlH~cfGMe!(-uc5G5
zY&bn6F)oQIi@Nk#y%I+PMTmd8eQr2$shFuaFJD?jQ~rhw5=Nr`qpa8ggk>!+j8(zP
z`XklJPrt;`a@Im5m`y*q->7Aqw$by6Vma|fKH|)48mu>&sL8}AXU=-{Lzob|?a7^C
zcf$^*98PHyClbppOEl8CNl0%UMz)Dm1Sh{
z?LQ(J7+X>j^}WyaG3;n_b8mFVB2{qIr@j3hx$&he8QTZblBmT4mnj=Zl_ZJF&4t5`1HVfX`Kd8%r?4EL@?`9_+3W(GZa{-vW
zbnVFVk$(C{;+pn1TiPfJrH=1^OelTqVGr##;?$O%+1me%%QXBe^XP!5;
zi84cz%kCn21Ax1tGExa#=JYX6=;3cysGyhE@XLzHVF@sFd(5EuXcUz1
z&sH2?1~39}C3Wp05WpLHmsL|O^!%eD>E;LeoY!xhmx|=4HUe6VV;fXprBC<$^`t8};vL_cqJ=&fB
zh$E+LhvADtb1oYRFLq>ZyE_+GYHC=)etbpfJ`lHwxlrlcV}->%`VgfS&nFNIrK59`
zOTiPcQ|^;4FT2Xk`X!QM@-RwH$w4#cWkC%k2mFQGi?U%>Yd17rb!^2g>rVKS*7!rV
z#KJhfvwrQjo$B#?qxvd;vTYzDgOI);pZm*zR1rY6%XsGk@*zxlW+iU126_*D>o+yk
ztnbpna9zQy^E74U;#O0laaRWinh`AUq3=3z?tCHTeZ^(!bv5Tw`9B
ze!2tn-xmwKHl^IYTU#U^yZdGy#@$jCwz1$`GsjRvXsv}oRW))V1vX{oGK>{n4%U(a
zvB5zXAgjvo<}QkPtfKLL9}9_-^0-g^B%%wm2v$y!!zKiTLGCUZ#j?%5{^eBD-=CX7
z)`o>}V%s&HPwR!7+$x-RX2T3bJmS`^J|9iM{(Q|Y7rB;kNfolq)7+k}fq*kPV4EFw
zxEA(6JFT2Pnw*Vm6waD!onviat&BL8i`bU^Ig4sv%=vR`+Wn&3r9=4XWKS8Q&7XhI
zbbxS{&wAP?H-qDiU`l;ru)(bC>*@c{PQ7NRIiI0zPClJY*%2b(BoV;D_ccx52z`Fl
zUxr2u&Z*p{elT#upa>SM^bg6bf^sPOkj{o95(0p_sT(=>=_YY-ygx&s6KNR0I{cbC_`5bY$Ir5J{oTH1~k70@D92D=_R976FrwXVIwp9M6B0sNDYxMPew25{WTyNU^
zJ5P)rKBN~uHX!$fndK&a7jK7j)_YW-eC}}9gM1q<18EkB?&kCTS+lHZH5y-nd5?^G
z7URQ#iCP!~o{J;V-$Ze8J*W5k^E0zYzcap?&&jj|5@*v23T6@}$1-msF2k9B50O0)
z#^mN)k0{^lE^aJsH3wFgBk`n)C=aGp%<@&K
zrs4D-lf;-mM10?*5x!^|K6W7QrP?}FA!5f_xV8#SO1jlSdUDW6cJ`(x`vNuJ43^(9
zuCS(Vx*-OUUTUW)Qu~kE_oXi$w~0QH^Vj<-QF4~9=&zy-G86G@DspOD6l@_
z4Zo%Mm9Nr0mJ!m{nd(NL^Ad+4-v0$W4r(}C!LaoBR{#k=&Lj1cus6xGxkU0ft)wqZ
z{s+|537{sTZ6({wtu66aA~`QV)NG7zJe}D20taT>_7dz#e3T}B>~y~lrr0^@_Y>k@sqY*6@VJ&OSkyxZ#RxmFV7{rKQ6U2yyZDq*&*;g9ahQ=*;bRy3c17GM4uV=
z0_^DQaN269WPHu>cA<2MzMYwsBwNagS@=ffr*E?e3B3)6(L`=q`b;y>fdaI3Q$_Zq
zs2csP7|L>{C>0OoarX`D#sB70-1n)wyPeC#ikGp<|jM7LJN<+uW
zR@#LtMW@Ix#r?Sc&VPU_0yQbJ#V!v}+?I?j^qM}PQ2`r@_}R;7pO+Ytx#L*}MIOD?
zt{)6m_d3sn4h&`jV*&-t9!2e0sQss}3v!z7<^P-;QDIlCY%Vh1r?de;~p}5{(Ru
z)_tWV^w6RHwE6Yv{6Xu+|A~{C8KCT4u|4a|IDS%et~KpQ|IlyMy#1TyrW%WC(A+|s48XA
zx>OF0uKD5-8g4mhlkMSH5u968wD;gnPWY!|nGT1_ciV?nAig@C&Jh7bZjjp!W!|UN
zjHXr^og-N&wWS>dS-B4sW#1^_X+89Wy8D?5A}39T2jL0>gIonbDe54sBh90bxU
zH0k;+{%HjcE&9DHxTIZq$8_N}DQ
z)nwQR1sKtpPj27Gpzg9i_x~|nl$fJ7CL-;w5Dq5i(|~cn92YW647781P~ooL(HjOz
z)d$WtQPpXj$HG^$h`{`3*0+OzaU1Jbm@75WbIJxtbAs~l(5Q22nDkA5B-IH5goly)
z=Lv^AAQVs|kU74}0~!3E;_Bdz9;xPcAX7_#d>}~u|JN>N$e<}pKT|QI*u{#K?}l+w
zy&$rhHLemAeJZ;rT{L~Wr?IDtnF>Wv;j@)&dmvz$TpH>AFq6Sy0Nvg?8&vImI~P?b
zXXO^xnmY7lnlR%MB3yioAlY@DqN4XkgZFo9N8dHjJ|6;mnD{mANCog4)k7b-K!v>9
z!cS{QJ>SADo=@HIWkGqh&>VCUU#Dn@=tX!!aSUi-_5n*3*(mQrwR=Nwj)SMsrrt)KU
zCFyqU=LedIHSg@ln&z?H)*x#7aik`#LoB^w`q3Tcp(kLq8z=65pGwi+9p!gR3*igU
zz7XvW0QYJer#3_d+#v_Y+@{8=-jD>9@*VvsFsl~$m8nvhnKJ$Q?T3x2JtF6x-KL~V
zAq!neH~J7{%eGUy{7xY?|H>ORj3bqVM9rnAIWLF)5wX5^;=IEGt7A0r6!K2z@UF8x
zLcgnRSv|89F*FdflLKH9OC6D((9mcc!>plKaLN*i+#ipYBGScC;lC{v3mu$s;3a<0
zsU8xO2lh{L`Z7ja-1|ZM1LG1Y5V@C}eR{@#TY!4$%HlwjGk7wPNBq!r(xoD}!$a#6
z&|K(A_NETv{job5e**^Bu-fDy9da(ef6m4F*k1B`R#-gSLT)Z*PL32k3kx3U`k>ae
zHt847EPT+A!0ZUce6}TP*A;`OKFW_7ru0Q@Km$={GSGkr&hs`A;fDV76ws^Ye+CvZ
zKT9%^^2hW0H5gQyr)^~(8-Ya
ztmmxA_x=0RQqO6e#2pnxK(oU6ZdO`42Nu=MYBWgGCd&gs+*R>OE`{OO9>(c~$_nsw
zgnk{u_lvuCYFKsWBj*RN5`XZN&D#;}K^FeJ41it|<0L0P0r}@+I-yvmFPRMulc$xE
zU+TUNwPV0VGP+!Cu5pYz;LjZ$d5*WI;pK(nq>H8<$7!8CLT~xEcgsu$Aj53Pc9xIz
z@FNftmPSCG$4h+}g!&@hr;*EKr{Krv7$0fdgUd(+%xK*v07{vfr#s
zM~b@xDI&r~ny8UmGx_=EPmlLys8`ctL(&X11aRITeqrZDgXfOo*6q%nwpfyg+LOe@
z#$q!U6c!GGrWzQM^TU5K7N!$*BWiaoA!2pSa6q;_?OPxB&ew`28X1%gEcac~CUf-{coVkoR
zKBck+_&g-V!oSQaNz|_H#{GFLi!4Fx3l^snX@-t?rX!jE^N#jAb0PdyACY`OmeLB`Iky5Ckjo&DARK9WE=umHjyn#@$+P9s;j5EI2^ptl(^1GeUiw9
zNo-IMa6UDUUz3e8Z?^VVGU`P<(NMT+jf4^302pyW)`Nj1;@e^kFEsc*>Ml(9xyqTv~*{c0&W=9vj_Q-8b-0SG%Cg^kF#vB
z$Ea-16p5LDKwCmaDwTaIW1gSi@jnbA>%b@NhFSI{cudG@A>)@W+{gAwc^mW`#nm7J
zvSab(6)wcdpbBjPP*Lyfyc$mJUQT^$TEN1v{nHV`&+k52OMT7{y%svLA|N6#x;Reb7#*wF{yFw_dT?T5ZER{Rjrm+UXnq{bry9YZH8`4%A-bCc^s(y5~yV-riq>!vx<`LOB{
z-}?QssS_r#9^c&2(a6sJ_p;mLg=~VG(5ba}(j1<8igZ?!sclDP#DqB?`=JRETEAH*
zMx5`wPtcBE3zwthruuO6rC63hTFU)!EmZq0;DfSmD+sW%*s{uHWeTo3}}
zD~H$u+_OvxH-bFP*T!raKtTVV0!E=U>wvCN^4fAz_ZKdJqdOShfDAB7;I6fhg)7TN
zSNSv2iLuFp9uHEVJ0POf6_0qz(%5^k-0o&q^Ent_@xR{
zd>ezF{I!q^LV*9|x5F=y6ZWmi`f=Pl7blcIeXr0X%HHBo3mRE9S02R2#RVW`2HK`h
z67S!0)|{+G0T}#~W(n2WWFa%oAIj$IEA&Xa?YTy3_mi!bSrK<-gSq3S@55y##bjop
zg>s&a?n?4C4PKn_AH5ImG#MGUGp3a~>$$Q;Gh|Al#-*C%^cS-}D(a|vrqnzmQb6rg
z61I`VMlod~qJo$(VVe30-@Tc4ui9d6{F(945qqMB6UOj8Yd17H(aqBv9zsx!lL)P3
zmETrCB%L5+QhvutL&;KE<344|;yvG&2A6m^FzTr|U;oU@zwzdX`p&=|Y~b5I_)zM1
z>&7Q*Wc0AdYx0nCDfDw*nQYIXNhQ22C~IUdByOv!3emXAitqclZLSGfO8QO_65X->sEMO(egth
z*?CRT`^dHsoHnTNFjU!^)Cu9&RDp*9?-JFg-cs1h8rbK1LVXpwCW@U%epSV`VWIeT
zB7N*@$hQ{xLsKQ
zRmLeP)08v)8$rc0_8(%?B72Jz-oA5sE-f8ID>)>XL(9uch>d-eS5)*uU7gUv!s3;V
zYIseJpoA?CAK&Nd{G5FjIyziJLLgdx)Y{rg!^ugsx3?!FBjbZnlD*8Z^v}Tlcl)@6
zcv8QD>}8%XMM6?K`LKJyH`0hT{*+7!5K&pYRbyw-WkhpsF0T@|CCXf1N8ts
zK};c3p%h)z^txBif2Lr>&a5Q+b-d>`Diy;+@jkh7d2FNeoiX25e9`;O
zD8>z^mJe9EPcG!Pe(#y+xIizEHMIbwF{XxM=%K!>rWu7S4Bng<@^SavGoNJsq+gI~
zot}{RICP@5Rq_R&dC!XVmALYOV~;`lXnYLC`ozh+Tu7*O=nLx!Er^C+$8?7{s=U2@
z45+F!PE={Nwzt1fSp%FU2;^*zp5tRmu_Hg+6|}V6jOe?jvzKI7*HCj~Cz$`L
z)b!po*t7>6odZXm1*~{97dC<}Q9-W{=p=jHVf#3vWP=(8#|U5!c!c(2aMy?M!`Nw&
ze&@jZK(uxhP%d#Z%vDVS@AE|Qo*k}k3850PU99&KqRA+2Itcv(-(i2{e8Re?xk}S35%S->r
zN)y0V^zMpHEcv%nA*(XrSYB&aNx=kvWxY9jwY_iE^)ofcvtR%K)bTYd8xHYbm;yJK
zsT_`YfKW0OGyN{QW#sDE@5a+YP4hBi!IV=#2q9&c{g(@1d`54q%A<|l-k@a%_xR%9v>j~qbA5u=Mr{BG7B5NI&KujYt)2@2hswWsBpu!N#
zyx1|*KlAwWM)oGaN<|0!rih@iE9@)mj9
zJ`6M^%NPP-jju(x-|0uEy*!p8^Wbu=vjvXJ@lp~vyG2$Zk`4|S*bFko(vttk4!`
z5UW=q|0PyR@+4CIbZN7sj7;alO2ABvIF$BHV&YRq@i2L^53>SOrPR=s=fuM2bs>+V
zMgG|+dT7e%q_>S`&j{1aDasFtb@jf3rUZ3W@u1GtOXrvqS*85jfL|r4BDzJED@hQI
zzMtCvcbWZ%m{cDb*Ob*i3F4{_+}&&icFS6#d|z)UGB|YlC8Vm~0b0XUpii29x7ypb
zx@SQ8RaZwAL7svVI@ak$26zT}`(r?)Qx;}-PRgVb=L1Q1*8WHfWOV6YKLne%+(Guu
zR9PDuM_nfV1v>gc05J%h4FCKCqW649UZ@x~ACT(x#~gtQc9E%EI1!R$h0F93S^O8i(-0SkY=nq>%;uEC
zWiC6q21F*gEfMpdx{-Os1EB)6#`d1_~lM`f>^ZO4Ew+MuNnYP?6ONhPw
zt9KJ=nR9ty(zYu;xSgFLEqR4Tc1<4+?mlhE^#`?Kxgsji5H}xPaRm>Y1nfwvRRk+L
zIba(PSStyix$r2E`7rkE`@o-8NF>;sL
zAruL3A^8&0gb9Fg>bF`!B{I-_pW77Z;-ema<$%3MT=d5DJI`ed*^UQbYE?Sd&vl%<
z6UU;m$&-RnW2IxbJWd2OaL^G8^li5d)H4RdsZq!j=6IbJenw4rZydLO=NSrs!9%XJ
z=Yka+p+wS6OV3WE`^5=_qnG51flT(N3rLO{xF7t10Oz-xRymvOzu%p3dK!x#afmuc
zsjov5oz}x?9g>t+59I5E`f&V^C@@=}$X1L2JWzetTolA024E1Ugr=R?(oOheoY67^
z^9R93qXjCLH8~i;IPuDt0YR3(U(?d%nh$HGjtSiHJa0AZdKUhI^?OzX(DwWHsf{zK
zAVzrY`jMpeZeV^dCWRWN!NUqkct_Hhx)qbT@}3`WhOcmZq+?>5GLUdc{Ri@hs{{mj
z!QbrLpuvT8S8VRDf#e;^FNet!tbBW+6EJR4d@k1HkqG!snC?n&iuzdh|>UK
z7pk8{5*&2xS;5IZ18?$T{N~(3c6pGmOF*c3>ZFT9E7?9zbVYufsZ^P4F}UNMcu>Od%=RGAP#5A;69>doqeLKPkOuQzWlF0#Be`uB&6G$b}ATZc|i`lf{BO;I99?0ec
zHy>s)FKs&KZLNTgTw$vDWPYH=7XJno6?OV9Ex#C=5{6(P6Sx{`nosJVuBnV5sqh2;
z_xG_8prtTvm)Ynd#=y>LP;|X4jTM;NfXKp){#W3(#(l|P)|!LG-Q2i)J&t&n|K!g1
zl!PVC5{VW&XtA1GHQwlX_LMNP4;M?R+pl!LTq^!#dGI9-
zh>t$mm60T_XkJ=Q6e*PxcJtyaZSZlPyK~BtiU%upYeSPk4(B}0=m#s@ksOE#fxDOi
z1YqPJ?4f2P62K_EJ`sYs{46hyMf#v|(f$Di55e|SrDRLc-e*fIk;<*sP`s^6Ypw(i
zy#TZWF(Y3;w+8B}%X3*U_g?2S+XXDJDi$R9?B5q1_}&MlY}gQd6ZFlbSHSuEbpn6@
z1zxynU`su3qy88Rw6|Xj%n1LwM5l+jMFE`~ODB!N-o;N5_ce
znvBy3baee_sK7U@*Uqm0lHP&0}IYE6Uy8=s&mn2Yl|157~|c*pRj8=3@W-u@64r
zYiC;OUjbS1!)Iv!JxmIS&=*V?Q^n&_zOZ|e$363RhKsW!`nS>6OaeS3fl^vl@c|B(qV!~R)j3*dA>)6pkNlpW
z_h|5tH1GOjy!YfTLJ6GOQP2I+=i{}xmf)4M&j;}BZ#MR|RZ^RNe&b3OR>?Rvm5%-2^FcF#s*6R%_nzIY{LYb=t%?Bg2mw
z5g}oC4kM{!bxQkJ_xL=N<)BC-y+LAwxA9lyf=e9kw7@s$KZD{@}Tnz(ei
z>M8j?9QQ>1p8g;zT);;jJK|
zhc6_s%y^_P9(9qIdh25qR-P%u-2E?;M-B6lK)_g?{&d{QWZC#rh%zMr1
zj@l_CMmoc1qbP+axpMdSUH2G?jk=ea5rdb3?T$@uwAcif;!$i|7yDX-!h*XpxWX0#
z>+S`7vrSV5>E{I%P9w*j-9Ew(>gX}+`)%}fhF>=t9Q55`chCQ)BO!NeD&c>wKMCrC
zrlqeF%J)FC@?_%04Jj4J*+Bs2PR`#BR0EOn5k}mUd~{l@f6O9Y-FY`V
zeRG-Bdga>qE=7mUfKwCW@vPuAQS0R5D=n=xTxr+=7R7fFrfj1`V~bhG4=G^k$b
zo@rIej>V>?s
zaw3g5GODL^G;Z#6QKW^l?OLynPEsCF*e94?AExm?&OOGm(bzT?`Y%s`hhX;2Q>HH|
zxg59TI`?RdswsiWX~<(*UgdMtcTd_Zi9{mu61xUOubTJ#xVYNn5P8IE@hgl=8BSx#
zqYa61&rAwu2iPS?rrs*Lu6r$Ih2WzK^0EYfB_5_l-rTZ(UDrtKrFo
zHINa}ADVsk$W{5-yKacL04eTY(QwFn^r06(K93ApSiD*|8%Gv&KvO_M<{5Ym%zG^w
z9lQVhqe%&WxX;dX<-q#vJ_8!3@s?;r%Fs-eX~#HpXFZwM^{)N9jK$4YTYtYw=Jrgu
zLyLR#huA>9Siig-W`1&1y_r5eSgXYK8+N5t>DNK==M|UR`56`IPY>x1y?~BOx?P2;
z&ZIY!li)CaCzacePkS_OIjPhU$}NP5T|wyQfriHTXe>G^uHhW4^c@b0-~w^8s$
zCwiCWdIld$-*Aau;8jM{l7a>=skDA0rxuE#`<+W0MUiX^^BW4WfGQwK0IL=%_jJY%y`ri1tO>~MG>QYKA6y^jKWu&uYId^7bN
z)^@@+xcj?7`gKl-p6NvodwZXs>Rx5@dJk8F{lw>8Ytsh2hI3cCHmT!~vid)v^X7bf
z_g$_JtPH$=Q(hTd_Q3i1YvbqIm&B3h^WWFJ5{hz`w7w=wVz?rScdk-mXnF+VcW91$
zf_m9t{42ZTX_yOoFOW3jnZPc)SWXT9ur8UXv<67hs*Omc5e`L;4=1a!CZFr)NPN{U
zBP!|j7^St+1x<`p3Uc|n!b^Zwz)0Ku*yyU}h(!1;ZkW8>ht#;Y*)i3E9wks9>_kIh
zgjui!%qt@Jmx`b(y?!0qrI=;^D0>sH;Q3ZtH^b=3-Vq7+uZ#_oU2(vE+o>!n5RVx(
zQY~O+&l?7GWZVSUv;Vtod|xrYVm5`P5_0?!>ZG>u@lch;K88+RCjg4cw9f2Z@o{K{-JVwYL43h#(XW8Lp<=?6$GJb&^DPBe
zH=zbPpq1TmfSjJ{(zScP_k+ef3^`|2
zBsc%g+K9$C@!i7Q?_H~`l-(9m90p-TdEZA?Li{EcwjS4qP>8(j)_$FG_SijiCaPZh
zf@YOnw?3{$@USF-Wb(hVD=
z8|m7BA|(h&OA9Cn64IL%q*EHCyFt37Q@XpmL%Q$W>N)Ruzx&<)_dk2H*IIMUIiB&1
zF`gQY|A51%h}c<=VR=3f#;8E?Cmbw+h)oGk0SF_~2?-3b1F2a;kSbThS?O2{YZsc%
zpcYR-be6xtuOzu83NJ2nR}{TfcG>J+S#i<9Vq9q3Dgs*7*ZsYX;HDFHUIn`Kc0c
zd*vTYgkvt9w}w;Y^shBLuK-9mL8+>smKlpS9q=7V3%%(2b3P&`^M$tR$ECTVd-CFry=B`Zc9MT$Hx1eH^#^W
zqG)jcpWpZfGfR1&Zh*sfx+hw2OGez*eI=VeE(#Q6y59Xt3bo7t1!GN4$`h*kJy0T7
zzfmRf#;!4;n-2X{Jbi{y9G&qGocZ!}2ayRSVbWXIUlM?F>=(1k>i+?mNiKj#$ez2g
zt9Ez*F?;a$|A*TH08`S4O`mYI#7_siLJoTl%CLXF6fpu+A+HX+SC?_1$s73U|E@yj
z`QV|8qk?6$mU0OME3hAm0@GfMpfoAABpB1gF`u
zc?!zwUOj+p<&B5_r!12sF9plTqukY9j}+XQZ&nKb{o4=TPnpe88y3Pd5b&EF@xKo!5yMa06wf_EZ1JEfNDS$m^>@eRL|M8Dtz#>0QF%xJJwKHc^1
z+Q(WscNtL_H{tf4FPhrnt>_~cw`*$kEe-qEQ%kxB?!MWB`*(3yjK;qu9}uPrOq2Y3
zRW5+YHt@*SB`%NY&{G9m5WD`nVucek??L30p6^dh$7{Op>{fVvyB-<-|6P0Ehu7Yp
zl*ztw8xJF+3$mlgLAi0;gUJffrB{jmv%WwH1%7>SVti~!jGNo%dr9WEch{ZN@!w`-
zChLkoQ&Ll|pxEAuhztyj7AaYX6IBx5R>pc>pYK>Q0qDy&!RPV!uvYjbyy}CKV@7`+
zk$r@ik?=gKb&U57B3iZ228sJqR2~4r1Kitmr<$d`OA%z-Sk0E3<34uA3p`S(w
zeTF^dn?sKr%yE>-spd*R;olQ)vogWuI9WTj%aT4hNcQfpvr^52PU7U2EYX=cz1w)TKZg40O7(?On@OK?il-|t8tTw
z>y16dA{g!r@}Ej*SOyVFj#g_(mJC6EkD7&F8HOJC0pG1bxc1CvT~!@#^piCGy#oSP
z3EhfnivD7GKJn7t)~3_h)+57&!dxRh^nb08r(eKbfi2v@6nI`(iMO|F5`_JZNqiou
zDaqQ1K*M)ylC&Y^{1%M)@w?AU|DVqd3xb7$9wSXOW$Nm1r=;9KVH?)Lg7ejXozTvh
zTibq@KBL0Z{rUTU4o<*X!Jp;*Pw7t^B@$hH@W-g_*E
zO9wVE1PGWQa47xwbJ}$m+f4==Wcv>Hzu#D79`GSNL7LNT=*4hJbWwIyE2KA84fre%
z)+f`33CP(Bu-@S|gyzAVs%q}?XGAW1Wf7AsS01fE4pNt8-baDLW^Am=8cqYrNK
zp30%%V%I)3NcZv0Y4I(7&ouR+xgpY4AI+XBrHlX3
zcy)b_*udkhZva#Wa&`+{pQK48oRgP?Di;VWmd^#+G$X+yt>D|A96_`0<
zkkztB;7)JU8X4sGastHI*4Ap-<5)}+-2c%&+V$$9%8N_xVtB{eWrNCbajF1pwSnBM&o&bMx4nv+
zSf^Y}lH485m^y=IV`#0=Od)>Y*P+&~kC(bgba{)bH4qys0OX=qoVfI~MzXO1(!j&imPVq(r<#_oazSHjV$
zA%AQDEq=(Xs|t&Q&E8IH?>qeeY<0lTN(+AK3o;9W=VnG;1tvidFd?&P%&rtf2>y*2jVb5D=0gPRE@n)dlNbeayH`82(#Z^T
zThVH|YV7Ylj2s;LH-oh`2?6vovgav~)-CJ^tN&fXfOmBl`HTuibkfoO#n*S;>X}vj
zEA8XsyG#ixKG<8K@j<62^@jS#Ib1NnW;rK-6>BE3Csg}!=~({*slR)sim2`kfIr`-
zPur$S`t#Hj(PTd&d;F~hs5$f7XOA2G2gW0Mev)&~+@*y670@L{8&E%#*(E9`4%=-R
zr0z0Cl513CO!;O8Dh{Dt?4qH)X{&EQ2@JYj7R1bd&L+1cto;}JO7RQyjUA4%$6XYw
zg7ptI*2ZC8?W~;F96sUIDp4T?J^9sfoP*$s_xINUDNM=7hvIm;BVnn41?K2g2CDq|
zJx}{lz@-N;n5Lq?X$o`68RRw+d{n1jD>Y@hWL9B)PeT4AdxW>bmIz
z+Q9iv=sL>E6w!yjE%{{9oKL~#JWG>(vQ~{=XnOPxR!GPRQu+YQ=6TA2*51n+HrYBs
zBFlkH=%6rn4%WjZhGd;jIXbV@6(H{dCODH3exI(d5@RTZFXGti@)RLO&+18(_x3i6
zmCJ+-Pwa~aX3E}qwChl98IXW}*h073->Xjn`HTr>!t58qcSX=9yRmw#1+~22xBY`S
zq?R1%x8kxOr-1f)_;wd+@2~VxNIIlNrASnhN*6DNVCnew8ohgz%>Hwj
zBCp5y`Bb>2cM^t0$m}5TOO1sMbJBV4hXZPqQN4`D@bNncptK7tf%mwZ>7=1iME
zxZhe${245s*H(YGc6<;Kgis-FUu<0<)tbi5o~cs~t;_y=HQ&y$1%jNyHZ5)^TLLPg
z0ylwLdp{cA*rl;gP#!lTgs$rysb7&^eg0R_7kMI@6i4Up&U|m&hUuOlhZLLn2L!Kj
z7B-JBShP~%e{f3HdABXF8l?za*h9``eY05Y5^-|$4ykvf_czQB#6=}1`i0WF-??ga
z^I<~^whv*=yc+=7qWjtK+(VL^DO!E`_o0Y_ctrt26m83T&g=Xh>t5jgwroN=+1rIE
zF{F-|so@U^1){;iEkOjh$c;m6W!*l-R8u*FqstbNDH$L%6W4(#4*4%?KhZZ=AITrh
z#763E4ZJRuV{X4$VtXI2WFTsGC4You%r2;wX1QS&Kwgo-MzyG8wwuL;u
zLH|KF`PznHjx4xZXDuDyU@@qM=FCYaxR|~lJb0)jH%%dLP8(_+3v4+rDDDI10ELKH
ze4uvI&s8Tq4|T7jA1DVt4AC*H{u{7&@TyQ?S3CiOd&RMt$bs6#gu7DMjRtiZmYO?O%%lavT8z%HF
zCojSC`xH&s0XD-cal5^z!|9|dt6&(?gY;_i!)4)&l-p0q<7G7{#U)Fxl$JFnKg<-8
z?{m;lDN%5g;FwO4v{iKBFErN6KDbJYbNllgn*u-uh2#y~XrI!!